From 28cc3639476fae72bae3836e8776966386915142 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sat, 13 Aug 2022 17:42:11 +0200 Subject: [PATCH 01/14] stage2 ARM: improve Mir representation of mov and cmp --- src/arch/arm/CodeGen.zig | 81 ++++++++++++++++------------------------ src/arch/arm/Emit.zig | 48 ++++++++++++++++++------ src/arch/arm/Mir.zig | 14 +++++++ 3 files changed, 82 insertions(+), 61 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index cefcf3b114..0c683c6899 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -438,9 +438,8 @@ fn gen(self: *Self) !void { // mov fp, sp _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = .fp, - .rn = .r0, .op = Instruction.Operand.reg(.sp, Instruction.Operand.Shift.none), } }, }); @@ -531,9 +530,8 @@ fn gen(self: *Self) !void { // mov sp, fp _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = .sp, - .rn = .r0, .op = Instruction.Operand.reg(.fp, Instruction.Operand.Shift.none), } }, }); @@ -1240,9 +1238,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { _ = try self.addInst(.{ .tag = .mvn, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = dest_reg, - .rn = undefined, .op = Instruction.Operand.reg(op_reg, Instruction.Operand.Shift.none), } }, }); @@ -1337,9 +1334,8 @@ fn minMax( _ = try self.addInst(.{ .tag = .mov, .cond = cond_choose_lhs, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = dest_reg, - .rn = .r0, .op = Instruction.Operand.reg(lhs_reg, Instruction.Operand.Shift.none), } }, }); @@ -1348,9 +1344,8 @@ fn minMax( _ = try self.addInst(.{ .tag = .mov, .cond = cond_choose_rhs, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = dest_reg, - .rn = .r0, .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), } }, }); @@ -1682,9 +1677,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { // mov rdlo, #0 _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = rdlo, - .rn = .r0, .op = Instruction.Operand.fromU32(0).?, } }, }); @@ -1693,9 +1687,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = try self.addInst(.{ .tag = .mov, .cond = .ne, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = rdlo, - .rn = .r0, .op = Instruction.Operand.fromU32(1).?, } }, }); @@ -1707,9 +1700,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = try self.addInst(.{ .tag = .mov, .cond = .ne, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = rdlo, - .rn = .r0, .op = Instruction.Operand.fromU32(1).?, } }, }); @@ -2670,7 +2662,7 @@ fn binOpRegister( defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg); const dest_reg = switch (mir_tag) { - .cmp => .r0, // cmp has no destination regardless + .cmp => undefined, // cmp has no destination regardless else => if (metadata) |md| blk: { if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) { break :blk lhs_reg; @@ -2690,7 +2682,6 @@ fn binOpRegister( .adds, .sub, .subs, - .cmp, .@"and", .orr, .eor, @@ -2699,6 +2690,10 @@ fn binOpRegister( .rn = lhs_reg, .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), } }, + .cmp => .{ .r_op_cmp = .{ + .rn = lhs_reg, + .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), + } }, .lsl, .asr, .lsr, @@ -2767,7 +2762,7 @@ fn binOpImmediate( defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); const dest_reg = 
switch (mir_tag) { - .cmp => .r0, // cmp has no destination reg + .cmp => undefined, // cmp has no destination reg else => if (metadata) |md| blk: { if (lhs_is_register and self.reuseOperand( md.inst, @@ -2789,7 +2784,6 @@ fn binOpImmediate( .adds, .sub, .subs, - .cmp, .@"and", .orr, .eor, @@ -2798,6 +2792,10 @@ fn binOpImmediate( .rn = lhs_reg, .op = Instruction.Operand.fromU32(rhs.immediate).?, } }, + .cmp => .{ .r_op_cmp = .{ + .rn = lhs_reg, + .op = Instruction.Operand.fromU32(rhs.immediate).?, + } }, .lsl, .asr, .lsr, @@ -3312,9 +3310,8 @@ fn genInlineMemcpy( // mov count, #0 _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = count, - .rn = .r0, .op = Instruction.Operand.imm(0, 0), } }, }); @@ -3323,8 +3320,7 @@ fn genInlineMemcpy( // cmp count, len _ = try self.addInst(.{ .tag = .cmp, - .data = .{ .rr_op = .{ - .rd = .r0, + .data = .{ .r_op_cmp = .{ .rn = count, .op = Instruction.Operand.reg(len, Instruction.Operand.Shift.none), } }, @@ -3418,9 +3414,8 @@ fn genInlineMemsetCode( // mov count, #0 _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = count, - .rn = .r0, .op = Instruction.Operand.imm(0, 0), } }, }); @@ -3429,8 +3424,7 @@ fn genInlineMemsetCode( // cmp count, len _ = try self.addInst(.{ .tag = .cmp, - .data = .{ .rr_op = .{ - .rd = .r0, + .data = .{ .r_op_cmp = .{ .rn = count, .op = Instruction.Operand.reg(len, Instruction.Operand.Shift.none), } }, @@ -4020,9 +4014,7 @@ fn condBr(self: *Self, condition: MCValue) !Mir.Inst.Index { // bne ... _ = try self.addInst(.{ .tag = .cmp, - .cond = .al, - .data = .{ .rr_op = .{ - .rd = .r0, + .data = .{ .r_op_cmp = .{ .rn = reg, .op = Instruction.Operand.imm(1, 0), } }, @@ -4196,8 +4188,7 @@ fn isNull(self: *Self, ty: Type, operand: MCValue) !MCValue { _ = try self.addInst(.{ .tag = .cmp, - .data = .{ .rr_op = .{ - .rd = undefined, + .data = .{ .r_op_cmp = .{ .rn = reg_mcv.register, .op = Instruction.Operand.fromU32(0).?, } }, @@ -4832,9 +4823,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro .register_v_flag => .vs, else => unreachable, }, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = cond_reg, - .rn = .r0, .op = Instruction.Operand.fromU32(1).?, } }, }); @@ -4935,9 +4925,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // mov reg, 0 _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = zero, } }, }); @@ -4946,9 +4935,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void _ = try self.addInst(.{ .tag = .mov, .cond = condition, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = one, } }, }); @@ -4957,18 +4945,16 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void if (Instruction.Operand.fromU32(x)) |op| { _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = op, } }, }); } else if (Instruction.Operand.fromU32(~x)) |op| { _ = try self.addInst(.{ .tag = .mvn, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = op, } }, }); @@ -4984,9 +4970,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } else { _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = Instruction.Operand.imm(@truncate(u8, x), 0), } }, }); @@ 
-5028,9 +5013,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // orr reg, reg, #0xdd, 8 _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = Instruction.Operand.imm(@truncate(u8, x), 0), } }, }); @@ -5069,9 +5053,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // mov reg, src_reg _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = Instruction.Operand.reg(src_reg, Instruction.Operand.Shift.none), } }, }); diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index cf749792f0..8770ef1a24 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -385,20 +385,44 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { fn mirDataProcessing(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const cond = emit.mir.instructions.items(.cond)[inst]; - const rr_op = emit.mir.instructions.items(.data)[inst].rr_op; switch (tag) { - .add => try emit.writeInstruction(Instruction.add(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .adds => try emit.writeInstruction(Instruction.adds(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .@"and" => try emit.writeInstruction(Instruction.@"and"(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .cmp => try emit.writeInstruction(Instruction.cmp(cond, rr_op.rn, rr_op.op)), - .eor => try emit.writeInstruction(Instruction.eor(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .mov => try emit.writeInstruction(Instruction.mov(cond, rr_op.rd, rr_op.op)), - .mvn => try emit.writeInstruction(Instruction.mvn(cond, rr_op.rd, rr_op.op)), - .orr => try emit.writeInstruction(Instruction.orr(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .rsb => try emit.writeInstruction(Instruction.rsb(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .sub => try emit.writeInstruction(Instruction.sub(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .subs => try emit.writeInstruction(Instruction.subs(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .add, + .adds, + .@"and", + .eor, + .orr, + .rsb, + .sub, + .subs, + => { + const rr_op = emit.mir.instructions.items(.data)[inst].rr_op; + switch (tag) { + .add => try emit.writeInstruction(Instruction.add(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .adds => try emit.writeInstruction(Instruction.adds(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .@"and" => try emit.writeInstruction(Instruction.@"and"(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .eor => try emit.writeInstruction(Instruction.eor(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .orr => try emit.writeInstruction(Instruction.orr(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .rsb => try emit.writeInstruction(Instruction.rsb(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .sub => try emit.writeInstruction(Instruction.sub(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .subs => try emit.writeInstruction(Instruction.subs(cond, rr_op.rd, rr_op.rn, rr_op.op)), + else => unreachable, + } + }, + .cmp => { + const r_op_cmp = emit.mir.instructions.items(.data)[inst].r_op_cmp; + try emit.writeInstruction(Instruction.cmp(cond, r_op_cmp.rn, r_op_cmp.op)); + }, + .mov, + .mvn, + => { + const r_op_mov = emit.mir.instructions.items(.data)[inst].r_op_mov; + switch (tag) { + .mov => try emit.writeInstruction(Instruction.mov(cond, r_op_mov.rd, r_op_mov.op)), + .mvn => try emit.writeInstruction(Instruction.mvn(cond, r_op_mov.rd, r_op_mov.op)), + else => unreachable, + } + }, else => unreachable, } } diff --git a/src/arch/arm/Mir.zig 
b/src/arch/arm/Mir.zig
index d5da7e5d4e..45f89b8120 100644
--- a/src/arch/arm/Mir.zig
+++ b/src/arch/arm/Mir.zig
@@ -166,6 +166,20 @@ pub const Inst = struct {
             rd: Register,
             imm16: u16,
         },
+        /// A register and an operand
+        ///
+        /// Used by mov and mvn
+        r_op_mov: struct {
+            rd: Register,
+            op: bits.Instruction.Operand,
+        },
+        /// A register and an operand
+        ///
+        /// Used by cmp
+        r_op_cmp: struct {
+            rn: Register,
+            op: bits.Instruction.Operand,
+        },
         /// Two registers and a shift amount
         ///
         /// Used by e.g. lsl

From 0414ef591a0cb42629d7efb5912612f689ea8910 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Fri, 19 Aug 2022 12:39:39 +0200
Subject: [PATCH 02/14] stage2 ARM: introduce allocRegs

This new register allocation mechanism, which is designed to be more
generic and flexible, will replace binOp.

---
 src/arch/arm/CodeGen.zig | 392 ++++++++++++++++++++++++++-------------
 1 file changed, 266 insertions(+), 126 deletions(-)

diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 0c683c6899..c10e0bb78d 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -2232,7 +2232,13 @@ fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {
     return self.finishAir(inst, result, .{ un_op, .none, .none });
 }

-fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool {
+fn reuseOperand(
+    self: *Self,
+    inst: Air.Inst.Index,
+    operand: Air.Inst.Ref,
+    op_index: Liveness.OperandInt,
+    mcv: MCValue,
+) bool {
     if (!self.liveness.operandDies(inst, op_index))
         return false;

@@ -2580,39 +2586,206 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }

-/// Allocates a new register. If Inst in non-null, additionally tracks
-/// this register and the corresponding int and removes all previous
-/// tracking. Does not do the actual moving (that is handled by
-/// genSetReg).
-fn prepareNewRegForMoving(
+/// An argument to a Mir instruction which is read (and possibly also
+/// written to) by the respective instruction
+const ReadArg = struct {
+    ty: Type,
+    bind: Bind,
+    class: RegisterManager.RegisterBitSet,
+    reg: *Register,
+
+    const Bind = union(enum) {
+        inst: Air.Inst.Ref,
+        mcv: MCValue,
+
+        fn resolveToMcv(bind: Bind, function: *Self) InnerError!MCValue {
+            return switch (bind) {
+                .inst => |inst| try function.resolveInst(inst),
+                .mcv => |mcv| mcv,
+            };
+        }
+    };
+};
+
+/// An argument to a Mir instruction which is written to (but not read
+/// from) by the respective instruction
+const WriteArg = struct {
+    ty: Type,
+    bind: Bind,
+    class: RegisterManager.RegisterBitSet,
+    reg: *Register,
+
+    const Bind = union(enum) {
+        reg: Register,
+        none: void,
+    };
+};
+
+/// Holds all data necessary for enabling the potential reuse of
+/// operand registers as destinations
+const ReuseMetadata = struct {
+    corresponding_inst: Air.Inst.Index,
+
+    /// Maps every element index of read_args to the corresponding
+    /// index in the Air instruction
+    ///
+    /// When the order of read_args corresponds exactly to the order
+    /// of the inputs of the Air instruction, this would be e.g.
+    /// &.{ 0, 1 }. However, when the order is not the same or some
+    /// inputs to the Air instruction are omitted (e.g. when they can
+    /// be represented as immediates to the Mir instruction),
+    /// operand_mapping should reflect that fact.
+    operand_mapping: []const Liveness.OperandInt,
+};
+
+/// Allocate a set of registers for use as arguments for a Mir
+/// instruction
+///
+/// If the Mir instruction these registers are allocated for
+/// corresponds exactly to a single Air instruction, populate
+/// reuse_metadata in order to enable potential reuse of an operand as
+/// the destination (provided that that operand dies in this
+/// instruction).
+///
+/// Reusing an operand register as destination is the only time two
+/// arguments may share the same register. In all other cases,
+/// allocRegs guarantees that a register will never be allocated to
+/// more than one argument.
+///
+/// Furthermore, allocRegs guarantees that all arguments which are
+/// already bound to registers before calling allocRegs will not
+/// change their register binding. This is done by locking these
+/// registers.
+fn allocRegs(
     self: *Self,
-    track_inst: ?Air.Inst.Index,
-    register_class: RegisterManager.RegisterBitSet,
-    mcv: MCValue,
-) !Register {
-    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
-    const reg = try self.register_manager.allocReg(track_inst, register_class);
+    read_args: []const ReadArg,
+    write_args: []const WriteArg,
+    reuse_metadata: ?ReuseMetadata,
+) InnerError!void {
+    // Air instructions have either one output or none (cmp)
+    assert(!(reuse_metadata != null and write_args.len > 1)); // see note above

-    if (track_inst) |inst| {
-        // Overwrite the MCValue associated with this inst
-        branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
+    // The operand mapping is a 1:1 mapping of read args to their
+    // corresponding operand index in the Air instruction
+    assert(!(reuse_metadata != null and reuse_metadata.?.operand_mapping.len != read_args.len)); // see note above

-        // If the previous MCValue occupied some space we track, we
-        // need to make sure it is marked as free now.
-        switch (mcv) {
-            .cpsr_flags => {
-                assert(self.cpsr_flags_inst.? == inst);
-                self.cpsr_flags_inst = null;
-            },
-            .register => |prev_reg| {
-                assert(!self.register_manager.isRegFree(prev_reg));
-                self.register_manager.freeReg(prev_reg);
-            },
-            else => {},
+    const locks = try self.gpa.alloc(?RegisterLock, read_args.len + write_args.len);
+    defer self.gpa.free(locks);
+    const read_locks = locks[0..read_args.len];
+    const write_locks = locks[read_args.len..];
+
+    std.mem.set(?RegisterLock, locks, null);
+    defer for (locks) |lock| {
+        if (lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
+    };
+
+    // When we reuse a read_arg as a destination, the corresponding
+    // MCValue of the read_arg will be set to .dead. In that case, we
+    // skip allocating this read_arg.
+ var reused_read_arg: ?usize = null; + + // Lock all args which are already allocated to registers + for (read_args) |arg, i| { + const mcv = try arg.bind.resolveToMcv(self); + if (mcv == .register) { + read_locks[i] = self.register_manager.lockReg(mcv.register); } } - return reg; + for (write_args) |arg, i| { + if (arg.bind == .reg) { + write_locks[i] = self.register_manager.lockReg(arg.bind.reg); + } + } + + // Allocate registers for all args which aren't allocated to + // registers yet + for (read_args) |arg, i| { + const mcv = try arg.bind.resolveToMcv(self); + if (mcv == .register) { + arg.reg.* = mcv.register; + } else { + const track_inst: ?Air.Inst.Index = switch (arg.bind) { + .inst => |inst| Air.refToIndex(inst).?, + else => null, + }; + arg.reg.* = try self.register_manager.allocReg(track_inst, arg.class); + read_locks[i] = self.register_manager.lockReg(arg.reg.*); + } + } + + if (reuse_metadata != null and write_args.len > 0) { + const inst = reuse_metadata.?.corresponding_inst; + const operand_mapping = reuse_metadata.?.operand_mapping; + const arg = write_args[0]; + if (arg.bind == .reg) { + arg.reg.* = arg.bind.reg; + } else { + reuse_operand: for (read_args) |read_arg, i| { + if (read_arg.bind == .inst) { + const operand = read_arg.bind.inst; + const mcv = try self.resolveInst(operand); + if (mcv == .register and + std.meta.eql(arg.class, read_arg.class) and + self.reuseOperand(inst, operand, operand_mapping[i], mcv)) + { + arg.reg.* = mcv.register; + write_locks[0] = null; + reused_read_arg = i; + break :reuse_operand; + } + } + } else { + arg.reg.* = try self.register_manager.allocReg(inst, arg.class); + write_locks[0] = self.register_manager.lockReg(arg.reg.*); + } + } + } else { + for (write_args) |arg, i| { + if (arg.bind == .reg) { + arg.reg.* = arg.bind.reg; + } else { + arg.reg.* = try self.register_manager.allocReg(null, arg.class); + write_locks[i] = self.register_manager.lockReg(arg.reg.*); + } + } + } + + // For all read_args which need to be moved from non-register to + // register, perform the move + for (read_args) |arg, i| { + if (reused_read_arg) |j| { + // Check whether this read_arg was reused + if (i == j) continue; + } + + const mcv = try arg.bind.resolveToMcv(self); + if (mcv != .register) { + if (arg.bind == .inst) { + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + const inst = Air.refToIndex(arg.bind.inst).?; + + // Overwrite the MCValue associated with this inst + branch.inst_table.putAssumeCapacity(inst, .{ .register = arg.reg.* }); + + // If the previous MCValue occupied some space we track, we + // need to make sure it is marked as free now. + switch (mcv) { + .cpsr_flags => { + assert(self.cpsr_flags_inst.? == inst); + self.cpsr_flags_inst = null; + }, + .register => |prev_reg| { + assert(!self.register_manager.isRegFree(prev_reg)); + self.register_manager.freeReg(prev_reg); + }, + else => {}, + } + } + + try self.genSetReg(arg.ty, arg.reg.*, mcv); + } + } } /// Don't call this function directly. Use binOp instead. 
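To make the calling convention above concrete, here is a minimal sketch of a
two-operand caller (hypothetical, not part of this series; it assumes the
surrounding CodeGen.zig scope with Self, ReadArg, WriteArg, gp, Instruction,
and mirrors the binOpRegister rewrite in the next hunk). The registers start
out undefined, the arg arrays point at them, and they only become valid once
allocRegs returns:

    fn addRegisterExample(
        self: *Self,
        lhs_bind: ReadArg.Bind,
        rhs_bind: ReadArg.Bind,
        lhs_ty: Type,
        rhs_ty: Type,
        maybe_inst: ?Air.Inst.Index,
    ) InnerError!MCValue {
        var lhs_reg: Register = undefined;
        var rhs_reg: Register = undefined;
        var dest_reg: Register = undefined;

        const read_args = [_]ReadArg{
            .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg },
            .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg },
        };
        const write_args = [_]WriteArg{
            .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg },
        };
        // read_args[i] corresponds to operand operand_mapping[i] of the Air
        // instruction; the order matches here, so the mapping is the identity.
        try self.allocRegs(
            &read_args,
            &write_args,
            if (maybe_inst) |inst| .{
                .corresponding_inst = inst,
                .operand_mapping = &.{ 0, 1 },
            } else null,
        );

        // All registers are valid now; dest_reg may alias lhs_reg or rhs_reg
        // if a dying operand was reused as the destination.
        _ = try self.addInst(.{
            .tag = .add,
            .data = .{ .rr_op = .{
                .rd = dest_reg,
                .rn = lhs_reg,
                .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none),
            } },
        });

        return MCValue{ .register = dest_reg };
    }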
@@ -2632,50 +2805,33 @@ fn binOpRegister( rhs_ty: Type, metadata: ?BinOpMetadata, ) !MCValue { - const lhs_is_register = lhs == .register; - const rhs_is_register = rhs == .register; + var lhs_reg: Register = undefined; + var rhs_reg: Register = undefined; + var dest_reg: Register = undefined; - const lhs_lock: ?RegisterLock = if (lhs_is_register) - self.register_manager.lockReg(lhs.register) + const lhs_bind = if (metadata) |md| + ReadArg.Bind{ .inst = md.lhs } else - null; - defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const lhs_reg = if (lhs_is_register) lhs.register else blk: { - const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: { - break :inst Air.refToIndex(md.lhs).?; - } else null; - - break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs); + ReadArg.Bind{ .mcv = lhs }; + const rhs_bind = if (metadata) |md| + ReadArg.Bind{ .inst = md.rhs } + else + ReadArg.Bind{ .mcv = rhs }; + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg }, }; - const new_lhs_lock = self.register_manager.lockReg(lhs_reg); - defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const rhs_reg = if (rhs_is_register) rhs.register else blk: { - const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: { - break :inst Air.refToIndex(md.rhs).?; - } else null; - - break :blk try self.prepareNewRegForMoving(track_inst, gp, rhs); + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, }; - const new_rhs_lock = self.register_manager.lockReg(rhs_reg); - defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg); - - const dest_reg = switch (mir_tag) { - .cmp => undefined, // cmp has no destination regardless - else => if (metadata) |md| blk: { - if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) { - break :blk lhs_reg; - } else if (rhs_is_register and self.reuseOperand(md.inst, md.rhs, 1, rhs)) { - break :blk rhs_reg; - } else { - break :blk try self.register_manager.allocReg(md.inst, gp); - } - } else try self.register_manager.allocReg(null, gp), - }; - - if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs); - if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs); + try self.allocRegs( + &read_args, + if (mir_tag == .cmp) &.{} else &write_args, + if (metadata) |md| .{ + .corresponding_inst = md.inst, + .operand_mapping = &.{ 0, 1 }, + } else null, + ); const mir_data: Mir.Inst.Data = switch (mir_tag) { .add, @@ -2741,43 +2897,33 @@ fn binOpImmediate( lhs_and_rhs_swapped: bool, metadata: ?BinOpMetadata, ) !MCValue { - const lhs_is_register = lhs == .register; + var lhs_reg: Register = undefined; + var dest_reg: Register = undefined; - const lhs_lock: ?RegisterLock = if (lhs_is_register) - self.register_manager.lockReg(lhs.register) - else - null; - defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const lhs_reg = if (lhs_is_register) lhs.register else blk: { - const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: { - break :inst Air.refToIndex( - if (lhs_and_rhs_swapped) md.rhs else md.lhs, - ).?; - } else null; - - break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs); - }; - const new_lhs_lock = self.register_manager.lockReg(lhs_reg); - defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const dest_reg = switch (mir_tag) { - .cmp => undefined, // cmp has no destination reg - else => if (metadata) |md| 
blk: { - if (lhs_is_register and self.reuseOperand( - md.inst, - if (lhs_and_rhs_swapped) md.rhs else md.lhs, - if (lhs_and_rhs_swapped) 1 else 0, - lhs, - )) { - break :blk lhs_reg; - } else { - break :blk try self.register_manager.allocReg(md.inst, gp); - } - } else try self.register_manager.allocReg(null, gp), + const lhs_bind = blk: { + if (metadata) |md| { + const inst = if (lhs_and_rhs_swapped) md.rhs else md.lhs; + break :blk ReadArg.Bind{ .inst = inst }; + } else { + break :blk ReadArg.Bind{ .mcv = lhs }; + } }; - if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs); + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + const operand_mapping: []const Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0}; + try self.allocRegs( + &read_args, + if (mir_tag == .cmp) &.{} else &write_args, + if (metadata) |md| .{ + .corresponding_inst = md.inst, + .operand_mapping = operand_mapping, + } else null, + ); const mir_data: Mir.Inst.Data = switch (mir_tag) { .add, @@ -2983,33 +3129,27 @@ fn binOp( if (std.math.isPowerOfTwo(imm)) { const log2 = std.math.log2_int(u32, imm); - const lhs_is_register = lhs == .register; + var lhs_reg: Register = undefined; + var dest_reg: Register = undefined; - const lhs_lock: ?RegisterLock = if (lhs_is_register) - self.register_manager.lockReg(lhs.register) + const lhs_bind = if (metadata) |md| + ReadArg.Bind{ .inst = md.lhs } else - null; - defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const lhs_reg = if (lhs_is_register) lhs.register else blk: { - const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: { - break :inst Air.refToIndex(md.lhs).?; - } else null; - - break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs); + ReadArg.Bind{ .mcv = lhs }; + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, }; - const new_lhs_lock = self.register_manager.lockReg(lhs_reg); - defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const dest_reg = if (metadata) |md| blk: { - if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) { - break :blk lhs_reg; - } else { - break :blk try self.register_manager.allocReg(md.inst, gp); - } - } else try self.register_manager.allocReg(null, gp); - - if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs); + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (metadata) |md| .{ + .corresponding_inst = md.inst, + .operand_mapping = &.{0}, + } else null, + ); try self.truncRegister(lhs_reg, dest_reg, int_info.signedness, log2); return MCValue{ .register = dest_reg }; From 86dd123392c8ab26432303ff2e5c96e73d747757 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sat, 20 Aug 2022 22:29:52 +0200 Subject: [PATCH 03/14] stage2 ARM: move cmp to new allocReg mechanism; remove from binOp --- src/arch/arm/CodeGen.zig | 270 ++++++++++++++++++++++++++------------- 1 file changed, 181 insertions(+), 89 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index c10e0bb78d..93db3dd76b 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1315,7 +1315,13 @@ fn minMax( // register. 
assert(lhs_reg != rhs_reg); // see note above - _ = try self.binOpRegister(.cmp, .{ .register = lhs_reg }, .{ .register = rhs_reg }, lhs_ty, rhs_ty, null); + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = lhs_reg, + .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), + } }, + }); const cond_choose_lhs: Condition = switch (tag) { .max => switch (int_info.signedness) { @@ -1473,7 +1479,6 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); try self.spillCompareFlagsIfOccupied(); - self.cpsr_flags_inst = null; const base_tag: Air.Inst.Tag = switch (tag) { .add_with_overflow => .add, @@ -1493,7 +1498,13 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits); // cmp dest, truncated - _ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null); + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = dest_reg, + .op = Instruction.Operand.reg(truncated_reg, Instruction.Operand.Shift.none), + } }, + }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); @@ -1578,7 +1589,6 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); try self.spillCompareFlagsIfOccupied(); - self.cpsr_flags_inst = null; const base_tag: Mir.Inst.Tag = switch (int_info.signedness) { .signed => .smulbb, @@ -1598,7 +1608,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits); // cmp dest, truncated - _ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null); + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = dest_reg, + .op = Instruction.Operand.reg(truncated_reg, Instruction.Operand.Shift.none), + } }, + }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); @@ -1608,7 +1624,6 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); try self.spillCompareFlagsIfOccupied(); - self.cpsr_flags_inst = null; const base_tag: Mir.Inst.Tag = switch (int_info.signedness) { .signed => .smull, @@ -1672,7 +1687,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); // cmp truncated, rdlo - _ = try self.binOp(.cmp_eq, .{ .register = truncated_reg }, .{ .register = rdlo }, Type.usize, Type.usize, null); + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = truncated_reg, + .op = Instruction.Operand.reg(rdlo, Instruction.Operand.Shift.none), + } }, + }); // mov rdlo, #0 _ = try self.addInst(.{ @@ -1694,7 +1715,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); // cmp rdhi, #0 - _ = try self.binOp(.cmp_eq, .{ .register = rdhi }, .{ .immediate = 0 }, Type.usize, Type.usize, null); + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = rdhi, + .op = Instruction.Operand.fromU32(0).?, + } }, + }); // movne rdlo, #1 _ = try self.addInst(.{ @@ -1725,8 
+1752,6 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); const result: MCValue = result: { - const lhs = try self.resolveInst(extra.lhs); - const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); @@ -1742,28 +1767,107 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { if (int_info.bits <= 32) { const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); - const lhs_lock: ?RegisterLock = if (lhs == .register) - self.register_manager.lockRegAssumeUnused(lhs.register) - else - null; - defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg); - try self.spillCompareFlagsIfOccupied(); - self.cpsr_flags_inst = null; - // lsl dest, lhs, rhs - const dest = try self.binOp(.shl, lhs, rhs, lhs_ty, rhs_ty, null); - const dest_reg = dest.register; - const dest_lock = self.register_manager.lockRegAssumeUnused(dest_reg); - defer self.register_manager.unlockReg(dest_lock); + const shr_mir_tag: Mir.Inst.Tag = switch (int_info.signedness) { + .signed => Mir.Inst.Tag.asr, + .unsigned => Mir.Inst.Tag.lsr, + }; - // asr/lsr reconstructed, dest, rhs - const reconstructed = try self.binOp(.shr, dest, rhs, lhs_ty, rhs_ty, null); + var lhs_reg: Register = undefined; + var rhs_reg: Register = undefined; + var dest_reg: Register = undefined; + var reconstructed_reg: Register = undefined; + + const rhs_mcv = try self.resolveInst(extra.rhs); + const rhs_immediate_ok = rhs_mcv == .immediate and Instruction.Operand.fromU32(rhs_mcv.immediate) != null; + + const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; + const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; + + if (rhs_immediate_ok) { + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &reconstructed_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + null, + ); + + // lsl dest, lhs, rhs + _ = try self.addInst(.{ + .tag = .lsl, + .data = .{ .rr_shift = .{ + .rd = dest_reg, + .rm = lhs_reg, + .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)), + } }, + }); + + try self.truncRegister(dest_reg, dest_reg, int_info.signedness, int_info.bits); + + // asr/lsr reconstructed, dest, rhs + _ = try self.addInst(.{ + .tag = shr_mir_tag, + .data = .{ .rr_shift = .{ + .rd = reconstructed_reg, + .rm = dest_reg, + .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)), + } }, + }); + } else { + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &reconstructed_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + null, + ); + + // lsl dest, lhs, rhs + _ = try self.addInst(.{ + .tag = .lsl, + .data = .{ .rr_shift = .{ + .rd = dest_reg, + .rm = lhs_reg, + .shift_amount = Instruction.ShiftAmount.reg(rhs_reg), + } }, + }); + + try self.truncRegister(dest_reg, dest_reg, int_info.signedness, int_info.bits); + + // asr/lsr reconstructed, dest, rhs + _ 
= try self.addInst(.{ + .tag = shr_mir_tag, + .data = .{ .rr_shift = .{ + .rd = reconstructed_reg, + .rm = dest_reg, + .shift_amount = Instruction.ShiftAmount.reg(rhs_reg), + } }, + }); + } // cmp lhs, reconstructed - _ = try self.binOp(.cmp_eq, lhs, reconstructed, lhs_ty, lhs_ty, null); + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = lhs_reg, + .op = Instruction.Operand.reg(reconstructed_reg, Instruction.Operand.Shift.none), + } }, + }); - try self.genSetStack(lhs_ty, stack_offset, dest); + try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg }); try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; @@ -2662,8 +2766,8 @@ fn allocRegs( write_args: []const WriteArg, reuse_metadata: ?ReuseMetadata, ) InnerError!void { - // Air instructions have either one output or none (cmp) - assert(!(reuse_metadata != null and write_args.len > 1)); // see note above + // Air instructions have exactly one output + assert(!(reuse_metadata != null and write_args.len != 1)); // see note above // The operand mapping is a 1:1 mapping of read args to their // corresponding operand index in the Air instruction @@ -2714,7 +2818,7 @@ fn allocRegs( } } - if (reuse_metadata != null and write_args.len > 0) { + if (reuse_metadata != null) { const inst = reuse_metadata.?.corresponding_inst; const operand_mapping = reuse_metadata.?.operand_mapping; const arg = write_args[0]; @@ -2826,7 +2930,7 @@ fn binOpRegister( }; try self.allocRegs( &read_args, - if (mir_tag == .cmp) &.{} else &write_args, + &write_args, if (metadata) |md| .{ .corresponding_inst = md.inst, .operand_mapping = &.{ 0, 1 }, @@ -2846,10 +2950,6 @@ fn binOpRegister( .rn = lhs_reg, .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), } }, - .cmp => .{ .r_op_cmp = .{ - .rn = lhs_reg, - .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), - } }, .lsl, .asr, .lsr, @@ -2918,7 +3018,7 @@ fn binOpImmediate( const operand_mapping: []const Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0}; try self.allocRegs( &read_args, - if (mir_tag == .cmp) &.{} else &write_args, + &write_args, if (metadata) |md| .{ .corresponding_inst = md.inst, .operand_mapping = operand_mapping, @@ -2938,10 +3038,6 @@ fn binOpImmediate( .rn = lhs_reg, .op = Instruction.Operand.fromU32(rhs.immediate).?, } }, - .cmp => .{ .r_op_cmp = .{ - .rn = lhs_reg, - .op = Instruction.Operand.fromU32(rhs.immediate).?, - } }, .lsl, .asr, .lsr, @@ -2991,7 +3087,6 @@ fn binOp( switch (tag) { .add, .sub, - .cmp_eq, => { switch (lhs_ty.zigTypeTag()) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), @@ -3006,15 +3101,12 @@ fn binOp( // operands const lhs_immediate_ok = switch (tag) { .add => lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null, - .sub, - .cmp_eq, - => false, + .sub => false, else => unreachable, }; const rhs_immediate_ok = switch (tag) { .add, .sub, - .cmp_eq, => rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null, else => unreachable, }; @@ -3022,7 +3114,6 @@ fn binOp( const mir_tag: Mir.Inst.Tag = switch (tag) { .add => .add, .sub => .sub, - .cmp_eq => .cmp, else => unreachable, }; @@ -4005,32 +4096,16 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const lhs_ty = self.air.typeOf(bin_op.lhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { - const operands: BinOpOperands = 
.{ .inst = .{ - .inst = inst, - .lhs = bin_op.lhs, - .rhs = bin_op.rhs, - } }; - break :blk try self.cmp(operands, lhs_ty, op); + break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op); }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -const BinOpOperands = union(enum) { - inst: struct { - inst: Air.Inst.Index, - lhs: Air.Inst.Ref, - rhs: Air.Inst.Ref, - }, - mcv: struct { - lhs: MCValue, - rhs: MCValue, - }, -}; - fn cmp( self: *Self, - operands: BinOpOperands, + lhs: ReadArg.Bind, + rhs: ReadArg.Bind, lhs_ty: Type, op: math.CompareOperator, ) !MCValue { @@ -4060,22 +4135,47 @@ fn cmp( if (int_info.bits <= 32) { try self.spillCompareFlagsIfOccupied(); - switch (operands) { - .inst => |inst_op| { - const metadata: BinOpMetadata = .{ - .inst = inst_op.inst, - .lhs = inst_op.lhs, - .rhs = inst_op.rhs, - }; - const lhs = try self.resolveInst(inst_op.lhs); - const rhs = try self.resolveInst(inst_op.rhs); + var lhs_reg: Register = undefined; + var rhs_reg: Register = undefined; - self.cpsr_flags_inst = inst_op.inst; - _ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, metadata); - }, - .mcv => |mcv_op| { - _ = try self.binOp(.cmp_eq, mcv_op.lhs, mcv_op.rhs, int_ty, int_ty, null); - }, + const rhs_mcv = try rhs.resolveToMcv(self); + const rhs_immediate_ok = rhs_mcv == .immediate and Instruction.Operand.fromU32(rhs_mcv.immediate) != null; + + if (rhs_immediate_ok) { + const read_args = [_]ReadArg{ + .{ .ty = int_ty, .bind = lhs, .class = gp, .reg = &lhs_reg }, + }; + try self.allocRegs( + &read_args, + &.{}, + null, // we won't be able to reuse a register as there are no write_regs + ); + + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = lhs_reg, + .op = Instruction.Operand.fromU32(rhs_mcv.immediate).?, + } }, + }); + } else { + const read_args = [_]ReadArg{ + .{ .ty = int_ty, .bind = lhs, .class = gp, .reg = &lhs_reg }, + .{ .ty = int_ty, .bind = rhs, .class = gp, .reg = &rhs_reg }, + }; + try self.allocRegs( + &read_args, + &.{}, + null, // we won't be able to reuse a register as there are no write_regs + ); + + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = lhs_reg, + .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), + } }, + }); } return switch (int_info.signedness) { @@ -4349,14 +4449,13 @@ fn isNonNull(self: *Self, ty: Type, operand: MCValue) !MCValue { fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { const error_type = ty.errorUnionSet(); - const error_int_type = Type.initTag(.u16); if (error_type.errorSetIsEmpty()) { return MCValue{ .immediate = 0 }; // always false } const error_mcv = try self.errUnionErr(operand, ty); - _ = try self.binOp(.cmp_eq, error_mcv, .{ .immediate = 0 }, error_int_type, error_int_type, null); + _ = try self.cmp(.{ .mcv = error_mcv }, .{ .mcv = .{ .immediate = 0 } }, error_type, .neq); return MCValue{ .cpsr_flags = .hi }; } @@ -4587,14 +4686,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { defer self.gpa.free(branch_into_prong_relocs); for (items) |item, idx| { - const condition = try self.resolveInst(pl_op.operand); - const item_mcv = try self.resolveInst(item); - - const operands: BinOpOperands = .{ .mcv = .{ - .lhs = condition, - .rhs = item_mcv, - } }; - const cmp_result = try self.cmp(operands, condition_ty, .neq); + const cmp_result = try self.cmp(.{ .inst = pl_op.operand }, .{ .inst = item }, condition_ty, .neq); branch_into_prong_relocs[idx] = try self.condBr(cmp_result); } From 
ed4be06883427e2d8f97f2dd241d8996994a0c66 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 21 Aug 2022 13:43:09 +0200 Subject: [PATCH 04/14] stage2 ARM: extract add+sub from binOp This commit also lays the groundwork for further extractions from binOp. --- src/arch/arm/CodeGen.zig | 329 +++++++++++++++++++++++++++++---------- 1 file changed, 246 insertions(+), 83 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 93db3dd76b..aacfff4f9c 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1401,19 +1401,26 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); - const result: MCValue = if (self.liveness.isUnused(inst)) - .dead - else - try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{ - .lhs = bin_op.lhs, - .rhs = bin_op.rhs, - .inst = inst, - }); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; + const rhs_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + + switch (tag) { + .add, + .sub, + => break :result try self.addSub(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + else => break :result try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{ + .lhs = bin_op.lhs, + .rhs = bin_op.rhs, + .inst = inst, + }), + } + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -1459,8 +1466,8 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const lhs = try self.resolveInst(extra.lhs); - const rhs = try self.resolveInst(extra.rhs); + const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; + const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); @@ -1485,7 +1492,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { .sub_with_overflow => .sub, else => unreachable, }; - const dest = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, null); + const dest = try self.addSub(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, null); const dest_reg = dest.register; const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg); defer self.register_manager.unlockReg(dest_reg_lock); @@ -1511,6 +1518,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits == 32) { + const lhs = try self.resolveInst(extra.lhs); + const rhs = try self.resolveInst(extra.rhs); + // Only say yes if the operation is // commutative, i.e. 
we can swap both of the // operands @@ -2600,26 +2610,10 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; }, else => { - const offset_reg = try self.copyToTmpRegister(ptr_ty, .{ - .immediate = struct_field_offset, - }); - const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg); - defer self.register_manager.unlockReg(offset_reg_lock); + const lhs_bind: ReadArg.Bind = .{ .mcv = mcv }; + const rhs_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = struct_field_offset } }; - const addr_reg = try self.copyToTmpRegister(ptr_ty, mcv); - const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg); - defer self.register_manager.unlockReg(addr_reg_lock); - - const dest = try self.binOp( - .add, - .{ .register = addr_reg }, - .{ .register = offset_reg }, - Type.usize, - Type.usize, - null, - ); - - break :result dest; + break :result try self.addSub(.add, lhs_bind, rhs_bind, Type.usize, Type.usize, null); }, } }; @@ -2708,6 +2702,25 @@ const ReadArg = struct { .mcv => |mcv| mcv, }; } + + fn resolveToImmediate(bind: Bind, function: *Self) InnerError!?u32 { + switch (bind) { + .inst => |inst| { + // TODO resolve independently of inst_table + const mcv = try function.resolveInst(inst); + switch (mcv) { + .immediate => |imm| return imm, + else => return null, + } + }, + .mcv => |mcv| { + switch (mcv) { + .immediate => |imm| return imm, + else => return null, + } + }, + } + } }; }; @@ -3057,6 +3070,136 @@ fn binOpImmediate( return MCValue{ .register = dest_reg }; } +/// TODO +fn binOpRegisterNew( + self: *Self, + mir_tag: Mir.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) !MCValue { + var lhs_reg: Register = undefined; + var rhs_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{ 0, 1 }, + } else null, + ); + + const mir_data: Mir.Inst.Data = switch (mir_tag) { + .add, + .adds, + .sub, + .subs, + .@"and", + .orr, + .eor, + => .{ .rr_op = .{ + .rd = dest_reg, + .rn = lhs_reg, + .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), + } }, + .lsl, + .asr, + .lsr, + => .{ .rr_shift = .{ + .rd = dest_reg, + .rm = lhs_reg, + .shift_amount = Instruction.ShiftAmount.reg(rhs_reg), + } }, + .mul, + .smulbb, + => .{ .rrr = .{ + .rd = dest_reg, + .rn = lhs_reg, + .rm = rhs_reg, + } }, + else => unreachable, + }; + + _ = try self.addInst(.{ + .tag = mir_tag, + .data = mir_data, + }); + + return MCValue{ .register = dest_reg }; +} + +/// TODO +fn binOpImmediateNew( + self: *Self, + mir_tag: Mir.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_immediate: u32, + lhs_ty: Type, + lhs_and_rhs_swapped: bool, + maybe_inst: ?Air.Inst.Index, +) !MCValue { + var lhs_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + const operand_mapping: []const 
Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0}; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = operand_mapping, + } else null, + ); + + const mir_data: Mir.Inst.Data = switch (mir_tag) { + .add, + .adds, + .sub, + .subs, + .@"and", + .orr, + .eor, + => .{ .rr_op = .{ + .rd = dest_reg, + .rn = lhs_reg, + .op = Instruction.Operand.fromU32(rhs_immediate).?, + } }, + .lsl, + .asr, + .lsr, + => .{ .rr_shift = .{ + .rd = dest_reg, + .rm = lhs_reg, + .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_immediate)), + } }, + else => unreachable, + }; + + _ = try self.addInst(.{ + .tag = mir_tag, + .data = mir_data, + }); + + return MCValue{ .register = dest_reg }; +} + const BinOpMetadata = struct { inst: Air.Inst.Index, lhs: Air.Inst.Ref, @@ -3085,53 +3228,6 @@ fn binOp( metadata: ?BinOpMetadata, ) InnerError!MCValue { switch (tag) { - .add, - .sub, - => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const mod = self.bin_file.options.module.?; - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - // Only say yes if the operation is - // commutative, i.e. we can swap both of the - // operands - const lhs_immediate_ok = switch (tag) { - .add => lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null, - .sub => false, - else => unreachable, - }; - const rhs_immediate_ok = switch (tag) { - .add, - .sub, - => rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null, - else => unreachable, - }; - - const mir_tag: Mir.Inst.Tag = switch (tag) { - .add => .add, - .sub => .sub, - else => unreachable, - }; - - if (rhs_immediate_ok) { - return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata); - } else if (lhs_immediate_ok) { - // swap lhs and rhs - return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata); - } else { - return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); - } - } else { - return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, .mul => { switch (lhs_ty.zigTypeTag()) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), @@ -3278,8 +3374,18 @@ fn binOp( else => unreachable, }; + const lhs_bind = if (metadata) |md| + ReadArg.Bind{ .inst = md.lhs } + else + ReadArg.Bind{ .mcv = lhs }; + const rhs_bind = if (metadata) |md| + ReadArg.Bind{ .inst = md.rhs } + else + ReadArg.Bind{ .mcv = rhs }; + // Generate an add/sub/mul - const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); + const maybe_inst: ?Air.Inst.Index = if (metadata) |md| md.inst else null; + const result = try self.addSub(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); // Truncate if necessary switch (lhs_ty.zigTypeTag()) { @@ -3463,6 +3569,63 @@ fn binOp( } } +fn addSub( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); + 
const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + const lhs_immediate = try lhs_bind.resolveToImmediate(self); + const rhs_immediate = try rhs_bind.resolveToImmediate(self); + + // Only say yes if the operation is + // commutative, i.e. we can swap both of the + // operands + const lhs_immediate_ok = switch (tag) { + .add => if (lhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false, + .sub => false, + else => unreachable, + }; + const rhs_immediate_ok = switch (tag) { + .add, + .sub, + => if (rhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false, + else => unreachable, + }; + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .add => .add, + .sub => .sub, + else => unreachable, + }; + + if (rhs_immediate_ok) { + return try self.binOpImmediateNew(mir_tag, lhs_bind, rhs_immediate.?, lhs_ty, false, maybe_inst); + } else if (lhs_immediate_ok) { + // swap lhs and rhs + return try self.binOpImmediateNew(mir_tag, rhs_bind, lhs_immediate.?, rhs_ty, true, maybe_inst); + } else { + return try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + } + } else { + return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void { const abi_size = ty.abiSize(self.target.*); @@ -4138,8 +4301,8 @@ fn cmp( var lhs_reg: Register = undefined; var rhs_reg: Register = undefined; - const rhs_mcv = try rhs.resolveToMcv(self); - const rhs_immediate_ok = rhs_mcv == .immediate and Instruction.Operand.fromU32(rhs_mcv.immediate) != null; + const rhs_immediate = try rhs.resolveToImmediate(self); + const rhs_immediate_ok = if (rhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false; if (rhs_immediate_ok) { const read_args = [_]ReadArg{ @@ -4155,7 +4318,7 @@ fn cmp( .tag = .cmp, .data = .{ .r_op_cmp = .{ .rn = lhs_reg, - .op = Instruction.Operand.fromU32(rhs_mcv.immediate).?, + .op = Instruction.Operand.fromU32(rhs_immediate.?).?, } }, }); } else { From fdb2c80bdc12bfb6be5235de6a5792e0b0619da8 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 21 Aug 2022 17:10:00 +0200 Subject: [PATCH 05/14] stage2 ARM: extract mul, div, and mod out of binOp --- src/arch/arm/CodeGen.zig | 371 ++++++++++++++++++++++++--------------- 1 file changed, 229 insertions(+), 142 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index aacfff4f9c..02981ce418 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1410,16 +1410,29 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - switch (tag) { - .add, - .sub, - => break :result try self.addSub(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), - else => break :result try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{ + break :result switch (tag) { + .add => try self.addSub(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .sub => try self.addSub(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .mul => try self.mul(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .div_float => try self.divFloat(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .div_trunc => try self.div(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .div_floor => try self.div(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .div_exact => try self.divExact(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .rem 
=> try self.rem(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .mod => try self.modulo(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + else => try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{ .lhs = bin_op.lhs, .rhs = bin_op.rhs, .inst = inst, }), - } + }; }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -3228,141 +3241,6 @@ fn binOp( metadata: ?BinOpMetadata, ) InnerError!MCValue { switch (tag) { - .mul => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const mod = self.bin_file.options.module.?; - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - // TODO add optimisations for multiplication - // with immediates, for example a * 2 can be - // lowered to a << 1 - return try self.binOpRegister(.mul, lhs, rhs, lhs_ty, rhs_ty, metadata); - } else { - return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, - .div_float => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - else => unreachable, - } - }, - .div_trunc, .div_floor => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const mod = self.bin_file.options.module.?; - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - switch (int_info.signedness) { - .signed => { - return self.fail("TODO ARM signed integer division", .{}); - }, - .unsigned => { - switch (rhs) { - .immediate => |imm| { - if (std.math.isPowerOfTwo(imm)) { - const shift = MCValue{ .immediate = std.math.log2_int(u32, imm) }; - return try self.binOp(.shr, lhs, shift, lhs_ty, rhs_ty, metadata); - } else { - return self.fail("TODO ARM integer division by constants", .{}); - } - }, - else => return self.fail("TODO ARM integer division", .{}), - } - }, - } - } else { - return self.fail("TODO ARM integer division for integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, - .div_exact => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => return self.fail("TODO ARM div_exact", .{}), - else => unreachable, - } - }, - .rem => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const mod = self.bin_file.options.module.?; - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - switch (int_info.signedness) { - .signed => { - return self.fail("TODO ARM signed integer mod", .{}); - }, - .unsigned => { - switch (rhs) { - .immediate => |imm| { - if (std.math.isPowerOfTwo(imm)) { - const log2 = std.math.log2_int(u32, imm); - - var lhs_reg: Register = undefined; - var dest_reg: Register = undefined; - - const lhs_bind = if (metadata) |md| - ReadArg.Bind{ .inst = md.lhs } - else - ReadArg.Bind{ .mcv = lhs }; - const read_args = [_]ReadArg{ - .{ .ty = lhs_ty, .bind = lhs_bind, 
.class = gp, .reg = &lhs_reg }, - }; - const write_args = [_]WriteArg{ - .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, - }; - try self.allocRegs( - &read_args, - &write_args, - if (metadata) |md| .{ - .corresponding_inst = md.inst, - .operand_mapping = &.{0}, - } else null, - ); - - try self.truncRegister(lhs_reg, dest_reg, int_info.signedness, log2); - return MCValue{ .register = dest_reg }; - } else { - return self.fail("TODO ARM integer mod by constants", .{}); - } - }, - else => return self.fail("TODO ARM integer mod", .{}), - } - }, - } - } else { - return self.fail("TODO ARM integer division for integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, - .mod => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => return self.fail("TODO ARM mod", .{}), - else => unreachable, - } - }, .addwrap, .subwrap, .mulwrap, @@ -3557,7 +3435,13 @@ fn binOp( } else { // convert the offset into a byte offset by // multiplying it with elem_size - const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null); + const rhs_bind = if (metadata) |md| + ReadArg.Bind{ .inst = md.rhs } + else + ReadArg.Bind{ .mcv = rhs }; + const imm_bind = ReadArg.Bind{ .mcv = .{ .immediate = elem_size } }; + + const offset = try self.mul(rhs_bind, imm_bind, Type.usize, Type.usize, null); const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null); return addr; } @@ -3626,6 +3510,209 @@ fn addSub( } } +fn mul( + self: *Self, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + // TODO add optimisations for multiplication + // with immediates, for example a * 2 can be + // lowered to a << 1 + return try self.binOpRegisterNew(.mul, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + } else { + return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + +fn divFloat( + self: *Self, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + _ = lhs_bind; + _ = rhs_bind; + _ = lhs_ty; + _ = rhs_ty; + _ = maybe_inst; + + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + else => unreachable, + } +} + +fn div( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + _ = tag; + + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + switch (int_info.signedness) { + .signed => 
{ + return self.fail("TODO ARM signed integer division", .{}); + }, + .unsigned => { + const rhs_immediate = try rhs_bind.resolveToImmediate(self); + + if (rhs_immediate) |imm| { + if (std.math.isPowerOfTwo(imm)) { + const shift = std.math.log2_int(u32, imm); + return try self.binOpImmediateNew(.lsr, lhs_bind, shift, lhs_ty, false, maybe_inst); + } else { + return self.fail("TODO ARM integer division by constants", .{}); + } + } else { + return self.fail("TODO ARM integer division", .{}); + } + }, + } + } else { + return self.fail("TODO ARM integer division for integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + +fn divExact( + self: *Self, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + _ = lhs_bind; + _ = rhs_bind; + _ = lhs_ty; + _ = rhs_ty; + _ = maybe_inst; + + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => return self.fail("TODO ARM div_exact", .{}), + else => unreachable, + } +} + +fn rem( + self: *Self, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + switch (int_info.signedness) { + .signed => { + return self.fail("TODO ARM signed integer mod", .{}); + }, + .unsigned => { + const rhs_immediate = try rhs_bind.resolveToImmediate(self); + + if (rhs_immediate) |imm| { + if (std.math.isPowerOfTwo(imm)) { + const log2 = std.math.log2_int(u32, imm); + + var lhs_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + } else null, + ); + + try self.truncRegister(lhs_reg, dest_reg, int_info.signedness, log2); + + return MCValue{ .register = dest_reg }; + } else { + return self.fail("TODO ARM integer mod by constants", .{}); + } + } else { + return self.fail("TODO ARM integer mod", .{}); + } + }, + } + } else { + return self.fail("TODO ARM integer division for integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + +fn modulo( + self: *Self, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + _ = lhs_bind; + _ = rhs_bind; + _ = lhs_ty; + _ = rhs_ty; + _ = maybe_inst; + + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => return self.fail("TODO ARM mod", .{}), + else => unreachable, + } +} + fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void { const abi_size = ty.abiSize(self.target.*); From 95b8a5f157aa7552e3f125e56968b889e254497a Mon Sep 17 00:00:00 2001 From: 
joachimschmidt557 Date: Thu, 25 Aug 2022 22:17:57 +0200 Subject: [PATCH 06/14] stage2 ARM: extract remaining operations out of binOp --- src/arch/arm/CodeGen.zig | 543 ++++++++++++++++++++------------------- 1 file changed, 283 insertions(+), 260 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 02981ce418..b7694291f2 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1407,8 +1407,6 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); break :result switch (tag) { .add => try self.addSub(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), @@ -1427,11 +1425,24 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { .mod => try self.modulo(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), - else => try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{ - .lhs = bin_op.lhs, - .rhs = bin_op.rhs, - .inst = inst, - }), + .addwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .subwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .mulwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .bit_and => try self.bitwise(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .bit_or => try self.bitwise(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .xor => try self.bitwise(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .shl_exact => try self.shiftExact(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .shr_exact => try self.shiftExact(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .shl => try self.shiftNormal(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .shr => try self.shiftNormal(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .bool_and => try self.booleanOp(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .bool_or => try self.booleanOp(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + else => unreachable, }; }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -1440,19 +1451,15 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); - const result: MCValue = if (self.liveness.isUnused(inst)) - .dead - else - try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{ - .lhs = bin_op.lhs, - .rhs = bin_op.rhs, - .inst = inst, - }); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; + const rhs_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; + + break :result try self.ptrArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst); + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2247,7 +2254,11 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { }, else => { const dest = try self.allocRegOrMem(inst, true); - const addr = try self.binOp(.ptr_add, 
base_mcv, index_mcv, slice_ptr_field_type, Type.usize, null); + + const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; + const index_bind: ReadArg.Bind = .{ .mcv = index_mcv }; + + const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ptr_field_type, Type.usize, null); try self.load(dest, addr, slice_ptr_field_type); break :result dest; @@ -2262,12 +2273,15 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const slice_mcv = try self.resolveInst(extra.lhs); - const index_mcv = try self.resolveInst(extra.rhs); const base_mcv = slicePtr(slice_mcv); - const slice_ty = self.air.typeOf(extra.lhs); + const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; + const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ty, Type.usize, null); + const slice_ty = self.air.typeOf(extra.lhs); + const index_ty = self.air.typeOf(extra.rhs); + + const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ty, index_ty, null); break :result addr; }; return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); @@ -2290,12 +2304,13 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_mcv = try self.resolveInst(extra.lhs); - const index_mcv = try self.resolveInst(extra.rhs); + const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs }; + const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; const ptr_ty = self.air.typeOf(extra.lhs); + const index_ty = self.air.typeOf(extra.rhs); - const addr = try self.binOp(.ptr_add, ptr_mcv, index_mcv, ptr_ty, Type.usize, null); + const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, index_ty, null); break :result addr; }; return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); @@ -3219,240 +3234,6 @@ const BinOpMetadata = struct { rhs: Air.Inst.Ref, }; -/// For all your binary operation needs, this function will generate -/// the corresponding Mir instruction(s). Returns the location of the -/// result. -/// -/// If the binary operation itself happens to be an Air instruction, -/// pass the corresponding index in the inst parameter. That helps -/// this function do stuff like reusing operands. -/// -/// This function does not do any lowering to Mir itself, but instead -/// looks at the lhs and rhs and determines which kind of lowering -/// would be best suitable and then delegates the lowering to other -/// functions. 
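The monolithic dispatcher documented above is what this patch series dissolves into per-operation helpers. The key change in plumbing is that operands arrive as ReadArg.Bind values, resolved on demand, instead of as an MCValue plus an optional BinOpMetadata. The following is a minimal, self-contained sketch of that idea; the type shapes and method names are simplifications for illustration, not the compiler's actual internals:

    const std = @import("std");

    const Mcv = union(enum) {
        immediate: u32,
        register: u8,
    };

    const Bind = union(enum) {
        inst: u32, // an AIR instruction index, resolved only when needed
        mcv: Mcv, // an already-materialized machine value

        fn resolveToImmediate(bind: Bind) ?u32 {
            return switch (bind) {
                // the real backend resolves the instruction first; elided here
                .inst => null,
                .mcv => |mcv| switch (mcv) {
                    .immediate => |imm| imm,
                    .register => null,
                },
            };
        }
    };

    test "an immediate is only known once the bind resolves to one" {
        const imm_bind = Bind{ .mcv = .{ .immediate = 4 } };
        const reg_bind = Bind{ .mcv = .{ .register = 0 } };
        try std.testing.expectEqual(@as(?u32, 4), imm_bind.resolveToImmediate());
        try std.testing.expectEqual(@as(?u32, null), reg_bind.resolveToImmediate());
    }

The payoff shows up in div and bitwise below: an operation asks its bind for an immediate and picks the immediate or register lowering accordingly, instead of every call site pattern-matching MCValue by hand.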
-fn binOp( - self: *Self, - tag: Air.Inst.Tag, - lhs: MCValue, - rhs: MCValue, - lhs_ty: Type, - rhs_ty: Type, - metadata: ?BinOpMetadata, -) InnerError!MCValue { - switch (tag) { - .addwrap, - .subwrap, - .mulwrap, - => { - const base_tag: Air.Inst.Tag = switch (tag) { - .addwrap => .add, - .subwrap => .sub, - .mulwrap => .mul, - else => unreachable, - }; - - const lhs_bind = if (metadata) |md| - ReadArg.Bind{ .inst = md.lhs } - else - ReadArg.Bind{ .mcv = lhs }; - const rhs_bind = if (metadata) |md| - ReadArg.Bind{ .inst = md.rhs } - else - ReadArg.Bind{ .mcv = rhs }; - - // Generate an add/sub/mul - const maybe_inst: ?Air.Inst.Index = if (metadata) |md| md.inst else null; - const result = try self.addSub(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); - - // Truncate if necessary - switch (lhs_ty.zigTypeTag()) { - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - const result_reg = result.register; - - if (int_info.bits < 32) { - try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); - return result; - } else return result; - } else { - return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, - .bit_and, - .bit_or, - .xor, - => { - switch (lhs_ty.zigTypeTag()) { - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const mod = self.bin_file.options.module.?; - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - const lhs_immediate_ok = lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null; - const rhs_immediate_ok = rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null; - - const mir_tag: Mir.Inst.Tag = switch (tag) { - .bit_and => .@"and", - .bit_or => .orr, - .xor => .eor, - else => unreachable, - }; - - if (rhs_immediate_ok) { - return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata); - } else if (lhs_immediate_ok) { - // swap lhs and rhs - return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata); - } else { - return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); - } - } else { - return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, - .shl_exact, - .shr_exact, - => { - switch (lhs_ty.zigTypeTag()) { - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - const rhs_immediate_ok = rhs == .immediate; - - const mir_tag: Mir.Inst.Tag = switch (tag) { - .shl_exact => .lsl, - .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) { - .signed => Mir.Inst.Tag.asr, - .unsigned => Mir.Inst.Tag.lsr, - }, - else => unreachable, - }; - - if (rhs_immediate_ok) { - return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata); - } else { - return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); - } - } else { - return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, - .shl, - .shr, - => { - const base_tag: Air.Inst.Tag = switch (tag) { - .shl => .shl_exact, - .shr => .shr_exact, - else => unreachable, - }; - - // Generate a shl_exact/shr_exact - const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); - 
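The truncate-after-shift step that follows (kept intact in shiftNormal further down) exists because sub-word integers live in full 32-bit ARM registers: a register left shift can set bits above the semantic type's width, and those bits must be cleared again. A standalone test of the effect, using plain Zig integers rather than compiler internals:

    const std = @import("std");

    test "sub-word shl needs an explicit truncate after the register shift" {
        const a: u8 = 0b1100_0001;
        // What lsl leaves in a 32-bit register:
        const wide: u32 = @as(u32, a) << 2;
        try std.testing.expectEqual(@as(u32, 0b11_0000_0100), wide);
        // What truncRegister recovers afterwards:
        const truncated = @truncate(u8, wide);
        try std.testing.expectEqual(@as(u8, 0b0000_0100), truncated);
    }

Shifting right needs no such fixup, which is why the code below returns the result directly for shr.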
- // Truncate if necessary - switch (tag) { - .shr => return result, - .shl => switch (lhs_ty.zigTypeTag()) { - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - const result_reg = result.register; - - if (int_info.bits < 32) { - try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); - return result; - } else return result; - } else { - return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); - } - }, - else => unreachable, - }, - else => unreachable, - } - }, - .bool_and, - .bool_or, - => { - switch (lhs_ty.zigTypeTag()) { - .Bool => { - const lhs_immediate_ok = lhs == .immediate; - const rhs_immediate_ok = rhs == .immediate; - - const mir_tag: Mir.Inst.Tag = switch (tag) { - .bool_and => .@"and", - .bool_or => .orr, - else => unreachable, - }; - - if (rhs_immediate_ok) { - return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata); - } else if (lhs_immediate_ok) { - // swap lhs and rhs - return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata); - } else { - return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); - } - }, - else => unreachable, - } - }, - .ptr_add, - .ptr_sub, - => { - switch (lhs_ty.zigTypeTag()) { - .Pointer => { - const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), - }; - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); - - if (elem_size == 1) { - const base_tag: Mir.Inst.Tag = switch (tag) { - .ptr_add => .add, - .ptr_sub => .sub, - else => unreachable, - }; - - return try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); - } else { - // convert the offset into a byte offset by - // multiplying it with elem_size - const rhs_bind = if (metadata) |md| - ReadArg.Bind{ .inst = md.rhs } - else - ReadArg.Bind{ .mcv = rhs }; - const imm_bind = ReadArg.Bind{ .mcv = .{ .immediate = elem_size } }; - - const offset = try self.mul(rhs_bind, imm_bind, Type.usize, Type.usize, null); - const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null); - return addr; - } - }, - else => unreachable, - } - }, - else => unreachable, - } -} - fn addSub( self: *Self, tag: Air.Inst.Tag, @@ -3713,6 +3494,248 @@ fn modulo( } } +fn wrappingArithmetic( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + const base_tag: Air.Inst.Tag = switch (tag) { + .addwrap => .add, + .subwrap => .sub, + .mulwrap => .mul, + else => unreachable, + }; + + // Generate an add/sub/mul + const result = try self.addSub(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + + // Truncate if necessary + switch (lhs_ty.zigTypeTag()) { + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + const result_reg = result.register; + + if (int_info.bits < 32) { + try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); + return result; + } else return result; + } else { + return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + +fn bitwise( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, 
+ rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + const lhs_immediate = try lhs_bind.resolveToImmediate(self); + const rhs_immediate = try rhs_bind.resolveToImmediate(self); + + const lhs_immediate_ok = if (lhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false; + const rhs_immediate_ok = if (rhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false; + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .bit_and => .@"and", + .bit_or => .orr, + .xor => .eor, + else => unreachable, + }; + + if (rhs_immediate_ok) { + return try self.binOpImmediateNew(mir_tag, lhs_bind, rhs_immediate.?, lhs_ty, false, maybe_inst); + } else if (lhs_immediate_ok) { + // swap lhs and rhs + return try self.binOpImmediateNew(mir_tag, rhs_bind, lhs_immediate.?, rhs_ty, true, maybe_inst); + } else { + return try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + } + } else { + return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + +fn shiftExact( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + const rhs_immediate = try rhs_bind.resolveToImmediate(self); + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .shl_exact => .lsl, + .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) { + .signed => Mir.Inst.Tag.asr, + .unsigned => Mir.Inst.Tag.lsr, + }, + else => unreachable, + }; + + if (rhs_immediate) |imm| { + return try self.binOpImmediateNew(mir_tag, lhs_bind, imm, lhs_ty, false, maybe_inst); + } else { + return try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + } + } else { + return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + +fn shiftNormal( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + const base_tag: Air.Inst.Tag = switch (tag) { + .shl => .shl_exact, + .shr => .shr_exact, + else => unreachable, + }; + + // Generate a shl_exact/shr_exact + const result = try self.shiftExact(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + + // Truncate if necessary + switch (tag) { + .shr => return result, + .shl => switch (lhs_ty.zigTypeTag()) { + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + const result_reg = result.register; + + if (int_info.bits < 32) { + try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); + return result; + } else return result; + } else { + return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); + } + }, + else => unreachable, + }, + else => unreachable, + } +} + +fn booleanOp( + self: *Self, + tag: 
Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Bool => { + const lhs_immediate = try lhs_bind.resolveToImmediate(self); + const rhs_immediate = try rhs_bind.resolveToImmediate(self); + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .bool_and => .@"and", + .bool_or => .orr, + else => unreachable, + }; + + if (rhs_immediate) |imm| { + return try self.binOpImmediateNew(mir_tag, lhs_bind, imm, lhs_ty, false, maybe_inst); + } else if (lhs_immediate) |imm| { + // swap lhs and rhs + return try self.binOpImmediateNew(mir_tag, rhs_bind, imm, rhs_ty, true, maybe_inst); + } else { + return try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + } + }, + else => unreachable, + } +} + +fn ptrArithmetic( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Pointer => { + const mod = self.bin_file.options.module.?; + assert(rhs_ty.eql(Type.usize, mod)); + + const ptr_ty = lhs_ty; + const elem_ty = switch (ptr_ty.ptrSize()) { + .One => ptr_ty.childType().childType(), // ptr to array, so get array element type + else => ptr_ty.childType(), + }; + const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + + const base_tag: Air.Inst.Tag = switch (tag) { + .ptr_add => .add, + .ptr_sub => .sub, + else => unreachable, + }; + + if (elem_size == 1) { + return try self.addSub(base_tag, lhs_bind, rhs_bind, Type.usize, Type.usize, maybe_inst); + } else { + // convert the offset into a byte offset by + // multiplying it with elem_size + const imm_bind = ReadArg.Bind{ .mcv = .{ .immediate = elem_size } }; + + const offset = try self.mul(rhs_bind, imm_bind, Type.usize, Type.usize, null); + const offset_bind = ReadArg.Bind{ .mcv = offset }; + + const addr = try self.addSub(base_tag, lhs_bind, offset_bind, Type.usize, Type.usize, null); + return addr; + } + }, + else => unreachable, + } +} + fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void { const abi_size = ty.abiSize(self.target.*); @@ -4614,7 +4637,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { if (else_value == .dead) continue; // The instruction is only overridden in the else branch. - var i: usize = self.branch_stack.items.len - 2; + var i: usize = self.branch_stack.items.len - 1; while (true) { i -= 1; // If this overflows, the question is: why wasn't the instruction marked dead? 
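            // Note on the index change above: with the old start value of
            // items.len - 2, the first i -= 1 probed items.len - 3 and
            // silently skipped the branch directly below the top of the
            // stack. Starting from items.len - 1, the first probe is
            // items.len - 2, which appears to be the intended parent branch.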
if (self.branch_stack.items[i].inst_table.get(else_key)) |mcv| { @@ -4641,7 +4664,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { if (then_value == .dead) continue; const parent_mcv = blk: { - var i: usize = self.branch_stack.items.len - 2; + var i: usize = self.branch_stack.items.len - 1; while (true) { i -= 1; if (self.branch_stack.items[i].inst_table.get(then_key)) |mcv| { From 481bd4761ac9826336d13553e249989f509ba172 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Fri, 26 Aug 2022 19:27:20 +0200 Subject: [PATCH 07/14] stage2 ARM: remove remaining uses of binOp{Register,Immediate} --- src/arch/arm/CodeGen.zig | 310 ++++++++------------------------------- 1 file changed, 60 insertions(+), 250 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index b7694291f2..bf378e24ce 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1264,11 +1264,11 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { fn minMax( self: *Self, tag: Air.Inst.Tag, - maybe_inst: ?Air.Inst.Index, - lhs: MCValue, - rhs: MCValue, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, lhs_ty: Type, rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, ) !MCValue { switch (lhs_ty.zigTypeTag()) { .Float => return self.fail("TODO ARM min/max on floats", .{}), @@ -1278,34 +1278,25 @@ fn minMax( assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 32) { - const lhs_is_register = lhs == .register; - const rhs_is_register = rhs == .register; + var lhs_reg: Register = undefined; + var rhs_reg: Register = undefined; + var dest_reg: Register = undefined; - const lhs_reg = switch (lhs) { - .register => |r| r, - else => try self.copyToTmpRegister(lhs_ty, lhs), + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg }, }; - const lhs_reg_lock = self.register_manager.lockReg(lhs_reg); - defer if (lhs_reg_lock) |reg| self.register_manager.unlockReg(reg); - - const rhs_reg = switch (rhs) { - .register => |r| r, - else => try self.copyToTmpRegister(rhs_ty, rhs), + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, }; - const rhs_reg_lock = self.register_manager.lockReg(rhs_reg); - defer if (rhs_reg_lock) |reg| self.register_manager.unlockReg(reg); - - const dest_reg = if (maybe_inst) |inst| blk: { - const bin_op = self.air.instructions.items(.data)[inst].bin_op; - - if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) { - break :blk lhs_reg; - } else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) { - break :blk rhs_reg; - } else { - break :blk try self.register_manager.allocReg(inst, gp); - } - } else try self.register_manager.allocReg(null, gp); + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{ 0, 1 }, + } else null, + ); // lhs == reg should have been checked by airMinMax // @@ -1369,15 +1360,17 @@ fn minMax( fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const lhs_bind: 
ReadArg.Bind = .{ .inst = bin_op.lhs }; + const rhs_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; + + const lhs = try self.resolveInst(bin_op.lhs); if (bin_op.lhs == bin_op.rhs) break :result lhs; - break :result try self.minMax(tag, inst, lhs, rhs, lhs_ty, rhs_ty); + break :result try self.minMax(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst); }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -1538,21 +1531,21 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits == 32) { - const lhs = try self.resolveInst(extra.lhs); - const rhs = try self.resolveInst(extra.rhs); + const lhs_immediate = try lhs_bind.resolveToImmediate(self); + const rhs_immediate = try rhs_bind.resolveToImmediate(self); // Only say yes if the operation is // commutative, i.e. we can swap both of the // operands const lhs_immediate_ok = switch (tag) { - .add_with_overflow => lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null, + .add_with_overflow => if (lhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false, .sub_with_overflow => false, else => unreachable, }; const rhs_immediate_ok = switch (tag) { .add_with_overflow, .sub_with_overflow, - => rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null, + => if (rhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false, else => unreachable, }; @@ -1567,12 +1560,12 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const dest = blk: { if (rhs_immediate_ok) { - break :blk try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, null); + break :blk try self.binOpImmediateNew(mir_tag, lhs_bind, rhs_immediate.?, lhs_ty, false, null); } else if (lhs_immediate_ok) { // swap lhs and rhs - break :blk try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, null); + break :blk try self.binOpImmediateNew(mir_tag, rhs_bind, lhs_immediate.?, rhs_ty, true, null); } else { - break :blk try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, null); + break :blk try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, null); } }; @@ -1599,8 +1592,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); const result: MCValue = result: { - const lhs = try self.resolveInst(extra.lhs); - const rhs = try self.resolveInst(extra.rhs); + const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; + const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); @@ -1625,7 +1618,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .unsigned => .mul, }; - const dest = try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, null); + const dest = try self.binOpRegisterNew(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, null); const dest_reg = dest.register; const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg); defer self.register_manager.unlockReg(dest_reg_lock); @@ -1660,45 +1653,26 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .unsigned => .umull, }; - // TODO extract umull etc. 
to binOpTwoRegister - // once MCValue.rr is implemented - const lhs_is_register = lhs == .register; - const rhs_is_register = rhs == .register; + var lhs_reg: Register = undefined; + var rhs_reg: Register = undefined; + var rdhi: Register = undefined; + var rdlo: Register = undefined; + var truncated_reg: Register = undefined; - const lhs_lock: ?RegisterLock = if (lhs_is_register) - self.register_manager.lockReg(lhs.register) - else - null; - defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const lhs_reg = if (lhs_is_register) - lhs.register - else - try self.register_manager.allocReg(null, gp); - const new_lhs_lock = self.register_manager.lockReg(lhs_reg); - defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const rhs_reg = if (rhs_is_register) - rhs.register - else - try self.register_manager.allocReg(null, gp); - const new_rhs_lock = self.register_manager.lockReg(rhs_reg); - defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg); - - const dest_regs = try self.register_manager.allocRegs(2, .{ null, null }, gp); - const dest_regs_locks = self.register_manager.lockRegsAssumeUnused(2, dest_regs); - defer for (dest_regs_locks) |reg| { - self.register_manager.unlockReg(reg); + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg }, }; - const rdlo = dest_regs[0]; - const rdhi = dest_regs[1]; - - if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs); - if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs); - - const truncated_reg = try self.register_manager.allocReg(null, gp); - const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg); - defer self.register_manager.unlockReg(truncated_reg_lock); + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &rdhi }, + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &rdlo }, + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &truncated_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + null, + ); _ = try self.addInst(.{ .tag = base_tag, @@ -2933,172 +2907,10 @@ fn allocRegs( } } -/// Don't call this function directly. Use binOp instead. +/// Wrapper around allocRegs and addInst tailored for specific Mir +/// instructions which are binary operations acting on two registers /// -/// Calling this function signals an intention to generate a Mir -/// instruction of the form -/// -/// op dest, lhs, rhs -/// -/// Asserts that generating an instruction of that form is possible. 
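The function deleted below managed register locks by hand; its replacement, binOpRegisterNew, delegates all of that to allocRegs with ReadArg/WriteArg descriptors. A toy model of the out-parameter protocol involved, with locking and operand-reuse logic omitted and all names hypothetical:

    const std = @import("std");

    const Reg = u8;

    // Toy register allocator: hands out registers in order and writes each
    // choice back through the caller's out-pointer, mimicking how ReadArg
    // and WriteArg carry a *Register field that allocRegs fills in.
    const ToyAllocator = struct {
        next: Reg = 0,

        fn allocRegs(self: *ToyAllocator, outs: []const *Reg) void {
            for (outs) |out| {
                out.* = self.next;
                self.next += 1;
            }
        }
    };

    test "allocRegs fills in registers through out-pointers" {
        var lhs_reg: Reg = undefined;
        var rhs_reg: Reg = undefined;
        var dest_reg: Reg = undefined;

        var alloc = ToyAllocator{};
        const outs = [_]*Reg{ &lhs_reg, &rhs_reg, &dest_reg };
        alloc.allocRegs(&outs);

        try std.testing.expect(lhs_reg != rhs_reg);
        try std.testing.expect(dest_reg != lhs_reg and dest_reg != rhs_reg);
    }

Declaring the registers up front as undefined locals and letting one call fill them all is what lets the new helpers stay short compared to the lock/alloc/unlock sequences they replace.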
-fn binOpRegister( - self: *Self, - mir_tag: Mir.Inst.Tag, - lhs: MCValue, - rhs: MCValue, - lhs_ty: Type, - rhs_ty: Type, - metadata: ?BinOpMetadata, -) !MCValue { - var lhs_reg: Register = undefined; - var rhs_reg: Register = undefined; - var dest_reg: Register = undefined; - - const lhs_bind = if (metadata) |md| - ReadArg.Bind{ .inst = md.lhs } - else - ReadArg.Bind{ .mcv = lhs }; - const rhs_bind = if (metadata) |md| - ReadArg.Bind{ .inst = md.rhs } - else - ReadArg.Bind{ .mcv = rhs }; - const read_args = [_]ReadArg{ - .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, - .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg }, - }; - const write_args = [_]WriteArg{ - .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, - }; - try self.allocRegs( - &read_args, - &write_args, - if (metadata) |md| .{ - .corresponding_inst = md.inst, - .operand_mapping = &.{ 0, 1 }, - } else null, - ); - - const mir_data: Mir.Inst.Data = switch (mir_tag) { - .add, - .adds, - .sub, - .subs, - .@"and", - .orr, - .eor, - => .{ .rr_op = .{ - .rd = dest_reg, - .rn = lhs_reg, - .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), - } }, - .lsl, - .asr, - .lsr, - => .{ .rr_shift = .{ - .rd = dest_reg, - .rm = lhs_reg, - .shift_amount = Instruction.ShiftAmount.reg(rhs_reg), - } }, - .mul, - .smulbb, - => .{ .rrr = .{ - .rd = dest_reg, - .rn = lhs_reg, - .rm = rhs_reg, - } }, - else => unreachable, - }; - - _ = try self.addInst(.{ - .tag = mir_tag, - .data = mir_data, - }); - - return MCValue{ .register = dest_reg }; -} - -/// Don't call this function directly. Use binOp instead. -/// -/// Calling this function signals an intention to generate a Mir -/// instruction of the form -/// -/// op dest, lhs, #rhs_imm -/// -/// Set lhs_and_rhs_swapped to true iff inst.bin_op.lhs corresponds to -/// rhs and vice versa. This parameter is only used when maybe_inst != -/// null. -/// -/// Asserts that generating an instruction of that form is possible. 
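The lhs_and_rhs_swapped contract documented above boils down to an encoding constraint: an ARM data-processing instruction only accepts an immediate as its second operand, so imm OP reg must be rewritten as reg OP imm, and that rewrite is only sound when OP commutes. A standalone check of the arithmetic fact:

    const std = @import("std");

    test "swapping an immediate into the rhs slot requires commutativity" {
        const reg: u32 = 7; // stands in for a value already in a register
        const imm: u32 = 3; // stands in for an encodable immediate

        // and/orr/eor/add commute, so imm OP reg may be emitted as reg OP imm:
        try std.testing.expectEqual(imm & reg, reg & imm);
        try std.testing.expectEqual(imm +% reg, reg +% imm);

        // sub does not commute, which is why the overflow path earlier only
        // allows the swap for add_with_overflow, never sub_with_overflow:
        try std.testing.expect(imm -% reg != reg -% imm);
    }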
-fn binOpImmediate( - self: *Self, - mir_tag: Mir.Inst.Tag, - lhs: MCValue, - rhs: MCValue, - lhs_ty: Type, - lhs_and_rhs_swapped: bool, - metadata: ?BinOpMetadata, -) !MCValue { - var lhs_reg: Register = undefined; - var dest_reg: Register = undefined; - - const lhs_bind = blk: { - if (metadata) |md| { - const inst = if (lhs_and_rhs_swapped) md.rhs else md.lhs; - break :blk ReadArg.Bind{ .inst = inst }; - } else { - break :blk ReadArg.Bind{ .mcv = lhs }; - } - }; - - const read_args = [_]ReadArg{ - .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, - }; - const write_args = [_]WriteArg{ - .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, - }; - const operand_mapping: []const Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0}; - try self.allocRegs( - &read_args, - &write_args, - if (metadata) |md| .{ - .corresponding_inst = md.inst, - .operand_mapping = operand_mapping, - } else null, - ); - - const mir_data: Mir.Inst.Data = switch (mir_tag) { - .add, - .adds, - .sub, - .subs, - .@"and", - .orr, - .eor, - => .{ .rr_op = .{ - .rd = dest_reg, - .rn = lhs_reg, - .op = Instruction.Operand.fromU32(rhs.immediate).?, - } }, - .lsl, - .asr, - .lsr, - => .{ .rr_shift = .{ - .rd = dest_reg, - .rm = lhs_reg, - .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs.immediate)), - } }, - else => unreachable, - }; - - _ = try self.addInst(.{ - .tag = mir_tag, - .data = mir_data, - }); - - return MCValue{ .register = dest_reg }; -} - -/// TODO +/// Returns the destination register fn binOpRegisterNew( self: *Self, mir_tag: Mir.Inst.Tag, @@ -3167,7 +2979,11 @@ fn binOpRegisterNew( return MCValue{ .register = dest_reg }; } -/// TODO +/// Wrapper around allocRegs and addInst tailored for specific Mir +/// instructions which are binary operations acting on a register and +/// an immediate +/// +/// Returns the destination register fn binOpImmediateNew( self: *Self, mir_tag: Mir.Inst.Tag, @@ -3228,12 +3044,6 @@ fn binOpImmediateNew( return MCValue{ .register = dest_reg }; } -const BinOpMetadata = struct { - inst: Air.Inst.Index, - lhs: Air.Inst.Ref, - rhs: Air.Inst.Ref, -}; - fn addSub( self: *Self, tag: Air.Inst.Tag, From e2b029e2c8ad761c32886d27aa0227655a60eb9e Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Fri, 26 Aug 2022 22:19:27 +0200 Subject: [PATCH 08/14] stage2 ARM: implement field_parent_ptr --- src/arch/arm/CodeGen.zig | 21 ++++++++++++++++++--- test/behavior/field_parent_ptr.zig | 2 -- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index bf378e24ce..c0a28f1f94 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2681,9 +2681,24 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFieldParentPtr", .{}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const field_ptr = try self.resolveInst(extra.field_ptr); + const struct_ty = self.air.getRefType(ty_pl.ty).childType(); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, 
self.target.*)); + switch (field_ptr) { + .ptr_stack_offset => |off| { + break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; + }, + else => { + const lhs_bind: ReadArg.Bind = .{ .mcv = field_ptr }; + const rhs_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = struct_field_offset } }; + + break :result try self.addSub(.sub, lhs_bind, rhs_bind, Type.usize, Type.usize, null); + }, + } + }; + return self.finishAir(inst, result, .{ extra.field_ptr, .none, .none }); } /// An argument to a Mir instruction which is read (and possibly also diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig index 3aefb0ef47..570a1f9522 100644 --- a/test/behavior/field_parent_ptr.zig +++ b/test/behavior/field_parent_ptr.zig @@ -2,7 +2,6 @@ const expect = @import("std").testing.expect; const builtin = @import("builtin"); test "@fieldParentPtr non-first field" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; @@ -11,7 +10,6 @@ test "@fieldParentPtr non-first field" { } test "@fieldParentPtr first field" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; From 261fec8036e1e5518951d91c5fc27b53c1a511d8 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sat, 27 Aug 2022 20:22:54 +0200 Subject: [PATCH 09/14] stage2 ARM: amend implementation of various AIR instructions - unwrap_errunion_err for registers - unwrap_errunion_payload for registers - ptr_slice_len_ptr for all MCValues - ptr_slice_ptr_ptr for all MCValues --- src/arch/arm/CodeGen.zig | 297 ++++++++++++++++++++--------- test/behavior/alignof.zig | 1 - test/behavior/basic.zig | 1 - test/behavior/cast.zig | 7 - test/behavior/comptime_memory.zig | 2 - test/behavior/enum.zig | 5 - test/behavior/error.zig | 4 - test/behavior/eval.zig | 2 - test/behavior/merge_error_sets.zig | 1 - test/behavior/slice.zig | 2 - test/behavior/struct.zig | 1 - test/behavior/switch.zig | 1 - test/behavior/this.zig | 1 - test/behavior/try.zig | 1 - test/behavior/while.zig | 1 - 15 files changed, 208 insertions(+), 119 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index c0a28f1f94..2f7028c565 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -451,9 +451,7 @@ fn gen(self: *Self) !void { // The address of where to store the return value is in // r0. As this register might get overwritten along the // way, save the address to the stack. 
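The hunk below replaces hand-rolled next_stack_offset bookkeeping with a call to the reworked allocMem, which now takes an optional instruction rather than requiring one. A minimal sketch of the same bump-with-alignment computation; the formula matches the one in allocMem, everything else is stripped away:

    const std = @import("std");

    fn bumpAlloc(next_offset: *u32, abi_size: u32, abi_align: u32) u32 {
        // Same arithmetic as allocMem: align the running offset up, then
        // reserve abi_size bytes; the returned offset marks the slot's end.
        const offset = std.mem.alignForwardGeneric(u32, next_offset.*, abi_align) + abi_size;
        next_offset.* = offset;
        return offset;
    }

    test "stack slots are bump-allocated with alignment" {
        var next_offset: u32 = 0;
        try std.testing.expectEqual(@as(u32, 4), bumpAlloc(&next_offset, 4, 4));
        try std.testing.expectEqual(@as(u32, 5), bumpAlloc(&next_offset, 1, 1));
        try std.testing.expectEqual(@as(u32, 12), bumpAlloc(&next_offset, 4, 4));
    }

Passing null for the instruction is what lets call sites like the return-address spill below and airRetLoad reuse allocMem instead of duplicating this arithmetic inline.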
- const stack_offset = mem.alignForwardGeneric(u32, self.next_stack_offset, 4) + 4; - self.next_stack_offset = stack_offset; - self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset); + const stack_offset = try self.allocMem(4, 4, null); try self.genSetStack(Type.usize, stack_offset, MCValue{ .register = .r0 }); self.ret_mcv = MCValue{ .stack_offset = stack_offset }; @@ -893,17 +891,30 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { try table.ensureUnusedCapacity(self.gpa, additional_count); } -fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 { +fn allocMem( + self: *Self, + abi_size: u32, + abi_align: u32, + maybe_inst: ?Air.Inst.Index, +) !u32 { + assert(abi_size > 0); + assert(abi_align > 0); + if (abi_align > self.stack_align) self.stack_align = abi_align; + // TODO find a free slot instead of always appending const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; self.next_stack_offset = offset; self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset); - try self.stack.putNoClobber(self.gpa, offset, .{ - .inst = inst, - .size = abi_size, - }); + + if (maybe_inst) |inst| { + try self.stack.putNoClobber(self.gpa, offset, .{ + .inst = inst, + .size = abi_size, + }); + } + return offset; } @@ -925,7 +936,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { }; // TODO swap this for inst.ty.ptrAlign const abi_align = elem_ty.abiAlignment(self.target.*); - return self.allocMem(inst, abi_size, abi_align); + return self.allocMem(abi_size, abi_align, inst); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { @@ -948,7 +959,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { } } } - const stack_offset = try self.allocMem(inst, abi_size, abi_align); + const stack_offset = try self.allocMem(abi_size, abi_align, inst); return MCValue{ .stack_offset = stack_offset }; } @@ -1182,29 +1193,32 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand = try self.resolveInst(ty_op.operand); + const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; const operand_ty = self.air.typeOf(ty_op.operand); - switch (operand) { + switch (try operand_bind.resolveToMcv(self)) { .dead => unreachable, .unreach => unreachable, .cpsr_flags => |cond| break :result MCValue{ .cpsr_flags = cond.negate() }, else => { switch (operand_ty.zigTypeTag()) { .Bool => { - const op_reg = switch (operand) { - .register => |r| r, - else => try self.copyToTmpRegister(operand_ty, operand), - }; - const op_reg_lock = self.register_manager.lockRegAssumeUnused(op_reg); - defer self.register_manager.unlockReg(op_reg_lock); + var op_reg: Register = undefined; + var dest_reg: Register = undefined; - const dest_reg = blk: { - if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) { - break :blk op_reg; - } - - break :blk try self.register_manager.allocReg(null, gp); + const read_args = [_]ReadArg{ + .{ .ty = operand_ty, .bind = operand_bind, .class = gp, .reg = &op_reg }, }; + const write_args = [_]WriteArg{ + .{ .ty = operand_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + ReuseMetadata{ + .corresponding_inst = inst, + .operand_mapping = 
&.{0}, + }, + ); _ = try self.addInst(.{ .tag = .eor, @@ -1221,20 +1235,23 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { .Int => { const int_info = operand_ty.intInfo(self.target.*); if (int_info.bits <= 32) { - const op_reg = switch (operand) { - .register => |r| r, - else => try self.copyToTmpRegister(operand_ty, operand), - }; - const op_reg_lock = self.register_manager.lockRegAssumeUnused(op_reg); - defer self.register_manager.unlockReg(op_reg_lock); + var op_reg: Register = undefined; + var dest_reg: Register = undefined; - const dest_reg = blk: { - if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) { - break :blk op_reg; - } - - break :blk try self.register_manager.allocReg(null, gp); + const read_args = [_]ReadArg{ + .{ .ty = operand_ty, .bind = operand_bind, .class = gp, .reg = &op_reg }, }; + const write_args = [_]WriteArg{ + .{ .ty = operand_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + ReuseMetadata{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + }, + ); _ = try self.addInst(.{ .tag = .mvn, @@ -1384,7 +1401,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const len = try self.resolveInst(bin_op.rhs); const len_ty = self.air.typeOf(bin_op.rhs); - const stack_offset = try self.allocMem(inst, 8, 4); + const stack_offset = try self.allocMem(8, 4, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); try self.genSetStack(len_ty, stack_offset - 4, len); break :result MCValue{ .stack_offset = stack_offset }; @@ -1496,7 +1513,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits < 32) { - const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); + const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); try self.spillCompareFlagsIfOccupied(); @@ -1609,7 +1626,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 16) { - const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); + const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); try self.spillCompareFlagsIfOccupied(); @@ -1644,7 +1661,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits <= 32) { - const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); + const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); try self.spillCompareFlagsIfOccupied(); @@ -1769,7 +1786,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .Int => { const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 32) { - const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); + const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); try self.spillCompareFlagsIfOccupied(); @@ -1926,19 +1943,57 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { } /// Given an error union, returns the error -fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { +fn errUnionErr( + self: *Self, + error_union_bind: ReadArg.Bind, + error_union_ty: Type, + maybe_inst: ?Air.Inst.Index, +) !MCValue { const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); if (err_ty.errorSetIsEmpty()) { return MCValue{ 
.immediate = 0 }; } if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - return error_union_mcv; + return try error_union_bind.resolveToMcv(self); } const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*)); - switch (error_union_mcv) { - .register => return self.fail("TODO errUnionErr for registers", .{}), + switch (try error_union_bind.resolveToMcv(self)) { + .register => { + var operand_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = error_union_ty, .bind = error_union_bind, .class = gp, .reg = &operand_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = err_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + } else null, + ); + + const err_bit_offset = err_offset * 8; + const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8; + + _ = try self.addInst(.{ + .tag = .ubfx, // errors are unsigned integers + .data = .{ .rr_lsb_width = .{ + .rd = dest_reg, + .rn = operand_reg, + .lsb = @intCast(u5, err_bit_offset), + .width = @intCast(u6, err_bit_size), + } }, + }); + + return MCValue{ .register = dest_reg }; + }, .stack_argument_offset => |off| { return MCValue{ .stack_argument_offset = off + err_offset }; }, @@ -1955,27 +2010,66 @@ fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCV fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; const error_union_ty = self.air.typeOf(ty_op.operand); - const mcv = try self.resolveInst(ty_op.operand); - break :result try self.errUnionErr(mcv, error_union_ty); + + break :result try self.errUnionErr(error_union_bind, error_union_ty, inst); }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// Given an error union, returns the payload -fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { +fn errUnionPayload( + self: *Self, + error_union_bind: ReadArg.Bind, + error_union_ty: Type, + maybe_inst: ?Air.Inst.Index, +) !MCValue { const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); if (err_ty.errorSetIsEmpty()) { - return error_union_mcv; + return try error_union_bind.resolveToMcv(self); } if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { return MCValue.none; } const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); - switch (error_union_mcv) { - .register => return self.fail("TODO errUnionPayload for registers", .{}), + switch (try error_union_bind.resolveToMcv(self)) { + .register => { + var operand_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = error_union_ty, .bind = error_union_bind, .class = gp, .reg = &operand_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = err_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + } else null, + ); + + const payload_bit_offset = payload_offset * 8; + const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8; + + _ = try self.addInst(.{ + .tag = if 
(payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .data = .{ .rr_lsb_width = .{ + .rd = dest_reg, + .rn = operand_reg, + .lsb = @intCast(u5, payload_bit_offset), + .width = @intCast(u6, payload_bit_size), + } }, + }); + + return MCValue{ .register = dest_reg }; + }, .stack_argument_offset => |off| { return MCValue{ .stack_argument_offset = off + payload_offset }; }, @@ -1992,9 +2086,10 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; const error_union_ty = self.air.typeOf(ty_op.operand); - const error_union = try self.resolveInst(ty_op.operand); - break :result try self.errUnionPayload(error_union, error_union_ty); + + break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst); }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -2038,17 +2133,18 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); + const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); const abi_align = error_union_ty.abiAlignment(self.target.*); - const stack_offset = @intCast(u32, try self.allocMem(inst, abi_size, abi_align)); + const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); const err_off = errUnionErrorOffset(payload_ty, self.target.*); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand); - try self.genSetStack(Type.anyerror, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); + try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); break :result MCValue{ .stack_offset = stack_offset }; }; @@ -2060,16 +2156,17 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); + const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); const abi_align = error_union_ty.abiAlignment(self.target.*); - const stack_offset = @intCast(u32, try self.allocMem(inst, abi_size, abi_align)); + const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); const err_off = errUnionErrorOffset(payload_ty, self.target.*); - try self.genSetStack(Type.anyerror, stack_offset - @intCast(u32, err_off), operand); + try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand); try self.genSetStack(payload_ty, 
stack_offset - @intCast(u32, payload_off), .undef); break :result MCValue{ .stack_offset = stack_offset }; @@ -2108,7 +2205,6 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(ty_op.operand); switch (mcv) { - .dead, .unreach => unreachable, .register => unreachable, // a slice doesn't fit in one register .stack_argument_offset => |off| { break :result MCValue{ .stack_argument_offset = off + 4 }; @@ -2119,7 +2215,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { .memory => |addr| { break :result MCValue{ .memory = addr + 4 }; }, - else => return self.fail("TODO implement slice_len for {}", .{mcv}), + else => unreachable, // invalid MCValue for a slice } }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -2134,7 +2230,12 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - 4 }; }, - else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}), + else => { + const lhs_bind: ReadArg.Bind = .{ .mcv = mcv }; + const rhs_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 4 } }; + + break :result try self.addSub(.add, lhs_bind, rhs_bind, Type.usize, Type.usize, null); + }, } }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -2149,7 +2250,13 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off }; }, - else => return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{mcv}), + else => { + if (self.reuseOperand(inst, ty_op.operand, 0, mcv)) { + break :result mcv; + } else { + break :result MCValue{ .register = try self.copyToTmpRegister(Type.usize, mcv) }; + } + }, } }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -3891,7 +3998,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { .register => |reg| blk: { const abi_size = @intCast(u32, ty.abiSize(self.target.*)); const abi_align = ty.abiAlignment(self.target.*); - const stack_offset = try self.allocMem(inst, abi_size, abi_align); + const stack_offset = try self.allocMem(abi_size, abi_align, inst); try self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); break :blk MCValue{ .stack_offset = stack_offset }; @@ -3978,7 +4085,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
const ret_ty = fn_ty.fnReturnType(); const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*)); - const stack_offset = try self.allocMem(inst, ret_abi_size, ret_abi_align); + const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -4166,14 +4273,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); const abi_align = ret_ty.abiAlignment(self.target.*); - // This is essentially allocMem without the - // instruction tracking - if (abi_align > self.stack_align) - self.stack_align = abi_align; - // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; - self.next_stack_offset = offset; - self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset); + const offset = try self.allocMem(abi_size, abi_align, null); const tmp_mcv = MCValue{ .stack_offset = offset }; try self.load(tmp_mcv, ptr, ptr_ty); @@ -4545,20 +4645,28 @@ fn isNonNull(self: *Self, ty: Type, operand: MCValue) !MCValue { return MCValue{ .cpsr_flags = .ne }; } -fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { - const error_type = ty.errorUnionSet(); +fn isErr( + self: *Self, + error_union_bind: ReadArg.Bind, + error_union_ty: Type, +) !MCValue { + const error_type = error_union_ty.errorUnionSet(); if (error_type.errorSetIsEmpty()) { return MCValue{ .immediate = 0 }; // always false } - const error_mcv = try self.errUnionErr(operand, ty); + const error_mcv = try self.errUnionErr(error_union_bind, error_union_ty, null); _ = try self.cmp(.{ .mcv = error_mcv }, .{ .mcv = .{ .immediate = 0 } }, error_type, .neq); return MCValue{ .cpsr_flags = .hi }; } -fn isNonErr(self: *Self, ty: Type, operand: MCValue) !MCValue { - const is_err_result = try self.isErr(ty, operand); +fn isNonErr( + self: *Self, + error_union_bind: ReadArg.Bind, + error_union_ty: Type, +) !MCValue { + const is_err_result = try self.isErr(error_union_bind, error_union_ty); switch (is_err_result) { .cpsr_flags => |cond| { assert(cond == .hi); @@ -4637,9 +4745,10 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); - break :result try self.isErr(ty, operand); + const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; + const error_union_ty = self.air.typeOf(un_op); + + break :result try self.isErr(error_union_bind, error_union_ty); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -4658,7 +4767,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { } }; try self.load(operand, operand_ptr, ptr_ty); - break :result try self.isErr(ptr_ty.elemType(), operand); + break :result try self.isErr(.{ .mcv = operand }, ptr_ty.elemType()); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -4666,9 +4775,10 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand = 
try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); - break :result try self.isNonErr(ty, operand); + const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; + const error_union_ty = self.air.typeOf(un_op); + + break :result try self.isNonErr(error_union_bind, error_union_ty); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -4687,7 +4797,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { } }; try self.load(operand, operand_ptr, ptr_ty); - break :result try self.isNonErr(ptr_ty.elemType(), operand); + break :result try self.isNonErr(.{ .mcv = operand }, ptr_ty.elemType()); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -5620,7 +5730,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const array_ty = ptr_ty.childType(); const array_len = @intCast(u32, array_ty.arrayLen()); - const stack_offset = try self.allocMem(inst, 8, 8); + const stack_offset = try self.allocMem(8, 8, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); try self.genSetStack(Type.initTag(.usize), stack_offset - 4, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; @@ -5774,15 +5884,24 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; const result: MCValue = result: { + const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; const error_union_ty = self.air.typeOf(pl_op.operand); - const error_union = try self.resolveInst(pl_op.operand); - const is_err_result = try self.isErr(error_union_ty, error_union); + const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); + const error_union_align = error_union_ty.abiAlignment(self.target.*); + + // The error union will die in the body. 
However, we need the + // error union after the body in order to extract the payload + // of the error union, so we create a copy of it + const error_union_copy = try self.allocMem(error_union_size, error_union_align, null); + try self.genSetStack(error_union_ty, error_union_copy, try error_union_bind.resolveToMcv(self)); + + const is_err_result = try self.isErr(error_union_bind, error_union_ty); const reloc = try self.condBr(is_err_result); try self.genBody(body); - try self.performReloc(reloc); - break :result try self.errUnionPayload(error_union, error_union_ty); + + break :result try self.errUnionPayload(.{ .mcv = .{ .stack_offset = error_union_copy } }, error_union_ty, null); }; return self.finishAir(inst, result, .{ pl_op.operand, .none, .none }); } diff --git a/test/behavior/alignof.zig b/test/behavior/alignof.zig index b065e4b87f..d6491ff22e 100644 --- a/test/behavior/alignof.zig +++ b/test/behavior/alignof.zig @@ -13,7 +13,6 @@ const Foo = struct { test "@alignOf(T) before referencing T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; comptime try expect(@alignOf(Foo) != maxInt(usize)); if (native_arch == .x86_64) { comptime try expect(@alignOf(Foo) == 4); diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 4d8b176fbf..6661bc2783 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -1060,7 +1060,6 @@ comptime { test "switch inside @as gets correct type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO var a: u32 = 0; var b: [2]u32 = undefined; diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 4c6dab2dbb..675017961d 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -523,7 +523,6 @@ fn testCastConstArrayRefToConstSlice() !void { test "peer type resolution: error and [N]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO try expect(mem.eql(u8, try testPeerErrorAndArray(0), "OK")); comptime try expect(mem.eql(u8, try testPeerErrorAndArray(0), "OK")); @@ -548,7 +547,6 @@ fn testPeerErrorAndArray2(x: u8) anyerror![]const u8 { test "single-item pointer of array to slice to unknown length pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO try testCastPtrOfArrayToSliceAndPtr(); comptime try testCastPtrOfArrayToSliceAndPtr(); @@ -649,7 +647,6 @@ test "@floatCast cast down" { test "peer type resolution: unreachable, error set, unreachable" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const Error = error{ FileDescriptorAlreadyPresentInSet, @@ -964,7 +961,6 @@ test "peer cast [:x]T to [*:x]T" { test "peer type resolution implicit cast to return type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -984,7 +980,6 @@ test "peer type resolution implicit cast to return type" 
{ test "peer type resolution implicit cast to variable type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { @@ -1026,7 +1021,6 @@ test "cast between C pointer with different but compatible types" { test "peer type resolve string lit with sentinel-terminated mutable slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO var array: [4:0]u8 = undefined; @@ -1079,7 +1073,6 @@ test "comptime float casts" { test "pointer reinterpret const float to int" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO // The hex representation is 0x3fe3333333333303. const float: f64 = 5.99999999999994648725e-01; diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig index 8fa5fc503e..f9c0073d34 100644 --- a/test/behavior/comptime_memory.zig +++ b/test/behavior/comptime_memory.zig @@ -87,7 +87,6 @@ fn bigToNativeEndian(comptime T: type, v: T) T { test "type pun endianness" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO comptime { const StructOfBytes = extern struct { x: [4]u8 }; @@ -398,7 +397,6 @@ test "offset field ptr by enclosing array element size" { test "accessing reinterpreted memory of parent object" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = extern struct { a: f32, b: [4]u8, diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index 28c8785e64..938c966d22 100644 --- a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -606,7 +606,6 @@ fn testEnumWithSpecifiedTagValues(x: MultipleChoice) !void { } test "enum with specified tag values" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; try testEnumWithSpecifiedTagValues(MultipleChoice.C); @@ -614,7 +613,6 @@ test "enum with specified tag values" { } test "non-exhaustive enum" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; const S = struct { @@ -677,7 +675,6 @@ test "empty non-exhaustive enum" { } test "single field non-exhaustive enum" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; const S = struct { @@ -741,7 +738,6 @@ test "cast integer literal to enum" { } test "enum with specified and unspecified tag values" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D); @@ -925,7 +921,6 @@ test "enum literal casting to tagged union" { const Bar = enum { A, B, C, D }; test "enum literal casting to error union with 
payload enum" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var bar: error{B}!Bar = undefined; diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 684b01a797..d483afc300 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -222,7 +222,6 @@ fn testErrorSetType() !void { test "explicit error set cast" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try testExplicitErrorSetCast(Set1.A); @@ -282,7 +281,6 @@ test "inferred empty error set comptime catch" { } test "error union peer type resolution" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try testErrorUnionPeerTypeResolution(1); @@ -327,7 +325,6 @@ fn foo3(b: usize) Error!usize { test "error: Infer error set from literals" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO _ = nullLiteral("n") catch |err| handleErrors(err); _ = floatLiteral("n") catch |err| handleErrors(err); @@ -700,7 +697,6 @@ test "ret_ptr doesn't cause own inferred error set to be resolved" { test "simple else prong allowed even when all errors handled" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO const S = struct { diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index bc1c3628d7..142b08810a 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -69,7 +69,6 @@ fn constExprEvalOnSingleExprBlocksFn(x: i32, b: bool) i32 { } test "constant expressions" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO var array: [array_size]u8 = undefined; @@ -565,7 +564,6 @@ test "inlined loop has array literal with elided runtime scope on first iteratio } test "ptr to local array argument at comptime" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO comptime { diff --git a/test/behavior/merge_error_sets.zig b/test/behavior/merge_error_sets.zig index 725ff5f9da..9033e7795a 100644 --- a/test/behavior/merge_error_sets.zig +++ b/test/behavior/merge_error_sets.zig @@ -12,7 +12,6 @@ fn foo() C!void { } test "merge error sets" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (foo()) { diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index 5e0498342c..fad6cd643f 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -28,7 +28,6 @@ comptime { test "slicing" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var array: [20]i32 = undefined; @@ -283,7 +282,6 @@ test "slice type with custom alignment" { test "obtaining a null terminated slice" { if (builtin.zig_backend == .stage2_aarch64) return 
error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // here we have a normal array var buf: [50]u8 = undefined; diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 12c874f8ba..06e3cacbd9 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -104,7 +104,6 @@ fn testMutation(foo: *StructFoo) void { test "struct byval assign" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var foo1: StructFoo = undefined; var foo2: StructFoo = undefined; diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index d218fb6bc6..29dcd8491a 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -490,7 +490,6 @@ test "switch prongs with error set cases make a new error set type for capture v } test "return result loc and then switch with range implicit casted to error union" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const S = struct { diff --git a/test/behavior/this.zig b/test/behavior/this.zig index 71a083d2f0..527fff53fe 100644 --- a/test/behavior/this.zig +++ b/test/behavior/this.zig @@ -25,7 +25,6 @@ test "this refer to module call private fn" { } test "this refer to container" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var pt: Point(i32) = undefined; diff --git a/test/behavior/try.zig b/test/behavior/try.zig index b0559d4549..59309e53d0 100644 --- a/test/behavior/try.zig +++ b/test/behavior/try.zig @@ -3,7 +3,6 @@ const builtin = @import("builtin"); const expect = std.testing.expect; test "try on error union" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; try tryOnErrorUnionImpl(); diff --git a/test/behavior/while.zig b/test/behavior/while.zig index 62d5bf90fa..333ed1bd77 100644 --- a/test/behavior/while.zig +++ b/test/behavior/while.zig @@ -175,7 +175,6 @@ test "while with optional as condition with else" { test "while with error union condition" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; numbers_left = 10; From 25729d6155682933d7ab3aa30c7e060519b2f4e1 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Thu, 1 Sep 2022 16:51:42 +0200 Subject: [PATCH 10/14] stage2 ARM: fix multiple uses of reuseOperand - add missing checks whether destination fits into the operand - remove reuseOperand invocations from airIsNullPtr and similar functions as we need to load the operands into temporary locations --- src/arch/arm/CodeGen.zig | 265 ++++++++++++++++++--------------------- 1 file changed, 122 insertions(+), 143 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 2f7028c565..5951434e20 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -936,35 +936,34 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { }; // TODO swap this for inst.ty.ptrAlign const abi_align = elem_ty.abiAlignment(self.target.*); + return self.allocMem(abi_size, abi_align, inst); } -fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const elem_ty = self.air.typeOfIndex(inst); +fn allocRegOrMem(self: *Self, 
elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue { const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { const mod = self.bin_file.options.module.?; return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; const abi_align = elem_ty.abiAlignment(self.target.*); - if (abi_align > self.stack_align) - self.stack_align = abi_align; if (reg_ok) { // Make sure the type can fit in a register before we try to allocate one. const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { - if (self.register_manager.tryAllocReg(inst, gp)) |reg| { + if (self.register_manager.tryAllocReg(maybe_inst, gp)) |reg| { return MCValue{ .register = reg }; } } } - const stack_offset = try self.allocMem(abi_size, abi_align, inst); + + const stack_offset = try self.allocMem(abi_size, abi_align, maybe_inst); return MCValue{ .stack_offset = stack_offset }; } pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { - const stack_mcv = try self.allocRegOrMem(inst, false); + const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst); log.debug("spilling {} (%{d}) to stack mcv {any}", .{ reg, inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); @@ -985,12 +984,13 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void /// occupied fn spillCompareFlagsIfOccupied(self: *Self) !void { if (self.cpsr_flags_inst) |inst_to_save| { + const ty = self.air.typeOfIndex(inst_to_save); const mcv = self.getResolvedInstValue(inst_to_save); const new_mcv = switch (mcv) { - .cpsr_flags => try self.allocRegOrMem(inst_to_save, true), + .cpsr_flags => try self.allocRegOrMem(ty, true, inst_to_save), .register_c_flag, .register_v_flag, - => try self.allocRegOrMem(inst_to_save, false), + => try self.allocRegOrMem(ty, false, inst_to_save), else => unreachable, // mcv doesn't occupy the compare flags }; @@ -1121,10 +1121,11 @@ fn truncRegister( }); } +/// Asserts that both operand_ty and dest_ty are integer types fn trunc( self: *Self, maybe_inst: ?Air.Inst.Index, - operand: MCValue, + operand_bind: ReadArg.Bind, operand_ty: Type, dest_ty: Type, ) !MCValue { @@ -1132,39 +1133,38 @@ fn trunc( const info_b = dest_ty.intInfo(self.target.*); if (info_b.bits <= 32) { - const operand_reg = switch (operand) { - .register => |r| r, - else => operand_reg: { - if (info_a.bits <= 32) { - break :operand_reg try self.copyToTmpRegister(operand_ty, operand); - } else { - return self.fail("TODO load least significant word into register", .{}); - } - }, + if (info_a.bits > 32) { + return self.fail("TODO load least significant word into register", .{}); + } + + var operand_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = operand_ty, .bind = operand_bind, .class = gp, .reg = &operand_reg }, }; - const operand_reg_lock = self.register_manager.lockReg(operand_reg); - defer if (operand_reg_lock) |reg| self.register_manager.unlockReg(reg); - - const dest_reg = if (maybe_inst) |inst| blk: { - const ty_op = self.air.instructions.items(.data)[inst].ty_op; - - if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) { - break :blk operand_reg; - } else { - break :blk try self.register_manager.allocReg(inst, gp); - } - } else try self.register_manager.allocReg(null, gp); + const write_args = [_]WriteArg{ + .{ .ty = dest_ty, .bind = .none, .class = gp, .reg = &dest_reg }, 
+ }; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + } else null, + ); switch (info_b.bits) { 32 => { try self.genSetReg(operand_ty, dest_reg, .{ .register = operand_reg }); - return MCValue{ .register = dest_reg }; }, else => { try self.truncRegister(operand_reg, dest_reg, info_b.signedness, info_b.bits); - return MCValue{ .register = dest_reg }; }, } + + return MCValue{ .register = dest_reg }; } else { return self.fail("TODO: truncate to ints > 32 bits", .{}); } @@ -1172,12 +1172,12 @@ fn trunc( fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand = try self.resolveInst(ty_op.operand); + const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; const operand_ty = self.air.typeOf(ty_op.operand); const dest_ty = self.air.typeOfIndex(inst); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { - break :blk try self.trunc(inst, operand, operand_ty, dest_ty); + break :blk try self.trunc(inst, operand_bind, operand_ty, dest_ty); }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -2334,7 +2334,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; }, else => { - const dest = try self.allocRegOrMem(inst, true); + const dest = try self.allocRegOrMem(self.air.typeOfIndex(inst), true, inst); const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; const index_bind: ReadArg.Bind = .{ .mcv = index_mcv }; @@ -2583,16 +2583,18 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; - const dst_mcv: MCValue = blk: { - if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) { + const dest_mcv: MCValue = blk: { + const ptr_fits_dest = elem_ty.abiSize(self.target.*) <= 4; + if (ptr_fits_dest and self.reuseOperand(inst, ty_op.operand, 0, ptr)) { // The MCValue that holds the pointer can be re-used as the value. 
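                 // This reuse is only safe because ptr_fits_dest above
                 // guarantees the loaded value is at most 4 bytes, i.e. it
                 // fits in the same register that held the pointer.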
break :blk ptr; } else { - break :blk try self.allocRegOrMem(inst, true); + break :blk try self.allocRegOrMem(elem_ty, true, inst); } }; - try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); - break :result dst_mcv; + try self.load(dest_mcv, ptr, self.air.typeOf(ty_op.operand)); + + break :result dest_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -4615,36 +4617,84 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, .unreach, .{ .none, .none, .none }); } -fn isNull(self: *Self, ty: Type, operand: MCValue) !MCValue { - if (ty.isPtrLikeOptional()) { - assert(ty.abiSize(self.target.*) == 4); +fn isNull( + self: *Self, + operand_bind: ReadArg.Bind, + operand_ty: Type, +) !MCValue { + if (operand_ty.isPtrLikeOptional()) { + assert(operand_ty.abiSize(self.target.*) == 4); - const reg_mcv: MCValue = switch (operand) { - .register => operand, - else => .{ .register = try self.copyToTmpRegister(ty, operand) }, - }; - - _ = try self.addInst(.{ - .tag = .cmp, - .data = .{ .r_op_cmp = .{ - .rn = reg_mcv.register, - .op = Instruction.Operand.fromU32(0).?, - } }, - }); - - return MCValue{ .cpsr_flags = .eq }; + const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } }; + return self.cmp(operand_bind, imm_bind, Type.usize, .eq); } else { return self.fail("TODO implement non-pointer optionals", .{}); } } -fn isNonNull(self: *Self, ty: Type, operand: MCValue) !MCValue { - const is_null_result = try self.isNull(ty, operand); +fn isNonNull( + self: *Self, + operand_bind: ReadArg.Bind, + operand_ty: Type, +) !MCValue { + const is_null_result = try self.isNull(operand_bind, operand_ty); assert(is_null_result.cpsr_flags == .eq); return MCValue{ .cpsr_flags = .ne }; } +fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_bind: ReadArg.Bind = .{ .inst = un_op }; + const operand_ty = self.air.typeOf(un_op); + + break :result try self.isNull(operand_bind, operand_ty); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); +} + +fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const ptr_ty = self.air.typeOf(un_op); + const elem_ty = ptr_ty.elemType(); + + const operand = try self.allocRegOrMem(elem_ty, true, null); + try self.load(operand, operand_ptr, ptr_ty); + + break :result try self.isNull(.{ .mcv = operand }, elem_ty); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); +} + +fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_bind: ReadArg.Bind = .{ .inst = un_op }; + const operand_ty = self.air.typeOf(un_op); + + break :result try self.isNonNull(operand_bind, operand_ty); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); +} + +fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const ptr_ty = self.air.typeOf(un_op); + const elem_ty = 
ptr_ty.elemType(); + + const operand = try self.allocRegOrMem(elem_ty, true, null); + try self.load(operand, operand_ptr, ptr_ty); + + break :result try self.isNonNull(.{ .mcv = operand }, elem_ty); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); +} + fn isErr( self: *Self, error_union_bind: ReadArg.Bind, @@ -4657,8 +4707,7 @@ fn isErr( } const error_mcv = try self.errUnionErr(error_union_bind, error_union_ty, null); - _ = try self.cmp(.{ .mcv = error_mcv }, .{ .mcv = .{ .immediate = 0 } }, error_type, .neq); - return MCValue{ .cpsr_flags = .hi }; + return try self.cmp(.{ .mcv = error_mcv }, .{ .mcv = .{ .immediate = 0 } }, error_type, .gt); } fn isNonErr( @@ -4680,68 +4729,6 @@ fn isNonErr( } } -fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[inst].un_op; - - try self.spillCompareFlagsIfOccupied(); - self.cpsr_flags_inst = inst; - - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); - break :result try self.isNull(ty, operand); - }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); -} - -fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[inst].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } - }; - try self.load(operand, operand_ptr, ptr_ty); - break :result try self.isNull(ptr_ty.elemType(), operand); - }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); -} - -fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[inst].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); - break :result try self.isNonNull(ty, operand); - }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); -} - -fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[inst].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. 
- break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } - }; - try self.load(operand, operand_ptr, ptr_ty); - break :result try self.isNonNull(ptr_ty.elemType(), operand); - }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); -} - fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { @@ -4758,16 +4745,12 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); const ptr_ty = self.air.typeOf(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } - }; + const elem_ty = ptr_ty.elemType(); + + const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); - break :result try self.isErr(.{ .mcv = operand }, ptr_ty.elemType()); + + break :result try self.isErr(.{ .mcv = operand }, elem_ty); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -4788,16 +4771,12 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); const ptr_ty = self.air.typeOf(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } - }; + const elem_ty = ptr_ty.elemType(); + + const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); - break :result try self.isNonErr(.{ .mcv = operand }, ptr_ty.elemType()); + + break :result try self.isNonErr(.{ .mcv = operand }, elem_ty); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -5010,7 +4989,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { .none, .dead, .unreach => unreachable, .register, .stack_offset, .memory => operand_mcv, .immediate, .stack_argument_offset, .cpsr_flags => blk: { - const new_mcv = try self.allocRegOrMem(block, true); + const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block); try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv); break :blk new_mcv; }, From 3794f2c493c9744e19cd7df23c3d4b32565aaa96 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 4 Sep 2022 09:00:14 +0200 Subject: [PATCH 11/14] stage2 ARM: implement struct_field_val for registers --- src/arch/arm/CodeGen.zig | 37 ++++++++++++++++++++++++++++- test/behavior/array.zig | 2 -- test/behavior/basic.zig | 2 -- test/behavior/bitcast.zig | 1 - test/behavior/enum.zig | 1 - test/behavior/eval.zig | 1 - test/behavior/for.zig | 1 - test/behavior/pointers.zig | 1 - test/behavior/ptrcast.zig | 3 --- test/behavior/sizeof_and_typeof.zig | 2 -- test/behavior/struct.zig | 7 ------ test/behavior/switch.zig | 1 - test/behavior/usingnamespace.zig | 1 - 13 files changed, 36 insertions(+), 24 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 5951434e20..7de0b6ac22 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig 
@@ -2739,6 +2739,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mcv = try self.resolveInst(operand); const struct_ty = self.air.typeOf(operand); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_ty = struct_ty.structFieldType(index); switch (mcv) { .dead, .unreach => unreachable, @@ -2776,11 +2777,45 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } else { // Copy to new register const dest_reg = try self.register_manager.allocReg(null, gp); - try self.genSetReg(struct_ty.structFieldType(index), dest_reg, field); + try self.genSetReg(struct_field_ty, dest_reg, field); break :result MCValue{ .register = dest_reg }; } }, + .register => { + var operand_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = struct_ty, .bind = .{ .mcv = mcv }, .class = gp, .reg = &operand_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = struct_field_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + ReuseMetadata{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + }, + ); + + const field_bit_offset = struct_field_offset * 8; + const field_bit_size = @intCast(u32, struct_field_ty.abiSize(self.target.*)) * 8; + + _ = try self.addInst(.{ + .tag = if (struct_field_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .data = .{ .rr_lsb_width = .{ + .rd = dest_reg, + .rn = operand_reg, + .lsb = @intCast(u5, field_bit_offset), + .width = @intCast(u6, field_bit_size), + } }, + }); + + break :result MCValue{ .register = dest_reg }; + }, else => return self.fail("TODO implement codegen struct_field_val for {}", .{mcv}), } }; diff --git a/test/behavior/array.zig b/test/behavior/array.zig index b99ac27651..1e5e848c09 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -175,7 +175,6 @@ test "nested arrays of integers" { test "implicit comptime in array type size" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var arr: [plusOne(10)]bool = undefined; try expect(arr.len == 11); @@ -484,7 +483,6 @@ test "sentinel element count towards the ABI size calculation" { test "zero-sized array with recursive type definition" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const U = struct { fn foo(comptime T: type, comptime n: usize) type { diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 6661bc2783..a8909df107 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -465,7 +465,6 @@ fn nine() u8 { test "struct inside function" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try testStructInFn(); comptime try testStructInFn(); @@ -514,7 +513,6 @@ var global_foo: *i32 = undefined; test "peer result location with typed parent, runtime condition, comptime prongs" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const S = struct { fn doTheTest(arg: i32) i32 { diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index 27a0692a44..3a7719191d 100644 --- a/test/behavior/bitcast.zig +++ 
b/test/behavior/bitcast.zig @@ -138,7 +138,6 @@ test "@bitCast extern structs at runtime and comptime" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const Full = extern struct { number: u16, diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index 938c966d22..e2645058f7 100644 --- a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -1127,7 +1127,6 @@ test "tag name functions are unique" { test "size of enum with only one tag which has explicit integer tag type" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; const E = enum(u8) { nope = 10 }; diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 142b08810a..fb744612ad 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -954,7 +954,6 @@ test "const local with comptime init through array init" { test "closure capture type of runtime-known parameter" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn b(c: anytype) !void { diff --git a/test/behavior/for.zig b/test/behavior/for.zig index da6f0717ae..20a88a3131 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -213,7 +213,6 @@ test "for on slice with allowzero ptr" { test "else continue outer for" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO var i: usize = 6; var buf: [5]u8 = undefined; diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index dcdea1ff80..6206f22a45 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -66,7 +66,6 @@ test "initialize const optional C pointer to null" { test "assigning integer to C pointer" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var x: i32 = 0; diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig index c827cb6ef7..21e8b544a8 100644 --- a/test/behavior/ptrcast.zig +++ b/test/behavior/ptrcast.zig @@ -4,7 +4,6 @@ const expect = std.testing.expect; const native_endian = builtin.target.cpu.arch.endian(); test "reinterpret bytes as integer with nonzero offset" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try testReinterpretBytesAsInteger(); @@ -39,7 +38,6 @@ fn testReinterpretWithOffsetAndNoWellDefinedLayout() !void { } test "reinterpret bytes inside auto-layout struct as integer with nonzero offset" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try testReinterpretStructWrappedBytesAsInteger(); @@ -179,7 +177,6 @@ test "lower reinterpreted comptime field ptr" { } test "reinterpret struct field at comptime" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO 
const numNative = comptime Bytes.init(0x12345678); diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig index ab2d59bf83..748fefa695 100644 --- a/test/behavior/sizeof_and_typeof.zig +++ b/test/behavior/sizeof_and_typeof.zig @@ -18,7 +18,6 @@ test "@sizeOf on compile-time types" { } test "@TypeOf() with multiple arguments" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; { @@ -77,7 +76,6 @@ const P = packed struct { }; test "@offsetOf" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // Packed structs have fixed memory layout diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 06e3cacbd9..12d45be9ae 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -10,7 +10,6 @@ top_level_field: i32, test "top level fields" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var instance = @This(){ .top_level_field = 1234, @@ -239,7 +238,6 @@ test "usingnamespace within struct scope" { test "struct field init with catch" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -280,7 +278,6 @@ const Val = struct { test "struct point to self" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO var root: Node = undefined; root.val.x = 1; @@ -296,7 +293,6 @@ test "struct point to self" { test "void struct fields" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const foo = VoidStructFieldsFoo{ .a = void{}, @@ -760,7 +756,6 @@ test "packed struct with u0 field access" { } test "access to global struct fields" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO @@ -1259,7 +1254,6 @@ test "typed init through error unions and optionals" { test "initialize struct with empty literal" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { x: i32 = 1234 }; var s: S = .{}; @@ -1361,7 +1355,6 @@ test "store to comptime field" { test "struct field init value is size of the struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const namespace = struct { const S = extern struct { diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index 29dcd8491a..9552ea5008 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -348,7 +348,6 @@ test "switch on const enum with var" { } test "anon enum literal used in switch on union enum" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const Foo = union(enum) { diff 
--git a/test/behavior/usingnamespace.zig b/test/behavior/usingnamespace.zig index 426f0aa6b9..83f720ff85 100644 --- a/test/behavior/usingnamespace.zig +++ b/test/behavior/usingnamespace.zig @@ -58,7 +58,6 @@ test "two files usingnamespace import each other" { } test { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const AA = struct { From a0a7d15142cfffbab934860064a44b7615f9dd55 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 4 Sep 2022 22:28:59 +0200 Subject: [PATCH 12/14] stage2 ARM: support larger function stacks This is done by introducing a new Mir pseudo-instruction --- src/arch/arm/CodeGen.zig | 12 ++++------ src/arch/arm/Emit.zig | 52 ++++++++++++++++++++++++++++++++++++++++ src/arch/arm/Mir.zig | 9 +++++++ test/behavior/eval.zig | 1 - 4 files changed, 65 insertions(+), 9 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 7de0b6ac22..3b378af581 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -488,14 +488,10 @@ fn gen(self: *Self) !void { const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); const stack_size = aligned_total_stack_end - self.saved_regs_stack_space; self.max_end_stack = stack_size; - if (Instruction.Operand.fromU32(stack_size)) |op| { - self.mir_instructions.set(sub_reloc, .{ - .tag = .sub, - .data = .{ .rr_op = .{ .rd = .sp, .rn = .sp, .op = op } }, - }); - } else { - return self.failSymbol("TODO ARM: allow larger stacks", .{}); - } + self.mir_instructions.set(sub_reloc, .{ + .tag = .sub_sp_scratch_r0, + .data = .{ .imm32 = stack_size }, + }); _ = try self.addInst(.{ .tag = .dbg_epilogue_begin, diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index 8770ef1a24..188f5a5cfe 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -11,6 +11,7 @@ const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); const Type = @import("../../type.zig").Type; const ErrorMsg = Module.ErrorMsg; +const Target = std.Target; const assert = std.debug.assert; const DW = std.dwarf; const leb128 = std.leb; @@ -93,6 +94,8 @@ pub fn emitMir( .sub => try emit.mirDataProcessing(inst), .subs => try emit.mirDataProcessing(inst), + .sub_sp_scratch_r0 => try emit.mirSubStackPointer(inst), + .asr => try emit.mirShift(inst), .lsl => try emit.mirShift(inst), .lsr => try emit.mirShift(inst), @@ -190,6 +193,24 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize { .dbg_epilogue_begin, .dbg_prologue_end, => return 0, + + .sub_sp_scratch_r0 => { + const imm32 = emit.mir.instructions.items(.data)[inst].imm32; + + if (imm32 == 0) { + return 0 * 4; + } else if (Instruction.Operand.fromU32(imm32) != null) { + // sub + return 1 * 4; + } else if (Target.arm.featureSetHas(emit.target.cpu.features, .has_v7)) { + // movw; movt; sub + return 3 * 4; + } else { + // mov; orr; orr; orr; sub + return 5 * 4; + } + }, + else => return 4, } } @@ -427,6 +448,37 @@ fn mirDataProcessing(emit: *Emit, inst: Mir.Inst.Index) !void { } } +fn mirSubStackPointer(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const cond = emit.mir.instructions.items(.cond)[inst]; + const imm32 = emit.mir.instructions.items(.data)[inst].imm32; + + switch (tag) { + .sub_sp_scratch_r0 => { + if (imm32 == 0) return; + + const operand = Instruction.Operand.fromU32(imm32) orelse blk: { + const scratch: Register = .r0; + + if 
(Target.arm.featureSetHas(emit.target.cpu.features, .has_v7)) { + try emit.writeInstruction(Instruction.movw(cond, scratch, @truncate(u16, imm32))); + try emit.writeInstruction(Instruction.movt(cond, scratch, @truncate(u16, imm32 >> 16))); + } else { + try emit.writeInstruction(Instruction.mov(cond, scratch, Instruction.Operand.imm(@truncate(u8, imm32), 0))); + try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 8), 12))); + try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 16), 8))); + try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 24), 4))); + } + + break :blk Instruction.Operand.reg(scratch, Instruction.Operand.Shift.none); + }; + + try emit.writeInstruction(Instruction.sub(cond, .sp, .sp, operand)); + }, + else => unreachable, + } +} + fn mirShift(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const cond = emit.mir.instructions.items(.cond)[inst]; diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig index 45f89b8120..38cf4da3fd 100644 --- a/src/arch/arm/Mir.zig +++ b/src/arch/arm/Mir.zig @@ -111,6 +111,11 @@ pub const Inst = struct { strh, /// Subtract sub, + /// Pseudo-instruction: Subtract 32-bit immediate from stack + /// + /// r0 can be used by Emit as a scratch register for loading + /// the immediate + sub_sp_scratch_r0, /// Subtract, update condition flags subs, /// Supervisor Call @@ -144,6 +149,10 @@ pub const Inst = struct { /// /// Used by e.g. svc imm24: u24, + /// A 32-bit immediate value. + /// + /// Used by e.g. sub_sp_scratch_r0 + imm32: u32, /// Index into `extra`. Meaning of what can be found there is context-dependent. /// /// Used by e.g. 
load_memory diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index fb744612ad..0c07a7b5bb 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -1333,7 +1333,6 @@ test "lazy sizeof is resolved in division" { } test "lazy value is resolved as slice operand" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const A = struct { a: u32 }; From b976997e16835e822ef9400973ac12a20e3d0705 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Tue, 6 Sep 2022 12:34:27 +0200 Subject: [PATCH 13/14] stage2 ARM: implement ptr_elem_val --- src/arch/arm/CodeGen.zig | 156 ++++++++++++++-------------- test/behavior/basic.zig | 1 - test/behavior/cast.zig | 3 - test/behavior/const_slice_child.zig | 1 - test/behavior/eval.zig | 5 - test/behavior/generics.zig | 1 - test/behavior/pointers.zig | 4 - test/behavior/union.zig | 1 - 8 files changed, 78 insertions(+), 94 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 3b378af581..857c49fd78 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2258,89 +2258,84 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } +fn ptrElemVal( + self: *Self, + ptr_bind: ReadArg.Bind, + index_bind: ReadArg.Bind, + ptr_ty: Type, + maybe_inst: ?Air.Inst.Index, +) !MCValue { + const elem_ty = ptr_ty.childType(); + const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + + switch (elem_size) { + 1, 4 => { + var base_reg: Register = undefined; + var index_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = ptr_ty, .bind = ptr_bind, .class = gp, .reg = &base_reg }, + .{ .ty = Type.usize, .bind = index_bind, .class = gp, .reg = &index_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = elem_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{ 0, 1 }, + } else null, + ); + + const tag: Mir.Inst.Tag = switch (elem_size) { + 1 => .ldrb, + 4 => .ldr, + else => unreachable, + }; + const shift: u5 = switch (elem_size) { + 1 => 0, + 4 => 2, + else => unreachable, + }; + + _ = try self.addInst(.{ + .tag = tag, + .data = .{ .rr_offset = .{ + .rt = dest_reg, + .rn = base_reg, + .offset = .{ .offset = Instruction.Offset.reg(index_reg, .{ .lsl = shift }) }, + } }, + }); + + return MCValue{ .register = dest_reg }; + }, + else => { + const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, Type.usize, null); + + const dest = try self.allocRegOrMem(elem_ty, true, maybe_inst); + try self.load(dest, addr, ptr_ty); + return dest; + }, + } +} + fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { - const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[inst].bin_op; - - if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); - const result: MCValue = result: { - const slice_mcv = try self.resolveInst(bin_op.lhs); - - // TODO optimize for the case where the index is a constant, - // i.e. 
index_mcv == .immediate - const index_mcv = try self.resolveInst(bin_op.rhs); - const index_is_register = index_mcv == .register; - - const slice_ty = self.air.typeOf(bin_op.lhs); - const elem_ty = slice_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); - + const slice_ty = self.air.typeOf(bin_op.lhs); + const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); - - const index_lock: ?RegisterLock = if (index_is_register) - self.register_manager.lockRegAssumeUnused(index_mcv.register) - else - null; - defer if (index_lock) |reg| self.register_manager.unlockReg(reg); + const ptr_ty = slice_ty.slicePtrFieldType(&buf); + const slice_mcv = try self.resolveInst(bin_op.lhs); const base_mcv = slicePtr(slice_mcv); - switch (elem_size) { - 1, 4 => { - const base_reg = switch (base_mcv) { - .register => |r| r, - else => try self.copyToTmpRegister(slice_ptr_field_type, base_mcv), - }; - const base_reg_lock = self.register_manager.lockRegAssumeUnused(base_reg); - defer self.register_manager.unlockReg(base_reg_lock); + const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; + const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; - const dst_reg = try self.register_manager.allocReg(inst, gp); - const dst_mcv = MCValue{ .register = dst_reg }; - const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg); - defer self.register_manager.unlockReg(dst_reg_lock); - - const index_reg: Register = switch (index_mcv) { - .register => |reg| reg, - else => try self.copyToTmpRegister(Type.usize, index_mcv), - }; - const index_reg_lock = self.register_manager.lockReg(index_reg); - defer if (index_reg_lock) |lock| self.register_manager.unlockReg(lock); - - const tag: Mir.Inst.Tag = switch (elem_size) { - 1 => .ldrb, - 4 => .ldr, - else => unreachable, - }; - const shift: u5 = switch (elem_size) { - 1 => 0, - 4 => 2, - else => unreachable, - }; - - _ = try self.addInst(.{ - .tag = tag, - .data = .{ .rr_offset = .{ - .rt = dst_reg, - .rn = base_reg, - .offset = .{ .offset = Instruction.Offset.reg(index_reg, .{ .lsl = shift }) }, - } }, - }); - - break :result dst_mcv; - }, - else => { - const dest = try self.allocRegOrMem(self.air.typeOfIndex(inst), true, inst); - - const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; - const index_bind: ReadArg.Bind = .{ .mcv = index_mcv }; - - const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ptr_field_type, Type.usize, null); - try self.load(dest, addr, slice_ptr_field_type); - - break :result dest; - }, - } + break :result try self.ptrElemVal(base_bind, index_bind, ptr_ty, inst); }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2371,9 +2366,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { - const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_elem_val for {}", .{self.target.cpu.arch}); + const ptr_ty = self.air.typeOf(bin_op.lhs); + const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; + const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; + + break 
:result try self.ptrElemVal(base_bind, index_bind, ptr_ty, inst); + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index a8909df107..d073bd9316 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -641,7 +641,6 @@ test "global constant is loaded with a runtime-known index" { test "multiline string literal is null terminated" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const s1 = \\one diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 675017961d..dac3c12b0d 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -576,7 +576,6 @@ fn testCastPtrOfArrayToSliceAndPtr() !void { test "cast *[1][*]const u8 to [*]const ?[*]const u8" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const window_name = [1][*]const u8{"window name"}; @@ -919,7 +918,6 @@ test "peer cast *[N:x]T to *[N]T" { test "peer cast [*:x]T to [*]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { @@ -1004,7 +1002,6 @@ test "variable initialization uses result locations properly with regards to the test "cast between C pointer with different but compatible types" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn foo(arg: [*]c_ushort) u16 { diff --git a/test/behavior/const_slice_child.zig b/test/behavior/const_slice_child.zig index 2006d6c280..5a6525d152 100644 --- a/test/behavior/const_slice_child.zig +++ b/test/behavior/const_slice_child.zig @@ -9,7 +9,6 @@ var argv: [*]const [*]const u8 = undefined; test "const slice child" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const strs = [_][*]const u8{ "one", "two", "three" }; argv = &strs; diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 0c07a7b5bb..373e4e33c6 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -137,7 +137,6 @@ test "pointer to type" { test "a type constructed in a global expression" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO var l: List = undefined; l.array[0] = 10; @@ -804,7 +803,6 @@ test "array concatenation sets the sentinel - value" { test "array concatenation sets the sentinel - pointer" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var a = [2]u3{ 1, 7 }; @@ -1071,7 +1069,6 @@ test "comptime break operand passing through runtime switch 
converted to runtime test "no dependency loop for alignment of self struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -1108,7 +1105,6 @@ test "no dependency loop for alignment of self struct" { test "no dependency loop for alignment of self bare union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -1145,7 +1141,6 @@ test "no dependency loop for alignment of self bare union" { test "no dependency loop for alignment of self tagged union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index ba4bca0c1a..f8c19ea416 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -91,7 +91,6 @@ fn max_f64(a: f64, b: f64) f64 { test "type constructed by comptime function call" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var l: SimpleList(10) = undefined; diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index 6206f22a45..28be72cf76 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -18,7 +18,6 @@ fn testDerefPtr() !void { test "pointer arithmetic" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var ptr: [*]const u8 = "abcd"; @@ -280,7 +279,6 @@ test "array initialization types" { test "null terminated pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { @@ -298,7 +296,6 @@ test "null terminated pointer" { test "allow any sentinel" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { @@ -314,7 +311,6 @@ test "allow any sentinel" { test "pointer sentinel with enums" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { diff --git a/test/behavior/union.zig b/test/behavior/union.zig index b94034adf4..ddad27e150 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -92,7 +92,6 @@ const FooExtern = extern union { }; test "basic extern unions" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var foo = FooExtern{ .int = 1 }; From 94499898e5cd31209ddfdae3f0c9b418b7f67e60 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Fri, 9 Sep 2022 17:01:09 +0200 Subject: [PATCH 14/14] stage2 ARM: implement basic array_elem_val --- 
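Note on this patch (placed after the --- so `git am` drops it): array_elem_val
covers runtime indexing into an array value. When the array's MCValue lives in
memory (stack_offset, memory, or stack_argument_offset), the lowering
materializes a pointer to it, wraps the element type in a single_mut_pointer,
and reuses ptrElemVal from the previous patch; any other MCValue still fails
with a TODO. A minimal sketch of the kind of code this lowers, assuming a u32
element so ptrElemVal takes the single-ldr path -- this test is illustrative
only, not one of the behavior tests touched below:

    const std = @import("std");
    const expect = std.testing.expect;

    test "runtime index into an array value" {
        // A runtime-known index forces the backend down the array_elem_val
        // path; a comptime-known index would be constant-folded instead.
        var i: usize = 2;
        const array = [_]u32{ 10, 20, 30, 40 };
        // u32 has ABI size 4, so ptrElemVal emits a single ldr with the
        // index register shifted left by 2.
        try expect(array[i] == 30);
    }
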
src/arch/arm/CodeGen.zig | 61 +++++++++++++++++++++++++++++++++++++++- test/behavior/array.zig | 3 -- test/behavior/eval.zig | 1 - test/behavior/for.zig | 3 -- test/behavior/slice.zig | 1 - 5 files changed, 60 insertions(+), 9 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 857c49fd78..0eeb7a7ded 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2359,9 +2359,68 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } +fn arrayElemVal( + self: *Self, + array_bind: ReadArg.Bind, + index_bind: ReadArg.Bind, + array_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + const elem_ty = array_ty.childType(); + + const mcv = try array_bind.resolveToMcv(self); + switch (mcv) { + .stack_offset, + .memory, + .stack_argument_offset, + => { + const ptr_to_mcv = switch (mcv) { + .stack_offset => |off| MCValue{ .ptr_stack_offset = off }, + .memory => |addr| MCValue{ .immediate = @intCast(u32, addr) }, + .stack_argument_offset => |off| blk: { + const reg = try self.register_manager.allocReg(null, gp); + + _ = try self.addInst(.{ + .tag = .ldr_ptr_stack_argument, + .data = .{ .r_stack_offset = .{ + .rt = reg, + .stack_offset = off, + } }, + }); + + break :blk MCValue{ .register = reg }; + }, + else => unreachable, + }; + const ptr_to_mcv_lock: ?RegisterLock = switch (ptr_to_mcv) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (ptr_to_mcv_lock) |lock| self.register_manager.unlockReg(lock); + + const base_bind: ReadArg.Bind = .{ .mcv = ptr_to_mcv }; + + var ptr_ty_payload: Type.Payload.ElemType = .{ + .base = .{ .tag = .single_mut_pointer }, + .data = elem_ty, + }; + const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + + return try self.ptrElemVal(base_bind, index_bind, ptr_ty, maybe_inst); + }, + else => return self.fail("TODO implement array_elem_val for {}", .{mcv}), + } +} + fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const array_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; + const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; + const array_ty = self.air.typeOf(bin_op.lhs); + + break :result try self.arrayElemVal(array_bind, index_bind, array_ty, inst); + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 1e5e848c09..54f87927f5 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -244,7 +244,6 @@ const Sub = struct { b: u8 }; const Str = struct { a: []Sub }; test "set global var array via slice embedded in struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO var s = Str{ .a = s_array[0..] 
}; @@ -297,7 +296,6 @@ fn testArrayByValAtComptime(b: [2]u8) u8 { test "comptime evaluating function that takes array by value" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const arr = [_]u8{ 1, 2 }; const x = comptime testArrayByValAtComptime(arr); @@ -426,7 +424,6 @@ test "anonymous literal in array" { test "access the null element of a null terminated array" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 373e4e33c6..47d2e4374e 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -336,7 +336,6 @@ fn doesAlotT(comptime T: type, value: usize) T { } test "@setEvalBranchQuota at same scope as generic function call" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try expect(doesAlotT(u32, 2) == 2); diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 20a88a3131..7f2cd2ab8d 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -5,7 +5,6 @@ const expectEqual = std.testing.expectEqual; const mem = std.mem; test "continue in for loop" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; const array = [_]i32{ 1, 2, 3, 4, 5 }; @@ -130,7 +129,6 @@ test "for with null and T peer types and inferred result location type" { } test "2 break statements and an else" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; const S = struct { @@ -177,7 +175,6 @@ fn mangleString(s: []u8) void { } test "for copies its payload" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const S = struct { diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index fad6cd643f..b9bae08878 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -268,7 +268,6 @@ fn sliceSum(comptime q: []const u8) i32 { test "slice type with custom alignment" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const LazilyResolvedType = struct { anything: i32,