From 12207bbbd620903c81b0134816e71e373751b2d6 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Wed, 9 Mar 2022 22:36:23 +0100
Subject: [PATCH] stage2 AArch64: Implement bit shifting with immediate operands
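
lsl, lsr and asr with an immediate shift amount are not separate A64
instructions; they are aliases of the bitfield instructions ubfm and
sbfm. This adds a bitfield instruction group to bits.zig, exposes
sbfm/bfm/ubfm together with the asrImmediate, lslImmediate and
lsrImmediate alias helpers, introduces the asr_immediate,
lsl_immediate and lsr_immediate MIR instructions (rr_shift payload),
and makes binOp select binOpImmediate whenever the right-hand operand
is an immediate.

Worked example of the aliasing for 64-bit registers (the lsl and lsr
values below are the ones exercised by the new serialization test
cases in bits.zig):

    lsl x4, x2, #42  ->  ubfm x4, x2, #22, #21   (immr = 64 - shift, imms = 63 - shift)
    lsr x4, x2, #42  ->  ubfm x4, x2, #42, #63   (immr = shift, imms = 63)
    asr x4, x2, #42  ->  sbfm x4, x2, #42, #63   (immr = shift, imms = 63)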
---
 src/arch/aarch64/CodeGen.zig |  27 +++++++++-
 src/arch/aarch64/Emit.zig    |  39 ++++++++------
 src/arch/aarch64/Mir.zig     |  16 +++++-
 src/arch/aarch64/bits.zig    | 100 +++++++++++++++++++++++++++++++++++
 test/behavior/math.zig       |   2 +-
 5 files changed, 163 insertions(+), 21 deletions(-)

diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 911aac336f..3d7a2e2420 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1277,6 +1277,15 @@ fn binOpImmediate(
     const mir_tag: Mir.Inst.Tag = switch (tag) {
         .add => .add_immediate,
         .sub => .sub_immediate,
+        .shl,
+        .shl_exact,
+        => .lsl_immediate,
+        .shr,
+        .shr_exact,
+        => switch (lhs_ty.intInfo(self.target.*).signedness) {
+            .signed => Mir.Inst.Tag.asr_immediate,
+            .unsigned => Mir.Inst.Tag.lsr_immediate,
+        },
         else => unreachable,
     };
     const mir_data: Mir.Inst.Data = switch (tag) {
@@ -1287,6 +1296,15 @@ fn binOpImmediate(
             .rn = lhs_reg,
             .imm12 = @intCast(u12, rhs.immediate),
         } },
+        .shl,
+        .shl_exact,
+        .shr,
+        .shr_exact,
+        => .{ .rr_shift = .{
+            .rd = dest_reg,
+            .rn = lhs_reg,
+            .shift = @intCast(u6, rhs.immediate),
+        } },
         else => unreachable,
     };
 
@@ -1407,8 +1425,13 @@ fn binOp(
         .Int => {
             const int_info = lhs_ty.intInfo(self.target.*);
             if (int_info.bits <= 64) {
-                // TODO immediate shifts
-                return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
+                const rhs_immediate_ok = rhs == .immediate;
+
+                if (rhs_immediate_ok) {
+                    return try self.binOpImmediate(tag, maybe_inst, lhs, rhs, lhs_ty, false);
+                } else {
+                    return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
+                }
             } else {
                 return self.fail("TODO binary operations on int with bits > 64", .{});
             }
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 1a3e522e36..2957389b32 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -84,6 +84,10 @@ pub fn emitMir(
             .lsl_register => try emit.mirShiftRegister(inst),
             .lsr_register => try emit.mirShiftRegister(inst),
 
+            .asr_immediate => try emit.mirShiftImmediate(inst),
+            .lsl_immediate => try emit.mirShiftImmediate(inst),
+            .lsr_immediate => try emit.mirShiftImmediate(inst),
+
             .b_cond => try emit.mirConditionalBranchImmediate(inst),
 
             .b => try emit.mirBranch(inst),
@@ -378,20 +382,6 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
     return error.EmitFail;
 }
 
-fn moveImmediate(emit: *Emit, reg: Register, imm64: u64) !void {
-    try emit.writeInstruction(Instruction.movz(reg, @truncate(u16, imm64), 0));
-
-    if (imm64 > math.maxInt(u16)) {
-        try emit.writeInstruction(Instruction.movk(reg, @truncate(u16, imm64 >> 16), 16));
-    }
-    if (imm64 > math.maxInt(u32)) {
-        try emit.writeInstruction(Instruction.movk(reg, @truncate(u16, imm64 >> 32), 32));
-    }
-    if (imm64 > math.maxInt(u48)) {
-        try emit.writeInstruction(Instruction.movk(reg, @truncate(u16, imm64 >> 48), 48));
-    }
-}
-
 fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
     const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
     const delta_pc: usize = self.code.items.len - self.prev_di_pc;
@@ -481,9 +471,24 @@ fn mirShiftRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
     const rm = rrr.rm;
 
     switch (tag) {
-        .asr_register => try emit.writeInstruction(Instruction.asrv(rd, rn, rm)),
-        .lsl_register => try emit.writeInstruction(Instruction.lslv(rd, rn, rm)),
-        .lsr_register => try emit.writeInstruction(Instruction.lsrv(rd, rn, rm)),
+        .asr_register => try emit.writeInstruction(Instruction.asrRegister(rd, rn, rm)),
+        .lsl_register => try emit.writeInstruction(Instruction.lslRegister(rd, rn, rm)),
+        .lsr_register => try emit.writeInstruction(Instruction.lsrRegister(rd, rn, rm)),
+        else => unreachable,
+    }
+}
+
+fn mirShiftImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
+    const tag = emit.mir.instructions.items(.tag)[inst];
+    const rr_shift = emit.mir.instructions.items(.data)[inst].rr_shift;
+    const rd = rr_shift.rd;
+    const rn = rr_shift.rn;
+    const shift = rr_shift.shift;
+
+    switch (tag) {
+        .asr_immediate => try emit.writeInstruction(Instruction.asrImmediate(rd, rn, shift)),
+        .lsl_immediate => try emit.writeInstruction(Instruction.lslImmediate(rd, rn, shift)),
+        .lsr_immediate => try emit.writeInstruction(Instruction.lsrImmediate(rd, rn, shift)),
         else => unreachable,
     }
 }
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index 4cae413bc6..679daf8ae2 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -30,6 +30,8 @@ pub const Inst = struct {
         add_shifted_register,
         /// Bitwise AND (shifted register)
         and_shifted_register,
+        /// Arithmetic Shift Right (immediate)
+        asr_immediate,
         /// Arithmetic Shift Right (register)
         asr_register,
         /// Branch conditionally
@@ -98,8 +100,12 @@ pub const Inst = struct {
         ldrh_immediate,
         /// Load Register Halfword (register)
         ldrh_register,
+        /// Logical Shift Left (immediate)
+        lsl_immediate,
         /// Logical Shift Left (register)
         lsl_register,
+        /// Logical Shift Right (immediate)
+        lsr_immediate,
         /// Logical Shift Right (register)
         lsr_register,
         /// Move (to/from SP)
@@ -263,7 +269,15 @@ pub const Inst = struct {
             immr: u6,
             n: u1,
         },
-        /// Two registers
+        /// Two registers and a 6-bit unsigned shift
+        ///
+        /// Used by e.g. lsl_immediate
+        rr_shift: struct {
+            rd: Register,
+            rn: Register,
+            shift: u6,
+        },
+        /// Three registers
         ///
         /// Used by e.g. mul
         rrr: struct {
diff --git a/src/arch/aarch64/bits.zig b/src/arch/aarch64/bits.zig
index 3f6e302b84..e28a8485ca 100644
--- a/src/arch/aarch64/bits.zig
+++ b/src/arch/aarch64/bits.zig
@@ -308,6 +308,16 @@ pub const Instruction = union(enum) {
         opc: u2,
         sf: u1,
     },
+    bitfield: packed struct {
+        rd: u5,
+        rn: u5,
+        imms: u6,
+        immr: u6,
+        n: u1,
+        fixed: u6 = 0b100110,
+        opc: u2,
+        sf: u1,
+    },
     add_subtract_shifted_register: packed struct {
         rd: u5,
         rn: u5,
@@ -483,6 +493,7 @@ pub const Instruction = union(enum) {
             .logical_shifted_register => |v| @bitCast(u32, v),
             .add_subtract_immediate => |v| @bitCast(u32, v),
             .logical_immediate => |v| @bitCast(u32, v),
+            .bitfield => |v| @bitCast(u32, v),
             .add_subtract_shifted_register => |v| @bitCast(u32, v),
             // TODO once packed structs work, this can be refactored
             .conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
@@ -922,6 +933,31 @@ pub const Instruction = union(enum) {
         };
     }
 
+    fn bitfield(
+        opc: u2,
+        n: u1,
+        rd: Register,
+        rn: Register,
+        immr: u6,
+        imms: u6,
+    ) Instruction {
+        return Instruction{
+            .bitfield = .{
+                .rd = rd.enc(),
+                .rn = rn.enc(),
+                .imms = imms,
+                .immr = immr,
+                .n = n,
+                .opc = opc,
+                .sf = switch (rd.size()) {
+                    32 => 0b0,
+                    64 => 0b1,
+                    else => unreachable, // unexpected register size
+                },
+            },
+        };
+    }
+
     pub const AddSubtractShiftedRegisterShift = enum(u2) { lsl, lsr, asr, _ };
 
     fn addSubtractShiftedRegister(
@@ -1334,6 +1370,50 @@ pub const Instruction = union(enum) {
         return logicalImmediate(0b11, rd, rn, imms, immr, n);
     }
 
+    // Bitfield
+
+    pub fn sbfm(rd: Register, rn: Register, immr: u6, imms: u6) Instruction {
+        const n: u1 = switch (rd.size()) {
+            32 => 0b0,
+            64 => 0b1,
+            else => unreachable, // unexpected register size
+        };
+        return bitfield(0b00, n, rd, rn, immr, imms);
+    }
+
+    pub fn bfm(rd: Register, rn: Register, immr: u6, imms: u6) Instruction {
+        const n: u1 = switch (rd.size()) {
+            32 => 0b0,
+            64 => 0b1,
+            else => unreachable, // unexpected register size
+        };
+        return bitfield(0b01, n, rd, rn, immr, imms);
+    }
+
+    pub fn ubfm(rd: Register, rn: Register, immr: u6, imms: u6) Instruction {
+        const n: u1 = switch (rd.size()) {
+            32 => 0b0,
+            64 => 0b1,
+            else => unreachable, // unexpected register size
+        };
+        return bitfield(0b10, n, rd, rn, immr, imms);
+    }
+
+    pub fn asrImmediate(rd: Register, rn: Register, shift: u6) Instruction {
+        const imms = @intCast(u6, rd.size() - 1);
+        return sbfm(rd, rn, shift, imms);
+    }
+
+    pub fn lslImmediate(rd: Register, rn: Register, shift: u6) Instruction {
+        const size = @intCast(u6, rd.size() - 1);
+        return ubfm(rd, rn, size - shift + 1, size - shift);
+    }
+
+    pub fn lsrImmediate(rd: Register, rn: Register, shift: u6) Instruction {
+        const imms = @intCast(u6, rd.size() - 1);
+        return ubfm(rd, rn, shift, imms);
+    }
+
     // Add/subtract (shifted register)
 
     pub fn addShiftedRegister(
@@ -1441,6 +1521,10 @@ pub const Instruction = union(enum) {
     pub fn asrv(rd: Register, rn: Register, rm: Register) Instruction {
         return dataProcessing2Source(0b0, 0b001010, rd, rn, rm);
     }
+
+    pub const asrRegister = asrv;
+    pub const lslRegister = lslv;
+    pub const lsrRegister = lsrv;
 };
 
 test {
@@ -1622,6 +1706,22 @@
         .{ // lsl x6, x9, x10
             .inst = Instruction.lslv(.x6, .x9, .x10),
             .expected = 0b1_0_0_11010110_01010_0010_00_01001_00110,
         },
+        .{ // lsl x4, x2, #42
+            .inst = Instruction.lslImmediate(.x4, .x2, 42),
+            .expected = 0b1_10_100110_1_010110_010101_00010_00100,
+        },
+        .{ // lsl x4, x2, #63
+            .inst = Instruction.lslImmediate(.x4, .x2, 63),
+            .expected = 0b1_10_100110_1_000001_000000_00010_00100,
+        },
+        .{ // lsr x4, x2, #42
+            .inst = Instruction.lsrImmediate(.x4, .x2, 42),
+            .expected = 0b1_10_100110_1_101010_111111_00010_00100,
+        },
+        .{ // lsr x4, x2, #63
+            .inst = Instruction.lsrImmediate(.x4, .x2, 63),
+            .expected = 0b1_10_100110_1_111111_111111_00010_00100,
+        },
     };
 
     for (testcases) |case| {
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index a8daac54b5..413cf53044 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -573,7 +573,7 @@ test "bit shift a u1" {
 
 test "truncating shift right" {
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+// if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
 
     try testShrTrunc(maxInt(u16));
     comptime try testShrTrunc(maxInt(u16));