Merge pull request #11150 from joachimschmidt557/stage2-aarch64

stage2 AArch64: misc improvements
Joachim Schmidt 2022-03-13 18:43:18 +01:00 committed by GitHub
commit bb859a0be7
13 changed files with 348 additions and 106 deletions

View File

@@ -85,6 +85,8 @@ blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
register_manager: RegisterManager = .{},
/// Maps offset to what is stored there.
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{},
/// Tracks the current instruction allocated to the compare flags
compare_flags_inst: ?Air.Inst.Index = null,
/// Offset from the stack base, representing the end of the stack frame.
max_end_stack: u32 = 0,
@@ -536,12 +538,12 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.sub, .ptr_sub => try self.airBinOp(inst),
.subwrap => try self.airSubWrap(inst),
.sub_sat => try self.airSubSat(inst),
- .mul => try self.airMul(inst),
+ .mul => try self.airBinOp(inst),
.mulwrap => try self.airMulWrap(inst),
.mul_sat => try self.airMulSat(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
- .shl, .shl_exact => try self.airShl(inst),
+ .shl, .shl_exact => try self.airBinOp(inst),
.shl_sat => try self.airShlSat(inst),
.min => try self.airMin(inst),
.max => try self.airMax(inst),
@@ -581,7 +583,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.bit_and => try self.airBinOp(inst),
.bit_or => try self.airBinOp(inst),
.xor => try self.airBinOp(inst),
- .shr, .shr_exact => try self.airShr(inst),
+ .shr, .shr_exact => try self.airBinOp(inst),
.alloc => try self.airAlloc(inst),
.ret_ptr => try self.airRetPtr(inst),
@@ -722,6 +724,9 @@ fn processDeath(self: *Self, inst: Air.Inst.Index) void {
const canon_reg = toCanonicalReg(reg);
self.register_manager.freeReg(canon_reg);
},
.compare_flags_signed, .compare_flags_unsigned => {
self.compare_flags_inst = null;
},
else => {}, // TODO process stack allocation death
}
}
@@ -815,7 +820,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const elem_ty = self.air.typeOfIndex(inst).elemType();
if (!elem_ty.hasRuntimeBits()) {
- return self.allocMem(inst, @sizeOf(usize), @alignOf(usize));
+ // As this stack item will never be dereferenced at runtime,
+ // return the current stack offset
+ return self.next_stack_offset;
}
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
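An illustrative case for the new early return (the input below is hypothetical, not from this commit): a pointer to a zero-bit type needs a value but is never loaded or stored through, so handing back next_stack_offset without reserving any frame space is sound.

    test "pointer to zero-bit value" {
        var x: void = {};
        const p = &x; // allocMemPtr may return self.next_stack_offset unchanged
        _ = p;
    }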
@@ -857,6 +864,24 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
}
/// Save the current instruction stored in the compare flags if
/// occupied
fn spillCompareFlagsIfOccupied(self: *Self) !void {
if (self.compare_flags_inst) |inst_to_save| {
const mcv = self.getResolvedInstValue(inst_to_save);
assert(mcv == .compare_flags_signed or mcv == .compare_flags_unsigned);
const new_mcv = try self.allocRegOrMem(inst_to_save, true);
try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv });
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
try branch.inst_table.put(self.gpa, inst_to_save, new_mcv);
self.compare_flags_inst = null;
}
}
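A minimal sketch of the situation this function handles (illustrative Zig input; the names are hypothetical): the result of a comparison initially lives in the NZCV flags, and anything that overwrites the flags before the value's last use forces it out into a register or stack slot.

    fn clobber() void {}

    fn lessThanAcrossCall(a: u32, b: u32) bool {
        const c = a < b; // tracked as .compare_flags_unsigned
        clobber(); // a call may overwrite NZCV, so c is spilled first
        return c;
    }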
/// Copies a value to a register without tracking the register. The register is not considered
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
@@ -1156,6 +1181,15 @@ fn binOpRegister(
.bit_or,
.bool_or,
=> .orr_shifted_register,
.shl,
.shl_exact,
=> .lsl_register,
.shr,
.shr_exact,
=> switch (lhs_ty.intInfo(self.target.*).signedness) {
.signed => Mir.Inst.Tag.asr_register,
.unsigned => Mir.Inst.Tag.lsr_register,
},
.xor => .eor_shifted_register,
else => unreachable,
};
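The signedness dispatch above exists because arithmetic and logical right shifts disagree on negative operands. A quick illustration using ordinary Zig shift semantics (not code from this commit):

    const a: i8 = -8; // 0b11111000
    const b: u8 = 0xf8; // the same bit pattern
    // a >> 2 == -2   (0b11111110): the sign bit is replicated, hence asr_register
    // b >> 2 == 0x3e (0b00111110): zeros are shifted in, hence lsr_register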
@@ -1171,7 +1205,12 @@
.imm6 = 0,
.shift = .lsl,
} },
- .mul => .{ .rrr = .{
+ .mul,
+ .shl,
+ .shl_exact,
+ .shr,
+ .shr_exact,
+ => .{ .rrr = .{
.rd = dest_reg,
.rn = lhs_reg,
.rm = rhs_reg,
@@ -1263,6 +1302,15 @@ fn binOpImmediate(
const mir_tag: Mir.Inst.Tag = switch (tag) {
.add => .add_immediate,
.sub => .sub_immediate,
.shl,
.shl_exact,
=> .lsl_immediate,
.shr,
.shr_exact,
=> switch (lhs_ty.intInfo(self.target.*).signedness) {
.signed => Mir.Inst.Tag.asr_immediate,
.unsigned => Mir.Inst.Tag.lsr_immediate,
},
else => unreachable,
};
const mir_data: Mir.Inst.Data = switch (tag) {
@@ -1273,6 +1321,15 @@
.rn = lhs_reg,
.imm12 = @intCast(u12, rhs.immediate),
} },
.shl,
.shl_exact,
.shr,
.shr_exact,
=> .{ .rr_shift = .{
.rd = dest_reg,
.rn = lhs_reg,
.shift = @intCast(u6, rhs.immediate),
} },
else => unreachable,
};
@@ -1304,7 +1361,7 @@ fn binOp(
rhs: MCValue,
lhs_ty: Type,
rhs_ty: Type,
- ) !MCValue {
+ ) InnerError!MCValue {
switch (tag) {
// Arithmetic operations on integers and floats
.add,
@@ -1385,6 +1442,28 @@ fn binOp(
else => unreachable,
}
},
.shl,
.shr,
=> {
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
const rhs_immediate_ok = rhs == .immediate;
if (rhs_immediate_ok) {
return try self.binOpImmediate(tag, maybe_inst, lhs, rhs, lhs_ty, false);
} else {
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
}
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
}
},
else => unreachable,
}
},
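A sketch of the two lowering paths this arm selects between (illustrative functions, not part of the commit): a comptime-known shift amount takes the binOpImmediate route, a runtime amount the binOpRegister route.

    fn shlImm(x: u64) u64 {
        return x << 3; // rhs is .immediate -> binOpImmediate -> lsl_immediate
    }

    fn shlReg(x: u64, n: u6) u64 {
        return x << n; // rhs needs a register -> binOpRegister -> lsl_register
    }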
.bool_and,
.bool_or,
=> {
@@ -1404,16 +1483,21 @@
switch (lhs_ty.zigTypeTag()) {
.Pointer => {
const ptr_ty = lhs_ty;
- const pointee_ty = switch (ptr_ty.ptrSize()) {
+ const elem_ty = switch (ptr_ty.ptrSize()) {
.One => ptr_ty.childType().childType(), // ptr to array, so get array element type
else => ptr_ty.childType(),
};
+ const elem_size = elem_ty.abiSize(self.target.*);
- if (pointee_ty.abiSize(self.target.*) > 1) {
-     return self.fail("TODO ptr_add, ptr_sub with more element sizes", .{});
+ if (elem_size == 1) {
+     return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
+ } else {
+     // convert the offset into a byte offset by
+     // multiplying it by elem_size
+     const offset = try self.binOp(.mul, null, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize);
+     const addr = try self.binOp(tag, null, lhs, offset, Type.initTag(.manyptr_u8), Type.usize);
+     return addr;
+ }
- return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
},
else => unreachable,
}
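Worked through on a concrete element type (an assumption for illustration): for p: [*]u32 and index i, elem_size is 4, so p + i lowers to a multiply of the index by 4 followed by a plain byte-wise add on a manyptr_u8 view of the pointer.

    fn elemPtr(p: [*]u32, i: usize) *u32 {
        return &p[i]; // lowered as: offset = i * 4; addr = p_bytes + offset
    }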
@@ -1458,12 +1542,6 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
- fn airMul(self: *Self, inst: Air.Inst.Index) !void {
-     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mul for {}", .{self.target.cpu.arch});
-     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
- }
fn airMulWrap(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch});
@@ -1514,24 +1592,12 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
- fn airShl(self: *Self, inst: Air.Inst.Index) !void {
-     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl for {}", .{self.target.cpu.arch});
-     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
- }
fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
- fn airShr(self: *Self, inst: Air.Inst.Index) !void {
-     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shr for {}", .{self.target.cpu.arch});
-     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
- }
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement .optional_payload for {}", .{self.target.cpu.arch});
@@ -1735,29 +1801,11 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
switch (elem_size) {
else => {
- const dst_mcv = try self.allocRegOrMem(inst, true);
+ const dest = try self.allocRegOrMem(inst, true);
+ const addr = try self.binOp(.ptr_add, null, base_mcv, index_mcv, slice_ty, Type.usize);
+ try self.load(dest, addr, slice_ptr_field_type);
- const offset_mcv = try self.binOp(
-     .mul,
-     null,
-     index_mcv,
-     .{ .immediate = elem_size },
-     Type.usize,
-     Type.usize,
- );
- assert(offset_mcv == .register); // result of multiplication should always be register
- self.register_manager.freezeRegs(&.{offset_mcv.register});
- const addr_mcv = try self.binOp(.add, null, base_mcv, offset_mcv, Type.usize, Type.usize);
- // At this point in time, neither the base register
- // nor the offset register contains any valuable data
- // anymore.
- self.register_manager.unfreezeRegs(&.{ base_mcv.register, offset_mcv.register });
- try self.load(dst_mcv, addr_mcv, slice_ptr_field_type);
- break :result dst_mcv;
+ break :result dest;
},
}
};
@@ -2360,6 +2408,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
var info = try self.resolveCallingConventionValues(fn_ty);
defer info.deinit(self);
// According to the Procedure Call Standard for the ARM
// Architecture, compare flags are not preserved across
// calls. Therefore, if some value is currently stored there, we
// need to save it.
//
// TODO once caller-saved registers are implemented, save them
// here too, but crucially *after* we save the compare flags as
// saving compare flags may require a new caller-saved register
try self.spillCompareFlagsIfOccupied();
for (info.args) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
@@ -2551,6 +2609,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
return self.fail("TODO cmp for types with size > 8", .{});
}
try self.spillCompareFlagsIfOccupied();
self.compare_flags_inst = inst;
const signedness: std.builtin.Signedness = blk: {
// by default we assume the operand type is unsigned (e.g. bools and enum values)
if (ty.zigTypeTag() != .Int) break :blk .unsigned;
@@ -2713,12 +2774,24 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
},
};
// If the condition dies here in this condbr instruction, process
// that death now instead of later as this has an effect on
// whether it needs to be spilled in the branches
if (self.liveness.operandDies(inst, 0)) {
const op_int = @enumToInt(pl_op.operand);
if (op_int >= Air.Inst.Ref.typed_value_map.len) {
const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
self.processDeath(op_index);
}
}
// Capture the state of register and stack allocation state so that we can revert to it.
const parent_next_stack_offset = self.next_stack_offset;
const parent_free_registers = self.register_manager.free_registers;
var parent_stack = try self.stack.clone(self.gpa);
defer parent_stack.deinit(self.gpa);
const parent_registers = self.register_manager.registers;
const parent_compare_flags_inst = self.compare_flags_inst;
try self.branch_stack.append(.{});
@@ -2734,6 +2807,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
defer saved_then_branch.deinit(self.gpa);
self.register_manager.registers = parent_registers;
self.compare_flags_inst = parent_compare_flags_inst;
self.stack.deinit(self.gpa);
self.stack = parent_stack;
@@ -2825,7 +2899,9 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
self.branch_stack.pop().deinit(self.gpa);
- return self.finishAir(inst, .unreach, .{ pl_op.operand, .none, .none });
+ // We already took care of pl_op.operand earlier, so we're going
+ // to pass .none here
+ return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}
fn isNull(self: *Self, operand: MCValue) !MCValue {
@@ -2843,8 +2919,6 @@ fn isNonNull(self: *Self, operand: MCValue) !MCValue {
}
fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
- _ = operand;
const error_type = ty.errorUnionSet();
const payload_type = ty.errorUnionPayload();
@@ -3323,7 +3397,13 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
} else {
// TODO optimize the register allocation
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
.data = ty,
};
const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
// TODO call extern memcpy
const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null });
self.register_manager.freezeRegs(&regs);
defer self.register_manager.unfreezeRegs(&regs);
@@ -3337,16 +3417,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
switch (mcv) {
.stack_offset => |off| {
// sub src_reg, fp, #off
- const adj_src_offset = off + abi_size;
- const src_offset = math.cast(u12, adj_src_offset) catch return self.fail("TODO load: larger stack offsets", .{});
- _ = try self.addInst(.{
-     .tag = .sub_immediate,
-     .data = .{ .rr_imm12_sh = .{
-         .rd = src_reg,
-         .rn = .x29,
-         .imm12 = src_offset,
-     } },
- });
+ try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = off });
},
.memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = addr }),
.got_load,
@@ -3372,16 +3443,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
}
// sub dst_reg, fp, #stack_offset
- const adj_dst_off = stack_offset + abi_size;
- const dst_offset = math.cast(u12, adj_dst_off) catch return self.fail("TODO load: larger stack offsets", .{});
- _ = try self.addInst(.{
-     .tag = .sub_immediate,
-     .data = .{ .rr_imm12_sh = .{
-         .rd = dst_reg,
-         .rn = .x29,
-         .imm12 = dst_offset,
-     } },
- });
+ try self.genSetReg(ptr_ty, dst_reg, .{ .ptr_stack_offset = stack_offset });
// mov len, #abi_size
try self.genSetReg(Type.usize, len_reg, .{ .immediate = abi_size });

View File

@@ -80,6 +80,14 @@ pub fn emitMir(
.cmp_immediate => try emit.mirAddSubtractImmediate(inst),
.sub_immediate => try emit.mirAddSubtractImmediate(inst),
.asr_register => try emit.mirShiftRegister(inst),
.lsl_register => try emit.mirShiftRegister(inst),
.lsr_register => try emit.mirShiftRegister(inst),
.asr_immediate => try emit.mirShiftImmediate(inst),
.lsl_immediate => try emit.mirShiftImmediate(inst),
.lsr_immediate => try emit.mirShiftImmediate(inst),
.b_cond => try emit.mirConditionalBranchImmediate(inst),
.b => try emit.mirBranch(inst),
@@ -374,20 +382,6 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
return error.EmitFail;
}
- fn moveImmediate(emit: *Emit, reg: Register, imm64: u64) !void {
-     try emit.writeInstruction(Instruction.movz(reg, @truncate(u16, imm64), 0));
-     if (imm64 > math.maxInt(u16)) {
-         try emit.writeInstruction(Instruction.movk(reg, @truncate(u16, imm64 >> 16), 16));
-     }
-     if (imm64 > math.maxInt(u32)) {
-         try emit.writeInstruction(Instruction.movk(reg, @truncate(u16, imm64 >> 32), 32));
-     }
-     if (imm64 > math.maxInt(u48)) {
-         try emit.writeInstruction(Instruction.movk(reg, @truncate(u16, imm64 >> 48), 48));
-     }
- }
fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line);
const delta_pc: usize = self.code.items.len - self.prev_di_pc;
@@ -469,6 +463,36 @@ fn mirAddSubtractImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
fn mirShiftRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rrr = emit.mir.instructions.items(.data)[inst].rrr;
const rd = rrr.rd;
const rn = rrr.rn;
const rm = rrr.rm;
switch (tag) {
.asr_register => try emit.writeInstruction(Instruction.asrRegister(rd, rn, rm)),
.lsl_register => try emit.writeInstruction(Instruction.lslRegister(rd, rn, rm)),
.lsr_register => try emit.writeInstruction(Instruction.lsrRegister(rd, rn, rm)),
else => unreachable,
}
}
fn mirShiftImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rr_shift = emit.mir.instructions.items(.data)[inst].rr_shift;
const rd = rr_shift.rd;
const rn = rr_shift.rn;
const shift = rr_shift.shift;
switch (tag) {
.asr_immediate => try emit.writeInstruction(Instruction.asrImmediate(rd, rn, shift)),
.lsl_immediate => try emit.writeInstruction(Instruction.lslImmediate(rd, rn, shift)),
.lsr_immediate => try emit.writeInstruction(Instruction.lsrImmediate(rd, rn, shift)),
else => unreachable,
}
}
fn mirConditionalBranchImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const inst_cond = emit.mir.instructions.items(.data)[inst].inst_cond;

View File

@@ -30,6 +30,10 @@ pub const Inst = struct {
add_shifted_register,
/// Bitwise AND (shifted register)
and_shifted_register,
/// Arithmetic Shift Right (immediate)
asr_immediate,
/// Arithmetic Shift Right (register)
asr_register,
/// Branch conditionally
b_cond,
/// Branch
@@ -96,6 +100,14 @@ pub const Inst = struct {
ldrh_immediate,
/// Load Register Halfword (register)
ldrh_register,
/// Logical Shift Left (immediate)
lsl_immediate,
/// Logical Shift Left (register)
lsl_register,
/// Logical Shift Right (immediate)
lsr_immediate,
/// Logical Shift Right (register)
lsr_register,
/// Move (to/from SP)
mov_to_from_sp,
/// Move (register)
@@ -257,7 +269,15 @@ pub const Inst = struct {
immr: u6,
n: u1,
},
/// Two registers
/// Two registers and a 6-bit unsigned shift
///
/// Used by e.g. lsl_immediate
rr_shift: struct {
rd: Register,
rn: Register,
shift: u6,
},
/// Three registers
///
/// Used by e.g. mul
rrr: struct {

View File

@@ -308,6 +308,16 @@ pub const Instruction = union(enum) {
opc: u2,
sf: u1,
},
bitfield: packed struct {
rd: u5,
rn: u5,
imms: u6,
immr: u6,
n: u1,
fixed: u6 = 0b100110,
opc: u2,
sf: u1,
},
add_subtract_shifted_register: packed struct {
rd: u5,
rn: u5,
@@ -356,6 +366,16 @@
op54: u2,
sf: u1,
},
data_processing_2_source: packed struct {
rd: u5,
rn: u5,
opcode: u6,
rm: u5,
fixed_1: u8 = 0b11010110,
s: u1,
fixed_2: u1 = 0b0,
sf: u1,
},
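For orientation (these are the standard A64 encoding layouts, not something this commit invents): Zig packed struct fields are laid out starting at the least significant bit, so rd occupies bits 4:0 and sf bit 31. Read back to front, the two new groups match the manual's diagrams:

    Bitfield:                    sf | opc(2) | 100110 | N | immr(6) | imms(6) | Rn(5) | Rd(5)
    Data-processing (2 source):  sf | 0 | S | 11010110 | Rm(5) | opcode(6) | Rn(5) | Rd(5)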
pub const Condition = enum(u4) {
/// Integer: Equal
@@ -473,12 +493,14 @@
.logical_shifted_register => |v| @bitCast(u32, v),
.add_subtract_immediate => |v| @bitCast(u32, v),
.logical_immediate => |v| @bitCast(u32, v),
.bitfield => |v| @bitCast(u32, v),
.add_subtract_shifted_register => |v| @bitCast(u32, v),
// TODO once packed structs work, this can be refactored
.conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
.compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 31),
.conditional_select => |v| @as(u32, v.rd) | @as(u32, v.rn) << 5 | @as(u32, v.op2) << 10 | @as(u32, v.cond) << 12 | @as(u32, v.rm) << 16 | @as(u32, v.fixed) << 21 | @as(u32, v.s) << 29 | @as(u32, v.op) << 30 | @as(u32, v.sf) << 31,
.data_processing_3_source => |v| @bitCast(u32, v),
.data_processing_2_source => |v| @bitCast(u32, v),
};
}
@@ -911,6 +933,31 @@
};
}
fn bitfield(
opc: u2,
n: u1,
rd: Register,
rn: Register,
immr: u6,
imms: u6,
) Instruction {
return Instruction{
.bitfield = .{
.rd = rd.enc(),
.rn = rn.enc(),
.imms = imms,
.immr = immr,
.n = n,
.opc = opc,
.sf = switch (rd.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable, // unexpected register size
},
},
};
}
pub const AddSubtractShiftedRegisterShift = enum(u2) { lsl, lsr, asr, _ };
fn addSubtractShiftedRegister(
@@ -1031,6 +1078,29 @@
};
}
fn dataProcessing2Source(
s: u1,
opcode: u6,
rd: Register,
rn: Register,
rm: Register,
) Instruction {
return Instruction{
.data_processing_2_source = .{
.rd = rd.enc(),
.rn = rn.enc(),
.opcode = opcode,
.rm = rm.enc(),
.s = s,
.sf = switch (rd.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable, // unexpected register size
},
},
};
}
// Helper functions for assembly syntax functions
// Move wide (immediate)
@@ -1300,6 +1370,50 @@
return logicalImmediate(0b11, rd, rn, imms, immr, n);
}
// Bitfield
pub fn sbfm(rd: Register, rn: Register, immr: u6, imms: u6) Instruction {
const n: u1 = switch (rd.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable, // unexpected register size
};
return bitfield(0b00, n, rd, rn, immr, imms);
}
pub fn bfm(rd: Register, rn: Register, immr: u6, imms: u6) Instruction {
const n: u1 = switch (rd.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable, // unexpected register size
};
return bitfield(0b01, n, rd, rn, immr, imms);
}
pub fn ubfm(rd: Register, rn: Register, immr: u6, imms: u6) Instruction {
const n: u1 = switch (rd.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable, // unexpected register size
};
return bitfield(0b10, n, rd, rn, immr, imms);
}
pub fn asrImmediate(rd: Register, rn: Register, shift: u6) Instruction {
const imms = @intCast(u6, rd.size() - 1);
return sbfm(rd, rn, shift, imms);
}
pub fn lslImmediate(rd: Register, rn: Register, shift: u6) Instruction {
const size = @intCast(u6, rd.size() - 1);
return ubfm(rd, rn, size - shift + 1, size - shift);
}
pub fn lsrImmediate(rd: Register, rn: Register, shift: u6) Instruction {
const imms = @intCast(u6, rd.size() - 1);
return ubfm(rd, rn, shift, imms);
}
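These three helpers implement the standard A64 aliases onto the bitfield-move instructions: for a 64-bit register, lsr rd, rn, #sh is ubfm rd, rn, #sh, #63 (asr likewise, via sbfm), and lsl rd, rn, #sh is ubfm rd, rn, #(64 - sh), #(63 - sh), which is the size - shift + 1 / size - shift pair computed above. Worked for the lsl x4, x2, #42 test case below: immr = 63 - 42 + 1 = 22 (0b010110) and imms = 63 - 42 = 21 (0b010101), exactly the two immediate fields in the expected encoding.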
// Add/subtract (shifted register)
pub fn addShiftedRegister(
@@ -1393,6 +1507,24 @@
pub fn mneg(rd: Register, rn: Register, rm: Register) Instruction {
return msub(rd, rn, rm, .xzr);
}
// Data processing (2 source)
pub fn lslv(rd: Register, rn: Register, rm: Register) Instruction {
return dataProcessing2Source(0b0, 0b001000, rd, rn, rm);
}
pub fn lsrv(rd: Register, rn: Register, rm: Register) Instruction {
return dataProcessing2Source(0b0, 0b001001, rd, rn, rm);
}
pub fn asrv(rd: Register, rn: Register, rm: Register) Instruction {
return dataProcessing2Source(0b0, 0b001010, rd, rn, rm);
}
pub const asrRegister = asrv;
pub const lslRegister = lslv;
pub const lsrRegister = lsrv;
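(The opcode values 0b001000 through 0b001010 are the LSLV, LSRV, and ASRV rows of the Data-processing (2 source) group; the *Register aliases mirror the preferred lsl/lsr/asr register-shift disassembly names.)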
};
test {
@@ -1570,6 +1702,26 @@ test "serialize instructions" {
.inst = Instruction.eorImmediate(.x3, .x5, 0b000000, 0b000000, 0b1),
.expected = 0b1_10_100100_1_000000_000000_00101_00011,
},
.{ // lslv x6, x9, x10
.inst = Instruction.lslv(.x6, .x9, .x10),
.expected = 0b1_0_0_11010110_01010_0010_00_01001_00110,
},
.{ // lsl x4, x2, #42
.inst = Instruction.lslImmediate(.x4, .x2, 42),
.expected = 0b1_10_100110_1_010110_010101_00010_00100,
},
.{ // lsl x4, x2, #63
.inst = Instruction.lslImmediate(.x4, .x2, 63),
.expected = 0b1_10_100110_1_000001_000000_00010_00100,
},
.{ // lsr x4, x2, #42
.inst = Instruction.lsrImmediate(.x4, .x2, 42),
.expected = 0b1_10_100110_1_101010_111111_00010_00100,
},
.{ // lsr x4, x2, #63
.inst = Instruction.lsrImmediate(.x4, .x2, 63),
.expected = 0b1_10_100110_1_111111_111111_00010_00100,
},
};
for (testcases) |case| {

View File

@@ -351,7 +351,6 @@ test "read 128-bit field from default aligned struct in global memory" {
}
test "struct field explicit alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

View File

@@ -7,7 +7,6 @@ const expectEqual = testing.expectEqual;
test "array to slice" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const a: u32 align(4) = 3;
const b: u32 align(8) = 4;

View File

@@ -269,7 +269,6 @@ test "bitcast passed as tuple element" {
test "triple level result location with bitcast sandwich passed as tuple element" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {

View File

@@ -50,7 +50,6 @@ fn constant() !void {
test "pointer-to-array constness for zero-size elements, var" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try mutable();
comptime try mutable();
@@ -58,7 +57,6 @@ test "pointer-to-array constness for zero-size elements, var" {
test "pointer-to-array constness for zero-size elements, const" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try constant();
comptime try constant();

View File

@@ -573,7 +573,6 @@ test "bit shift a u1" {
test "truncating shift right" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testShrTrunc(maxInt(u16));
comptime try testShrTrunc(maxInt(u16));

View File

@@ -187,7 +187,6 @@ test "@sizeOf(T) == 0 doesn't force resolving struct size" {
test "@TypeOf() has no runtime side effects" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
const S = struct {
@@ -204,7 +203,6 @@ test "@TypeOf() has no runtime side effects" {
test "branching logic inside @TypeOf" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
const S = struct {

View File

@@ -204,7 +204,6 @@ test "slicing zero length array" {
const x = @intToPtr([*]i32, 0x1000)[0..0x500];
const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

View File

@@ -927,7 +927,6 @@ test "anonymous struct literal syntax" {
test "fully anonymous struct" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
@@ -953,7 +952,6 @@ test "fully anonymous struct" {
test "fully anonymous list literal" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
@@ -983,7 +981,6 @@ test "tuple assigned to variable" {
test "comptime struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const T = struct {
a: i32,

View File

@@ -15,7 +15,6 @@ fn add(args: anytype) i32 {
test "add arbitrary args" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try expect(add(.{ @as(i32, 1), @as(i32, 2), @as(i32, 3), @as(i32, 4) }) == 10);
try expect(add(.{@as(i32, 1234)}) == 1234);
@@ -27,7 +26,6 @@ fn readFirstVarArg(args: anytype) void {
}
test "send void arg to var args" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -90,7 +88,6 @@ fn foo2(args: anytype) bool {
}
test "array of var args functions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -99,7 +96,6 @@ test "array of var args functions" {
}
test "pass zero length array to var args param" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO