Merge pull request #10959 from joachimschmidt557/stage2-aarch64

stage2 AArch64: misc improvements
Andrew Kelley 2022-02-22 01:30:49 -05:00 committed by GitHub
commit 6dc5ce931c
12 changed files with 331 additions and 184 deletions


@@ -372,7 +372,7 @@ fn gen(self: *Self) !void {
             .data = .{ .load_store_register_pair = .{
                 .rt = .x29,
                 .rt2 = .x30,
-                .rn = Register.sp,
+                .rn = .sp,
                 .offset = Instruction.LoadStorePairOffset.pre_index(-16),
             } },
         });
@@ -407,7 +407,7 @@ fn gen(self: *Self) !void {
         self.saved_regs_stack_space = 16;
         inline for (callee_preserved_regs) |reg| {
             if (self.register_manager.isRegAllocated(reg)) {
-                saved_regs |= @as(u32, 1) << reg.id();
+                saved_regs |= @as(u32, 1) << @intCast(u5, reg.id());
                 self.saved_regs_stack_space += 8;
             }
         }
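
Note on this hunk: `Register.id()` is widened from `u5` to `u6` later in this commit (see the bits.zig changes below), so it no longer coerces to a shift amount: in Zig, the right-hand side of `<<` on a `u32` must be a `u5` (Log2 of the shifted type's bit width). The `@intCast(u5, ...)` narrows the id back, and callee-preserved registers all have ids below 32, so the cast cannot trip here. A minimal standalone illustration of the rule (not part of the commit):

    const std = @import("std");

    test "shift amount of a u32 must be a u5" {
        var id: u6 = 5; // Register.id() now returns u6
        var mask: u32 = 0;
        // `mask |= @as(u32, 1) << id` would not compile: the shift
        // amount for a u32 must fit in Log2(u32) = u5.
        mask |= @as(u32, 1) << @intCast(u5, id);
        try std.testing.expectEqual(@as(u32, 32), mask);
    }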
@@ -449,7 +449,7 @@ fn gen(self: *Self) !void {
                 // the code. Therefore, we can just delete
                 // the space initially reserved for the
                 // jump
-                self.mir_instructions.len -= 1;
+                self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.items[0]);
             } else for (self.exitlude_jump_relocs.items) |jmp_reloc| {
                 self.mir_instructions.set(jmp_reloc, .{
                     .tag = .b,
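
The old code shrank the MIR list by one, which is only correct when the reserved jump happens to be the last instruction emitted; `orderedRemove` instead deletes the instruction at the recorded index no matter what follows it. A small sketch of the difference on `std.MultiArrayList` (not part of the commit):

    const std = @import("std");

    test "orderedRemove deletes at an index, not just the tail" {
        const Inst = struct { tag: u8 };
        var insts = std.MultiArrayList(Inst){};
        defer insts.deinit(std.testing.allocator);
        try insts.append(std.testing.allocator, .{ .tag = 1 }); // reserved jump
        try insts.append(std.testing.allocator, .{ .tag = 2 }); // later instruction
        insts.orderedRemove(0); // `len -= 1` would have dropped tag 2 instead
        try std.testing.expectEqual(@as(u8, 2), insts.get(0).tag);
    }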
@@ -475,7 +475,7 @@ fn gen(self: *Self) !void {
             .data = .{ .load_store_register_pair = .{
                 .rt = .x29,
                 .rt2 = .x30,
-                .rn = Register.sp,
+                .rn = .sp,
                 .offset = Instruction.LoadStorePairOffset.post_index(16),
             } },
         });
@@ -1041,6 +1041,7 @@ fn binOpRegister(
     const mir_tag: Mir.Inst.Tag = switch (tag) {
         .add, .ptr_add => .add_shifted_register,
         .sub, .ptr_sub => .sub_shifted_register,
+        .mul => .mul,
         .xor => .eor_shifted_register,
         else => unreachable,
     };
@@ -1056,6 +1057,11 @@ fn binOpRegister(
             .imm6 = 0,
             .shift = .lsl,
         } },
+        .mul => .{ .rrr = .{
+            .rd = dest_reg,
+            .rn = lhs_reg,
+            .rm = rhs_reg,
+        } },
         .xor => .{ .rrr_imm6_logical_shift = .{
             .rd = dest_reg,
             .rn = lhs_reg,
@@ -1222,6 +1228,24 @@ fn binOp(
                 else => unreachable,
             }
         },
+        .mul => {
+            switch (lhs_ty.zigTypeTag()) {
+                .Vector => return self.fail("TODO binary operations on vectors", .{}),
+                .Int => {
+                    assert(lhs_ty.eql(rhs_ty));
+                    const int_info = lhs_ty.intInfo(self.target.*);
+                    if (int_info.bits <= 64) {
+                        // TODO add optimisations for multiplication
+                        // with immediates, for example a * 2 can be
+                        // lowered to a << 1
+                        return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
+                    } else {
+                        return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
+                    }
+                },
+                else => unreachable,
+            }
+        },
         // Bitwise operations on integers
         .xor => {
             switch (lhs_ty.zigTypeTag()) {
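
With the two hunks above, integer `.mul` now goes through the generic `binOpRegister` path, which moves both operands into registers and emits the new register-register-register `mul` instruction. The TODO refers to strength reduction; the identity it has in mind, written as a plain test (not part of the commit):

    const std = @import("std");

    test "multiplying by a power of two equals a left shift" {
        var a: u64 = 123;
        try std.testing.expectEqual(a * 2, a << 1);
        try std.testing.expectEqual(a * 8, a << 3);
    }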
@@ -1551,88 +1575,37 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
         };
         self.register_manager.freezeRegs(&.{base_mcv.register});

-        // TODO implement optimized ldr for airSliceElemVal
-        const dst_mcv = try self.allocRegOrMem(inst, true);
-
-        const offset_mcv = try self.genMulConstant(bin_op.rhs, @intCast(u32, elem_size));
-        assert(offset_mcv == .register); // result of multiplication should always be register
-        self.register_manager.freezeRegs(&.{offset_mcv.register});
-
-        const addr_reg = try self.register_manager.allocReg(null);
-        self.register_manager.freezeRegs(&.{addr_reg});
-        defer self.register_manager.unfreezeRegs(&.{addr_reg});
-
-        _ = try self.addInst(.{
-            .tag = .add_shifted_register,
-            .data = .{ .rrr_imm6_shift = .{
-                .rd = addr_reg,
-                .rn = base_mcv.register,
-                .rm = offset_mcv.register,
-                .imm6 = 0,
-                .shift = .lsl,
-            } },
-        });
-
-        // At this point in time, neither the base register
-        // nor the offset register contains any valuable data
-        // anymore.
-        self.register_manager.unfreezeRegs(&.{ base_mcv.register, offset_mcv.register });
-
-        try self.load(dst_mcv, .{ .register = addr_reg }, slice_ptr_field_type);
-
-        break :result dst_mcv;
+        switch (elem_size) {
+            else => {
+                const dst_mcv = try self.allocRegOrMem(inst, true);
+
+                const offset_mcv = try self.binOp(
+                    .mul,
+                    null,
+                    index_mcv,
+                    .{ .immediate = elem_size },
+                    Type.usize,
+                    Type.usize,
+                );
+                assert(offset_mcv == .register); // result of multiplication should always be register
+                self.register_manager.freezeRegs(&.{offset_mcv.register});
+
+                const addr_mcv = try self.binOp(.add, null, base_mcv, offset_mcv, Type.usize, Type.usize);
+
+                // At this point in time, neither the base register
+                // nor the offset register contains any valuable data
+                // anymore.
+                self.register_manager.unfreezeRegs(&.{ base_mcv.register, offset_mcv.register });
+
+                try self.load(dst_mcv, addr_mcv, slice_ptr_field_type);
+
+                break :result dst_mcv;
+            },
+        }
     };
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }

-fn genMulConstant(self: *Self, op: Air.Inst.Ref, imm: u32) !MCValue {
-    const lhs = try self.resolveInst(op);
-    const rhs = MCValue{ .immediate = imm };
-
-    const lhs_is_register = lhs == .register;
-
-    if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
-    defer if (lhs_is_register) self.register_manager.unfreezeRegs(&.{lhs.register});
-
-    // Destination must be a register
-    // LHS must be a register
-    // RHS must be a register
-    var dst_mcv: MCValue = undefined;
-    var lhs_mcv: MCValue = lhs;
-    var rhs_mcv: MCValue = rhs;
-
-    // Allocate registers for operands and/or destination
-    // Allocate 1 or 2 registers
-    if (lhs_is_register) {
-        // Move RHS to register
-        dst_mcv = MCValue{ .register = try self.register_manager.allocReg(null) };
-        rhs_mcv = dst_mcv;
-    } else {
-        // Move LHS and RHS to register
-        const regs = try self.register_manager.allocRegs(2, .{ null, null });
-        lhs_mcv = MCValue{ .register = regs[0] };
-        rhs_mcv = MCValue{ .register = regs[1] };
-        dst_mcv = lhs_mcv;
-    }
-
-    // Move the operands to the newly allocated registers
-    if (!lhs_is_register) {
-        try self.genSetReg(self.air.typeOf(op), lhs_mcv.register, lhs);
-    }
-    try self.genSetReg(Type.initTag(.usize), rhs_mcv.register, rhs);
-
-    _ = try self.addInst(.{
-        .tag = .mul,
-        .data = .{ .rrr = .{
-            .rd = dst_mcv.register,
-            .rn = lhs_mcv.register,
-            .rm = rhs_mcv.register,
-        } },
-    });
-
-    return dst_mcv;
-}
-
 fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -1763,23 +1736,17 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
             self.register_manager.freezeRegs(&.{addr_reg});
             defer self.register_manager.unfreezeRegs(&.{addr_reg});

+            const abi_size = elem_ty.abiSize(self.target.*);
             switch (dst_mcv) {
                 .dead => unreachable,
                 .undef => unreachable,
                 .compare_flags_signed, .compare_flags_unsigned => unreachable,
                 .embedded_in_code => unreachable,
                 .register => |dst_reg| {
-                    _ = try self.addInst(.{
-                        .tag = .ldr_immediate,
-                        .data = .{ .load_store_register_immediate = .{
-                            .rt = dst_reg,
-                            .rn = addr_reg,
-                            .offset = Instruction.LoadStoreOffset.none.immediate,
-                        } },
-                    });
+                    try self.genLdrRegister(dst_reg, addr_reg, abi_size);
                 },
                 .stack_offset => |off| {
-                    if (elem_ty.abiSize(self.target.*) <= 8) {
+                    if (abi_size <= 8) {
                         const tmp_reg = try self.register_manager.allocReg(null);
                         self.register_manager.freezeRegs(&.{tmp_reg});
                         defer self.register_manager.unfreezeRegs(&.{tmp_reg});
@@ -1940,6 +1907,100 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
 }

+fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, abi_size: u64) !void {
+    switch (abi_size) {
+        1 => {
+            _ = try self.addInst(.{
+                .tag = .ldrb_immediate,
+                .data = .{ .load_store_register_immediate = .{
+                    .rt = value_reg.to32(),
+                    .rn = addr_reg,
+                    .offset = Instruction.LoadStoreOffset.none.immediate,
+                } },
+            });
+        },
+        2 => {
+            _ = try self.addInst(.{
+                .tag = .ldrh_immediate,
+                .data = .{ .load_store_register_immediate = .{
+                    .rt = value_reg.to32(),
+                    .rn = addr_reg,
+                    .offset = Instruction.LoadStoreOffset.none.immediate,
+                } },
+            });
+        },
+        4 => {
+            _ = try self.addInst(.{
+                .tag = .ldr_immediate,
+                .data = .{ .load_store_register_immediate = .{
+                    .rt = value_reg.to32(),
+                    .rn = addr_reg,
+                    .offset = Instruction.LoadStoreOffset.none.immediate,
+                } },
+            });
+        },
+        8 => {
+            _ = try self.addInst(.{
+                .tag = .ldr_immediate,
+                .data = .{ .load_store_register_immediate = .{
+                    .rt = value_reg.to64(),
+                    .rn = addr_reg,
+                    .offset = Instruction.LoadStoreOffset.none.immediate,
+                } },
+            });
+        },
+        3, 5, 6, 7 => return self.fail("TODO: genLdrRegister for more abi_sizes", .{}),
+        else => unreachable,
+    }
+}
+
+fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, abi_size: u64) !void {
+    switch (abi_size) {
+        1 => {
+            _ = try self.addInst(.{
+                .tag = .strb_immediate,
+                .data = .{ .load_store_register_immediate = .{
+                    .rt = value_reg.to32(),
+                    .rn = addr_reg,
+                    .offset = Instruction.LoadStoreOffset.none.immediate,
+                } },
+            });
+        },
+        2 => {
+            _ = try self.addInst(.{
+                .tag = .strh_immediate,
+                .data = .{ .load_store_register_immediate = .{
+                    .rt = value_reg.to32(),
+                    .rn = addr_reg,
+                    .offset = Instruction.LoadStoreOffset.none.immediate,
+                } },
+            });
+        },
+        4 => {
+            _ = try self.addInst(.{
+                .tag = .str_immediate,
+                .data = .{ .load_store_register_immediate = .{
+                    .rt = value_reg.to32(),
+                    .rn = addr_reg,
+                    .offset = Instruction.LoadStoreOffset.none.immediate,
+                } },
+            });
+        },
+        8 => {
+            _ = try self.addInst(.{
+                .tag = .str_immediate,
+                .data = .{ .load_store_register_immediate = .{
+                    .rt = value_reg.to64(),
+                    .rn = addr_reg,
+                    .offset = Instruction.LoadStoreOffset.none.immediate,
+                } },
+            });
+        },
+        3, 5, 6, 7 => return self.fail("TODO: genStrRegister for more abi_sizes", .{}),
+        else => unreachable,
+    }
+}
+
 fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
     switch (ptr) {
         .none => unreachable,
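
The two new helpers pick the memory instruction from the value's ABI size: 1 byte maps to `ldrb`/`strb` and 2 bytes to `ldrh`/`strh` (both with a 32-bit register operand via `to32()`), 4 bytes to the 32-bit `ldr`/`str`, and 8 bytes to the 64-bit form via `to64()`; sizes 3, 5, 6, and 7 are left as TODO. The same dispatch condensed into a hypothetical standalone helper, for illustration only:

    const std = @import("std");

    /// Hypothetical helper mirroring the switch in genLdrRegister.
    fn ldrMnemonic(abi_size: u64) ?[]const u8 {
        return switch (abi_size) {
            1 => "ldrb", // byte load, 32-bit register alias
            2 => "ldrh", // halfword load, 32-bit register alias
            4 => "ldr (32-bit)",
            8 => "ldr (64-bit)",
            3, 5, 6, 7 => null, // still TODO in this commit
            else => unreachable, // larger values never fit one register
        };
    }

    test "the ABI size selects the load instruction" {
        try std.testing.expectEqualStrings("ldrb", ldrMnemonic(1).?);
        try std.testing.expectEqualStrings("ldr (64-bit)", ldrMnemonic(8).?);
    }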
@@ -1960,8 +2021,28 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
         .embedded_in_code => {
             return self.fail("TODO implement storing to MCValue.embedded_in_code", .{});
         },
-        .register => {
-            return self.fail("TODO implement storing to MCValue.register", .{});
+        .register => |addr_reg| {
+            self.register_manager.freezeRegs(&.{addr_reg});
+            defer self.register_manager.unfreezeRegs(&.{addr_reg});
+
+            const abi_size = value_ty.abiSize(self.target.*);
+            switch (value) {
+                .register => |value_reg| {
+                    try self.genStrRegister(value_reg, addr_reg, abi_size);
+                },
+                else => {
+                    if (abi_size <= 8) {
+                        const tmp_reg = try self.register_manager.allocReg(null);
+                        self.register_manager.freezeRegs(&.{tmp_reg});
+                        defer self.register_manager.unfreezeRegs(&.{tmp_reg});
+
+                        try self.genSetReg(value_ty, tmp_reg, value);
+                        try self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
+                    } else {
+                        return self.fail("TODO implement memcpy", .{});
+                    }
+                },
+            }
         },
         .memory,
         .stack_offset,
@@ -2005,7 +2086,8 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
 fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
     return if (self.liveness.isUnused(inst)) .dead else result: {
         const mcv = try self.resolveInst(operand);
-        const struct_ty = self.air.typeOf(operand).childType();
+        const ptr_ty = self.air.typeOf(operand);
+        const struct_ty = ptr_ty.childType();
         const struct_size = @intCast(u32, struct_ty.abiSize(self.target.*));
         const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
         const struct_field_ty = struct_ty.structFieldType(index);
@@ -2014,7 +2096,28 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off + struct_size - struct_field_offset - struct_field_size };
             },
-            else => return self.fail("TODO implement codegen struct_field_ptr for {}", .{mcv}),
+            else => {
+                const offset_reg = try self.copyToTmpRegister(ptr_ty, .{
+                    .immediate = struct_field_offset,
+                });
+                self.register_manager.freezeRegs(&.{offset_reg});
+                defer self.register_manager.unfreezeRegs(&.{offset_reg});
+
+                const addr_reg = try self.copyToTmpRegister(ptr_ty, mcv);
+                self.register_manager.freezeRegs(&.{addr_reg});
+                defer self.register_manager.unfreezeRegs(&.{addr_reg});
+
+                const dest = try self.binOp(
+                    .add,
+                    null,
+                    .{ .register = addr_reg },
+                    .{ .register = offset_reg },
+                    Type.usize,
+                    Type.usize,
+                );
+
+                break :result dest;
+            },
         }
     };
 }
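
When the base pointer is not a known stack slot, the field pointer is now computed at runtime as base plus field offset: the offset is materialized in a temporary register and added via the `binOp` helper. What that addition computes, expressed as a plain Zig test (not part of the commit):

    const std = @import("std");

    test "a field pointer is the base pointer plus the field offset" {
        const S = extern struct { a: u32, b: u32 };
        var s: S = .{ .a = 1, .b = 2 };
        try std.testing.expectEqual(
            @ptrToInt(&s) + @offsetOf(S, "b"),
            @ptrToInt(&s.b),
        );
    }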
@@ -2983,8 +3086,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
     const abi_size = ty.abiSize(self.target.*);
     switch (mcv) {
         .dead => unreachable,
-        .ptr_stack_offset => unreachable,
-        .ptr_embedded_in_code => unreachable,
         .unreach, .none => return, // Nothing to do.
         .undef => {
             if (!self.wantSafety())
@@ -3001,6 +3102,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
         .compare_flags_unsigned,
         .compare_flags_signed,
         .immediate,
+        .ptr_stack_offset,
+        .ptr_embedded_in_code,
         => {
             const reg = try self.copyToTmpRegister(ty, mcv);
             return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -3043,17 +3146,18 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
             _ = sym_index;
             return self.fail("TODO implement set stack variable from {}", .{mcv});
         },
-        .memory => |vaddr| {
-            _ = vaddr;
-            return self.fail("TODO implement set stack variable from memory vaddr", .{});
-        },
-        .stack_offset => |off| {
-            if (stack_offset == off)
-                return; // Copy stack variable to itself; nothing to do.
+        .memory,
+        .stack_offset,
+        => {
+            switch (mcv) {
+                .stack_offset => |off| {
+                    if (stack_offset == off)
+                        return; // Copy stack variable to itself; nothing to do.
+                },
+                else => {},
+            }

-            const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-            const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-            if (abi_size <= ptr_bytes) {
+            if (abi_size <= 8) {
                 const reg = try self.copyToTmpRegister(ty, mcv);
                 return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
             } else {
@@ -3068,17 +3172,23 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
             const count_reg = regs[3];
             const tmp_reg = regs[4];

-            // sub src_reg, fp, #off
-            const adj_src_offset = off + abi_size;
-            const src_offset = math.cast(u12, adj_src_offset) catch return self.fail("TODO load: larger stack offsets", .{});
-            _ = try self.addInst(.{
-                .tag = .sub_immediate,
-                .data = .{ .rr_imm12_sh = .{
-                    .rd = src_reg,
-                    .rn = .x29,
-                    .imm12 = src_offset,
-                } },
-            });
+            switch (mcv) {
+                .stack_offset => |off| {
+                    // sub src_reg, fp, #off
+                    const adj_src_offset = off + abi_size;
+                    const src_offset = math.cast(u12, adj_src_offset) catch return self.fail("TODO load: larger stack offsets", .{});
+                    _ = try self.addInst(.{
+                        .tag = .sub_immediate,
+                        .data = .{ .rr_imm12_sh = .{
+                            .rd = src_reg,
+                            .rn = .x29,
+                            .imm12 = src_offset,
+                        } },
+                    });
+                },
+                .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = addr }),
+                else => unreachable,
+            }

             // sub dst_reg, fp, #stack_offset
             const adj_dst_off = stack_offset + abi_size;
@@ -3105,7 +3215,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
 fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
     switch (mcv) {
         .dead => unreachable,
-        .ptr_stack_offset => unreachable,
         .ptr_embedded_in_code => unreachable,
         .unreach, .none => return, // Nothing to do.
         .undef => {
@@ -3118,6 +3227,24 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
                 else => unreachable, // unexpected register size
             }
         },
+        .ptr_stack_offset => |unadjusted_off| {
+            // TODO: maybe addressing from sp instead of fp
+            const elem_ty = ty.childType();
+            const abi_size = elem_ty.abiSize(self.target.*);
+            const adj_off = unadjusted_off + abi_size;
+
+            const imm12 = math.cast(u12, adj_off) catch
+                return self.fail("TODO larger stack offsets", .{});
+
+            _ = try self.addInst(.{
+                .tag = .sub_immediate,
+                .data = .{ .rr_imm12_sh = .{
+                    .rd = reg,
+                    .rn = .x29,
+                    .imm12 = imm12,
+                } },
+            });
+        },
         .compare_flags_unsigned,
         .compare_flags_signed,
         => |op| {
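
A `ptr_stack_offset` value can now be materialized into a register: stack slots are addressed downward from the frame pointer, so the pointer to a slot with unadjusted offset `off` holding a value of ABI size `size` is `fp - (off + size)`, emitted as `sub rd, x29, #imm12`. A sketch of the offset adjustment as a hypothetical helper (the 12-bit bound matches the immediate field of `sub`):

    const std = @import("std");

    /// Hypothetical helper mirroring the adjustment in genSetReg.
    fn frameImm12(unadjusted_off: u64, abi_size: u64) ?u12 {
        const adj_off = unadjusted_off + abi_size;
        if (adj_off > std.math.maxInt(u12)) return null; // "TODO larger stack offsets"
        return @intCast(u12, adj_off);
    }

    test "a 4-byte slot at offset 12 lives at fp - 16" {
        try std.testing.expectEqual(@as(?u12, 16), frameImm12(12, 4));
    }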


@@ -909,7 +909,7 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void {
         var other_reg: Register = undefined;
         while (i > 0) : (i -= 1) {
             const reg = @intToEnum(Register, i - 1);
-            if (reg_list & @as(u32, 1) << reg.id() != 0) {
+            if (reg_list & @as(u32, 1) << @intCast(u5, reg.id()) != 0) {
                 if (count % 2 == 0) {
                     if (count == number_of_regs - 1) {
                         try emit.writeInstruction(Instruction.ldr(
@@ -939,7 +939,7 @@ fn mirPushPopRegs(emit: *Emit, inst: Mir.Inst.Index) !void {
         var other_reg: Register = undefined;
         while (i < 32) : (i += 1) {
             const reg = @intToEnum(Register, i);
-            if (reg_list & @as(u32, 1) << reg.id() != 0) {
+            if (reg_list & @as(u32, 1) << @intCast(u5, reg.id()) != 0) {
                 if (count % 2 == 0) {
                     if (count == number_of_regs - 1) {
                         try emit.writeInstruction(Instruction.str(


@@ -7,7 +7,7 @@ const testing = std.testing;
 // zig fmt: off

 /// General purpose registers in the AArch64 instruction set
-pub const Register = enum(u6) {
+pub const Register = enum(u7) {
     // 64-bit registers
     x0, x1, x2, x3, x4, x5, x6, x7,
     x8, x9, x10, x11, x12, x13, x14, x15,
@@ -20,10 +20,23 @@ pub const Register = enum(u6) {
     w16, w17, w18, w19, w20, w21, w22, w23,
     w24, w25, w26, w27, w28, w29, w30, wzr,

-    pub const sp = Register.xzr; // Stack pointer
+    sp, wsp,

-    pub fn id(self: Register) u5 {
-        return @truncate(u5, @enumToInt(self));
+    pub fn id(self: Register) u6 {
+        return switch (@enumToInt(self)) {
+            0...63 => return @as(u6, @truncate(u5, @enumToInt(self))),
+            64...65 => 32,
+            else => unreachable,
+        };
+    }
+
+    pub fn enc(self: Register) u5 {
+        return switch (@enumToInt(self)) {
+            0...63 => return @truncate(u5, @enumToInt(self)),
+            64...65 => 31,
+            else => unreachable,
+        };
     }

     /// Returns the bit-width of the register.
@@ -31,17 +44,32 @@ pub const Register = enum(u6) {
         return switch (@enumToInt(self)) {
             0...31 => 64,
             32...63 => 32,
+            64 => 64,
+            65 => 32,
+            else => unreachable,
         };
     }

     /// Convert from any register to its 64 bit alias.
     pub fn to64(self: Register) Register {
-        return @intToEnum(Register, self.id());
+        return switch (@enumToInt(self)) {
+            0...31 => self,
+            32...63 => @intToEnum(Register, @enumToInt(self) - 32),
+            64 => .sp,
+            65 => .sp,
+            else => unreachable,
+        };
     }

     /// Convert from any register to its 32 bit alias.
     pub fn to32(self: Register) Register {
-        return @intToEnum(Register, @as(u6, self.id()) + 32);
+        return switch (@enumToInt(self)) {
+            0...31 => @intToEnum(Register, @enumToInt(self) + 32),
+            32...63 => self,
+            64 => .wsp,
+            65 => .wsp,
+            else => unreachable,
+        };
     }

     /// Returns the index into `callee_preserved_regs`.
@@ -53,7 +81,7 @@ pub const Register = enum(u6) {
     }

     pub fn dwarfLocOp(self: Register) u8 {
-        return @as(u8, self.id()) + DW.OP.reg0;
+        return @as(u8, self.enc()) + DW.OP.reg0;
     }
 };
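
With `sp` and `wsp` as real enum values, the old alias `pub const sp = Register.xzr` goes away. The `enc()`/`id()` split captures an AArch64 quirk: the stack pointer and the zero register share the 5-bit machine encoding 31 and are distinguished only by instruction context, so `enc()` returns the hardware encoding while the widened `id()` gives the stack pointer its own identity (32) for the register manager. A small test of the contrast, assuming the `Register` enum above is in scope:

    test "sp and xzr share an encoding but not an id" {
        try testing.expectEqual(@as(u5, 31), Register.sp.enc());
        try testing.expectEqual(@as(u5, 31), Register.xzr.enc());
        try testing.expectEqual(@as(u6, 32), Register.sp.id());
        try testing.expectEqual(@as(u6, 31), Register.xzr.id());
    }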
@@ -76,15 +104,15 @@ pub const callee_preserved_regs = callee_preserved_regs_impl.callee_preserved_regs;
 pub const c_abi_int_param_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
 pub const c_abi_int_return_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };

-test "Register.id" {
-    try testing.expectEqual(@as(u5, 0), Register.x0.id());
-    try testing.expectEqual(@as(u5, 0), Register.w0.id());
+test "Register.enc" {
+    try testing.expectEqual(@as(u5, 0), Register.x0.enc());
+    try testing.expectEqual(@as(u5, 0), Register.w0.enc());

-    try testing.expectEqual(@as(u5, 31), Register.xzr.id());
-    try testing.expectEqual(@as(u5, 31), Register.wzr.id());
+    try testing.expectEqual(@as(u5, 31), Register.xzr.enc());
+    try testing.expectEqual(@as(u5, 31), Register.wzr.enc());

-    try testing.expectEqual(@as(u5, 31), Register.sp.id());
-    try testing.expectEqual(@as(u5, 31), Register.sp.id());
+    try testing.expectEqual(@as(u5, 31), Register.sp.enc());
+    try testing.expectEqual(@as(u5, 31), Register.sp.enc());
 }

 test "Register.size" {
@@ -479,7 +507,7 @@ pub const Instruction = union(enum) {
             assert(shift % 16 == 0 and shift <= 16);
             return Instruction{
                 .move_wide_immediate = .{
-                    .rd = rd.id(),
+                    .rd = rd.enc(),
                     .imm16 = imm16,
                     .hw = @intCast(u2, shift / 16),
                     .opc = opc,
@@ -491,7 +519,7 @@ pub const Instruction = union(enum) {
             assert(shift % 16 == 0 and shift <= 48);
             return Instruction{
                 .move_wide_immediate = .{
-                    .rd = rd.id(),
+                    .rd = rd.enc(),
                     .imm16 = imm16,
                     .hw = @intCast(u2, shift / 16),
                     .opc = opc,
@@ -508,7 +536,7 @@ pub const Instruction = union(enum) {
         const imm21_u = @bitCast(u21, imm21);
         return Instruction{
             .pc_relative_address = .{
-                .rd = rd.id(),
+                .rd = rd.enc(),
                 .immlo = @truncate(u2, imm21_u),
                 .immhi = @truncate(u19, imm21_u >> 2),
                 .op = op,
@@ -580,7 +608,7 @@ pub const Instruction = union(enum) {
             pub fn reg(rm: Register) LoadStoreOffset {
                 return .{
                     .register = .{
-                        .rm = rm.id(),
+                        .rm = rm.enc(),
                         .shift = .{
                             .lsl = 0,
                         },
@@ -592,7 +620,7 @@ pub const Instruction = union(enum) {
                 assert(rm.size() == 32 and (shift == 0 or shift == 2));
                 return .{
                     .register = .{
-                        .rm = rm.id(),
+                        .rm = rm.enc(),
                         .shift = .{
                             .uxtw = shift,
                         },
@@ -604,7 +632,7 @@ pub const Instruction = union(enum) {
                 assert(rm.size() == 64 and (shift == 0 or shift == 3));
                 return .{
                     .register = .{
-                        .rm = rm.id(),
+                        .rm = rm.enc(),
                         .shift = .{
                             .lsl = shift,
                         },
@@ -616,7 +644,7 @@ pub const Instruction = union(enum) {
                 assert(rm.size() == 32 and (shift == 0 or shift == 2));
                 return .{
                     .register = .{
-                        .rm = rm.id(),
+                        .rm = rm.enc(),
                         .shift = .{
                             .sxtw = shift,
                         },
@@ -628,7 +656,7 @@ pub const Instruction = union(enum) {
                 assert(rm.size() == 64 and (shift == 0 or shift == 3));
                 return .{
                     .register = .{
-                        .rm = rm.id(),
+                        .rm = rm.enc(),
                         .shift = .{
                             .sxtx = shift,
                         },
@@ -676,8 +704,8 @@ pub const Instruction = union(enum) {
         };
         return Instruction{
             .load_store_register = .{
-                .rt = rt.id(),
-                .rn = rn.id(),
+                .rt = rt.enc(),
+                .rn = rn.enc(),
                 .offset = off,
                 .opc = opc,
                 .op1 = op1,
@@ -711,9 +739,9 @@ pub const Instruction = union(enum) {
                 const imm7 = @truncate(u7, @bitCast(u9, offset >> 2));
                 return Instruction{
                     .load_store_register_pair = .{
-                        .rt1 = rt1.id(),
-                        .rn = rn.id(),
-                        .rt2 = rt2.id(),
+                        .rt1 = rt1.enc(),
+                        .rn = rn.enc(),
+                        .rt2 = rt2.enc(),
                         .imm7 = imm7,
                         .load = @boolToInt(load),
                         .encoding = encoding,
@@ -726,9 +754,9 @@ pub const Instruction = union(enum) {
                 const imm7 = @truncate(u7, @bitCast(u9, offset >> 3));
                 return Instruction{
                     .load_store_register_pair = .{
-                        .rt1 = rt1.id(),
-                        .rn = rn.id(),
-                        .rt2 = rt2.id(),
+                        .rt1 = rt1.enc(),
+                        .rn = rn.enc(),
+                        .rt2 = rt2.enc(),
                         .imm7 = imm7,
                         .load = @boolToInt(load),
                         .encoding = encoding,
@@ -743,7 +771,7 @@ pub const Instruction = union(enum) {
     fn loadLiteral(rt: Register, imm19: u19) Instruction {
         return Instruction{
             .load_literal = .{
-                .rt = rt.id(),
+                .rt = rt.enc(),
                 .imm19 = imm19,
                 .opc = switch (rt.size()) {
                     32 => 0b00,
@@ -782,7 +810,7 @@ pub const Instruction = union(enum) {
         return Instruction{
             .unconditional_branch_register = .{
                 .op4 = op4,
-                .rn = rn.id(),
+                .rn = rn.enc(),
                 .op3 = op3,
                 .op2 = op2,
                 .opc = opc,
@@ -818,10 +846,10 @@ pub const Instruction = union(enum) {
                 assert(amount < 32);
                 return Instruction{
                     .logical_shifted_register = .{
-                        .rd = rd.id(),
-                        .rn = rn.id(),
+                        .rd = rd.enc(),
+                        .rn = rn.enc(),
                         .imm6 = amount,
-                        .rm = rm.id(),
+                        .rm = rm.enc(),
                         .n = n,
                         .shift = @enumToInt(shift),
                         .opc = opc,
@@ -832,10 +860,10 @@ pub const Instruction = union(enum) {
             64 => {
                 return Instruction{
                     .logical_shifted_register = .{
-                        .rd = rd.id(),
-                        .rn = rn.id(),
+                        .rd = rd.enc(),
+                        .rn = rn.enc(),
                         .imm6 = amount,
-                        .rm = rm.id(),
+                        .rm = rm.enc(),
                         .n = n,
                         .shift = @enumToInt(shift),
                         .opc = opc,
@@ -857,8 +885,8 @@ pub const Instruction = union(enum) {
     ) Instruction {
         return Instruction{
             .add_subtract_immediate = .{
-                .rd = rd.id(),
-                .rn = rn.id(),
+                .rd = rd.enc(),
+                .rn = rn.enc(),
                 .imm12 = imm12,
                 .sh = @boolToInt(shift),
                 .s = s,
@@ -885,10 +913,10 @@ pub const Instruction = union(enum) {
     ) Instruction {
         return Instruction{
             .add_subtract_shifted_register = .{
-                .rd = rd.id(),
-                .rn = rn.id(),
+                .rd = rd.enc(),
+                .rn = rn.enc(),
                 .imm6 = imm6,
-                .rm = rm.id(),
+                .rm = rm.enc(),
                 .shift = @enumToInt(shift),
                 .s = s,
                 .op = op,
@@ -926,7 +954,7 @@ pub const Instruction = union(enum) {
         assert(offset & 0b11 == 0b00);
         return Instruction{
             .compare_and_branch = .{
-                .rt = rt.id(),
+                .rt = rt.enc(),
                 .imm19 = @bitCast(u19, @intCast(i19, offset >> 2)),
                 .op = op,
                 .sf = switch (rt.size()) {
@@ -949,11 +977,11 @@ pub const Instruction = union(enum) {
     ) Instruction {
         return Instruction{
             .conditional_select = .{
-                .rd = rd.id(),
-                .rn = rn.id(),
+                .rd = rd.enc(),
+                .rn = rn.enc(),
                 .op2 = op2,
                 .cond = @enumToInt(cond),
-                .rm = rm.id(),
+                .rm = rm.enc(),
                 .s = s,
                 .op = op,
                 .sf = switch (rd.size()) {
@@ -976,11 +1004,11 @@ pub const Instruction = union(enum) {
     ) Instruction {
         return Instruction{
             .data_processing_3_source = .{
-                .rd = rd.id(),
-                .rn = rn.id(),
-                .ra = ra.id(),
+                .rd = rd.enc(),
+                .rn = rn.enc(),
+                .ra = ra.enc(),
                 .o0 = o0,
-                .rm = rm.id(),
+                .rm = rm.enc(),
                 .op31 = op31,
                 .op54 = op54,
                 .sf = switch (rd.size()) {


@@ -27,7 +27,6 @@ test "default alignment allows unspecified in type syntax" {
 }

 test "implicitly decreasing pointer alignment" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     const a: u32 align(4) = 3;
     const b: u32 align(8) = 4;
     try expect(addUnaligned(&a, &b) == 7);
@@ -38,7 +37,6 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
 }

 test "@alignCast pointers" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     var x: u32 align(4) = 1;
     expectsOnly1(&x);
     try expect(x == 2);
@@ -313,7 +311,7 @@ fn testIndex2(ptr: [*]align(4) u8, index: usize, comptime T: type) !void {
 }

 test "alignment of function with c calling convention" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
     if (builtin.zig_backend == .stage1) return error.SkipZigTest;

     var runtime_nothing = &nothing;


@@ -48,7 +48,7 @@ const g1: i32 = 1233 + 1;
 var g2: i32 = 0;

 test "global variables" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
     try expect(g2 == 0);
     g2 = g1;
     try expect(g2 == 1234);
@@ -327,7 +327,6 @@ const FnPtrWrapper = struct {
 };

 test "const ptr from var variable" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

     var x: u64 = undefined;
@@ -611,7 +610,7 @@ test "comptime cast fn to ptr" {
 }

 test "equality compare fn ptrs" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
     if (builtin.zig_backend == .stage1) return error.SkipZigTest;

     var a = &emptyFn;
@@ -619,7 +618,7 @@ test "equality compare fn ptrs" {
 }

 test "self reference through fn ptr field" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
     if (builtin.zig_backend == .stage1) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;


@@ -8,7 +8,6 @@ test "@bitReverse large exotic integer" {
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

     // Currently failing on stage1 for big-endian targets
     if (builtin.zig_backend == .stage1) return error.SkipZigTest;


@@ -6,7 +6,7 @@ const ptr = &global;
 var global: usize = 123;

 test "constant pointer to global variable causes runtime load" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
     global = 1234;
     try expect(&global == ptr);
     try expect(ptr.* == 1234);


@@ -6,7 +6,7 @@ const S = struct {
     p: *S,
 };

 test "bug 2006" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     var a: S = undefined;
     a = S{ .p = undefined };


@@ -211,7 +211,6 @@ test "implicit cast from *[N]T to [*c]T" {
 }

 test "*usize to *void" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     var i = @as(usize, 0);
     var v = @ptrCast(*void, &i);
     v.* = {};
@@ -1014,7 +1013,7 @@ test "cast from array reference to fn: comptime fn ptr" {
     try expect(@ptrToInt(f) == @ptrToInt(&global_array));
 }

 test "cast from array reference to fn: runtime fn ptr" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO


@@ -36,7 +36,6 @@ test "optional pointer to size zero struct" {
 }

 test "equality compare optional pointers" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO


@@ -81,7 +81,7 @@ fn assertLenIsZero(msg: []const u8) !void {
 }

 test "access len index of sentinel-terminated slice" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO

     const S = struct {


@@ -43,7 +43,6 @@ const StructWithFields = struct {
 };

 test "non-packed struct has fields padded out to the required alignment" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

     const foo = StructWithFields{ .a = 5, .b = 1, .c = 10, .d = 2 };
@@ -67,7 +66,7 @@ const SmallStruct = struct {
 };

 test "lower unnamed constants" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
     var foo = SmallStruct{ .a = 1, .b = 255 };
     try expect(foo.first() == 1);
     try expect(foo.second() == 255);
@@ -186,7 +185,6 @@ test "store member function in variable" {
 }

 test "member functions" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     const r = MemberFnRand{ .seed = 1234 };
     try expect(r.getSeed() == 1234);
 }