Merge pull request #11059 from joachimschmidt557/stage2-aarch64

stage2 AArch64: various improvements
Joachim Schmidt 2022-03-05 14:30:02 +01:00 committed by GitHub
commit ac936c0aba
30 changed files with 330 additions and 262 deletions

View File

@ -443,14 +443,17 @@ fn gen(self: *Self) !void {
});
// exitlude jumps
if (self.exitlude_jump_relocs.items.len == 1) {
// There is only one relocation. Hence,
// this relocation must be at the end of
// the code. Therefore, we can just delete
// the space initially reserved for the
// jump
self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.items[0]);
} else for (self.exitlude_jump_relocs.items) |jmp_reloc| {
if (self.exitlude_jump_relocs.items.len > 0 and
self.exitlude_jump_relocs.items[self.exitlude_jump_relocs.items.len - 1] == self.mir_instructions.len - 2)
{
// If the last Mir instruction (apart from the
// dbg_epilogue_begin) is the last exitlude jump
// relocation (which would just jump one instruction
// further), it can be safely removed
self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.pop());
}
for (self.exitlude_jump_relocs.items) |jmp_reloc| {
self.mir_instructions.set(jmp_reloc, .{
.tag = .b,
.data = .{ .inst = @intCast(u32, self.mir_instructions.len) },
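
As an aside (not from the commit itself): the new check drops a reserved exitlude branch when it is the second-to-last MIR instruction, because the only instruction after it is the dbg_epilogue_begin marker, so the branch would merely skip to the next instruction. A minimal standalone sketch of the same trimming logic, using plain array lists and made-up instruction names:

const std = @import("std");

test "drop a jump that only targets the next instruction" {
    const gpa = std.testing.allocator;
    // Toy stand-ins for mir_instructions and exitlude_jump_relocs.
    var instructions = std.ArrayList([]const u8).init(gpa);
    defer instructions.deinit();
    var exitlude_jump_relocs = std.ArrayList(usize).init(gpa);
    defer exitlude_jump_relocs.deinit();

    try instructions.appendSlice(&.{ "body", "reserved_jump", "dbg_epilogue_begin" });
    try exitlude_jump_relocs.append(1); // index of the reserved branch

    // Same condition as in gen(): the last reloc sits right before the
    // epilogue marker, so patching it into a real `b` would only jump one
    // instruction further; delete it instead.
    if (exitlude_jump_relocs.items.len > 0 and
        exitlude_jump_relocs.items[exitlude_jump_relocs.items.len - 1] == instructions.items.len - 2)
    {
        _ = instructions.orderedRemove(exitlude_jump_relocs.pop());
    }

    try std.testing.expectEqual(@as(usize, 2), instructions.items.len);
}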
@ -564,11 +567,11 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.cmp_gt => try self.airCmp(inst, .gt),
.cmp_neq => try self.airCmp(inst, .neq),
.bool_and => try self.airBoolOp(inst),
.bool_or => try self.airBoolOp(inst),
.bit_and => try self.airBitAnd(inst),
.bit_or => try self.airBitOr(inst),
.xor => try self.airXor(inst),
.bool_and => try self.airBinOp(inst),
.bool_or => try self.airBinOp(inst),
.bit_and => try self.airBinOp(inst),
.bit_or => try self.airBinOp(inst),
.xor => try self.airBinOp(inst),
.shr, .shr_exact => try self.airShr(inst),
.alloc => try self.airAlloc(inst),
@ -815,9 +818,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
if (reg_ok) {
// Make sure the type can fit in a register before we try to allocate one.
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
if (abi_size <= ptr_bytes) {
if (abi_size <= 8) {
if (self.register_manager.tryAllocReg(inst)) |reg| {
return MCValue{ .register = registerAlias(reg, abi_size) };
}
@ -950,10 +951,69 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
switch (operand_ty.zigTypeTag()) {
.Bool => {
// TODO convert this to mvn + and
const dest = try self.binOp(.xor, null, operand, .{ .immediate = 1 }, operand_ty, Type.bool);
break :result dest;
const op_reg = switch (operand) {
.register => |r| r,
else => try self.copyToTmpRegister(operand_ty, operand),
};
self.register_manager.freezeRegs(&.{op_reg});
defer self.register_manager.unfreezeRegs(&.{op_reg});
const dest_reg = blk: {
if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
break :blk op_reg;
}
break :blk try self.register_manager.allocReg(null);
};
_ = try self.addInst(.{
.tag = .eor_immediate,
.data = .{ .rr_bitmask = .{
.rd = dest_reg,
.rn = op_reg,
.imms = 0b000000,
.immr = 0b000000,
.n = 0b1,
} },
});
break :result MCValue{ .register = dest_reg };
},
else => return self.fail("TODO bitwise not", .{}),
.Vector => return self.fail("TODO bitwise not for vectors", .{}),
.Int => {
const int_info = operand_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
const op_reg = switch (operand) {
.register => |r| r,
else => try self.copyToTmpRegister(operand_ty, operand),
};
self.register_manager.freezeRegs(&.{op_reg});
defer self.register_manager.unfreezeRegs(&.{op_reg});
const dest_reg = blk: {
if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
break :blk op_reg;
}
break :blk try self.register_manager.allocReg(null);
};
_ = try self.addInst(.{
.tag = .mvn,
.data = .{ .rr_imm6_shift = .{
.rd = dest_reg,
.rm = op_reg,
.imm6 = 0,
.shift = .lsl,
} },
});
break :result MCValue{ .register = dest_reg };
} else {
return self.fail("TODO AArch64 not on integers > u64/i64", .{});
}
},
else => unreachable,
}
},
}
@ -976,7 +1036,20 @@ fn airMax(self: *Self, inst: Air.Inst.Index) !void {
fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice for {}", .{self.target.cpu.arch});
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
const len = try self.resolveInst(bin_op.rhs);
const len_ty = self.air.typeOf(bin_op.rhs);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
try self.genSetStack(ptr_ty, stack_offset + ptr_bytes, ptr);
try self.genSetStack(len_ty, stack_offset, len);
break :result MCValue{ .stack_offset = stack_offset };
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
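
As an aside (not from the commit itself): airSlice stores the length at stack_offset and the pointer one word above it, reserving ptr_bytes * 2 in total, because a Zig slice value is simply a (pointer, length) pair. A quick language-level check of that layout assumption:

const std = @import("std");

test "a slice value is a (ptr, len) pair" {
    var array = [_]u8{ 1, 2, 3, 4 };
    const slice: []u8 = array[0..];
    // Two machine words per slice, hence the ptr_bytes * 2 stack allocation
    // in airSlice and airArrayToSlice.
    try std.testing.expect(@sizeOf([]u8) == 2 * @sizeOf(usize));
    try std.testing.expect(slice.len == 4);
    try std.testing.expect(@ptrToInt(slice.ptr) == @ptrToInt(&array));
}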
@ -1051,9 +1124,19 @@ fn binOpRegister(
if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
const mir_tag: Mir.Inst.Tag = switch (tag) {
.add, .ptr_add => .add_shifted_register,
.sub, .ptr_sub => .sub_shifted_register,
.add,
.ptr_add,
=> .add_shifted_register,
.sub,
.ptr_sub,
=> .sub_shifted_register,
.mul => .mul,
.bit_and,
.bool_and,
=> .and_shifted_register,
.bit_or,
.bool_or,
=> .orr_shifted_register,
.xor => .eor_shifted_register,
else => unreachable,
};
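
As an aside (not from the commit itself): bool_and and bool_or can share the and/orr lowering with their bitwise counterparts because runtime booleans are materialized as 0 or 1, so the bitwise result coincides with the logical one. A small sketch of that equivalence:

const std = @import("std");

fn sameAsLogical(a: u1, b: u1) bool {
    // Bitwise ops on 0/1 coincide with the logical ops Sema guarantees here.
    return (a & b) == @boolToInt(a == 1 and b == 1) and
        (a | b) == @boolToInt(a == 1 or b == 1);
}

test "bitwise and/or on 0/1 values matches logical and/or" {
    try std.testing.expect(sameAsLogical(1, 0));
    try std.testing.expect(sameAsLogical(1, 1));
    try std.testing.expect(sameAsLogical(0, 0));
}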
@ -1074,7 +1157,12 @@ fn binOpRegister(
.rn = lhs_reg,
.rm = rhs_reg,
} },
.xor => .{ .rrr_imm6_logical_shift = .{
.bit_and,
.bool_and,
.bit_or,
.bool_or,
.xor,
=> .{ .rrr_imm6_logical_shift = .{
.rd = dest_reg,
.rn = lhs_reg,
.rm = rhs_reg,
@ -1252,20 +1340,40 @@ fn binOp(
// lowered to a << 1
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
} else {
return self.fail("TODO ARM binary operations on integers > u32/i32", .{});
return self.fail("TODO binary operations on int with bits > 64", .{});
}
},
else => unreachable,
}
},
// Bitwise operations on integers
.xor => {
.bit_and,
.bit_or,
.xor,
=> {
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => return self.fail("TODO binary operations on vectors", .{}),
.Bool => {
.Int => {
assert(lhs_ty.eql(rhs_ty));
// TODO boolean operations with immediates
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
// TODO implement bitwise operations with immediates
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
} else {
return self.fail("TODO binary operations on int with bits > 64", .{});
}
},
else => unreachable,
}
},
.bool_and,
.bool_or,
=> {
switch (lhs_ty.zigTypeTag()) {
.Bool => {
assert(lhs != .immediate); // should have been handled by Sema
assert(rhs != .immediate); // should have been handled by Sema
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
},
else => unreachable,
@ -1387,24 +1495,6 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airBitOr(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement bitwise or for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airXor(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement xor for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airShl(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement shl for {}", .{self.target.cpu.arch});
@ -1523,22 +1613,39 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_ptr for {}", .{self.target.cpu.arch});
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
const mcv = try self.resolveInst(ty_op.operand);
switch (mcv) {
.dead, .unreach, .none => unreachable,
.register => unreachable, // a slice doesn't fit in one register
.stack_offset => |off| {
break :result MCValue{ .stack_offset = off + ptr_bytes };
},
.memory => |addr| {
break :result MCValue{ .memory = addr };
},
else => return self.fail("TODO implement slice_len for {}", .{mcv}),
}
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
const mcv = try self.resolveInst(ty_op.operand);
switch (mcv) {
.dead, .unreach => unreachable,
.dead, .unreach, .none => unreachable,
.register => unreachable, // a slice doesn't fit in one register
.stack_offset => |off| {
break :result MCValue{ .stack_offset = off };
},
.memory => |addr| {
break :result MCValue{ .memory = addr + 8 };
break :result MCValue{ .memory = addr + ptr_bytes };
},
else => return self.fail("TODO implement slice_len for {}", .{mcv}),
}
@ -1548,13 +1655,33 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_len_ptr for {}", .{self.target.cpu.arch});
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
const mcv = try self.resolveInst(ty_op.operand);
switch (mcv) {
.dead, .unreach, .none => unreachable,
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off + ptr_bytes };
},
else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}),
}
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{self.target.cpu.arch});
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const mcv = try self.resolveInst(ty_op.operand);
switch (mcv) {
.dead, .unreach, .none => unreachable,
.ptr_stack_offset => |off| {
break :result MCValue{ .ptr_stack_offset = off };
},
else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}),
}
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@ -2882,7 +3009,17 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
const body = self.air.extra[extra.end..][0..extra.data.body_len];
try self.genBody(body);
for (self.blocks.getPtr(inst).?.relocs.items) |reloc| try self.performReloc(reloc);
// relocations for `br` instructions
const relocs = &self.blocks.getPtr(inst).?.relocs;
if (relocs.items.len > 0 and relocs.items[relocs.items.len - 1] == self.mir_instructions.len - 1) {
// If the last Mir instruction is the last relocation (which
// would just jump one instruction further), it can be safely
// removed
self.mir_instructions.orderedRemove(relocs.pop());
}
for (relocs.items) |reloc| {
try self.performReloc(reloc);
}
const result = self.blocks.getPtr(inst).?.mcv;
return self.finishAir(inst, result, .{ .none, .none, .none });
@ -2912,15 +3049,6 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, .dead, .{ branch.operand, .none, .none });
}
fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const air_tags = self.air.instructions.items(.tag);
_ = air_tags;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement boolean operations for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
@ -3136,11 +3264,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
4, 8 => .str_stack,
else => unreachable, // unexpected abi size
};
const rt: Register = switch (abi_size) {
1, 2, 4 => reg.to32(),
8 => reg.to64(),
else => unreachable, // unexpected abi size
};
const rt = registerAlias(reg, abi_size);
_ = try self.addInst(.{
.tag = tag,
@ -3399,9 +3523,20 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airArrayToSlice for {}", .{
self.target.cpu.arch,
});
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const ptr_ty = self.air.typeOf(ty_op.operand);
const ptr = try self.resolveInst(ty_op.operand);
const array_ty = ptr_ty.childType();
const array_len = @intCast(u32, array_ty.arrayLen());
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes = @divExact(ptr_bits, 8);
const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2);
try self.genSetStack(ptr_ty, stack_offset + ptr_bytes, ptr);
try self.genSetStack(Type.initTag(.usize), stack_offset, .{ .immediate = array_len });
break :result MCValue{ .stack_offset = stack_offset };
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@ -3622,7 +3757,6 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
if (typed_value.val.castTag(.decl_ref)) |payload| {
return self.lowerDeclRef(typed_value, payload.data);
@ -3652,13 +3786,19 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
},
.Int => {
const info = typed_value.ty.intInfo(self.target.*);
if (info.bits <= ptr_bits and info.signedness == .signed) {
return MCValue{ .immediate = @bitCast(u64, typed_value.val.toSignedInt()) };
if (info.bits <= 64) {
const unsigned = switch (info.signedness) {
.signed => blk: {
const signed = typed_value.val.toSignedInt();
break :blk @bitCast(u64, signed);
},
.unsigned => typed_value.val.toUnsignedInt(),
};
return MCValue{ .immediate = unsigned };
} else {
return self.lowerUnnamedConst(typed_value);
}
if (info.bits > ptr_bits or info.signedness == .signed) {
return self.fail("TODO const int bigger than ptr and signed int", .{});
}
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
},
.Bool => {
return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) };
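
As an aside (not from the commit itself): the signed branch goes through @bitCast because the immediate field holds the raw two's-complement bits in a u64; the value is recovered unchanged by casting back. For example:

const std = @import("std");

test "signed immediates round-trip through u64 bits" {
    const signed: i64 = -2;
    const unsigned = @bitCast(u64, signed); // raw two's-complement bits
    try std.testing.expect(unsigned == 0xffff_ffff_ffff_fffe);
    try std.testing.expect(@bitCast(i64, unsigned) == -2);
}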
@ -3875,7 +4015,7 @@ fn parseRegName(name: []const u8) ?Register {
return std.meta.stringToEnum(Register, name);
}
fn registerAlias(reg: Register, size_bytes: u32) Register {
fn registerAlias(reg: Register, size_bytes: u64) Register {
if (size_bytes == 0) {
unreachable; // should be comptime known
} else if (size_bytes <= 4) {

View File

@ -95,6 +95,8 @@ pub fn emitMir(
.call_extern => try emit.mirCallExtern(inst),
.eor_immediate => try emit.mirLogicalImmediate(inst),
.add_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
.cmp_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
.sub_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
@ -106,7 +108,9 @@ pub fn emitMir(
.dbg_prologue_end => try emit.mirDebugPrologueEnd(),
.dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(),
.and_shifted_register => try emit.mirLogicalShiftedRegister(inst),
.eor_shifted_register => try emit.mirLogicalShiftedRegister(inst),
.orr_shifted_register => try emit.mirLogicalShiftedRegister(inst),
.load_memory_got => try emit.mirLoadMemoryPie(inst),
.load_memory_direct => try emit.mirLoadMemoryPie(inst),
@ -605,6 +609,21 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
fn mirLogicalImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rr_bitmask = emit.mir.instructions.items(.data)[inst].rr_bitmask;
const rd = rr_bitmask.rd;
const rn = rr_bitmask.rn;
const imms = rr_bitmask.imms;
const immr = rr_bitmask.immr;
const n = rr_bitmask.n;
switch (tag) {
.eor_immediate => try emit.writeInstruction(Instruction.eorImmediate(rd, rn, imms, immr, n)),
else => unreachable,
}
}
fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rrr_imm6_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_shift;
@ -643,7 +662,9 @@ fn mirLogicalShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const imm6 = rrr_imm6_logical_shift.imm6;
switch (tag) {
.eor_shifted_register => try emit.writeInstruction(Instruction.eor(rd, rn, rm, shift, imm6)),
.and_shifted_register => try emit.writeInstruction(Instruction.andShiftedRegister(rd, rn, rm, shift, imm6)),
.eor_shifted_register => try emit.writeInstruction(Instruction.eorShiftedRegister(rd, rn, rm, shift, imm6)),
.orr_shifted_register => try emit.writeInstruction(Instruction.orrShiftedRegister(rd, rn, rm, shift, imm6)),
else => unreachable,
}
}
@ -844,7 +865,7 @@ fn mirMoveRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
switch (tag) {
.mov_register => {
const rr = emit.mir.instructions.items(.data)[inst].rr;
try emit.writeInstruction(Instruction.orr(rr.rd, .xzr, rr.rn, .lsl, 0));
try emit.writeInstruction(Instruction.orrShiftedRegister(rr.rd, .xzr, rr.rn, .lsl, 0));
},
.mov_to_from_sp => {
const rr = emit.mir.instructions.items(.data)[inst].rr;
@ -852,7 +873,7 @@ fn mirMoveRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
},
.mvn => {
const rr_imm6_shift = emit.mir.instructions.items(.data)[inst].rr_imm6_shift;
try emit.writeInstruction(Instruction.orn(rr_imm6_shift.rd, .xzr, rr_imm6_shift.rm, .lsl, 0));
try emit.writeInstruction(Instruction.ornShiftedRegister(rr_imm6_shift.rd, .xzr, rr_imm6_shift.rm, rr_imm6_shift.shift, rr_imm6_shift.imm6));
},
else => unreachable,
}

View File

@ -28,6 +28,8 @@ pub const Inst = struct {
add_immediate,
/// Add (shifted register)
add_shifted_register,
/// Bitwise AND (shifted register)
and_shifted_register,
/// Branch conditionally
b_cond,
/// Branch
@ -54,6 +56,8 @@ pub const Inst = struct {
dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
dbg_line,
/// Bitwise Exclusive OR (immediate)
eor_immediate,
/// Bitwise Exclusive OR (shifted register)
eor_shifted_register,
/// Loads the contents into a register
@ -106,6 +110,8 @@ pub const Inst = struct {
mvn,
/// No Operation
nop,
/// Bitwise inclusive OR (shifted register)
orr_shifted_register,
/// Pseudo-instruction: Pop multiple registers
pop_regs,
/// Pseudo-instruction: Push multiple registers
@ -231,14 +237,25 @@ pub const Inst = struct {
imm12: u12,
sh: u1 = 0,
},
/// Two registers and a shift (shift type and 6-bit amount)
/// Two registers and a shift (logical instruction version)
/// (shift type and 6-bit amount)
///
/// Used by e.g. mvn
rr_imm6_shift: struct {
rd: Register,
rm: Register,
imm6: u6,
shift: bits.Instruction.AddSubtractShiftedRegisterShift,
shift: bits.Instruction.LogicalShiftedRegisterShift,
},
/// Two registers and a bitmask immediate
///
/// Used by e.g. eor_immediate
rr_bitmask: struct {
rd: Register,
rn: Register,
imms: u6,
immr: u6,
n: u1,
},
/// Two registers
///

View File

@ -323,6 +323,16 @@ pub const Instruction = union(enum) {
op: u1,
sf: u1,
},
logical_immediate: packed struct {
rd: u5,
rn: u5,
imms: u6,
immr: u6,
n: u1,
fixed: u6 = 0b100100,
opc: u2,
sf: u1,
},
add_subtract_shifted_register: packed struct {
rd: u5,
rn: u5,
@ -487,6 +497,7 @@ pub const Instruction = union(enum) {
.no_operation => |v| @bitCast(u32, v),
.logical_shifted_register => |v| @bitCast(u32, v),
.add_subtract_immediate => |v| @bitCast(u32, v),
.logical_immediate => |v| @bitCast(u32, v),
.add_subtract_shifted_register => |v| @bitCast(u32, v),
// TODO once packed structs work, this can be refactored
.conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
@ -900,6 +911,31 @@ pub const Instruction = union(enum) {
};
}
fn logicalImmediate(
opc: u2,
rd: Register,
rn: Register,
imms: u6,
immr: u6,
n: u1,
) Instruction {
return Instruction{
.logical_immediate = .{
.rd = rd.enc(),
.rn = rn.enc(),
.imms = imms,
.immr = immr,
.n = n,
.opc = opc,
.sf = switch (rd.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable, // unexpected register size
},
},
};
}
pub const AddSubtractShiftedRegisterShift = enum(u2) { lsl, lsr, asr, _ };
fn addSubtractShiftedRegister(
@ -1173,7 +1209,7 @@ pub const Instruction = union(enum) {
// Logical (shifted register)
pub fn @"and"(
pub fn andShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@ -1183,7 +1219,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b00, 0b0, rd, rn, rm, shift, amount);
}
pub fn bic(
pub fn bicShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@ -1193,7 +1229,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b00, 0b1, rd, rn, rm, shift, amount);
}
pub fn orr(
pub fn orrShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@ -1203,7 +1239,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b01, 0b0, rd, rn, rm, shift, amount);
}
pub fn orn(
pub fn ornShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@ -1213,7 +1249,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b01, 0b1, rd, rn, rm, shift, amount);
}
pub fn eor(
pub fn eorShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@ -1223,7 +1259,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b10, 0b0, rd, rn, rm, shift, amount);
}
pub fn eon(
pub fn eonShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@ -1233,7 +1269,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b10, 0b1, rd, rn, rm, shift, amount);
}
pub fn ands(
pub fn andsShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@ -1243,7 +1279,7 @@ pub const Instruction = union(enum) {
return logicalShiftedRegister(0b11, 0b0, rd, rn, rm, shift, amount);
}
pub fn bics(
pub fn bicsShiftedRegister(
rd: Register,
rn: Register,
rm: Register,
@ -1271,6 +1307,24 @@ pub const Instruction = union(enum) {
return addSubtractImmediate(0b1, 0b1, rd, rn, imm, shift);
}
// Logical (immediate)
pub fn andImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
return logicalImmediate(0b00, rd, rn, imms, immr, n);
}
pub fn orrImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
return logicalImmediate(0b01, rd, rn, imms, immr, n);
}
pub fn eorImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
return logicalImmediate(0b10, rd, rn, imms, immr, n);
}
pub fn andsImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
return logicalImmediate(0b11, rd, rn, imms, immr, n);
}
// Add/subtract (shifted register)
pub fn addShiftedRegister(
@ -1378,11 +1432,11 @@ test "serialize instructions" {
const testcases = [_]Testcase{
.{ // orr x0, xzr, x1
.inst = Instruction.orr(.x0, .xzr, .x1, .lsl, 0),
.inst = Instruction.orrShiftedRegister(.x0, .xzr, .x1, .lsl, 0),
.expected = 0b1_01_01010_00_0_00001_000000_11111_00000,
},
.{ // orn x0, xzr, x1
.inst = Instruction.orn(.x0, .xzr, .x1, .lsl, 0),
.inst = Instruction.ornShiftedRegister(.x0, .xzr, .x1, .lsl, 0),
.expected = 0b1_01_01010_00_1_00001_000000_11111_00000,
},
.{ // movz x1, #4
@ -1502,11 +1556,11 @@ test "serialize instructions" {
.expected = 0b10_101_0_001_1_0000010_00010_11111_00001,
},
.{ // and x0, x4, x2
.inst = Instruction.@"and"(.x0, .x4, .x2, .lsl, 0),
.inst = Instruction.andShiftedRegister(.x0, .x4, .x2, .lsl, 0),
.expected = 0b1_00_01010_00_0_00010_000000_00100_00000,
},
.{ // and x0, x4, x2, lsl #0x8
.inst = Instruction.@"and"(.x0, .x4, .x2, .lsl, 0x8),
.inst = Instruction.andShiftedRegister(.x0, .x4, .x2, .lsl, 0x8),
.expected = 0b1_00_01010_00_0_00010_001000_00100_00000,
},
.{ // add x0, x10, #10
@ -1537,6 +1591,10 @@ test "serialize instructions" {
.inst = Instruction.mul(.x1, .x4, .x9),
.expected = 0b1_00_11011_000_01001_0_11111_00100_00001,
},
.{ // eor x3, x5, #1
.inst = Instruction.eorImmediate(.x3, .x5, 0b000000, 0b000000, 0b1),
.expected = 0b1_10_100100_1_000000_000000_00101_00011,
},
};
for (testcases) |case| {
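
As an aside (not from the commit itself): the new eorImmediate test case encodes `eor x3, x5, #1`; with the bitmask-immediate fields n=1 (64-bit element), imms=0b000000 (a single set bit) and immr=0b000000 (no rotation), the encoded constant is 1, which is exactly what airNot uses for boolean negation. A usage sketch, assuming the file is importable as shown and that the serializer is the toU32 helper exercised by the test loop above:

const std = @import("std");
const Instruction = @import("bits.zig").Instruction; // illustrative import path

test "eor with bitmask immediate #1" {
    const inst = Instruction.eorImmediate(.x3, .x5, 0b000000, 0b000000, 0b1);
    // Expected bits copied from the "serialize instructions" test case above.
    try std.testing.expectEqual(
        @as(u32, 0b1_10_100100_1_000000_000000_00101_00011),
        inst.toU32(), // serialization helper; name assumed
    );
}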

View File

@ -269,7 +269,6 @@ fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 {
test "runtime known array index has best alignment possible" {
if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
// take full advantage of over-alignment

View File

@ -142,8 +142,6 @@ test "array with sentinels" {
}
test "void arrays" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var array: [4]void = undefined;
array[0] = void{};
array[1] = array[2];

View File

@ -75,8 +75,6 @@ fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signe
}
test "nested bitcast" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
fn moo(x: isize) !void {
try expect(@intCast(isize, 42) == x);
@ -94,8 +92,6 @@ test "nested bitcast" {
}
test "@bitCast enum to its integer type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const SOCK = enum(c_int) {
A,
B,
@ -113,15 +109,11 @@ test "@bitCast enum to its integer type" {
// issue #3010: compiler segfault
test "bitcast literal [4]u8 param to u32" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const ip = @bitCast(u32, [_]u8{ 255, 255, 255, 255 });
try expect(ip == maxInt(u32));
}
test "bitcast generates a temporary value" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var y = @as(u16, 0x55AA);
const x = @bitCast(u16, @bitCast([2]u8, y));
try expect(y == x);
@ -240,7 +232,6 @@ test "implicit cast to error union by returning" {
test "bitcast packed struct literal to byte" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Foo = packed struct {
value: u8,
@ -252,7 +243,6 @@ test "bitcast packed struct literal to byte" {
test "comptime bitcast used in expression has the correct type" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Foo = packed struct {
value: u8,

View File

@ -10,7 +10,6 @@ const Mixin = struct {
};
test "container member access usingnamespace decls" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
var foo = Foo{};
foo.two();

View File

@ -7,8 +7,6 @@ const Container = struct {
};
test "fixed" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var ctr = Container{
.params = NoteParams{},
};

View File

@ -6,8 +6,6 @@ const xxx = struct {
}
};
test "bug 704" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var x: xxx = undefined;
x.bar();
}

View File

@ -984,7 +984,6 @@ test "peer type resolve array pointers, one of them const" {
test "peer type resolve array pointer and unknown pointer" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@ -1255,7 +1254,6 @@ test "assignment to optional pointer result loc" {
}
test "cast between *[N]void and []void" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
var a: [4]void = undefined;

View File

@ -5,8 +5,6 @@ const expectEqual = std.testing.expectEqual;
const expectError = std.testing.expectError;
test "break and continue inside loop inside defer expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
testBreakContInDefer(10);
comptime testBreakContInDefer(10);
}
@ -23,8 +21,6 @@ fn testBreakContInDefer(x: usize) void {
}
test "defer and labeled break" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var i = @as(usize, 0);
blk: {

View File

@ -11,8 +11,6 @@ fn shouldEqual(n: Number, expected: u3) !void {
}
test "enum to int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try shouldEqual(Number.Zero, 0);
try shouldEqual(Number.One, 1);
try shouldEqual(Number.Two, 2);
@ -558,8 +556,6 @@ const ValueCount257 = enum {
};
test "enum sizes" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
comptime {
try expect(@sizeOf(ValueCount1) == 0);
try expect(@sizeOf(ValueCount2) == 1);
@ -569,8 +565,6 @@ test "enum sizes" {
}
test "enum literal equality" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const x = .hi;
const y = .ok;
const z = .hi;
@ -580,8 +574,6 @@ test "enum literal equality" {
}
test "enum literal cast to enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Color = enum { Auto, Off, On };
var color1: Color = .Auto;
@ -590,8 +582,6 @@ test "enum literal cast to enum" {
}
test "peer type resolution with enum literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Items = enum { one, two };
try expect(Items.two == .two);
@ -668,8 +658,6 @@ test "non-exhaustive enum" {
}
test "empty non-exhaustive enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
const E = enum(u8) { _ };
@ -732,8 +720,6 @@ const EnumWithTagValues = enum(u4) {
D = 1 << 3,
};
test "enum with tag values don't require parens" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(@enumToInt(EnumWithTagValues.C) == 0b0100);
}
@ -750,8 +736,6 @@ const MultipleChoice2 = enum(u32) {
};
test "cast integer literal to enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(@intToEnum(MultipleChoice2, 0) == MultipleChoice2.Unspecified1);
try expect(@intToEnum(MultipleChoice2, 40) == MultipleChoice2.B);
}
@ -783,8 +767,6 @@ const Small2 = enum(u2) { One, Two };
const Small = enum(u2) { One, Two, Three, Four };
test "set enum tag type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
{
var x = Small.One;
x = Small.Two;
@ -798,8 +780,6 @@ test "set enum tag type" {
}
test "casting enum to its tag type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try testCastEnumTag(Small2.Two);
comptime try testCastEnumTag(Small2.Two);
}
@ -809,8 +789,6 @@ fn testCastEnumTag(value: Small2) !void {
}
test "enum with 1 field but explicit tag type should still have the tag type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Enum = enum(u8) {
B = 2,
};
@ -818,8 +796,6 @@ test "enum with 1 field but explicit tag type should still have the tag type" {
}
test "signed integer as enum tag" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const SignedEnum = enum(i2) {
A0 = -1,
A1 = 0,
@ -832,8 +808,6 @@ test "signed integer as enum tag" {
}
test "enum with one member and custom tag type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const E = enum(u2) {
One,
};
@ -845,8 +819,6 @@ test "enum with one member and custom tag type" {
}
test "enum with one member and u1 tag type @enumToInt" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Enum = enum(u1) {
Test,
};
@ -854,8 +826,6 @@ test "enum with one member and u1 tag type @enumToInt" {
}
test "enum with comptime_int tag type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Enum = enum(comptime_int) {
One = 3,
Two = 2,
@ -865,8 +835,6 @@ test "enum with comptime_int tag type" {
}
test "enum with one member default to u0 tag type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const E0 = enum { X };
comptime try expect(Tag(E0) == u0);
}
@ -883,15 +851,11 @@ fn doALoopThing(id: EnumWithOneMember) void {
}
test "comparison operator on enum with one member is comptime known" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
doALoopThing(EnumWithOneMember.Eof);
}
const State = enum { Start };
test "switch on enum with one member is comptime known" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var state = State.Start;
switch (state) {
State.Start => return,
@ -900,8 +864,6 @@ test "switch on enum with one member is comptime known" {
}
test "method call on an enum" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
const E = enum {
one,
@ -1141,8 +1103,6 @@ fn getC(data: *const BitFieldOfEnums) C {
}
test "enum literal in array literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Items = enum { one, two };
const array = [_]Items{ .one, .two };

View File

@ -6,16 +6,12 @@ const expectEqual = std.testing.expectEqual;
const mem = std.mem;
test "error values" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const a = @errorToInt(error.err1);
const b = @errorToInt(error.err2);
try expect(a != b);
}
test "redefinition of error values allowed" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
shouldBeNotEqual(error.AnError, error.SecondError);
}
fn shouldBeNotEqual(a: anyerror, b: anyerror) void {
@ -36,8 +32,6 @@ fn errBinaryOperatorG(x: bool) anyerror!isize {
}
test "empty error union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const x = error{} || error{};
_ = x;
}
@ -91,8 +85,6 @@ fn makeANonErr() anyerror!i32 {
}
test "syntax: optional operator in front of error union operator" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
comptime {
try expect(?(anyerror!i32) == ?(anyerror!i32));
}
@ -147,8 +139,6 @@ test "implicit cast to optional to error union to return result loc" {
}
test "error: fn returning empty error set can be passed as fn returning any error" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
entry();
comptime entry();
}
@ -165,7 +155,6 @@ fn foo2(f: fn () anyerror!void) void {
fn bar2() (error{}!void) {}
test "error union type " {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@ -182,7 +171,6 @@ fn testErrorUnionType() !void {
}
test "error set type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@ -209,7 +197,6 @@ fn testErrorSetType() !void {
}
test "explicit error set cast" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO

View File

@ -24,7 +24,6 @@ test "floating point comparisons" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testFloatComparisons();
comptime try testFloatComparisons();
@ -96,7 +95,6 @@ test "negative f128 floatToInt at compile-time" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const a: f128 = -2;
var b = @floatToInt(i64, a);

View File

@ -5,8 +5,6 @@ const expect = testing.expect;
const expectEqual = testing.expectEqual;
test "params" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(testParamsAdd(22, 11) == 33);
}
fn testParamsAdd(a: i32, b: i32) i32 {
@ -14,8 +12,6 @@ fn testParamsAdd(a: i32, b: i32) i32 {
}
test "local variables" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
testLocVars(2);
}
fn testLocVars(b: i32) void {
@ -24,8 +20,6 @@ fn testLocVars(b: i32) void {
}
test "mutable local variables" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var zero: i32 = 0;
try expect(zero == 0);
@ -37,8 +31,6 @@ test "mutable local variables" {
}
test "separate block scopes" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
{
const no_conflict: i32 = 5;
try expect(no_conflict == 5);
@ -55,14 +47,10 @@ fn @"weird function name"() i32 {
return 1234;
}
test "weird function name" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(@"weird function name"() == 1234);
}
test "assign inline fn to const variable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const a = inlineFn;
a();
}
@ -80,8 +68,6 @@ fn outer(y: u32) *const fn (u32) u32 {
}
test "return inner function which references comptime variable of outer function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
var func = outer(10);
@ -149,8 +135,6 @@ test "inline function call that calls optional function pointer, return pointer
}
test "implicit cast function unreachable return" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
wantsFnWithVoid(fnWithUnreachable);
}
@ -348,8 +332,6 @@ fn fn4() u32 {
}
test "number literal as an argument" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try numberLiteralArg(3);
comptime try numberLiteralArg(3);
}
@ -380,8 +362,6 @@ test "function call with anon list literal" {
}
test "ability to give comptime types and non comptime types to same parameter" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
var x: i32 = 1;

View File

@ -21,8 +21,6 @@ test "continue in for loop" {
}
test "break from outer for loop" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try testBreakOuter();
comptime try testBreakOuter();
}
@ -40,8 +38,6 @@ fn testBreakOuter() !void {
}
test "continue outer for loop" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try testContinueOuter();
comptime try testContinueOuter();
}
@ -59,8 +55,6 @@ fn testContinueOuter() !void {
}
test "ignore lval with underscore (for loop)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
for ([_]void{}) |_, i| {
_ = i;
for ([_]void{}) |_, j| {

View File

@ -5,8 +5,6 @@ const expect = testing.expect;
const expectEqual = testing.expectEqual;
test "one param, explicit comptime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var x: usize = 0;
x += checkSize(i32);
x += checkSize(bool);
@ -42,8 +40,6 @@ fn add(comptime a: i32, b: i32) i32 {
const the_max = max(u32, 1234, 5678);
test "compile time generic eval" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(the_max == 5678);
}
@ -142,8 +138,6 @@ pub fn SmallList(comptime T: type, comptime STATIC_SIZE: usize) type {
}
test "const decls in struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(GenericDataThing(3).count_plus_one == 4);
}
fn GenericDataThing(comptime count: isize) type {
@ -153,8 +147,6 @@ fn GenericDataThing(comptime count: isize) type {
}
test "use generic param in generic param" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(aGenericFn(i32, 3, 4) == 7);
}
fn aGenericFn(comptime T: type, comptime a: T, b: T) T {
@ -197,7 +189,6 @@ test "generic fn keeps non-generic parameter types" {
}
test "array of generic fns" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
try expect(foos[0](true));

View File

@ -4,8 +4,6 @@ const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
test "if statements" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
shouldBeEqual(1, 1);
firstEqlThird(2, 1, 2);
}
@ -29,8 +27,6 @@ fn firstEqlThird(a: i32, b: i32, c: i32) void {
}
test "else if expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(elseIfExpressionF(1) == 1);
}
fn elseIfExpressionF(c: u8) u8 {
@ -64,8 +60,6 @@ test "unwrap mutable global var" {
}
test "labeled break inside comptime if inside runtime if" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var answer: i32 = 0;
var c = true;
if (c) {
@ -77,8 +71,6 @@ test "labeled break inside comptime if inside runtime if" {
}
test "const result loc, runtime if cond, else unreachable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Num = enum { One, Two };
var t = true;

View File

@ -2,7 +2,6 @@ const builtin = @import("builtin");
test "casting integer address to function pointer" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
addressToFunction();
comptime addressToFunction();

View File

@ -312,7 +312,6 @@ test "comptime_int multi-limb partial shift right" {
test "xor" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try test_xor();
comptime try test_xor();
@ -732,7 +731,6 @@ test "overflow arithmetic with u0 values" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
var result: u0 = undefined;
try expect(!@addWithOverflow(u0, 0, 0, &result));

View File

@ -125,8 +125,6 @@ fn baz(x: ?Empty) ?Empty {
}
test "null with default unwrap" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const x: i32 = null orelse 1;
try expect(x == 1);
}

View File

@ -218,8 +218,6 @@ test "compile time slice of pointer to hard coded address" {
}
test "slice string literal has correct type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
comptime {
try expect(@TypeOf("aoeu"[0..]) == *const [4:0]u8);
const array = [_]i32{ 1, 2, 3, 4 };

View File

@ -213,8 +213,6 @@ fn makeBar2(x: i32, y: i32) Bar {
}
test "call method with mutable reference to struct with no fields" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
fn doC(s: *const @This()) bool {
_ = s;
@ -768,7 +766,6 @@ test "pointer to packed struct member in a stack variable" {
test "packed struct with u0 field access" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = packed struct {

View File

@ -190,8 +190,6 @@ test "switch with disjoint range" {
}
test "switch variable for range and multiple prongs" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
var u: u8 = 16;
@ -357,8 +355,6 @@ fn returnsFalse() bool {
}
}
test "switch on const enum with var" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try expect(!returnsFalse());
}

View File

@ -21,8 +21,6 @@ fn add(x: i32, y: i32) i32 {
}
test "this refer to module call private fn" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(module.add(1, 2) == 3);
}

View File

@ -3,62 +3,46 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "truncate u0 to larger integer allowed and has comptime known result" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var x: u0 = 0;
const y = @truncate(u8, x);
comptime try expect(y == 0);
}
test "truncate.u0.literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var z = @truncate(u0, 0);
try expect(z == 0);
}
test "truncate.u0.const" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const c0: usize = 0;
var z = @truncate(u0, c0);
try expect(z == 0);
}
test "truncate.u0.var" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var d: u8 = 2;
var z = @truncate(u0, d);
try expect(z == 0);
}
test "truncate i0 to larger integer allowed and has comptime known result" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var x: i0 = 0;
const y = @truncate(i8, x);
comptime try expect(y == 0);
}
test "truncate.i0.literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var z = @truncate(i0, 0);
try expect(z == 0);
}
test "truncate.i0.const" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const c0: isize = 0;
var z = @truncate(i0, c0);
try expect(z == 0);
}
test "truncate.i0.var" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var d: i8 = 2;
var z = @truncate(i0, d);
try expect(z == 0);

View File

@ -24,8 +24,6 @@ fn returnsTen() anyerror!i32 {
}
test "try without vars" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const result1 = if (failIfTrue(true)) 1 else |_| @as(i32, 2);
try expect(result1 == 2);
@ -42,8 +40,6 @@ fn failIfTrue(ok: bool) anyerror!void {
}
test "try then not executed with assignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (failIfTrue(true)) {
unreachable;
} else |err| {

View File

@ -11,8 +11,6 @@ const C = struct {
};
test "basic usingnamespace" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try std.testing.expect(C.B == bool);
}
@ -23,8 +21,6 @@ fn Foo(comptime T: type) type {
}
test "usingnamespace inside a generic struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const std2 = Foo(std);
const testing2 = Foo(std.testing);
try std2.testing.expect(true);
@ -36,8 +32,6 @@ usingnamespace struct {
};
test "usingnamespace does not redeclare an imported variable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
comptime try std.testing.expect(@This().foo == 42);
}
@ -54,8 +48,6 @@ fn privateFunction() bool {
}
test {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
_ = @import("usingnamespace/import_segregation.zig");
}

View File

@ -247,8 +247,6 @@ fn returnTrue() bool {
}
test "return with implicit cast from while loop" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
returnWithImplicitCastFromWhileLoopTest() catch unreachable;
}
fn returnWithImplicitCastFromWhileLoopTest() anyerror!void {