Merge pull request #10893 from joachimschmidt557/stage2-aarch64

stage2 AArch64: get zig test working; enable behavior tests
Jakub Konka 2022-02-15 07:05:24 +01:00 committed by GitHub
commit be98f30a2d
33 changed files with 1477 additions and 359 deletions

View File

@ -7,8 +7,9 @@ ZIG=$DEBUG_STAGING/bin/zig
$ZIG test test/behavior.zig -fno-stage1 -I test -fLLVM
$ZIG test test/behavior.zig -fno-stage1 -I test -fLLVM -target aarch64-linux --test-cmd qemu-aarch64 --test-cmd-bin
$ZIG test test/behavior.zig -fno-stage1 -I test -ofmt=c
$ZIG test test/behavior.zig -fno-stage1 -I test -target wasm32-wasi --test-cmd wasmtime --test-cmd-bin
$ZIG test test/behavior.zig -fno-stage1 -I test -target arm-linux --test-cmd qemu-arm --test-cmd-bin
$ZIG test test/behavior.zig -fno-stage1 -I test -target wasm32-wasi --test-cmd wasmtime --test-cmd-bin
$ZIG test test/behavior.zig -fno-stage1 -I test -target arm-linux --test-cmd qemu-arm --test-cmd-bin
$ZIG test test/behavior.zig -fno-stage1 -I test -target aarch64-linux --test-cmd qemu-aarch64 --test-cmd-bin
$ZIG test test/behavior.zig -fno-stage1 -I test
$ZIG build test-behavior -fqemu -fwasmtime

File diff suppressed because it is too large.

View File

@ -50,11 +50,13 @@ const InnerError = error{
};
const BranchType = enum {
cbz,
b_cond,
unconditional_branch_immediate,
fn default(tag: Mir.Inst.Tag) BranchType {
return switch (tag) {
.cbz => .cbz,
.b, .bl => .unconditional_branch_immediate,
.b_cond => .b_cond,
else => unreachable,
@ -83,6 +85,8 @@ pub fn emitMir(
.b => try emit.mirBranch(inst),
.bl => try emit.mirBranch(inst),
.cbz => try emit.mirCompareAndBranch(inst),
.blr => try emit.mirUnconditionalBranchRegister(inst),
.ret => try emit.mirUnconditionalBranchRegister(inst),
@ -91,7 +95,9 @@ pub fn emitMir(
.call_extern => try emit.mirCallExtern(inst),
.add_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
.cmp_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
.sub_shifted_register => try emit.mirAddSubtractShiftedRegister(inst),
.cset => try emit.mirConditionalSelect(inst),
@ -100,6 +106,8 @@ pub fn emitMir(
.dbg_prologue_end => try emit.mirDebugPrologueEnd(),
.dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(),
.eor_shifted_register => try emit.mirLogicalShiftedRegister(inst),
.load_memory => try emit.mirLoadMemory(inst),
.ldp => try emit.mirLoadStoreRegisterPair(inst),
@ -128,10 +136,13 @@ pub fn emitMir(
.mov_register => try emit.mirMoveRegister(inst),
.mov_to_from_sp => try emit.mirMoveRegister(inst),
.mvn => try emit.mirMoveRegister(inst),
.movk => try emit.mirMoveWideImmediate(inst),
.movz => try emit.mirMoveWideImmediate(inst),
.mul => try emit.mirDataProcessing3Source(inst),
.nop => try emit.mirNop(),
.push_regs => try emit.mirPushPopRegs(inst),
@ -156,15 +167,22 @@ fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
assert(offset & 0b11 == 0);
switch (tag) {
.cbz => {
if (std.math.cast(i19, @shrExact(offset, 2))) |_| {
return BranchType.cbz;
} else |_| {
return emit.fail("TODO support cbz branches larger than +-1 MiB", .{});
}
},
.b, .bl => {
if (std.math.cast(i26, offset >> 2)) |_| {
if (std.math.cast(i26, @shrExact(offset, 2))) |_| {
return BranchType.unconditional_branch_immediate;
} else |_| {
return emit.fail("TODO support branches larger than +-128 MiB", .{});
return emit.fail("TODO support unconditional branches larger than +-128 MiB", .{});
}
},
.b_cond => {
if (std.math.cast(i19, offset >> 2)) |_| {
if (std.math.cast(i19, @shrExact(offset, 2))) |_| {
return BranchType.b_cond;
} else |_| {
return emit.fail("TODO support conditional branches larger than +-1 MiB", .{});
@ -179,8 +197,10 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
if (isBranch(tag)) {
switch (emit.branch_types.get(inst).?) {
.unconditional_branch_immediate => return 4,
.b_cond => return 4,
.cbz,
.unconditional_branch_immediate,
.b_cond,
=> return 4,
}
}
@ -201,6 +221,12 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
return 5 * 4;
}
},
.pop_regs, .push_regs => {
const reg_list = emit.mir.instructions.items(.data)[inst].reg_list;
const number_of_regs = @popCount(u32, reg_list);
const number_of_insts = std.math.divCeil(u6, number_of_regs, 2) catch unreachable;
return number_of_insts * 4;
},
.call_extern => return 4,
.dbg_line,
.dbg_epilogue_begin,
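
The push_regs/pop_regs case above sizes the pseudo-instruction by counting the set bits in the register mask and pairing them up, since each stp/ldp moves two registers in one 4-byte instruction. A standalone sketch of the same computation, using the same two-argument @popCount form as the surrounding code (the mask value here is made up):

const std = @import("std");

test "push_regs expansion size sketch" {
    const reg_list: u32 = 0b10101; // hypothetical mask with three registers set
    const number_of_regs = @popCount(u32, reg_list);
    const number_of_insts = std.math.divCeil(u6, number_of_regs, 2) catch unreachable;
    // three registers -> two stp/ldp instructions -> 8 bytes
    try std.testing.expectEqual(@as(u6, 2), number_of_insts);
}
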
@ -212,7 +238,11 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
fn isBranch(tag: Mir.Inst.Tag) bool {
return switch (tag) {
.b, .bl, .b_cond => true,
.cbz,
.b,
.bl,
.b_cond,
=> true,
else => false,
};
}
@ -221,6 +251,7 @@ fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index {
const tag = emit.mir.instructions.items(.tag)[inst];
switch (tag) {
.cbz => return emit.mir.instructions.items(.data)[inst].r_inst.inst,
.b, .bl => return emit.mir.instructions.items(.data)[inst].inst,
.b_cond => return emit.mir.instructions.items(.data)[inst].inst_cond.inst,
else => unreachable,
@ -414,27 +445,30 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
fn mirAddSubtractImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rr_imm12_sh = emit.mir.instructions.items(.data)[inst].rr_imm12_sh;
switch (tag) {
.add_immediate => try emit.writeInstruction(Instruction.add(
rr_imm12_sh.rd,
rr_imm12_sh.rn,
rr_imm12_sh.imm12,
rr_imm12_sh.sh == 1,
)),
.cmp_immediate => try emit.writeInstruction(Instruction.subs(
rr_imm12_sh.rd,
rr_imm12_sh.rn,
rr_imm12_sh.imm12,
rr_imm12_sh.sh == 1,
)),
.sub_immediate => try emit.writeInstruction(Instruction.sub(
rr_imm12_sh.rd,
rr_imm12_sh.rn,
rr_imm12_sh.imm12,
rr_imm12_sh.sh == 1,
)),
.add_immediate,
.sub_immediate,
=> {
const rr_imm12_sh = emit.mir.instructions.items(.data)[inst].rr_imm12_sh;
const rd = rr_imm12_sh.rd;
const rn = rr_imm12_sh.rn;
const imm12 = rr_imm12_sh.imm12;
const sh = rr_imm12_sh.sh == 1;
switch (tag) {
.add_immediate => try emit.writeInstruction(Instruction.add(rd, rn, imm12, sh)),
.sub_immediate => try emit.writeInstruction(Instruction.sub(rd, rn, imm12, sh)),
else => unreachable,
}
},
.cmp_immediate => {
const r_imm12_sh = emit.mir.instructions.items(.data)[inst].r_imm12_sh;
const rn = r_imm12_sh.rn;
const imm12 = r_imm12_sh.imm12;
const sh = r_imm12_sh.sh == 1;
try emit.writeInstruction(Instruction.subs(.xzr, rn, imm12, sh));
},
else => unreachable,
}
}
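
cmp_immediate is lowered as the usual alias: a flag-setting subtract whose result goes to the zero register, which is why its payload (the new r_imm12_sh) no longer carries an rd field. A minimal sketch, assuming bits.zig is in scope as in this file:

const Instruction = @import("bits.zig").Instruction;

test "cmp is subs to the zero register (sketch)" {
    // cmp x2, #7  is encoded as  subs xzr, x2, #7
    _ = Instruction.subs(.xzr, .x2, 7, false);
}
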
@ -481,6 +515,23 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
fn mirCompareAndBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const r_inst = emit.mir.instructions.items(.data)[inst].r_inst;
const offset = @intCast(i64, emit.code_offset_mapping.get(r_inst.inst).?) - @intCast(i64, emit.code.items.len);
const branch_type = emit.branch_types.get(inst).?;
log.debug("mirCompareAndBranch: {} offset={}", .{ inst, offset });
switch (branch_type) {
.cbz => switch (tag) {
.cbz => try emit.writeInstruction(Instruction.cbz(r_inst.rt, @intCast(i21, offset))),
else => unreachable,
},
else => unreachable,
}
}
fn mirUnconditionalBranchRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const reg = emit.mir.instructions.items(.data)[inst].reg;
@ -565,30 +616,42 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
fn mirAddSubtractShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rrr_imm6_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_shift;
const rd = rrr_imm6_shift.rd;
const rn = rrr_imm6_shift.rn;
const rm = rrr_imm6_shift.rm;
const shift = rrr_imm6_shift.shift;
const imm6 = rrr_imm6_shift.imm6;
switch (tag) {
.cmp_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister(
rrr_imm6_shift.rd,
rrr_imm6_shift.rn,
rrr_imm6_shift.rm,
rrr_imm6_shift.shift,
rrr_imm6_shift.imm6,
)),
.add_shifted_register => try emit.writeInstruction(Instruction.addShiftedRegister(rd, rn, rm, shift, imm6)),
.cmp_shifted_register => try emit.writeInstruction(Instruction.subsShiftedRegister(rd, rn, rm, shift, imm6)),
.sub_shifted_register => try emit.writeInstruction(Instruction.subShiftedRegister(rd, rn, rm, shift, imm6)),
else => unreachable,
}
}
fn mirConditionalSelect(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rrr_cond = emit.mir.instructions.items(.data)[inst].rrr_cond;
switch (tag) {
.cset => {
const r_cond = emit.mir.instructions.items(.data)[inst].r_cond;
try emit.writeInstruction(Instruction.csinc(r_cond.rd, .xzr, .xzr, r_cond.cond));
},
else => unreachable,
}
}
fn mirLogicalShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rrr_imm6_logical_shift = emit.mir.instructions.items(.data)[inst].rrr_imm6_logical_shift;
const rd = rrr_imm6_logical_shift.rd;
const rn = rrr_imm6_logical_shift.rn;
const rm = rrr_imm6_logical_shift.rm;
const shift = rrr_imm6_logical_shift.shift;
const imm6 = rrr_imm6_logical_shift.imm6;
switch (tag) {
.cset => try emit.writeInstruction(Instruction.csinc(
rrr_cond.rd,
rrr_cond.rn,
rrr_cond.rm,
rrr_cond.cond,
)),
.eor_shifted_register => try emit.writeInstruction(Instruction.eor(rd, rn, rm, shift, imm6)),
else => unreachable,
}
}
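
Two conventions change in this hunk: cset now uses the compact r_cond payload and is emitted via csinc with both source operands wired to the zero register, and the logical shifted-register helpers take an explicit shift kind and amount instead of the old Shift struct. A sketch of the new call shapes, mirroring the serialization tests further down (assuming bits.zig is in scope):

const Instruction = @import("bits.zig").Instruction;

test "new logical shifted-register call shape (sketch)" {
    // eor x0, x1, x2, lsl #4 -- shift kind and amount are now separate parameters
    _ = Instruction.eor(.x0, .x1, .x2, .lsl, 4);
    // cset-style lowering: csinc with both sources fixed to xzr
    _ = Instruction.csinc(.x0, .xzr, .xzr, .eq);
}
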
@ -653,20 +716,14 @@ fn mirLoadMemory(emit: *Emit, inst: Mir.Inst.Index) !void {
fn mirLoadStoreRegisterPair(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const load_store_register_pair = emit.mir.instructions.items(.data)[inst].load_store_register_pair;
const rt = load_store_register_pair.rt;
const rt2 = load_store_register_pair.rt2;
const rn = load_store_register_pair.rn;
const offset = load_store_register_pair.offset;
switch (tag) {
.stp => try emit.writeInstruction(Instruction.stp(
load_store_register_pair.rt,
load_store_register_pair.rt2,
load_store_register_pair.rn,
load_store_register_pair.offset,
)),
.ldp => try emit.writeInstruction(Instruction.ldp(
load_store_register_pair.rt,
load_store_register_pair.rt2,
load_store_register_pair.rn,
load_store_register_pair.offset,
)),
.stp => try emit.writeInstruction(Instruction.stp(rt, rt2, rn, offset)),
.ldp => try emit.writeInstruction(Instruction.ldp(rt, rt2, rn, offset)),
else => unreachable,
}
}
@ -782,11 +839,19 @@ fn mirLoadStoreRegisterRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
fn mirMoveRegister(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rr = emit.mir.instructions.items(.data)[inst].rr;
switch (tag) {
.mov_register => try emit.writeInstruction(Instruction.orr(rr.rd, .xzr, rr.rn, Instruction.Shift.none)),
.mov_to_from_sp => try emit.writeInstruction(Instruction.add(rr.rd, rr.rn, 0, false)),
.mov_register => {
const rr = emit.mir.instructions.items(.data)[inst].rr;
try emit.writeInstruction(Instruction.orr(rr.rd, .xzr, rr.rn, .lsl, 0));
},
.mov_to_from_sp => {
const rr = emit.mir.instructions.items(.data)[inst].rr;
try emit.writeInstruction(Instruction.add(rr.rd, rr.rn, 0, false));
},
.mvn => {
const rr_imm6_shift = emit.mir.instructions.items(.data)[inst].rr_imm6_shift;
try emit.writeInstruction(Instruction.orn(rr_imm6_shift.rd, .xzr, rr_imm6_shift.rm, .lsl, 0));
},
else => unreachable,
}
}
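
The three mov-family tags are emitted as their standard aliases: mov between general registers is orr rd, xzr, rn (which cannot address sp, hence the separate mov_to_from_sp form built on add rd, rn, #0), and mvn rd, rm is orn rd, xzr, rm. A short sketch using the new shift parameters, assuming bits.zig is in scope:

const Instruction = @import("bits.zig").Instruction;

test "mov and mvn as orr/orn aliases (sketch)" {
    // mov x0, x1  ->  orr x0, xzr, x1, lsl #0
    _ = Instruction.orr(.x0, .xzr, .x1, .lsl, 0);
    // mvn x0, x1  ->  orn x0, xzr, x1, lsl #0
    _ = Instruction.orn(.x0, .xzr, .x1, .lsl, 0);
}
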
@ -802,6 +867,16 @@ fn mirMoveWideImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
fn mirDataProcessing3Source(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rrr = emit.mir.instructions.items(.data)[inst].rrr;
switch (tag) {
.mul => try emit.writeInstruction(Instruction.mul(rrr.rd, rrr.rn, rrr.rm)),
else => unreachable,
}
}
fn mirNop(emit: *Emit) !void {
try emit.writeInstruction(Instruction.nop());
}

View File

@ -26,6 +26,8 @@ pub const Inst = struct {
pub const Tag = enum(u16) {
/// Add (immediate)
add_immediate,
/// Add (shifted register)
add_shifted_register,
/// Branch conditionally
b_cond,
/// Branch
@ -38,6 +40,8 @@ pub const Inst = struct {
brk,
/// Pseudo-instruction: Call extern
call_extern,
/// Compare and Branch on Zero
cbz,
/// Compare (immediate)
cmp_immediate,
/// Compare (shifted register)
@ -50,6 +54,8 @@ pub const Inst = struct {
dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
dbg_line,
/// Bitwise Exclusive OR (shifted register)
eor_shifted_register,
/// Pseudo-instruction: Load memory
///
/// Payload is `LoadMemory`
@ -82,6 +88,10 @@ pub const Inst = struct {
movk,
/// Move wide with zero
movz,
/// Multiply
mul,
/// Bitwise NOT
mvn,
/// No Operation
nop,
/// Pseudo-instruction: Pop multiple registers
@ -112,6 +122,8 @@ pub const Inst = struct {
strh_register,
/// Subtract (immediate)
sub_immediate,
/// Subtract (shifted register)
sub_shifted_register,
/// Supervisor Call
svc,
};
@ -171,6 +183,20 @@ pub const Inst = struct {
imm16: u16,
hw: u2 = 0,
},
/// A register and a condition
///
/// Used by e.g. cset
r_cond: struct {
rd: Register,
cond: bits.Instruction.Condition,
},
/// A register and another instruction
///
/// Used by e.g. cbz
r_inst: struct {
rt: Register,
inst: Index,
},
/// Two registers
///
/// Used by e.g. mov_register
@ -178,6 +204,14 @@ pub const Inst = struct {
rd: Register,
rn: Register,
},
/// A register, an unsigned 12-bit immediate, and an optional shift
///
/// Used by e.g. cmp_immediate
r_imm12_sh: struct {
rn: Register,
imm12: u12,
sh: u1 = 0,
},
/// Two registers, an unsigned 12-bit immediate, and an optional shift
///
/// Used by e.g. sub_immediate
@ -187,6 +221,23 @@ pub const Inst = struct {
imm12: u12,
sh: u1 = 0,
},
/// Two registers and a shift (shift type and 6-bit amount)
///
/// Used by e.g. mvn
rr_imm6_shift: struct {
rd: Register,
rm: Register,
imm6: u6,
shift: bits.Instruction.AddSubtractShiftedRegisterShift,
},
/// Three registers
///
/// Used by e.g. mul
rrr: struct {
rd: Register,
rn: Register,
rm: Register,
},
/// Three registers and a shift (shift type and 6-bit amount)
///
/// Used by e.g. cmp_shifted_register
@ -197,18 +248,20 @@ pub const Inst = struct {
imm6: u6,
shift: bits.Instruction.AddSubtractShiftedRegisterShift,
},
/// Three registers and a condition
/// Three registers and a shift (logical instruction version)
/// (shift type and 6-bit amount)
///
/// Used by e.g. cset
rrr_cond: struct {
/// Used by e.g. eor_shifted_register
rrr_imm6_logical_shift: struct {
rd: Register,
rn: Register,
rm: Register,
cond: bits.Instruction.Condition,
imm6: u6,
shift: bits.Instruction.LogicalShiftedRegisterShift,
},
/// Two registers and a LoadStoreOffsetImmediate
///
/// Used by e.g. str_register
/// Used by e.g. str_immediate
load_store_register_immediate: struct {
rt: Register,
rn: Register,
@ -224,7 +277,7 @@ pub const Inst = struct {
},
/// A register and a stack offset
///
/// Used by e.g. str_register
/// Used by e.g. str_stack
load_store_stack: struct {
rt: Register,
offset: u32,
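
For illustration, the new payloads plug into Mir.Inst like any other data variant. This is an assumed usage sketch: the field names rd/rn/cond/imm12 come from the definitions above, but the surrounding construction is hypothetical and not part of this diff:

const Mir = @import("Mir.zig");

// `cset x0, eq` described with the new r_cond payload
const cset_inst = Mir.Inst{
    .tag = .cset,
    .data = .{ .r_cond = .{ .rd = .x0, .cond = .eq } },
};
// `cmp x1, #0` described with the rd-less r_imm12_sh payload
const cmp_inst = Mir.Inst{
    .tag = .cmp_immediate,
    .data = .{ .r_imm12_sh = .{ .rn = .x1, .imm12 = 0 } },
};
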

View File

@ -332,23 +332,17 @@ pub const Instruction = union(enum) {
op: u1,
sf: u1,
},
pub const Shift = struct {
shift: Type = .lsl,
amount: u6 = 0,
pub const Type = enum(u2) {
lsl,
lsr,
asr,
ror,
};
pub const none = Shift{
.shift = .lsl,
.amount = 0,
};
};
data_processing_3_source: packed struct {
rd: u5,
rn: u5,
ra: u5,
o0: u1,
rm: u5,
op31: u3,
fixed: u5 = 0b11011,
op54: u2,
sf: u1,
},
pub const Condition = enum(u4) {
/// Integer: Equal
@ -470,6 +464,7 @@ pub const Instruction = union(enum) {
.conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
.compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 31),
.conditional_select => |v| @as(u32, v.rd) | @as(u32, v.rn) << 5 | @as(u32, v.op2) << 10 | @as(u32, v.cond) << 12 | @as(u32, v.rm) << 16 | @as(u32, v.fixed) << 21 | @as(u32, v.s) << 29 | @as(u32, v.op) << 30 | @as(u32, v.sf) << 31,
.data_processing_3_source => |v| @bitCast(u32, v),
};
}
@ -807,25 +802,28 @@ pub const Instruction = union(enum) {
};
}
pub const LogicalShiftedRegisterShift = enum(u2) { lsl, lsr, asr, ror };
fn logicalShiftedRegister(
opc: u2,
n: u1,
shift: Shift,
rd: Register,
rn: Register,
rm: Register,
shift: LogicalShiftedRegisterShift,
amount: u6,
) Instruction {
switch (rd.size()) {
32 => {
assert(shift.amount < 32);
assert(amount < 32);
return Instruction{
.logical_shifted_register = .{
.rd = rd.id(),
.rn = rn.id(),
.imm6 = shift.amount,
.imm6 = amount,
.rm = rm.id(),
.n = n,
.shift = @enumToInt(shift.shift),
.shift = @enumToInt(shift),
.opc = opc,
.sf = 0b0,
},
@ -836,10 +834,10 @@ pub const Instruction = union(enum) {
.logical_shifted_register = .{
.rd = rd.id(),
.rn = rn.id(),
.imm6 = shift.amount,
.imm6 = amount,
.rm = rm.id(),
.n = n,
.shift = @enumToInt(shift.shift),
.shift = @enumToInt(shift),
.opc = opc,
.sf = 0b1,
},
@ -967,6 +965,33 @@ pub const Instruction = union(enum) {
};
}
fn dataProcessing3Source(
op54: u2,
op31: u3,
o0: u1,
rd: Register,
rn: Register,
rm: Register,
ra: Register,
) Instruction {
return Instruction{
.data_processing_3_source = .{
.rd = rd.id(),
.rn = rn.id(),
.ra = ra.id(),
.o0 = o0,
.rm = rm.id(),
.op31 = op31,
.op54 = op54,
.sf = switch (rd.size()) {
32 => 0b0,
64 => 0b1,
else => unreachable, // unexpected register size
},
},
};
}
// Helper functions for assembly syntax functions
// Move wide (immediate)
@ -1120,36 +1145,84 @@ pub const Instruction = union(enum) {
// Logical (shifted register)
pub fn @"and"(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
return logicalShiftedRegister(0b00, 0b0, shift, rd, rn, rm);
pub fn @"and"(
rd: Register,
rn: Register,
rm: Register,
shift: LogicalShiftedRegisterShift,
amount: u6,
) Instruction {
return logicalShiftedRegister(0b00, 0b0, rd, rn, rm, shift, amount);
}
pub fn bic(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
return logicalShiftedRegister(0b00, 0b1, shift, rd, rn, rm);
pub fn bic(
rd: Register,
rn: Register,
rm: Register,
shift: LogicalShiftedRegisterShift,
amount: u6,
) Instruction {
return logicalShiftedRegister(0b00, 0b1, rd, rn, rm, shift, amount);
}
pub fn orr(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
return logicalShiftedRegister(0b01, 0b0, shift, rd, rn, rm);
pub fn orr(
rd: Register,
rn: Register,
rm: Register,
shift: LogicalShiftedRegisterShift,
amount: u6,
) Instruction {
return logicalShiftedRegister(0b01, 0b0, rd, rn, rm, shift, amount);
}
pub fn orn(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
return logicalShiftedRegister(0b01, 0b1, shift, rd, rn, rm);
pub fn orn(
rd: Register,
rn: Register,
rm: Register,
shift: LogicalShiftedRegisterShift,
amount: u6,
) Instruction {
return logicalShiftedRegister(0b01, 0b1, rd, rn, rm, shift, amount);
}
pub fn eor(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
return logicalShiftedRegister(0b10, 0b0, shift, rd, rn, rm);
pub fn eor(
rd: Register,
rn: Register,
rm: Register,
shift: LogicalShiftedRegisterShift,
amount: u6,
) Instruction {
return logicalShiftedRegister(0b10, 0b0, rd, rn, rm, shift, amount);
}
pub fn eon(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
return logicalShiftedRegister(0b10, 0b1, shift, rd, rn, rm);
pub fn eon(
rd: Register,
rn: Register,
rm: Register,
shift: LogicalShiftedRegisterShift,
amount: u6,
) Instruction {
return logicalShiftedRegister(0b10, 0b1, rd, rn, rm, shift, amount);
}
pub fn ands(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
return logicalShiftedRegister(0b11, 0b0, shift, rd, rn, rm);
pub fn ands(
rd: Register,
rn: Register,
rm: Register,
shift: LogicalShiftedRegisterShift,
amount: u6,
) Instruction {
return logicalShiftedRegister(0b11, 0b0, rd, rn, rm, shift, amount);
}
pub fn bics(rd: Register, rn: Register, rm: Register, shift: Shift) Instruction {
return logicalShiftedRegister(0b11, 0b1, shift, rd, rn, rm);
pub fn bics(
rd: Register,
rn: Register,
rm: Register,
shift: LogicalShiftedRegisterShift,
amount: u6,
) Instruction {
return logicalShiftedRegister(0b11, 0b1, rd, rn, rm, shift, amount);
}
// Add/subtract (immediate)
@ -1245,6 +1318,24 @@ pub const Instruction = union(enum) {
pub fn csneg(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
return conditionalSelect(0b01, 0b1, 0b0, rd, rn, rm, cond);
}
// Data processing (3 source)
pub fn madd(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
return dataProcessing3Source(0b00, 0b000, 0b0, rd, rn, rm, ra);
}
pub fn msub(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
return dataProcessing3Source(0b00, 0b000, 0b1, rd, rn, rm, ra);
}
pub fn mul(rd: Register, rn: Register, rm: Register) Instruction {
return madd(rd, rn, rm, .xzr);
}
pub fn mneg(rd: Register, rn: Register, rm: Register) Instruction {
return msub(rd, rn, rm, .xzr);
}
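
As a cross-check against the "serialize instructions" test case added below, the mul alias packs into the data_processing_3_source layout like this (a worked derivation, not code from the commit):

// mul x1, x4, x9  ==  madd x1, x4, x9, xzr
// from most- to least-significant bits:
//   sf=1 | op54=00 | fixed=11011 | op31=000 | rm=x9=01001
//   | o0=0 | ra=xzr=11111 | rn=x4=00100 | rd=x1=00001
// => 0b1_00_11011_000_01001_0_11111_00100_00001
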
};
test {
@ -1259,11 +1350,11 @@ test "serialize instructions" {
const testcases = [_]Testcase{
.{ // orr x0, xzr, x1
.inst = Instruction.orr(.x0, .xzr, .x1, Instruction.Shift.none),
.inst = Instruction.orr(.x0, .xzr, .x1, .lsl, 0),
.expected = 0b1_01_01010_00_0_00001_000000_11111_00000,
},
.{ // orn x0, xzr, x1
.inst = Instruction.orn(.x0, .xzr, .x1, Instruction.Shift.none),
.inst = Instruction.orn(.x0, .xzr, .x1, .lsl, 0),
.expected = 0b1_01_01010_00_1_00001_000000_11111_00000,
},
.{ // movz x1, #4
@ -1383,11 +1474,11 @@ test "serialize instructions" {
.expected = 0b10_101_0_001_1_0000010_00010_11111_00001,
},
.{ // and x0, x4, x2
.inst = Instruction.@"and"(.x0, .x4, .x2, .{}),
.inst = Instruction.@"and"(.x0, .x4, .x2, .lsl, 0),
.expected = 0b1_00_01010_00_0_00010_000000_00100_00000,
},
.{ // and x0, x4, x2, lsl #0x8
.inst = Instruction.@"and"(.x0, .x4, .x2, .{ .shift = .lsl, .amount = 0x8 }),
.inst = Instruction.@"and"(.x0, .x4, .x2, .lsl, 0x8),
.expected = 0b1_00_01010_00_0_00010_001000_00100_00000,
},
.{ // add x0, x10, #10
@ -1414,6 +1505,10 @@ test "serialize instructions" {
.inst = Instruction.csinc(.x1, .x2, .x4, .eq),
.expected = 0b1_0_0_11010100_00100_0000_0_1_00010_00001,
},
.{ // mul x1, x4, x9
.inst = Instruction.mul(.x1, .x4, .x9),
.expected = 0b1_00_11011_000_01001_0_11111_00100_00001,
},
};
for (testcases) |case| {

View File

@ -54,7 +54,7 @@ test {
_ = @import("behavior/decltest.zig");
}
if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64 and builtin.zig_backend != .stage2_aarch64) {
// Tests that pass (partly) for stage1, llvm backend, C backend, wasm backend.
_ = @import("behavior/bitcast.zig");
_ = @import("behavior/bugs/624.zig");

View File

@ -27,6 +27,7 @@ test "default alignment allows unspecified in type syntax" {
}
test "implicitly decreasing pointer alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const a: u32 align(4) = 3;
const b: u32 align(8) = 4;
try expect(addUnaligned(&a, &b) == 7);
@ -37,6 +38,7 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
}
test "@alignCast pointers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var x: u32 align(4) = 1;
expectsOnly1(&x);
try expect(x == 2);
@ -102,6 +104,7 @@ fn fnWithAlignedStack() i32 {
}
test "implicitly decreasing slice alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const a: u32 align(4) = 3;
@ -113,6 +116,7 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
}
test "specifying alignment allows pointer cast" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try testBytesAlign(0x33);
@ -124,6 +128,7 @@ fn testBytesAlign(b: u8) !void {
}
test "@alignCast slices" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var array align(4) = [_]u32{ 1, 1 };
@ -139,6 +144,7 @@ fn sliceExpects4(slice: []align(4) u32) void {
}
test "return error union with 128-bit integer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(3 == try give());
@ -148,6 +154,7 @@ fn give() anyerror!u128 {
}
test "page aligned array on stack" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@ -173,6 +180,7 @@ fn noop1() align(1) void {}
fn noop4() align(4) void {}
test "function alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -189,6 +197,7 @@ test "function alignment" {
}
test "implicitly decreasing fn alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
@ -216,6 +225,7 @@ fn alignedBig() align(16) i32 {
}
test "@alignCast functions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
@ -239,6 +249,7 @@ fn simple4() align(4) i32 {
}
test "generic function with align param" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
@ -260,6 +271,7 @@ fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 {
}
test "runtime known array index has best alignment possible" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@ -302,6 +314,7 @@ fn testIndex2(ptr: [*]align(4) u8, index: usize, comptime T: type) !void {
}
test "alignment of function with c calling convention" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
var runtime_nothing = &nothing;
@ -318,6 +331,7 @@ const DefaultAligned = struct {
};
test "read 128-bit field from default aligned struct in stack memory" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -337,6 +351,7 @@ var default_aligned_global = DefaultAligned{
};
test "read 128-bit field from default aligned struct in global memory" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
@ -348,6 +363,7 @@ test "read 128-bit field from default aligned struct in global memory" {
}
test "struct field explicit alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
@ -369,6 +385,7 @@ test "struct field explicit alignment" {
}
test "align(@alignOf(T)) T does not force resolution of T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
@ -397,6 +414,7 @@ test "align(@alignOf(T)) T does not force resolution of T" {
}
test "align(N) on functions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;

View File

@ -11,6 +11,7 @@ const Foo = struct {
};
test "@alignOf(T) before referencing T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

View File

@ -6,6 +6,7 @@ const expect = testing.expect;
const expectEqual = testing.expectEqual;
test "array to slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const a: u32 align(4) = 3;
@ -20,6 +21,7 @@ test "array to slice" {
}
test "arrays" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var array: [5]u32 = undefined;
@ -46,6 +48,7 @@ fn getArrayLen(a: []const u32) usize {
}
test "array init with mult" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const a = 'a';
@ -57,6 +60,7 @@ test "array init with mult" {
}
test "array literal with explicit type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const hex_mult: [4]u16 = .{ 4096, 256, 16, 1 };
@ -86,6 +90,7 @@ const ArrayDotLenConstExpr = struct {
const some_array = [_]u8{ 0, 1, 2, 3 };
test "array literal with specified size" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var array = [2]u8{ 1, 2 };
@ -94,6 +99,7 @@ test "array literal with specified size" {
}
test "array len field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var arr = [4]u8{ 0, 0, 0, 0 };
@ -105,6 +111,7 @@ test "array len field" {
}
test "array with sentinels" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@ -134,6 +141,7 @@ test "array with sentinels" {
}
test "void arrays" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var array: [4]void = undefined;
@ -144,6 +152,7 @@ test "void arrays" {
}
test "nested arrays" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const array_of_strings = [_][]const u8{ "hello", "this", "is", "my", "thing" };
@ -157,6 +166,7 @@ test "nested arrays" {
}
test "implicit comptime in array type size" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var arr: [plusOne(10)]bool = undefined;
@ -168,6 +178,7 @@ fn plusOne(x: u32) u32 {
}
test "single-item pointer to array indexing and slicing" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try testSingleItemPtrArrayIndexSlice();
@ -193,6 +204,7 @@ fn doSomeMangling(array: *[4]u8) void {
}
test "implicit cast zero sized array ptr to slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
{
@ -208,6 +220,7 @@ test "implicit cast zero sized array ptr to slice" {
}
test "anonymous list literal syntax" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@ -227,6 +240,7 @@ var s_array: [8]Sub = undefined;
const Sub = struct { b: u8 };
const Str = struct { a: []Sub };
test "set global var array via slice embedded in struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -243,6 +257,7 @@ test "set global var array via slice embedded in struct" {
}
test "read/write through global variable array of struct fields initialized via array mult" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -264,6 +279,7 @@ test "read/write through global variable array of struct fields initialized via array mult" {
}
test "implicit cast single-item pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -284,6 +300,7 @@ fn testArrayByValAtComptime(b: [2]u8) u8 {
}
test "comptime evaluating function that takes array by value" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -296,6 +313,7 @@ test "comptime evaluating function that takes array by value" {
}
test "runtime initialize array elem and then implicit cast to slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -306,6 +324,7 @@ test "runtime initialize array elem and then implicit cast to slice" {
}
test "array literal as argument to function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -334,6 +353,7 @@ test "array literal as argument to function" {
}
test "double nested array to const slice cast in array literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -395,6 +415,7 @@ test "double nested array to const slice cast in array literal" {
}
test "anonymous literal in array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -420,6 +441,7 @@ test "anonymous literal in array" {
}
test "access the null element of a null terminated array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -437,6 +459,7 @@ test "access the null element of a null terminated array" {
}
test "type deduction for array subscript expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -455,6 +478,7 @@ test "type deduction for array subscript expression" {
}
test "sentinel element count towards the ABI size calculation" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@ -481,6 +505,7 @@ test "sentinel element count towards the ABI size calculation" {
}
test "zero-sized array with recursive type definition" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@ -505,6 +530,7 @@ test "zero-sized array with recursive type definition" {
}
test "type coercion of anon struct literal to array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -540,6 +566,7 @@ test "type coercion of anon struct literal to array" {
}
test "type coercion of pointer to anon struct literal to pointer to array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO

View File

@ -15,6 +15,7 @@ test "empty function with comments" {
}
test "truncate" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(testTruncate(0x10fd) == 0xfd);
@ -25,6 +26,7 @@ fn testTruncate(x: u32) u8 {
}
test "truncate to non-power-of-two integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try testTrunc(u32, u1, 0b10101, 0b1);
@ -46,6 +48,7 @@ const g1: i32 = 1233 + 1;
var g2: i32 = 0;
test "global variables" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(g2 == 0);
g2 = g1;
try expect(g2 == 1234);
@ -112,6 +115,7 @@ fn first4KeysOfHomeRow() []const u8 {
}
test "return string from function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -119,12 +123,14 @@ test "return string from function" {
}
test "hex escape" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(mem.eql(u8, "\x68\x65\x6c\x6c\x6f", "hello"));
}
test "multiline string" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const s1 =
@ -137,6 +143,7 @@ test "multiline string" {
}
test "multiline string comments at start" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const s1 =
@ -149,6 +156,7 @@ test "multiline string comments at start" {
}
test "multiline string comments at end" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const s1 =
@ -161,6 +169,7 @@ test "multiline string comments at end" {
}
test "multiline string comments in middle" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const s1 =
@ -173,6 +182,7 @@ test "multiline string comments in middle" {
}
test "multiline string comments at multiple places" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const s1 =
@ -191,6 +201,7 @@ test "string concatenation" {
}
test "array mult operator" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(mem.eql(u8, "ab" ** 5, "ababababab"));
@ -216,6 +227,7 @@ test "compile time global reinterpret" {
}
test "cast undefined" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const array: [100]u8 = undefined;
@ -227,6 +239,7 @@ fn testCastUndefined(x: []const u8) void {
}
test "implicit cast after unreachable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(outer() == 1234);
@ -284,6 +297,7 @@ fn fB() []const u8 {
}
test "call function pointer in struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -310,6 +324,7 @@ const FnPtrWrapper = struct {
};
test "const ptr from var variable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x: u64 = undefined;
@ -326,6 +341,7 @@ fn copy(src: *const u64, dst: *u64) void {
}
test "call result of if else expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@ -339,6 +355,7 @@ fn f2(x: bool) []const u8 {
}
test "memcpy and memset intrinsics" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@ -361,6 +378,7 @@ fn testMemcpyMemset() !void {
}
test "variable is allowed to be a pointer to an opaque type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@ -374,6 +392,7 @@ fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA {
}
test "take address of parameter" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -400,6 +419,7 @@ fn testPointerToVoidReturnType2() *const void {
}
test "array 2D const double ptr" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -419,6 +439,7 @@ fn testArray2DConstDoublePtr(ptr: *const f32) !void {
}
test "double implicit cast in same expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -430,6 +451,7 @@ fn nine() u8 {
}
test "struct inside function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try testStructInFn();
@ -451,6 +473,7 @@ fn testStructInFn() !void {
}
test "fn call returning scalar optional in equality expression" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(getNull() == null);
}
@ -459,6 +482,7 @@ fn getNull() ?*i32 {
}
test "global variable assignment with optional unwrapping with var initialized to undefined" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@ -476,6 +500,7 @@ test "global variable assignment with optional unwrapping with var initialized to undefined" {
var global_foo: *i32 = undefined;
test "peer result location with typed parent, runtime condition, comptime prongs" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -550,6 +575,7 @@ test "comptime cast fn to ptr" {
}
test "equality compare fn ptrs" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
var a = &emptyFn;
@ -557,6 +583,7 @@ test "equality compare fn ptrs" {
}
test "self reference through fn ptr field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
@ -576,6 +603,7 @@ test "self reference through fn ptr field" {
}
test "global variable initialized to global variable array element" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -593,6 +621,7 @@ var gdt = [_]GDTEntry{
var global_ptr = &gdt[0];
test "global constant is loaded with a runtime-known index" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@ -610,6 +639,7 @@ test "global constant is loaded with a runtime-known index" {
}
test "multiline string literal is null terminated" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -643,6 +673,7 @@ test "explicit cast optional pointers" {
}
test "pointer comparison" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -655,6 +686,7 @@ fn ptrEql(a: *const []const u8, b: *const []const u8) bool {
}
test "string concatenation" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

View File

@ -61,6 +61,7 @@ fn ShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, compt
}
test "sharded table" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

View File

@ -12,6 +12,7 @@ const A = union(enum) {
};
test "union that needs padding bytes inside an array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var as = [_]A{

View File

@ -1,10 +1,12 @@
const std = @import("std");
const expect = std.testing.expect;
const builtin = @import("builtin");
const ptr = &global;
var global: usize = 123;
test "constant pointer to global variable causes runtime load" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
global = 1234;
try expect(&global == ptr);
try expect(ptr.* == 1234);

View File

@ -42,6 +42,7 @@ const a = struct {
};
test "initialization" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

View File

@ -2,6 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
test "fixed" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const x: f32 align(128) = 12.34;

View File

@ -6,6 +6,7 @@ const S = struct {
p: *S,
};
test "bug 2006" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
var a: S = undefined;
a = S{ .p = undefined };

View File

@ -12,6 +12,7 @@ fn bar(pointer: ?*anyopaque) void {
}
test "fixed" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO

View File

@ -19,6 +19,7 @@ fn get_foo() Foo.FooError!*Foo {
}
test "fixed" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO

View File

@ -12,6 +12,7 @@ fn prev(p: ?State) void {
}
test "zig test crash" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

View File

@ -10,6 +10,7 @@ const Mixin = struct {
};
test "container member access usingnamespace decls" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var foo = Foo{};

View File

@ -11,6 +11,7 @@ const expect = @import("std").testing.expect;
const builtin = @import("builtin");
test "bug 394 fixed" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const x = S{

View File

@ -11,6 +11,7 @@ const Value = struct {
};
test "optional if after an if in a switch prong of a switch with 2 prongs in an else" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
try foo(false, true);

View File

@ -14,6 +14,7 @@ threadlocal var g_uart0 = nrfx_uart_t{
};
test "reference a global threadlocal variable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

View File

@ -18,6 +18,7 @@ test "integer literal to pointer cast" {
}
test "peer type resolution: ?T and T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(peerTypeTAndOptionalT(true, false).? == 0);
@ -94,6 +95,7 @@ test "comptime_int @intToFloat" {
}
test "@floatToInt" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -116,6 +118,7 @@ fn expectFloatToInt(comptime F: type, f: F, comptime I: type, i: I) !void {
}
test "implicitly cast indirect pointer to maybe-indirect pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@ -174,6 +177,7 @@ test "@floatCast comptime_int and comptime_float" {
}
test "coerce undefined to optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
try expect(MakeType(void).getNull() == null);
@ -193,6 +197,7 @@ fn MakeType(comptime T: type) type {
}
test "implicit cast from *[N]T to [*c]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x: [4]u16 = [4]u16{ 0, 1, 2, 3 };
@ -205,6 +210,7 @@ test "implicit cast from *[N]T to [*c]T" {
}
test "*usize to *void" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var i = @as(usize, 0);
var v = @ptrCast(*void, &i);
v.* = {};
@ -230,6 +236,7 @@ test "@intCast to u0 and use the result" {
}
test "peer result null and comptime_int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@ -253,6 +260,7 @@ test "peer result null and comptime_int" {
}
test "*const ?[*]const T to [*c]const [*c]const T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var array = [_]u8{ 'o', 'k' };
@ -264,6 +272,7 @@ test "*const ?[*]const T to [*c]const [*c]const T" {
}
test "array coersion to undefined at runtime" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@setRuntimeSafety(true);
@ -293,6 +302,7 @@ fn implicitIntLitToOptional() void {
}
test "return u8 coercing into ?u32 return type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@ -313,6 +323,7 @@ test "cast from ?[*]T to ??[*]T" {
}
test "peer type unsigned int to signed" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -325,6 +336,7 @@ test "peer type unsigned int to signed" {
}
test "expected [*c]const u8, found [*:0]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -384,6 +396,7 @@ fn castToOptionalTypeError(z: i32) !void {
}
test "implicitly cast from [0]T to anyerror![]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -455,6 +468,7 @@ fn testCastConstArrayRefToConstSlice() !void {
}
test "peer type resolution: error and [N]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -689,6 +703,7 @@ test "type coercion related to sentinel-termination" {
}
test "peer type resolution implicit cast to return type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -710,6 +725,7 @@ test "peer type resolution implicit cast to return type" {
}
test "peer type resolution implicit cast to variable type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -806,6 +822,7 @@ test "comptime float casts" {
}
test "pointer reinterpret const float to int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -822,6 +839,7 @@ test "pointer reinterpret const float to int" {
}
test "implicit cast from [*]T to ?*anyopaque" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -840,6 +858,7 @@ fn incrementVoidPtrArray(array: ?*anyopaque, len: usize) void {
}
test "compile time int to ptr of function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@ -857,6 +876,7 @@ fn foobar(func: PFN_void) !void {
}
test "implicit ptr to *anyopaque" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -871,6 +891,7 @@ test "implicit ptr to *anyopaque" {
}
test "return null from fn() anyerror!?&T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -887,6 +908,7 @@ fn returnNullLitFromOptionalTypeErrorRef() anyerror!?*A {
}
test "peer type resolution: [0]u8 and []const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -907,6 +929,7 @@ fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
}
test "implicitly cast from [N]T to ?[]const T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -920,6 +943,7 @@ fn castToOptionalSlice() ?[]const u8 {
}
test "cast u128 to f128 and back" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -941,6 +965,7 @@ fn cast128Float(x: u128) f128 {
}
test "implicit cast from *[N]T to ?[*]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -956,6 +981,7 @@ test "implicit cast from *[N]T to ?[*]T" {
}
test "implicit cast from *T to ?*anyopaque" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -970,6 +996,7 @@ fn incrementVoidPtrValue(value: ?*anyopaque) void {
}
test "implicit cast *[0]T to E![]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -987,6 +1014,7 @@ test "cast from array reference to fn: comptime fn ptr" {
try expect(@ptrToInt(f) == @ptrToInt(&global_array));
}
test "cast from array reference to fn: runtime fn ptr" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -996,6 +1024,7 @@ test "cast from array reference to fn: runtime fn ptr" {
}
test "*const [N]null u8 to ?[]const u8" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -1034,6 +1063,7 @@ test "cast between [*c]T and ?[*:0]T on fn parameter" {
var global_struct: struct { f0: usize } = undefined;
test "assignment to optional pointer result loc" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -1043,6 +1073,7 @@ test "assignment to optional pointer result loc" {
}
test "cast between *[N]void and []void" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -1052,6 +1083,7 @@ test "cast between *[N]void and []void" {
}
test "peer resolve arrays of different size to const slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -1065,6 +1097,7 @@ fn boolToStr(b: bool) []const u8 {
}
test "cast f16 to wider types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -1083,6 +1116,7 @@ test "cast f16 to wider types" {
}
test "cast f128 to narrower types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -1101,6 +1135,7 @@ test "cast f128 to narrower types" {
}
test "peer type resolution: unreachable, null, slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -1119,6 +1154,7 @@ test "peer type resolution: unreachable, null, slice" {
}
test "cast i8 fn call peers to i32 result" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

@ -32,6 +32,7 @@ fn custom(comptime T: type, comptime num: u64) fn (T) u64 {
}
test "fn delegation" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const foo = Foo{};

@ -18,6 +18,7 @@ fn getErrInt() anyerror!i32 {
}
test "ir block deps" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

@ -5,6 +5,7 @@ const expect = testing.expect;
const expectEqual = testing.expectEqual;
test "passing an optional integer as a parameter" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -25,6 +26,7 @@ test "passing an optional integer as a parameter" {
pub const EmptyStruct = struct {};
test "optional pointer to size zero struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -34,6 +36,7 @@ test "optional pointer to size zero struct" {
}
test "equality compare optional pointers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -58,6 +61,7 @@ fn testNullPtrsEql() !void {
}
test "optional with void type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -69,6 +73,7 @@ test "optional with void type" {
}
test "address of unwrap optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -89,6 +94,7 @@ test "address of unwrap optional" {
}
test "nested optional field in struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -105,6 +111,7 @@ test "nested optional field in struct" {
}
test "equality compare optional with non-optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -142,6 +149,7 @@ fn test_cmp_optional_non_optional() !void {
}
test "unwrap function call with optional pointer return value" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -163,6 +171,7 @@ test "unwrap function call with optional pointer return value" {
}
test "nested orelse" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@ -189,6 +198,7 @@ test "nested orelse" {
}
test "self-referential struct through a slice of optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

@ -28,6 +28,7 @@ fn dummy(a: bool, b: i32, c: f32) i32 {
}
test "reflection: @field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

@ -27,6 +27,7 @@ comptime {
}
test "slicing" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@ -68,6 +69,7 @@ test "comptime slice of undefined pointer of length 0" {
}
test "implicitly cast array of size 0 to slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@ -80,6 +82,7 @@ fn assertLenIsZero(msg: []const u8) !void {
}
test "access len index of sentinel-terminated slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
@ -129,6 +132,7 @@ test "slice of type" {
}
test "generic malloc free" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -187,6 +191,7 @@ test "comptime pointer cast array and then slice" {
}
test "slicing zero length array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -202,6 +207,7 @@ test "slicing zero length array" {
const x = @intToPtr([*]i32, 0x1000)[0..0x500];
const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
@ -215,6 +221,7 @@ test "compile time slice of pointer to hard coded address" {
}
test "slice string literal has correct type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -230,6 +237,7 @@ test "slice string literal has correct type" {
}
test "result location zero sized array inside struct field implicit cast to slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const E = struct {
@ -240,6 +248,7 @@ test "result location zero sized array inside struct field implicit cast to slic
}
test "runtime safety lets us slice from len..len" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -252,6 +261,7 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
}
test "C pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -262,6 +272,7 @@ test "C pointer" {
}
test "C pointer slice access" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -291,6 +302,7 @@ fn sliceSum(comptime q: []const u8) i32 {
}
test "slice type with custom alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@ -305,6 +317,7 @@ test "slice type with custom alignment" {
}
test "obtaining a null terminated slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@ -350,6 +363,7 @@ test "empty array to slice" {
}
test "@ptrCast slice to pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

@ -9,6 +9,7 @@ const maxInt = std.math.maxInt;
top_level_field: i32,
test "top level fields" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var instance = @This(){
@ -42,6 +43,7 @@ const StructWithFields = struct {
};
test "non-packed struct has fields padded out to the required alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const foo = StructWithFields{ .a = 5, .b = 1, .c = 10, .d = 2 };
@ -65,6 +67,7 @@ const SmallStruct = struct {
};
test "lower unnamed constants" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var foo = SmallStruct{ .a = 1, .b = 255 };
try expect(foo.first() == 1);
try expect(foo.second() == 255);
@ -83,6 +86,7 @@ const StructFoo = struct {
};
test "structs" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var foo: StructFoo = undefined;
@ -101,6 +105,7 @@ fn testMutation(foo: *StructFoo) void {
}
test "struct byval assign" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var foo1: StructFoo = undefined;
@ -134,6 +139,7 @@ fn returnEmptyStructInstance() StructWithNoFields {
}
test "fn call of struct field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const Foo = struct {
@ -165,12 +171,14 @@ const MemberFnTestFoo = struct {
};
test "call member function directly" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const instance = MemberFnTestFoo{ .x = 1234 };
const result = MemberFnTestFoo.member(instance);
try expect(result == 1234);
}
test "store member function in variable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const instance = MemberFnTestFoo{ .x = 1234 };
const memberFn = MemberFnTestFoo.member;
const result = memberFn(instance);
@ -178,6 +186,7 @@ test "store member function in variable" {
}
test "member functions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const r = MemberFnRand{ .seed = 1234 };
try expect(r.getSeed() == 1234);
}
@ -189,6 +198,7 @@ const MemberFnRand = struct {
};
test "return struct byval from function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const bar = makeBar2(1234, 5678);
@ -206,6 +216,7 @@ fn makeBar2(x: i32, y: i32) Bar {
}
test "call method with mutable reference to struct with no fields" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@ -238,6 +249,7 @@ test "usingnamespace within struct scope" {
}
test "struct field init with catch" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const S = struct {
@ -296,6 +308,7 @@ const Val = struct {
};
test "struct point to self" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -314,6 +327,7 @@ test "struct point to self" {
}
test "void struct fields" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -334,6 +348,7 @@ const VoidStructFieldsFoo = struct {
};
test "return empty struct from fn" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -347,6 +362,7 @@ fn testReturnEmptyStructFromFn() EmptyStruct2 {
}
test "pass slice of empty struct to fn" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -359,6 +375,7 @@ fn testPassSliceOfEmptyStructToFn(slice: []const EmptyStruct2) usize {
}
test "self-referencing struct via array member" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -389,6 +406,7 @@ const EmptyStruct = struct {
};
test "align 1 field before self referential align 8 field as slice return type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -413,6 +431,7 @@ const APackedStruct = packed struct {
};
test "packed struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -438,6 +457,7 @@ const Foo96Bits = packed struct {
};
test "packed struct 24bits" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -484,6 +504,7 @@ test "packed struct 24bits" {
}
test "runtime struct initialization of bitfield" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -523,6 +544,7 @@ const Bitfields = packed struct {
};
test "native bit field understands endianness" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -546,6 +568,7 @@ test "native bit field understands endianness" {
}
test "implicit cast packed struct field to const ptr" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -581,6 +604,7 @@ test "zero-bit field in packed struct" {
}
test "packed struct with non-ABI-aligned field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -610,6 +634,7 @@ const bit_field_1 = BitField1{
};
test "bit field access" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -642,6 +667,7 @@ fn getC(data: *const BitField1) u2 {
}
test "default struct initialization fields" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -907,6 +933,7 @@ test "packed struct field passed to generic function" {
}
test "anonymous struct literal syntax" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -1100,6 +1127,7 @@ test "type coercion of pointer to anon struct literal to pointer to struct" {
}
test "packed struct with undefined initializers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

@ -3,6 +3,7 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "truncate u0 to larger integer allowed and has comptime known result" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x: u0 = 0;
@ -11,6 +12,7 @@ test "truncate u0 to larger integer allowed and has comptime known result" {
}
test "truncate.u0.literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var z = @truncate(u0, 0);
@ -18,6 +20,7 @@ test "truncate.u0.literal" {
}
test "truncate.u0.const" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const c0: usize = 0;
@ -26,6 +29,7 @@ test "truncate.u0.const" {
}
test "truncate.u0.var" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var d: u8 = 2;
@ -34,6 +38,7 @@ test "truncate.u0.var" {
}
test "truncate i0 to larger integer allowed and has comptime known result" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x: i0 = 0;
@ -42,6 +47,7 @@ test "truncate i0 to larger integer allowed and has comptime known result" {
}
test "truncate.i0.literal" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var z = @truncate(i0, 0);
@ -49,6 +55,7 @@ test "truncate.i0.literal" {
}
test "truncate.i0.const" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const c0: isize = 0;
@ -57,6 +64,7 @@ test "truncate.i0.const" {
}
test "truncate.i0.var" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var d: i8 = 2;
@ -65,6 +73,7 @@ test "truncate.i0.var" {
}
test "truncate on comptime integer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
var x = @truncate(u16, 9999);

@ -25,6 +25,7 @@ fn readFirstVarArg(args: anytype) void {
}
test "send void arg to var args" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -84,6 +85,7 @@ fn foo2(args: anytype) bool {
}
test "array of var args functions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -93,6 +95,7 @@ test "array of var args functions" {
}
test "pass zero length array to var args param" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

@ -17,15 +17,8 @@ pub fn addCases(ctx: *TestContext) !void {
var case = ctx.exe("linux_aarch64 hello world", linux_aarch64);
// Regular old hello world
case.addCompareOutput(
\\pub export fn _start() noreturn {
\\pub fn main() void {
\\ print();
\\ exit();
\\}
\\
\\fn doNothing() void {}
\\
\\fn answer() u64 {
\\ return 0x1234abcd1234abcd;
\\}
\\
\\fn print() void {
@ -38,16 +31,6 @@ pub fn addCases(ctx: *TestContext) !void {
\\ : "memory", "cc"
\\ );
\\}
\\
\\fn exit() noreturn {
\\ asm volatile ("svc #0"
\\ :
\\ : [number] "{x8}" (93),
\\ [arg1] "{x0}" (0)
\\ : "memory", "cc"
\\ );
\\ unreachable;
\\}
,
"Hello, World!\n",
);
@ -102,6 +85,74 @@ pub fn addCases(ctx: *TestContext) !void {
,
"Hello, World!\n",
);
case.addCompareOutput(
\\pub fn main() void {
\\ foo(true);
\\}
\\
\\fn foo(x: bool) void {
\\ if (x) {
\\ print();
\\ }
\\}
\\
\\fn print() void {
\\ asm volatile ("svc #0"
\\ :
\\ : [number] "{x8}" (64),
\\ [arg1] "{x0}" (1),
\\ [arg2] "{x1}" (@ptrToInt("Hello, World!\n")),
\\ [arg3] "{x2}" ("Hello, World!\n".len),
\\ : "memory", "cc"
\\ );
\\}
,
"Hello, World!\n",
);
}
{
var case = ctx.exe("large add function", linux_aarch64);
case.addCompareOutput(
\\pub fn main() void {
\\ assert(add(3, 4) == 791);
\\}
\\
\\fn add(a: u32, b: u32) u32 {
\\ const x: u32 = blk: {
\\ const c = a + b; // 7
\\ const d = a + c; // 10
\\ const e = d + b; // 14
\\ const f = d + e; // 24
\\ const g = e + f; // 38
\\ const h = f + g; // 62
\\ const i = g + h; // 100
\\ const j = i + d; // 110
\\ const k = i + j; // 210
\\ const l = k + c; // 217
\\ const m = l + d; // 227
\\ const n = m + e; // 241
\\ const o = n + f; // 265
\\ const p = o + g; // 303
\\ const q = p + h; // 365
\\ const r = q + i; // 465
\\ const s = r + j; // 575
\\ const t = s + k; // 785
\\ break :blk t;
\\ };
\\ const y = x + a; // 788
\\ const z = y + a; // 791
\\ return z;
\\}
\\
\\fn assert(ok: bool) void {
\\ if (!ok) unreachable;
\\}
,
"",
);
}
// macOS tests