Merge pull request #11016 from ziglang/x64-more-codegen
commit ec54ceee6d
@@ -1738,14 +1738,22 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
        return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
    }
    const err_union_ty = self.air.typeOf(ty_op.operand);
    const err_ty = err_union_ty.errorUnionSet();
    const payload_ty = err_union_ty.errorUnionPayload();
    const operand = try self.resolveInst(ty_op.operand);
    operand.freezeIfRegister(&self.register_manager);
    defer operand.unfreezeIfRegister(&self.register_manager);

    const result: MCValue = result: {
        if (!payload_ty.hasRuntimeBits()) break :result operand;
        switch (operand) {
            .stack_offset => |off| {
                break :result MCValue{ .stack_offset = off };
            },
            .register => {
                // TODO reuse operand
                break :result try self.copyToRegisterWithInstTracking(inst, err_ty, operand);
            },
            else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}),
        }
    };
@@ -1763,13 +1771,25 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
        if (!payload_ty.hasRuntimeBits()) break :result MCValue.none;

        const operand = try self.resolveInst(ty_op.operand);
        operand.freezeIfRegister(&self.register_manager);
        defer operand.unfreezeIfRegister(&self.register_manager);

        const err_ty = err_union_ty.errorUnionSet();
        const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*));
        switch (operand) {
            .stack_offset => |off| {
                const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*));
                const offset = off - @intCast(i32, err_abi_size);
                break :result MCValue{ .stack_offset = offset };
            },
            .register => {
                // TODO reuse operand
                const shift = @intCast(u6, err_ty.bitSize(self.target.*));
                const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand);
                try self.shiftRegister(result.register.to64(), shift);
                break :result MCValue{
                    .register = registerAlias(result.register, @intCast(u32, payload_ty.abiSize(self.target.*))),
                };
            },
            else => return self.fail("TODO implement unwrap_err_payload for {}", .{operand}),
        }
    };
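Reviewer note (not part of the diff): when an error union fits in a general-purpose register, the error code occupies the low bits and the payload sits above it, which is why the .register case of airUnwrapErrPayload shifts right by the error set's bit size; the .stack_offset case instead steps past the error's ABI size. A minimal sketch of the bit-level idea, assuming a hypothetical layout with a 16-bit error code and a u32 payload:

    // Hypothetical illustration only; the real sizes come from err_ty.bitSize.
    fn unwrapPayload(repr: u64) u32 {
        const err_bits: u6 = 16; // assumed bit size of the error set
        return @truncate(u32, repr >> err_bits);
    }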
@@ -2013,8 +2033,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
    self.register_manager.freezeRegs(&.{offset_reg});
    defer self.register_manager.unfreezeRegs(&.{offset_reg});

    const addr_reg = blk: {
        const off = inner: {
    const addr_reg = try self.register_manager.allocReg(null);
    switch (array) {
        .register => {
            const off = @intCast(i32, try self.allocMem(
@@ -2023,21 +2042,6 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
                array_ty.abiAlignment(self.target.*),
            ));
            try self.genSetStack(array_ty, off, array, .{});
            break :inner off;
        },
        .stack_offset => |off| {
            break :inner off;
        },
        .memory,
        .got_load,
        .direct_load,
        => {
            break :blk try self.loadMemPtrIntoRegister(Type.usize, array);
        },
        else => return self.fail("TODO implement array_elem_val when array is {}", .{array}),
    }
    };
    const addr_reg = try self.register_manager.allocReg(null);
    // lea reg, [rbp]
    _ = try self.addInst(.{
        .tag = .lea,
@@ -2047,8 +2051,26 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
            }).encode(),
            .data = .{ .imm = @bitCast(u32, -off) },
        });
        break :blk addr_reg.to64();
    };
        },
        .stack_offset => |off| {
            // lea reg, [rbp]
            _ = try self.addInst(.{
                .tag = .lea,
                .ops = (Mir.Ops{
                    .reg1 = addr_reg.to64(),
                    .reg2 = .rbp,
                }).encode(),
                .data = .{ .imm = @bitCast(u32, -off) },
            });
        },
        .memory,
        .got_load,
        .direct_load,
        => {
            try self.loadMemPtrIntoRegister(addr_reg, Type.usize, array);
        },
        else => return self.fail("TODO implement array_elem_val when array is {}", .{array}),
    }

    // TODO we could allocate register here, but need to expect addr register and potentially
    // offset register.
@@ -2148,11 +2170,11 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
    defer operand.unfreezeIfRegister(&self.register_manager);

    const tag_abi_size = tag_ty.abiSize(self.target.*);
    const offset: i32 = if (layout.tag_align < layout.payload_align) @intCast(i32, layout.payload_size) else 0;
    const dst_mcv: MCValue = blk: {
        switch (operand) {
            .stack_offset => |off| {
                if (tag_abi_size <= 8) {
                    const offset: i32 = if (layout.tag_align < layout.payload_align) @intCast(i32, layout.payload_size) else 0;
                    break :blk try self.copyToRegisterWithInstTracking(inst, tag_ty, .{
                        .stack_offset = off - offset,
                    });
@@ -2160,6 +2182,17 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {

                return self.fail("TODO implement get_union_tag for ABI larger than 8 bytes and operand {}", .{operand});
            },
            .register => {
                const shift: u6 = if (layout.tag_align < layout.payload_align)
                    @intCast(u6, layout.payload_size * 8)
                else
                    0;
                const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand);
                try self.shiftRegister(result.register.to64(), shift);
                break :blk MCValue{
                    .register = registerAlias(result.register, @intCast(u32, layout.tag_size)),
                };
            },
            else => return self.fail("TODO implement get_union_tag for {}", .{operand}),
        }
    };
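Reviewer note (not part of the diff): airGetUnionTag mirrors the union layout rule that the less-aligned part is placed after the more-aligned one, so when tag_align < payload_align the tag lives above the payload and is recovered by shifting right by payload_size * 8; otherwise it sits at offset zero. A hedged sketch of the register case, assuming the union fits in 64 bits and payload_size * 8 < 64:

    // Hypothetical illustration of the shift performed via shiftRegister above.
    fn readTag(repr: u64, payload_size: u64) u16 {
        return @truncate(u16, repr >> @intCast(u6, payload_size * 8));
    }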
@@ -2354,7 +2387,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
    return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}

fn loadMemPtrIntoRegister(self: *Self, ptr_ty: Type, ptr: MCValue) InnerError!Register {
fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue) InnerError!void {
    switch (ptr) {
        .got_load,
        .direct_load,
@@ -2364,11 +2397,10 @@ fn loadMemPtrIntoRegister(self: *Self, ptr_ty: Type, ptr: MCValue) InnerError!Re
                .direct_load => 0b01,
                else => unreachable,
            };
            const reg = try self.register_manager.allocReg(null);
            _ = try self.addInst(.{
                .tag = .lea_pie,
                .ops = (Mir.Ops{
                    .reg1 = reg.to64(),
                    .reg1 = registerAlias(reg, @intCast(u32, ptr_ty.abiSize(self.target.*))),
                    .flags = flags,
                }).encode(),
                .data = .{
@@ -2378,13 +2410,11 @@ fn loadMemPtrIntoRegister(self: *Self, ptr_ty: Type, ptr: MCValue) InnerError!Re
                    },
                },
            });
            return reg.to64();
        },
        .memory => |addr| {
            // TODO: in case the address fits in an imm32 we can use [ds:imm32]
            // instead of wasting an instruction copying the address to a register
            const reg = try self.copyToTmpRegister(ptr_ty, .{ .immediate = addr });
            return reg.to64();
            try self.genSetReg(ptr_ty, reg, .{ .immediate = addr });
        },
        else => unreachable,
    }
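Reviewer note (not part of the diff): loadMemPtrIntoRegister no longer allocates and returns its own register; the caller now supplies the destination, so call sites can decide whether to reuse, freeze, or alias it. The call sites updated in this PR follow this shape:

    const addr_reg = try self.register_manager.allocReg(null);
    try self.loadMemPtrIntoRegister(addr_reg, Type.usize, ptr); // ptr is a .got_load, .direct_load, or .memory MCValue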
@@ -2484,7 +2514,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
            .data = .{ .imm = 0 },
        });
    },
    .stack_offset => {
    .got_load,
    .direct_load,
    .memory,
    .stack_offset,
    => {
        if (abi_size <= 8) {
            const tmp_reg = try self.copyToTmpRegister(value_ty, value);
            return self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
@@ -2507,7 +2541,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
        value.freezeIfRegister(&self.register_manager);
        defer value.unfreezeIfRegister(&self.register_manager);

        const addr_reg = try self.loadMemPtrIntoRegister(ptr_ty, ptr);
        const addr_reg = try self.register_manager.allocReg(null);
        try self.loadMemPtrIntoRegister(addr_reg, ptr_ty, ptr);

        // to get the actual address of the value we want to modify we have to go through the GOT
        // mov reg, [reg]
@@ -2686,27 +2721,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
    };

    // Shift by struct_field_offset.
    const shift_amount = @intCast(u8, struct_field_offset * 8);
    if (shift_amount > 0) {
        if (shift_amount == 1) {
            _ = try self.addInst(.{
                .tag = .shr,
                .ops = (Mir.Ops{
                    .reg1 = dst_mcv.register,
                }).encode(),
                .data = undefined,
            });
        } else {
            _ = try self.addInst(.{
                .tag = .shr,
                .ops = (Mir.Ops{
                    .reg1 = dst_mcv.register,
                    .flags = 0b10,
                }).encode(),
                .data = .{ .imm = shift_amount },
            });
        }
    }
    const shift = @intCast(u8, struct_field_offset * 8);
    try self.shiftRegister(dst_mcv.register, shift);

    // Mask with reg.size() - struct_field_size
    const mask_shift = @intCast(u6, (64 - struct_field_ty.bitSize(self.target.*)));
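Reviewer note (not part of the diff): after shiftRegister brings the field down to the low bits, the mask_shift step clears everything above the field's bit size by shifting left and then right by 64 - bitSize. An equivalent and-mask formulation, as a sketch that assumes the field is narrower than 64 bits:

    fn extractField(repr: u64, bit_offset: u6, field_bits: u6) u64 {
        const shifted = repr >> bit_offset;
        const mask = (@as(u64, 1) << field_bits) - 1; // assumes field_bits < 64
        return shifted & mask;
    }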
@@ -3167,11 +3183,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
        const stack_offset = @intCast(i32, try self.allocMem(inst, ret_abi_size, ret_abi_align));

        try self.register_manager.getReg(.rdi, inst);
        try self.register_manager.getReg(.rdi, null);
        self.register_manager.freezeRegs(&.{.rdi});
        try self.genSetReg(Type.usize, .rdi, .{ .ptr_stack_offset = stack_offset });

        info.return_value.stack_offset = stack_offset;
    }
    defer if (info.return_value == .stack_offset) self.register_manager.unfreezeRegs(&.{.rdi});

    for (args) |arg, arg_i| {
        const mc_arg = info.args[arg_i];
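Reviewer note (not part of the diff): for calls that return by pointer, .rdi carries the address of the stack return slot under the System V x86-64 ABI, so it is now claimed with getReg(.rdi, null) and frozen rather than tracked against the call instruction; the deferred unfreeze releases it once the argument lowering is done. The freeze/unfreeze pairing used throughout this PR is:

    self.register_manager.freezeRegs(&.{.rdi});
    defer self.register_manager.unfreezeRegs(&.{.rdi});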
@@ -4676,28 +4694,25 @@ fn genInlineMemcpy(self: *Self, stack_offset: i32, ty: Type, val: MCValue, opts:
    if (opts.dest_stack_base) |reg| self.register_manager.freezeRegs(&.{reg});
    defer if (opts.dest_stack_base) |reg| self.register_manager.unfreezeRegs(&.{reg});

    const addr_reg: Register = blk: {
    const addr_reg = try self.register_manager.allocReg(null);
    switch (val) {
        .memory,
        .direct_load,
        .got_load,
        => {
            break :blk try self.loadMemPtrIntoRegister(Type.usize, val);
            try self.loadMemPtrIntoRegister(addr_reg, Type.usize, val);
        },
        .ptr_stack_offset, .stack_offset => |off| {
            const addr_reg = (try self.register_manager.allocReg(null)).to64();
            _ = try self.addInst(.{
                .tag = .lea,
                .ops = (Mir.Ops{
                    .reg1 = addr_reg,
                    .reg1 = addr_reg.to64(),
                    .reg2 = opts.source_stack_base orelse .rbp,
                }).encode(),
                .data = .{ .imm = @bitCast(u32, -off) },
            });
            break :blk addr_reg;
        },
        .register => |reg| {
            const addr_reg = try self.register_manager.allocReg(null);
            _ = try self.addInst(.{
                .tag = .mov,
                .ops = (Mir.Ops{
@@ -4706,13 +4721,11 @@ fn genInlineMemcpy(self: *Self, stack_offset: i32, ty: Type, val: MCValue, opts:
                }).encode(),
                .data = undefined,
            });
            break :blk addr_reg.to64();
        },
        else => {
            return self.fail("TODO implement memcpy for setting stack from {}", .{val});
        },
    }
    };

    self.register_manager.freezeRegs(&.{addr_reg});
    defer self.register_manager.unfreezeRegs(&.{addr_reg});
@@ -4826,10 +4839,6 @@ fn genInlineMemset(
    try self.register_manager.getReg(.rax, null);

    const abi_size = ty.abiSize(self.target.*);
    if (stack_offset > 128) {
        return self.fail("TODO inline memset with large stack offset", .{});
    }

    const negative_offset = @bitCast(u32, -stack_offset);

    // We are actually counting `abi_size` bytes; however, we reuse the index register
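Reviewer note (not part of the diff): the new stack_offset > 128 bail-out in genInlineMemset presumably keeps the emitted rbp-relative addressing within the signed 8-bit displacement range of -128..127; larger frames would need the 32-bit displacement form, which this leaves as a TODO. A sketch of the range check being respected:

    fn fitsInDisp8(offset: i32) bool {
        return offset >= -128 and offset <= 127;
    }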
@@ -5768,3 +5777,25 @@ fn registerAlias(reg: Register, size_bytes: u32) Register {
        unreachable; // TODO handle floating-point registers
    }
}

fn shiftRegister(self: *Self, reg: Register, shift: u8) !void {
    if (shift == 0) return;
    if (shift == 1) {
        _ = try self.addInst(.{
            .tag = .shr,
            .ops = (Mir.Ops{
                .reg1 = reg,
            }).encode(),
            .data = undefined,
        });
    } else {
        _ = try self.addInst(.{
            .tag = .shr,
            .ops = (Mir.Ops{
                .reg1 = reg,
                .flags = 0b10,
            }).encode(),
            .data = .{ .imm = shift },
        });
    }
}
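Reviewer note (not part of the diff): shiftRegister centralizes the SHR emission that airStructFieldVal previously open-coded: x86 has a dedicated shift-by-one encoding (no immediate operand, hence .data = undefined), while other counts use the imm8 form selected via .flags = 0b10, and a shift of zero emits nothing. Callers reduce to a single line:

    try self.shiftRegister(dst_mcv.register, shift); // no-op when shift == 0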
@@ -553,7 +553,6 @@ pub fn generateSymbol(
    const target = bin_file.options.target;
    const abi_align = typed_value.ty.abiAlignment(target);

    {
        const error_val = if (!is_payload) typed_value.val else Value.initTag(.zero);
        const begin = code.items.len;
        switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
@@ -566,18 +565,9 @@ pub fn generateSymbol(
            },
            .fail => |em| return Result{ .fail = em },
        }
        const unpadded_end = code.items.len - begin;
        const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
        const padding = try math.cast(usize, padded_end - unpadded_end);

        if (padding > 0) {
            try code.writer().writeByteNTimes(0, padding);
        }
    }

    if (payload_ty.hasRuntimeBits()) {
        const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.initTag(.undef);
        const begin = code.items.len;
        switch (try generateSymbol(bin_file, parent_atom_index, src_loc, .{
            .ty = payload_ty,
            .val = payload_val,
@@ -588,6 +578,8 @@ pub fn generateSymbol(
            },
            .fail => |em| return Result{ .fail = em },
        }
    }

    const unpadded_end = code.items.len - begin;
    const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
    const padding = try math.cast(usize, padded_end - unpadded_end);
@@ -595,7 +587,6 @@ pub fn generateSymbol(
    if (padding > 0) {
        try code.writer().writeByteNTimes(0, padding);
    }
}

    return Result{ .appended = {} };
},
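Reviewer note (not part of the diff): in generateSymbol the error-union emitter previously padded the error value inside its own block; after this change a single padding computation after the optional payload rounds the whole value up to its ABI alignment once. The arithmetic, as in the diff:

    const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
    const padding = try math.cast(usize, padded_end - unpadded_end);
    if (padding > 0) try code.writer().writeByteNTimes(0, padding);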
@@ -131,7 +131,7 @@ fn testBytesAlign(b: u8) !void {
test "@alignCast slices" {
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

    var array align(4) = [_]u32{ 1, 1 };
    const slice = array[0..];
@@ -147,7 +147,8 @@ fn sliceExpects4(slice: []align(4) u32) void {

test "return error union with 128-bit integer" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

    try expect(3 == try give());
}

@@ -14,7 +14,6 @@ test "@alignOf(T) before referencing T" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    comptime try expect(@alignOf(Foo) != maxInt(usize));
    if (native_arch == .x86_64) {
        comptime try expect(@alignOf(Foo) == 4);
@@ -23,7 +22,6 @@ test "@alignOf(T) before referencing T" {

test "comparison of @alignOf(T) against zero" {
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    {
        const T = struct { x: u32 };
        try expect(!(@alignOf(T) == 0));

@@ -349,7 +349,6 @@ test "array literal as argument to function" {
test "double nested array to const slice cast in array literal" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

    const S = struct {

@@ -287,7 +287,6 @@ fn fB() []const u8 {
test "call function pointer in struct" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage1) return error.SkipZigTest;

    try expect(mem.eql(u8, f3(true), "a"));
@@ -814,7 +813,6 @@ test "if expression type coercion" {
}

test "discarding the result of various expressions" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO


@@ -167,7 +167,6 @@ test "@bitCast packed structs at runtime and comptime" {
test "@bitCast extern structs at runtime and comptime" {
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

@@ -241,7 +240,6 @@ test "implicit cast to error union by returning" {
test "bitcast packed struct literal to byte" {
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

    const Foo = packed struct {
@@ -254,7 +252,6 @@ test "bitcast packed struct literal to byte" {
test "comptime bitcast used in expression has the correct type" {
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

    const Foo = packed struct {

@@ -6,7 +6,6 @@ const minInt = std.math.minInt;
test "@bitReverse large exotic integer" {
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    // Currently failing on stage1 for big-endian targets
    if (builtin.zig_backend == .stage1) return error.SkipZigTest;


@@ -14,7 +14,6 @@ const A = union(enum) {
test "union that needs padding bytes inside an array" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    var as = [_]A{
        A{ .B = B{ .D = 1 } },
        A{ .B = B{ .D = 1 } },

@@ -45,7 +45,6 @@ test "initialization" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    var t = a.init();
    try std.testing.expect(t.foo.len == 0);
}

@@ -5,7 +5,6 @@ fn f(buf: []u8) void {
}

test "crash" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;


@@ -223,7 +223,7 @@ test "@intToEnum passed a comptime_int to an enum with one item" {
}

test "@intCast to u0 and use the result" {
    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

    const S = struct {
        fn doTheTest(zero: u1, one: u1, bigzero: i32) !void {
@@ -324,7 +324,6 @@ test "cast from ?[*]T to ??[*]T" {
test "peer type unsigned int to signed" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

    var w: u31 = 5;
    var x: u8 = 7;
@@ -401,7 +400,6 @@ test "implicitly cast from [0]T to anyerror![]T" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    try testCastZeroArrayToErrSliceMut();
    comptime try testCastZeroArrayToErrSliceMut();
@@ -419,7 +417,6 @@ test "peer type resolution: [0]u8, []const u8, and anyerror![]u8" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    const S = struct {
        fn doTheTest() anyerror!void {
@@ -475,7 +472,6 @@ fn testCastConstArrayRefToConstSlice() !void {
test "peer type resolution: error and [N]T" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    try expect(mem.eql(u8, try testPeerErrorAndArray(0), "OK"));
    comptime try expect(mem.eql(u8, try testPeerErrorAndArray(0), "OK"));
@@ -790,7 +786,6 @@ test "peer cast *[0]T to []const T" {
test "peer cast *[N]T to [*]T" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    var array = [4:99]i32{ 1, 2, 3, 4 };
    var dest: [*]i32 = undefined;
@@ -844,7 +839,6 @@ test "peer cast [N:x]T to [N]T" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    const S = struct {
        fn doTheTest() !void {
@@ -861,7 +855,6 @@ test "peer cast *[N:x]T to *[N]T" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    const S = struct {
        fn doTheTest() !void {
@@ -915,7 +908,6 @@ test "peer type resolution implicit cast to return type" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    const S = struct {
        fn doTheTest() !void {
@@ -1030,7 +1022,6 @@ test "pointer reinterpret const float to int" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    // The hex representation is 0x3fe3333333333303.
    const float: f64 = 5.99999999999994648725e-01;
@@ -1116,7 +1107,6 @@ test "peer type resolution: [0]u8 and []const u8" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    try expect(peerTypeEmptyArrayAndSlice(true, "hi").len == 0);
    try expect(peerTypeEmptyArrayAndSlice(false, "hi").len == 1);
@@ -1204,7 +1194,6 @@ test "implicit cast *[0]T to E![]const u8" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    var x = @as(anyerror![]const u8, &[0]u8{});
    try expect((x catch unreachable).len == 0);
@@ -1212,14 +1201,11 @@ test "implicit cast *[0]T to E![]const u8" {

var global_array: [4]u8 = undefined;
test "cast from array reference to fn: comptime fn ptr" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    const f = @ptrCast(*const fn () callconv(.C) void, &global_array);
    try expect(@ptrToInt(f) == @ptrToInt(&global_array));
}
test "cast from array reference to fn: runtime fn ptr" {
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    var f = @ptrCast(*const fn () callconv(.C) void, &global_array);
    try expect(@ptrToInt(f) == @ptrToInt(&global_array));
@@ -1243,8 +1229,6 @@ test "*const [N]null u8 to ?[]const u8" {
}

test "cast between [*c]T and ?[*:0]T on fn parameter" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    const S = struct {
        const Handler = ?fn ([*c]const u8) callconv(.C) void;
        fn addCallback(handler: Handler) void {
@@ -1276,7 +1260,6 @@ test "assignment to optional pointer result loc" {
test "cast between *[N]void and []void" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    var a: [4]void = undefined;
    var b: []void = &a;
@@ -1286,7 +1269,6 @@ test "cast between *[N]void and []void" {
test "peer resolve arrays of different size to const slice" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    try expect(mem.eql(u8, boolToStr(true), "true"));
    try expect(mem.eql(u8, boolToStr(false), "false"));
@@ -1357,7 +1339,6 @@ test "peer type resolution: unreachable, null, slice" {
test "cast i8 fn call peers to i32 result" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    const S = struct {
        fn doTheTest() !void {

@@ -95,7 +95,6 @@ fn runSomeErrorDefers(x: bool) !bool {
}

test "mixing normal and error defers" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO


@@ -616,7 +616,6 @@ fn testEnumWithSpecifiedTagValues(x: MultipleChoice) !void {
}

test "enum with specified tag values" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -625,7 +624,6 @@ test "enum with specified tag values" {
}

test "non-exhaustive enum" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -691,7 +689,6 @@ test "empty non-exhaustive enum" {
}

test "single field non-exhaustive enum" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -760,7 +757,6 @@ test "cast integer literal to enum" {
}

test "enum with specified and unspecified tag values" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -942,7 +938,6 @@ test "enum value allocation" {
}

test "enum literal casting to tagged union" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -968,7 +963,6 @@ test "enum literal casting to tagged union" {
const Bar = enum { A, B, C, D };

test "enum literal casting to error union with payload enum" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;


@@ -23,7 +23,6 @@ fn shouldBeNotEqual(a: anyerror, b: anyerror) void {
}

test "error binary operator" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -58,7 +57,6 @@ pub fn baz() anyerror!i32 {
}

test "error wrapping" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -66,7 +64,6 @@ test "error wrapping" {
}

test "unwrap simple value from error" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -78,7 +75,6 @@ fn unwrapSimpleValueFromErrorDo() anyerror!isize {
}

test "error return in assignment" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -103,7 +99,6 @@ test "syntax: optional operator in front of error union operator" {
}

test "widen cast integer payload of error union function call" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -121,7 +116,6 @@ test "widen cast integer payload of error union function call" {
}

test "debug info for optional error set" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -173,7 +167,6 @@ fn bar2() (error{}!void) {}
test "error union type " {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO

    try testErrorUnionType();
@@ -191,7 +184,6 @@ fn testErrorUnionType() !void {
test "error set type" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO

    try testErrorSetType();
@@ -312,7 +304,6 @@ test "error: Infer error set from literals" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO

    _ = nullLiteral("n") catch |err| handleErrors(err);

@@ -216,7 +216,6 @@ fn addPointCoords(pt: Point) i32 {
}

test "pass by non-copying value through var arg" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;


@@ -48,7 +48,6 @@ var global_with_val: anyerror!u32 = 0;
var global_with_err: anyerror!u32 = error.SomeError;

test "unwrap mutable global var" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;


@@ -20,7 +20,6 @@ fn getErrInt() anyerror!i32 {
test "ir block deps" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    try expect((foo(1) catch unreachable) == 0);
    try expect((foo(2) catch unreachable) == 0);

@@ -4,7 +4,6 @@ const expect = std.testing.expect;

test "namespace depends on compile var" {
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (some_namespace.a_bool) {
        try expect(some_namespace.a_bool);
    } else {

@@ -28,7 +28,6 @@ pub const EmptyStruct = struct {};
test "optional pointer to size zero struct" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    var e = EmptyStruct{};
    var o: ?*EmptyStruct = &e;
@@ -37,7 +36,6 @@ test "optional pointer to size zero struct" {

test "equality compare optional pointers" {
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    try testNullPtrsEql();
    comptime try testNullPtrsEql();
@@ -152,7 +150,6 @@ fn test_cmp_optional_non_optional() !void {
test "unwrap function call with optional pointer return value" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    const S = struct {
        fn entry() !void {

@@ -247,7 +247,6 @@ test "usingnamespace within struct scope" {

test "struct field init with catch" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

    const S = struct {

@@ -3,7 +3,6 @@ const builtin = @import("builtin");
const expect = std.testing.expect;

test "try on error union" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;


@@ -7,7 +7,6 @@ test "ignore lval with underscore" {
}

test "ignore lval with underscore (while loop)" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;


@@ -272,7 +272,6 @@ const TheUnion = union(TheTag) {
    C: i32,
};
test "cast union to tag type of union" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -292,7 +291,6 @@ test "union field access gives the enum values" {
}

test "cast tag type of union to union" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -307,7 +305,6 @@ const Value2 = union(Letter2) {
};

test "implicit cast union to its tag type" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -401,7 +398,6 @@ test "tagged union with no payloads" {
}

test "union with only 1 field casted to its enum type" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -471,7 +467,6 @@ test "initialize global array of union" {
}

test "update the tag value for zero-sized unions" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -707,7 +702,6 @@ test "union with only 1 field casted to its enum type which has enum value speci
}

test "@enumToInt works on unions" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO

@@ -841,7 +835,6 @@ test "@unionInit can modify a union type" {
}

test "@unionInit can modify a pointer value" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO


@@ -175,7 +175,6 @@ test "while with optional as condition with else" {
}

test "while with error union condition" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -248,7 +247,6 @@ fn returnTrue() bool {
}

test "return with implicit cast from while loop" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

    returnWithImplicitCastFromWhileLoopTest() catch unreachable;
@@ -260,7 +258,6 @@ fn returnWithImplicitCastFromWhileLoopTest() anyerror!void {
}

test "while on error union with else result follow else prong" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

@@ -271,7 +268,6 @@ test "while on error union with else result follow else prong" {
}

test "while on error union with else result follow break prong" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;