x86_64: implement atomic loops

Jacob Young 2023-03-25 13:44:50 -04:00
parent 6d9bdc8733
commit 1e080e5056
27 changed files with 249 additions and 110 deletions
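
Below is a minimal sketch of the kind of user code this commit targets; it is not part of the commit, and the 128-bit cases assume a target with cmpxchg16b. None of these operations has a single-instruction x86 lowering, so the backend now compiles them as compare-exchange retry loops.

const std = @import("std");

test "atomic ops that lower to cmpxchg loops" {
    // Integer Max has no locked x86 instruction; the backend now emits a
    // mov/cmp/cmovcc/lock-cmpxchg retry loop for it.
    var i: u32 = 5;
    _ = @atomicRmw(u32, &i, .Max, 7, .SeqCst);
    try std.testing.expectEqual(@as(u32, 7), i);

    // Runtime floats always take the loop path.
    var f: f32 = 1.0;
    _ = @atomicRmw(f32, &f, .Add, 2.0, .SeqCst);
    try std.testing.expectEqual(@as(f32, 3.0), f);

    // 16-byte operands loop on lock cmpxchg16b (old value in rdx:rax,
    // new value in rcx:rbx).
    var w: u128 = 5;
    const prev = @atomicRmw(u128, &w, .Add, 1, .SeqCst);
    try std.testing.expectEqual(@as(u128, 5), prev);
    try std.testing.expectEqual(@as(u128, 6), w);
}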

View File

@@ -1438,7 +1438,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
const min_abi_size = @min(dst_abi_size, src_abi_size);
const tag: Mir.Inst.Tag = switch (signedness) {
.signed => .movsx,
.unsigned => if (min_abi_size == 4) .mov else .movzx,
.unsigned => if (min_abi_size > 2) .mov else .movzx,
};
const dst_alias = switch (tag) {
.movsx => dst_reg.to64(),
@@ -3889,10 +3889,10 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
const src_ty = self.air.typeOf(src_air);
const src_mcv = try self.resolveInst(src_air);
if (src_ty.zigTypeTag() == .Vector) {
return self.fail("TODO implement genBinOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)});
return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)});
}
if (src_ty.abiSize(self.target.*) > 8) {
return self.fail("TODO implement genBinOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)});
return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)});
}
switch (src_mcv) {
@@ -4192,7 +4192,7 @@ fn genShiftBinOp(
return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()});
}
self.register_manager.getRegAssumeFree(.rcx, null);
try self.register_manager.getReg(.rcx, null);
const rcx_lock = self.register_manager.lockRegAssumeUnused(.rcx);
defer self.register_manager.unlockReg(rcx_lock);
@@ -4717,7 +4717,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
);
}
},
else => return self.fail("TODO getBinOpMir implement large immediate ABI", .{}),
else => return self.fail("TODO genBinOpMir implement large immediate ABI", .{}),
}
},
.memory,
@@ -4798,28 +4798,28 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
);
}
},
else => return self.fail("TODO getBinOpMir implement large immediate ABI", .{}),
else => return self.fail("TODO genBinOpMir implement large immediate ABI", .{}),
}
},
.memory,
.stack_offset,
.ptr_stack_offset,
=> {
return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{});
return self.fail("TODO implement x86 genBinOpMir source memory", .{});
},
.linker_load => {
return self.fail("TODO implement x86 ADD/SUB/CMP source symbol at index in linker", .{});
return self.fail("TODO implement x86 genBinOpMir source symbol at index in linker", .{});
},
.eflags => {
return self.fail("TODO implement x86 ADD/SUB/CMP source eflags", .{});
return self.fail("TODO implement x86 genBinOpMir source eflags", .{});
},
}
},
.memory => {
return self.fail("TODO implement x86 ADD/SUB/CMP destination memory", .{});
return self.fail("TODO implement x86 genBinOpMir destination memory", .{});
},
.linker_load => {
return self.fail("TODO implement x86 ADD/SUB/CMP destination symbol at index", .{});
return self.fail("TODO implement x86 genBinOpMir destination symbol at index", .{});
},
}
}
@@ -7219,20 +7219,37 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
const ptr_ty = self.air.typeOf(extra.ptr);
const ptr_mcv = try self.resolveInst(extra.ptr);
const val_ty = self.air.typeOf(extra.expected_value);
const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx });
const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx });
for (regs_lock) |lock| self.register_manager.unlockReg(lock);
const exp_mcv = try self.resolveInst(extra.expected_value);
try self.genSetReg(val_ty, .rax, exp_mcv);
if (val_abi_size > 8) switch (exp_mcv) {
.stack_offset => |exp_off| {
try self.genSetReg(Type.usize, .rax, .{ .stack_offset = exp_off - 0 });
try self.genSetReg(Type.usize, .rdx, .{ .stack_offset = exp_off - 8 });
},
else => return self.fail("TODO implement cmpxchg for {s}", .{@tagName(exp_mcv)}),
} else try self.genSetReg(val_ty, .rax, exp_mcv);
const rax_lock = self.register_manager.lockRegAssumeUnused(.rax);
defer self.register_manager.unlockReg(rax_lock);
const new_mcv = try self.resolveInst(extra.new_value);
const new_reg = try self.copyToTmpRegister(val_ty, new_mcv);
const new_reg: Register = if (val_abi_size > 8) switch (new_mcv) {
.stack_offset => |new_off| new: {
try self.genSetReg(Type.usize, .rbx, .{ .stack_offset = new_off - 0 });
try self.genSetReg(Type.usize, .rcx, .{ .stack_offset = new_off - 8 });
break :new undefined;
},
else => return self.fail("TODO implement cmpxchg for {s}", .{@tagName(exp_mcv)}),
} else try self.copyToTmpRegister(val_ty, new_mcv);
const new_lock = self.register_manager.lockRegAssumeUnused(new_reg);
defer self.register_manager.unlockReg(new_lock);
const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
const ptr_size = Memory.PtrSize.fromSize(val_abi_size);
const ptr_mem: Memory = switch (ptr_mcv) {
const ptr_mem = switch (ptr_mcv) {
.register => |reg| Memory.sib(ptr_size, .{ .base = reg }),
.ptr_stack_offset => |off| Memory.sib(ptr_size, .{ .base = .rbp, .disp = -off }),
else => Memory.sib(ptr_size, .{ .base = try self.copyToTmpRegister(ptr_ty, ptr_mcv) }),
@@ -7241,16 +7258,30 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
defer if (mem_lock) |lock| self.register_manager.unlockReg(lock);
try self.spillEflagsIfOccupied();
_ = try self.addInst(.{ .tag = .cmpxchg, .ops = .lock_mr_sib, .data = .{ .rx = .{
.r = registerAlias(new_reg, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} } });
if (val_abi_size <= 8) {
_ = try self.addInst(.{ .tag = .cmpxchg, .ops = .lock_mr_sib, .data = .{ .rx = .{
.r = registerAlias(new_reg, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} } });
} else {
_ = try self.addInst(.{ .tag = .cmpxchgb, .ops = .lock_m_sib, .data = .{
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} });
}
const result: MCValue = result: {
if (self.liveness.isUnused(inst)) break :result .dead;
self.eflags_inst = inst;
break :result .{ .register_overflow = .{ .reg = .rax, .eflags = .ne } };
if (val_abi_size <= 8) {
self.eflags_inst = inst;
break :result .{ .register_overflow = .{ .reg = .rax, .eflags = .ne } };
}
const dst_mcv = try self.allocRegOrMem(inst, false);
try self.genSetStack(Type.bool, dst_mcv.stack_offset - 16, .{ .eflags = .ne }, .{});
try self.genSetStack(Type.usize, dst_mcv.stack_offset - 8, .{ .register = .rdx }, .{});
try self.genSetStack(Type.usize, dst_mcv.stack_offset - 0, .{ .register = .rax }, .{});
break :result dst_mcv;
};
return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value });
}
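
As a usage-level illustration of the new 16-byte compare-and-swap path (a sketch, not code from the commit): the expected value travels in rdx:rax, the replacement in rcx:rbx, and lock cmpxchg16b either swaps or reports the current value.

const std = @import("std");

test "16-byte cmpxchg" {
    var x: u128 = 5;
    // Success: memory is updated and null is returned.
    try std.testing.expect(@cmpxchgStrong(u128, &x, 5, 10, .SeqCst, .SeqCst) == null);
    try std.testing.expectEqual(@as(u128, 10), x);
    // Failure: memory is untouched and the current value is returned.
    try std.testing.expectEqual(@as(?u128, 10), @cmpxchgStrong(u128, &x, 5, 99, .SeqCst, .SeqCst));
}
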
@@ -7263,9 +7294,10 @@ fn atomicOp(
ptr_ty: Type,
val_ty: Type,
unused: bool,
op: ?std.builtin.AtomicRmwOp,
rmw_op: ?std.builtin.AtomicRmwOp,
order: std.builtin.AtomicOrder,
) InnerError!void {
const dst_mcv = MCValue{ .register = dst_reg };
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
@@ -7283,7 +7315,7 @@ fn atomicOp(
const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
const ptr_size = Memory.PtrSize.fromSize(val_abi_size);
const ptr_mem: Memory = switch (ptr_mcv) {
const ptr_mem = switch (ptr_mcv) {
.register => |reg| Memory.sib(ptr_size, .{ .base = reg }),
.ptr_stack_offset => |off| Memory.sib(ptr_size, .{ .base = .rbp, .disp = -off }),
else => Memory.sib(ptr_size, .{ .base = try self.copyToTmpRegister(ptr_ty, ptr_mcv) }),
@@ -7291,48 +7323,197 @@ fn atomicOp(
const mem_lock = if (ptr_mem.base()) |reg| self.register_manager.lockReg(reg) else null;
defer if (mem_lock) |lock| self.register_manager.unlockReg(lock);
try self.genSetReg(val_ty, dst_reg, val_mcv);
const method: enum { lock, loop, libcall } = if (val_ty.isRuntimeFloat())
.loop
else switch (rmw_op orelse .Xchg) {
.Xchg,
.Add,
.Sub,
=> if (val_abi_size <= 8) .lock else if (val_abi_size <= 16) .loop else .libcall,
.And,
.Or,
.Xor,
=> if (val_abi_size <= 8 and unused) .lock else if (val_abi_size <= 16) .loop else .libcall,
.Nand,
.Max,
.Min,
=> if (val_abi_size <= 16) .loop else .libcall,
};
switch (method) {
.lock => {
const tag: Mir.Inst.Tag = if (rmw_op) |op| switch (op) {
.Xchg => if (unused) .mov else .xchg,
.Add => if (unused) .add else .xadd,
.Sub => if (unused) .sub else .xadd,
.And => .@"and",
.Or => .@"or",
.Xor => .xor,
else => unreachable,
} else switch (order) {
.Unordered, .Monotonic, .Release, .AcqRel => .mov,
.Acquire => unreachable,
.SeqCst => .xchg,
};
const need_loop = val_ty.isRuntimeFloat() or if (op) |rmw| switch (rmw) {
.Xchg, .Add, .Sub => false,
.And, .Or, .Xor => !unused,
.Nand, .Max, .Min => true,
} else false;
if (!need_loop) {
const tag: Mir.Inst.Tag = if (op) |rmw| switch (rmw) {
.Xchg => if (unused) .mov else .xchg,
.Add => if (unused) .add else .xadd,
.Sub => if (unused) .sub else .xadd,
.And => .@"and",
.Or => .@"or",
.Xor => .xor,
else => unreachable,
} else switch (order) {
.Unordered, .Monotonic, .Release, .AcqRel => .mov,
.Acquire => unreachable,
.SeqCst => .xchg,
};
if (op == std.builtin.AtomicRmwOp.Sub and tag == .xadd) {
try self.genUnOpMir(.neg, val_ty, .{ .register = dst_reg });
}
_ = try self.addInst(.{ .tag = tag, .ops = switch (tag) {
.mov, .xchg => .mr_sib,
.xadd, .add, .sub, .@"and", .@"or", .xor => .lock_mr_sib,
else => unreachable,
}, .data = .{ .rx = .{
.r = registerAlias(dst_reg, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} } });
return;
try self.genSetReg(val_ty, dst_reg, val_mcv);
if (rmw_op == std.builtin.AtomicRmwOp.Sub and tag == .xadd) {
try self.genUnOpMir(.neg, val_ty, .{ .register = dst_reg });
}
_ = try self.addInst(.{ .tag = tag, .ops = switch (tag) {
.mov, .xchg => .mr_sib,
.xadd, .add, .sub, .@"and", .@"or", .xor => .lock_mr_sib,
else => unreachable,
}, .data = .{ .rx = .{
.r = registerAlias(dst_reg, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} } });
},
.loop => _ = try self.asmJccReloc(if (val_abi_size <= 8) loop: {
try self.genSetReg(val_ty, dst_reg, val_mcv);
try self.asmRegisterMemory(.mov, registerAlias(.rax, val_abi_size), ptr_mem);
const loop = @intCast(u32, self.mir_instructions.len);
if (rmw_op != std.builtin.AtomicRmwOp.Xchg) {
try self.genSetReg(val_ty, dst_reg, .{ .register = .rax });
}
if (rmw_op) |op| switch (op) {
.Xchg => try self.genSetReg(val_ty, dst_reg, val_mcv),
.Add => try self.genBinOpMir(.add, val_ty, dst_mcv, val_mcv),
.Sub => try self.genBinOpMir(.sub, val_ty, dst_mcv, val_mcv),
.And => try self.genBinOpMir(.@"and", val_ty, dst_mcv, val_mcv),
.Nand => {
try self.genBinOpMir(.@"and", val_ty, dst_mcv, val_mcv);
try self.genUnOpMir(.not, val_ty, dst_mcv);
},
.Or => try self.genBinOpMir(.@"or", val_ty, dst_mcv, val_mcv),
.Xor => try self.genBinOpMir(.xor, val_ty, dst_mcv, val_mcv),
.Min, .Max => {
const cc: Condition = switch (if (val_ty.isAbiInt())
val_ty.intInfo(self.target.*).signedness
else
.unsigned) {
.unsigned => switch (op) {
.Min => .a,
.Max => .b,
else => unreachable,
},
.signed => switch (op) {
.Min => .g,
.Max => .l,
else => unreachable,
},
};
try self.genBinOpMir(.cmp, val_ty, dst_mcv, val_mcv);
switch (val_mcv) {
.register => |val_reg| try self.asmCmovccRegisterRegister(
registerAlias(dst_reg, val_abi_size),
registerAlias(val_reg, val_abi_size),
cc,
),
.stack_offset => |val_off| try self.asmCmovccRegisterMemory(
registerAlias(dst_reg, val_abi_size),
Memory.sib(
Memory.PtrSize.fromSize(val_abi_size),
.{ .base = .rbp, .disp = -val_off },
),
cc,
),
else => {
const val_reg = try self.copyToTmpRegister(val_ty, val_mcv);
try self.asmCmovccRegisterRegister(
registerAlias(dst_reg, val_abi_size),
registerAlias(val_reg, val_abi_size),
cc,
);
},
}
},
};
_ = try self.addInst(.{ .tag = .cmpxchg, .ops = .lock_mr_sib, .data = .{ .rx = .{
.r = registerAlias(dst_reg, val_abi_size),
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} } });
break :loop loop;
} else loop: {
try self.asmRegisterMemory(.mov, .rax, Memory.sib(.qword, .{
.base = ptr_mem.sib.base,
.scale_index = ptr_mem.sib.scale_index,
.disp = ptr_mem.sib.disp + 0,
}));
try self.asmRegisterMemory(.mov, .rdx, Memory.sib(.qword, .{
.base = ptr_mem.sib.base,
.scale_index = ptr_mem.sib.scale_index,
.disp = ptr_mem.sib.disp + 8,
}));
const loop = @intCast(u32, self.mir_instructions.len);
switch (val_mcv) {
.stack_offset => |val_off| {
const val_lo_mem = Memory.sib(.qword, .{ .base = .rbp, .disp = 0 - val_off });
const val_hi_mem = Memory.sib(.qword, .{ .base = .rbp, .disp = 8 - val_off });
if (rmw_op != std.builtin.AtomicRmwOp.Xchg) {
try self.asmRegisterRegister(.mov, .rbx, .rax);
try self.asmRegisterRegister(.mov, .rcx, .rdx);
}
if (rmw_op) |op| switch (op) {
.Xchg => {
try self.asmRegisterMemory(.mov, .rbx, val_lo_mem);
try self.asmRegisterMemory(.mov, .rcx, val_hi_mem);
},
.Add => {
try self.asmRegisterMemory(.add, .rbx, val_lo_mem);
try self.asmRegisterMemory(.adc, .rcx, val_hi_mem);
},
.Sub => {
try self.asmRegisterMemory(.sub, .rbx, val_lo_mem);
try self.asmRegisterMemory(.sbb, .rcx, val_hi_mem);
},
.And => {
try self.asmRegisterMemory(.@"and", .rbx, val_lo_mem);
try self.asmRegisterMemory(.@"and", .rcx, val_hi_mem);
},
.Nand => {
try self.asmRegisterMemory(.@"and", .rbx, val_lo_mem);
try self.asmRegisterMemory(.@"and", .rcx, val_hi_mem);
try self.asmRegister(.not, .rbx);
try self.asmRegister(.not, .rcx);
},
.Or => {
try self.asmRegisterMemory(.@"or", .rbx, val_lo_mem);
try self.asmRegisterMemory(.@"or", .rcx, val_hi_mem);
},
.Xor => {
try self.asmRegisterMemory(.xor, .rbx, val_lo_mem);
try self.asmRegisterMemory(.xor, .rcx, val_hi_mem);
},
else => return self.fail(
"TODO implement x86 atomic loop for large abi {s}",
.{@tagName(op)},
),
};
},
else => return self.fail(
"TODO implement x86 atomic loop for large abi {s}",
.{@tagName(val_mcv)},
),
}
_ = try self.addInst(.{ .tag = .cmpxchgb, .ops = .lock_m_sib, .data = .{
.payload = try self.addExtra(Mir.MemorySib.encode(ptr_mem)),
} });
break :loop loop;
}, .ne),
.libcall => return self.fail("TODO implement x86 atomic libcall", .{}),
}
return self.fail("TODO implement x86 atomic loop", .{});
}
fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx });
const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx });
defer for (regs_lock) |lock| self.register_manager.unlockReg(lock);
const unused = self.liveness.isUnused(inst);
const dst_reg = try self.register_manager.allocReg(if (unused) null else inst, gp);
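
The method selection in atomicOp above can be summarized as a standalone table. The following restatement is illustrative only; the names Strategy and atomicStrategy are invented here, not compiler API:

const std = @import("std");

const Strategy = enum { lock, loop, libcall };

// Same dispatch logic as atomicOp's `method`, in standalone form.
fn atomicStrategy(is_float: bool, rmw_op: ?std.builtin.AtomicRmwOp, abi_size: u32, unused: bool) Strategy {
    // Floats never have a direct locked instruction.
    if (is_float) return .loop;
    return switch (rmw_op orelse .Xchg) {
        // xchg/xadd (or plain locked add/sub when the result is unused) cover these.
        .Xchg, .Add, .Sub => if (abi_size <= 8) .lock else if (abi_size <= 16) .loop else .libcall,
        // Locked and/or/xor discard the old value, so they only apply when it is unused.
        .And, .Or, .Xor => if (abi_size <= 8 and unused) .lock else if (abi_size <= 16) .loop else .libcall,
        // No x86 instruction exists for these; loop, or fall back to a libcall.
        .Nand, .Max, .Min => if (abi_size <= 16) .loop else .libcall,
    };
}

test "strategy table" {
    try std.testing.expect(atomicStrategy(false, .Add, 8, false) == .lock);
    try std.testing.expect(atomicStrategy(false, .Max, 8, false) == .loop);
    try std.testing.expect(atomicStrategy(false, .Add, 16, false) == .loop);
    try std.testing.expect(atomicStrategy(true, .Add, 4, false) == .loop);
}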

View File

@@ -411,20 +411,17 @@ pub const Memory = union(enum) {
dword,
qword,
tbyte,
dqword,
pub fn fromSize(size: u32) PtrSize {
return if (size <= 1)
.byte
else if (size <= 2)
.word
else if (size <= 4)
.dword
else if (size <= 8)
.qword
else if (size == 10)
.tbyte
else
unreachable;
return switch (size) {
1...1 => .byte,
2...2 => .word,
3...4 => .dword,
5...8 => .qword,
9...16 => .dqword,
else => unreachable,
};
}
pub fn fromBitSize(bit_size: u64) PtrSize {
@@ -434,6 +431,7 @@ pub const Memory = union(enum) {
32 => .dword,
64 => .qword,
80 => .tbyte,
128 => .dqword,
else => unreachable,
};
}
@@ -445,6 +443,7 @@ pub const Memory = union(enum) {
.dword => 32,
.qword => 64,
.tbyte => 80,
.dqword => 128,
};
}
};
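
The widened PtrSize.fromSize mapping above, restated as a standalone sketch (ptrSizeKeyword is an illustrative name; Memory.PtrSize itself is compiler-internal):

const std = @import("std");

// Operand byte sizes mapped to x86 memory-operand width keywords; sizes of
// 9..16 bytes now resolve to dqword so cmpxchg16b operands encode correctly.
fn ptrSizeKeyword(size: u32) []const u8 {
    return switch (size) {
        1 => "byte",
        2 => "word",
        3...4 => "dword",
        5...8 => "qword",
        9...16 => "dqword",
        else => unreachable,
    };
}

test {
    try std.testing.expectEqualStrings("qword", ptrSizeKeyword(8));
    try std.testing.expectEqualStrings("dqword", ptrSizeKeyword(16));
}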

View File

@@ -70,7 +70,6 @@ test "array concat with undefined" {
test "array concat with tuple" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const array: [2]u8 = .{ 1, 2 };
{
@@ -641,7 +640,6 @@ test "tuple to array handles sentinel" {
}
test "array init of container level array variable" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@@ -4,7 +4,6 @@ const expectEqualStrings = std.testing.expectEqualStrings;
test "slicing slices" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

View File

@@ -3,7 +3,6 @@ const builtin = @import("builtin");
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

View File

@@ -15,7 +15,6 @@ fn takeFoo(foo: *const Foo) !void {
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

View File

@@ -20,7 +20,6 @@ fn letter(e: Letter) u8 {
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

View File

@@ -11,7 +11,6 @@ const text =
;
test "issue 6456" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

View File

@@ -8,7 +8,6 @@ const array = [_][]const []const u8{
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

View File

@@ -3,7 +3,6 @@ const builtin = @import("builtin");
test "empty file level struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const T = @import("empty_file_level_struct.zig");
@@ -15,7 +14,6 @@ test "empty file level struct" {
test "empty file level union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const T = @import("empty_file_level_union.zig");

View File

@@ -2,7 +2,6 @@ const std = @import("std");
const builtin = @import("builtin");
test "strlit to vector" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@@ -560,7 +560,6 @@ fn testUnsignedNegationWrappingEval(x: u16) !void {
test "negation wrapping" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
try expectEqual(@as(u1, 1), negateWrap(u1, 1));
}

View File

@@ -431,7 +431,6 @@ test "alignment of wrapping an optional payload" {
test "Optional slice size is optimized" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(@sizeOf(?[]u8) == @sizeOf([]u8));

View File

@@ -120,7 +120,6 @@ test "consistent size of packed structs" {
}
test "correct sizeOf and offsets in packed structs" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -188,7 +187,6 @@ test "correct sizeOf and offsets in packed structs" {
}
test "nested packed structs" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@@ -6,7 +6,6 @@ const native_endian = builtin.cpu.arch.endian();
test "packed struct explicit backing integer" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@@ -412,7 +412,6 @@ test "@ptrToInt on null optional at comptime" {
test "indexing array with sentinel returns correct type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var s: [:0]const u8 = "abc";
@@ -497,7 +496,6 @@ test "pointer to constant decl preserves alignment" {
test "ptrCast comptime known slice to C pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const s: [:0]const u8 = "foo";

View File

@@ -688,7 +688,6 @@ test "slice field ptr var" {
test "global slice field access" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -733,7 +732,6 @@ test "empty slice ptr is non null" {
test "slice decays to many pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var buf: [8]u8 = "abcdefg\x00".*;
const p: [*:0]const u8 = buf[0..7 :0];
@@ -744,7 +742,6 @@ test "write through pointer to optional slice arg" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const S = struct {
fn bar(foo: *?[]const u8) !void {

View File

@@ -935,7 +935,6 @@ test "comptime struct field" {
}
test "tuple element initialized with fn call" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@@ -620,7 +620,6 @@ test "switch on error set with single else" {
}
test "switch capture copies its payload" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@@ -21,7 +21,6 @@ test "thread local variable" {
test "pointer to thread local array" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) {

View File

@@ -23,7 +23,6 @@ test "casting to void with a macro" {
}
test "initializer list expression" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -52,7 +51,6 @@ test "reference to a struct type" {
test "cast negative integer to pointer" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -98,7 +96,6 @@ test "casting or calling a value with a paren-surrounded macro" {
test "nested comma operator" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -109,7 +106,6 @@ test "nested comma operator" {
test "cast functions" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -123,7 +119,6 @@ test "cast functions" {
test "large integer macro" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -133,7 +128,6 @@ test "large integer macro" {
test "string literal macro with embedded tab character" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -143,7 +137,6 @@ test "string literal macro with embedded tab character" {
test "string and char literals that are not UTF-8 encoded. Issue #12784" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -188,7 +181,6 @@ test "Macro that uses division operator. Issue #13162" {
test "Macro that uses remainder operator. Issue #13346" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@@ -381,7 +381,6 @@ test "tuple of struct concatenation and coercion to array" {
test "nested runtime conditionals in tuple initializer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var data: u8 = 0;
const x = .{

View File

@@ -7,7 +7,6 @@ const expectEqualStrings = testing.expectEqualStrings;
test "tuple declaration type info" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
{
const T = struct { comptime u32 align(2) = 1, []const u8 };
@@ -57,7 +56,6 @@ test "tuple declaration type info" {
test "Tuple declaration usage" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const T = struct { u32, []const u8 };
var t: T = .{ 1, "foo" };

View File

@@ -200,7 +200,6 @@ test "Type.ErrorUnion" {
test "Type.Opaque" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -348,7 +347,6 @@ test "Type.Struct" {
}
test "Type.Enum" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO

View File

@@ -568,7 +568,6 @@ test "value from struct @typeInfo default_value can be loaded at comptime" {
test "@typeInfo decls and usingnamespace" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const A = struct {
const x = 5;

View File

@@ -64,7 +64,6 @@ test "anon field init" {
}
test "basic" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -228,7 +227,6 @@ test "local variable" {
}
test "comptime parameters not converted to anytype in function type" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

View File

@@ -1267,7 +1267,6 @@ test "store to vector in slice" {
test "addition of vectors represented as strings" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const V = @Vector(3, u8);
const foo: V = "foo".*;