x86_64: (re)implement optional ops

Note that this commit also changes the layout of optionals for all
other backends that use `src/codegen.zig`, without updating them!
Jacob Young 2023-03-19 06:49:50 -04:00 committed by Jakub Konka
parent 24f0900ecb
commit f95faac5ae
19 changed files with 243 additions and 149 deletions
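The note above about the optional layout can be read off the `src/codegen.zig` hunk further down: a non-pointer-like optional is now laid out as the payload first, then a single "some" flag byte, then zero padding up to the optional's ABI size (previously the flag/padding preceded the payload). A minimal sketch of that serialization, assuming an ArrayList-backed `code` buffer as in `generateSymbol`; the helper and its name are illustrative, not part of the commit:

const std = @import("std");

// Illustrative only: serialize a non-pointer-like optional using the layout
// implied by the generateSymbol change (payload, then flag byte, then padding).
// Assumes abi_size >= payload.len + 1.
fn writeOptional(code: *std.ArrayList(u8), payload: []const u8, is_pl: bool, abi_size: usize) !void {
    try code.appendSlice(payload); // payload bytes at offset 0
    try code.append(@boolToInt(is_pl)); // "some" flag byte right after the payload
    try code.writer().writeByteNTimes(0, abi_size - payload.len - 1); // pad to ABI size
}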


@@ -1871,55 +1871,74 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
if (self.liveness.isUnused(inst)) {
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
}
const payload_ty = self.air.typeOfIndex(inst);
const optional_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
const result: MCValue = result: {
if (!payload_ty.hasRuntimeBits()) break :result MCValue.none;
if (optional_ty.isPtrLikeOptional()) {
if (self.reuseOperand(inst, ty_op.operand, 0, operand)) {
break :result operand;
if (self.liveness.isUnused(inst)) break :result .none;
const pl_ty = self.air.typeOfIndex(inst);
const opt_mcv = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) {
switch (opt_mcv) {
.register => |reg| try self.truncateRegister(pl_ty, reg),
else => {},
}
break :result try self.copyToRegisterWithInstTracking(inst, payload_ty, operand);
break :result opt_mcv;
}
const offset = optional_ty.abiSize(self.target.*) - payload_ty.abiSize(self.target.*);
switch (operand) {
.stack_offset => |off| {
break :result MCValue{ .stack_offset = off - @intCast(i32, offset) };
},
.register => {
// TODO reuse the operand
const result = try self.copyToRegisterWithInstTracking(inst, optional_ty, operand);
const shift = @intCast(u8, offset * @sizeOf(usize));
try self.genShiftBinOpMir(.shr, optional_ty, result.register, .{ .immediate = @intCast(u8, shift) });
break :result result;
},
else => return self.fail("TODO implement optional_payload when operand is {}", .{operand}),
}
const pl_mcv = try self.allocRegOrMem(inst, true);
try self.setRegOrMem(pl_ty, pl_mcv, opt_mcv);
break :result pl_mcv;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst))
.dead
const result: MCValue = result: {
if (self.liveness.isUnused(inst)) break :result .dead;
const dst_ty = self.air.typeOfIndex(inst);
const opt_mcv = try self.resolveInst(ty_op.operand);
break :result if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
opt_mcv
else
return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch});
try self.copyToRegisterWithInstTracking(inst, dst_ty, opt_mcv);
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst))
const result = result: {
const dst_ty = self.air.typeOfIndex(inst);
const src_ty = self.air.typeOf(ty_op.operand);
const opt_ty = src_ty.childType();
const src_mcv = try self.resolveInst(ty_op.operand);
if (opt_ty.optionalReprIsPayload()) {
break :result if (self.liveness.isUnused(inst))
.dead
else if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
src_mcv
else
return self.fail("TODO implement .optional_payload_ptr_set for {}", .{self.target.cpu.arch});
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
}
const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
src_mcv
else
try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
const pl_ty = dst_ty.childType();
const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*));
try self.asmMemoryImmediate(
.mov,
Memory.sib(.byte, .{ .base = dst_mcv.register, .disp = pl_abi_size }),
Immediate.u(1),
);
break :result if (self.liveness.isUnused(inst)) .dead else dst_mcv;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -2150,41 +2169,45 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
if (self.liveness.isUnused(inst)) {
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
}
const payload_ty = self.air.typeOf(ty_op.operand);
const result: MCValue = result: {
if (!payload_ty.hasRuntimeBits()) {
break :result MCValue{ .immediate = 1 };
}
if (self.liveness.isUnused(inst)) break :result .dead;
const optional_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
const operand_lock: ?RegisterLock = switch (operand) {
const pl_ty = self.air.typeOf(ty_op.operand);
if (!pl_ty.hasRuntimeBits()) break :result .{ .immediate = 1 };
const opt_ty = self.air.typeOfIndex(inst);
const pl_mcv = try self.resolveInst(ty_op.operand);
const same_repr = opt_ty.optionalReprIsPayload();
if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv;
const pl_lock: ?RegisterLock = switch (pl_mcv) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
else => null,
};
defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
defer if (pl_lock) |lock| self.register_manager.unlockReg(lock);
if (optional_ty.isPtrLikeOptional()) {
// TODO should we check if we can reuse the operand?
if (self.reuseOperand(inst, ty_op.operand, 0, operand)) {
break :result operand;
const opt_mcv = try self.allocRegOrMem(inst, true);
try self.setRegOrMem(pl_ty, opt_mcv, pl_mcv);
if (!same_repr) {
const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*));
switch (opt_mcv) {
else => unreachable,
.register => |opt_reg| try self.asmRegisterImmediate(
.bts,
opt_reg,
Immediate.u(@intCast(u6, pl_abi_size * 8)),
),
.stack_offset => |off| try self.asmMemoryImmediate(
.mov,
Memory.sib(.byte, .{ .base = .rsp, .disp = pl_abi_size - off }),
Immediate.u(0),
),
}
break :result try self.copyToRegisterWithInstTracking(inst, payload_ty, operand);
}
const optional_abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
const optional_abi_align = optional_ty.abiAlignment(self.target.*);
const payload_abi_size = @intCast(u32, payload_ty.abiSize(self.target.*));
const offset = optional_abi_size - payload_abi_size;
const stack_offset = @intCast(i32, try self.allocMem(inst, optional_abi_size, optional_abi_align));
try self.genSetStack(Type.bool, stack_offset, .{ .immediate = 1 }, .{});
try self.genSetStack(payload_ty, stack_offset - @intCast(i32, offset), operand, .{});
break :result MCValue{ .stack_offset = stack_offset };
break :result opt_mcv;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -2619,7 +2642,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
},
.register => {
const shift: u6 = if (layout.tag_align < layout.payload_align)
@intCast(u6, layout.payload_size * @sizeOf(usize))
@intCast(u6, layout.payload_size * 8)
else
0;
const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand);
@@ -3271,7 +3294,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
defer if (dst_mcv_lock) |lock| self.register_manager.unlockReg(lock);
// Shift by struct_field_offset.
const shift = @intCast(u8, struct_field_offset * @sizeOf(usize));
const shift = @intCast(u8, struct_field_offset * 8);
try self.genShiftBinOpMir(.shr, Type.usize, dst_mcv.register, .{ .immediate = shift });
// Mask with reg.bitSize() - struct_field_size
@@ -4928,25 +4951,107 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, .unreach, .{ .none, .none, .none });
}
fn isNull(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue {
try self.spillEflagsIfOccupied();
self.eflags_inst = inst;
const cmp_ty: Type = if (!ty.isPtrLikeOptional()) blk: {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime()) Type.bool else ty;
} else ty;
var pl_buf: Type.Payload.ElemType = undefined;
const pl_ty = opt_ty.optionalChild(&pl_buf);
try self.genBinOpMir(.cmp, cmp_ty, operand, MCValue{ .immediate = 0 });
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload())
.{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty }
else
.{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool };
return MCValue{ .eflags = .e };
switch (opt_mcv) {
.none,
.unreach,
.dead,
.undef,
.immediate,
.register_overflow,
.ptr_stack_offset,
.eflags,
=> unreachable,
.register => |opt_reg| {
if (some_info.off == 0) {
const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*));
const alias_reg = registerAlias(opt_reg, some_abi_size);
assert(some_abi_size * 8 == alias_reg.bitSize());
try self.asmRegisterRegister(.@"test", alias_reg, alias_reg);
return .{ .eflags = .z };
}
assert(some_info.ty.tag() == .bool);
const opt_abi_size = @intCast(u32, opt_ty.abiSize(self.target.*));
try self.asmRegisterImmediate(
.bt,
registerAlias(opt_reg, opt_abi_size),
Immediate.u(@intCast(u6, some_info.off * 8)),
);
return .{ .eflags = .nc };
},
.memory, .linker_load => {
const addr_reg = (try self.register_manager.allocReg(null, gp)).to64();
const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
defer self.register_manager.unlockReg(addr_reg_lock);
try self.loadMemPtrIntoRegister(addr_reg, Type.usize, opt_mcv);
// To get the actual address of the value we want to modify we have to go through the GOT
try self.asmRegisterMemory(.mov, addr_reg, Memory.sib(.qword, .{
.base = addr_reg,
.disp = 0,
}));
const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*));
try self.asmMemoryImmediate(.cmp, Memory.sib(
Memory.PtrSize.fromSize(some_abi_size),
.{ .base = addr_reg, .disp = some_info.off },
), Immediate.u(0));
return .{ .eflags = .e };
},
.stack_offset => |off| {
const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*));
try self.asmMemoryImmediate(.cmp, Memory.sib(
Memory.PtrSize.fromSize(some_abi_size),
.{ .base = .rbp, .disp = some_info.off - off },
), Immediate.u(0));
return .{ .eflags = .e };
},
}
}
fn isNonNull(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
const is_null_res = try self.isNull(inst, ty, operand);
assert(is_null_res.eflags == .e);
return MCValue{ .eflags = is_null_res.eflags.negate() };
fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
try self.spillEflagsIfOccupied();
self.eflags_inst = inst;
const opt_ty = ptr_ty.childType();
var pl_buf: Type.Payload.ElemType = undefined;
const pl_ty = opt_ty.optionalChild(&pl_buf);
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload())
.{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty }
else
.{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool };
const ptr_reg = switch (ptr_mcv) {
.register => |reg| reg,
else => try self.copyToTmpRegister(ptr_ty, ptr_mcv),
};
const ptr_lock = self.register_manager.lockReg(ptr_reg);
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*));
try self.asmMemoryImmediate(.cmp, Memory.sib(
Memory.PtrSize.fromSize(some_abi_size),
.{ .base = ptr_reg, .disp = some_info.off },
), Immediate.u(0));
return .{ .eflags = .e };
}
fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
@@ -5012,29 +5117,11 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
if (self.liveness.isUnused(inst)) {
return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
const operand_ptr = try self.resolveInst(un_op);
const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
else => null,
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(un_op);
const ty = self.air.typeOf(un_op);
break :result try self.isNullPtr(inst, ty, operand);
};
defer if (operand_ptr_lock) |lock| self.register_manager.unlockReg(lock);
const ptr_ty = self.air.typeOf(un_op);
const elem_ty = ptr_ty.childType();
const operand = if (elem_ty.isPtrLikeOptional() and self.reuseOperand(inst, un_op, 0, operand_ptr))
// The MCValue that holds the pointer can be re-used as the value.
operand_ptr
else
try self.allocTempRegOrMem(elem_ty, true);
try self.load(operand, operand_ptr, ptr_ty);
const result = try self.isNull(inst, elem_ty, operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -5043,36 +5130,24 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(un_op);
const ty = self.air.typeOf(un_op);
break :result try self.isNonNull(inst, ty, operand);
break :result switch (try self.isNull(inst, ty, operand)) {
.eflags => |cc| .{ .eflags = cc.negate() },
else => unreachable,
};
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
if (self.liveness.isUnused(inst)) {
return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
const operand_ptr = try self.resolveInst(un_op);
const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
else => null,
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(un_op);
const ty = self.air.typeOf(un_op);
break :result switch (try self.isNullPtr(inst, ty, operand)) {
.eflags => |cc| .{ .eflags = cc.negate() },
else => unreachable,
};
};
defer if (operand_ptr_lock) |lock| self.register_manager.unlockReg(lock);
const ptr_ty = self.air.typeOf(un_op);
const elem_ty = ptr_ty.childType();
const operand = if (elem_ty.isPtrLikeOptional() and self.reuseOperand(inst, un_op, 0, operand_ptr))
// The MCValue that holds the pointer can be re-used as the value.
operand_ptr
else
try self.allocTempRegOrMem(elem_ty, true);
try self.load(operand, operand_ptr, ptr_ty);
const result = try self.isNonNull(inst, ptr_ty.elemType(), operand);
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
@@ -6967,7 +7042,10 @@ fn registerAlias(reg: Register, size_bytes: u32) Register {
/// Truncates the value in the register in place.
/// Clobbers any remaining bits.
fn truncateRegister(self: *Self, ty: Type, reg: Register) !void {
const int_info = ty.intInfo(self.target.*);
const int_info = if (ty.isAbiInt()) ty.intInfo(self.target.*) else std.builtin.Type.Int{
.signedness = .unsigned,
.bits = @intCast(u16, ty.bitSize(self.target.*)),
};
const max_reg_bit_width = Register.rax.bitSize();
switch (int_info.signedness) {
.signed => {


@@ -75,6 +75,10 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.@"and",
.bsf,
.bsr,
.bt,
.btc,
.btr,
.bts,
.call,
.cbw,
.cwde,


@@ -307,7 +307,7 @@ pub const Mnemonic = enum {
// zig fmt: off
// General-purpose
adc, add, @"and",
bsf, bsr,
bsf, bsr, bt, btc, btr, bts,
call, cbw, cdq, cdqe,
cmova, cmovae, cmovb, cmovbe, cmovc, cmove, cmovg, cmovge, cmovl, cmovle, cmovna,
cmovnae, cmovnb, cmovnbe, cmovnc, cmovne, cmovng, cmovnge, cmovnl, cmovnle, cmovno,


@@ -42,6 +42,14 @@ pub const Inst = struct {
bsf,
/// Bit scan reverse
bsr,
/// Bit test
bt,
/// Bit test and complement
btc,
/// Bit test and reset
btr,
/// Bit test and set
bts,
/// Call
call,
/// Convert byte to word
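For reference, these map to the x86 bit-test instructions: `bt` copies the selected bit into CF, `bts` also sets it, `btr` clears it, and `btc` complements it. `airWrapOptional` above uses `bts` to set the "some" flag bit that sits just past the payload of a register-held optional. A rough model of that effect in plain Zig, with made-up values for illustration:

const std = @import("std");

test "bts-style flag set, modeled in plain Zig" {
    // With a 2-byte payload, the "some" flag is bit 16 (pl_abi_size * 8).
    var opt: u32 = 0x0000_1234; // payload in the low 16 bits, flag clear
    const bit_index: u5 = 16;
    opt |= @as(u32, 1) << bit_index; // what `bts opt_reg, 16` does to the register value
    try std.testing.expectEqual(@as(u32, 0x0001_1234), opt);
}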


@@ -89,6 +89,34 @@ pub const table = &[_]Entry{
.{ .bsr, .rm, .r32, .rm32, .none, .none, &.{ 0x0f, 0xbd }, 0, .none },
.{ .bsr, .rm, .r64, .rm64, .none, .none, &.{ 0x0f, 0xbd }, 0, .long },
.{ .bt, .mr, .rm16, .r16, .none, .none, &.{ 0x0f, 0xa3 }, 0, .none },
.{ .bt, .mr, .rm32, .r32, .none, .none, &.{ 0x0f, 0xa3 }, 0, .none },
.{ .bt, .mr, .rm64, .r64, .none, .none, &.{ 0x0f, 0xa3 }, 0, .long },
.{ .bt, .mi, .rm16, .imm8, .none, .none, &.{ 0x0f, 0xba }, 4, .none },
.{ .bt, .mi, .rm32, .imm8, .none, .none, &.{ 0x0f, 0xba }, 4, .none },
.{ .bt, .mi, .rm64, .imm8, .none, .none, &.{ 0x0f, 0xba }, 4, .long },
.{ .btc, .mr, .rm16, .r16, .none, .none, &.{ 0x0f, 0xbb }, 0, .none },
.{ .btc, .mr, .rm32, .r32, .none, .none, &.{ 0x0f, 0xbb }, 0, .none },
.{ .btc, .mr, .rm64, .r64, .none, .none, &.{ 0x0f, 0xbb }, 0, .long },
.{ .btc, .mi, .rm16, .imm8, .none, .none, &.{ 0x0f, 0xba }, 7, .none },
.{ .btc, .mi, .rm32, .imm8, .none, .none, &.{ 0x0f, 0xba }, 7, .none },
.{ .btc, .mi, .rm64, .imm8, .none, .none, &.{ 0x0f, 0xba }, 7, .long },
.{ .btr, .mr, .rm16, .r16, .none, .none, &.{ 0x0f, 0xb3 }, 0, .none },
.{ .btr, .mr, .rm32, .r32, .none, .none, &.{ 0x0f, 0xb3 }, 0, .none },
.{ .btr, .mr, .rm64, .r64, .none, .none, &.{ 0x0f, 0xb3 }, 0, .long },
.{ .btr, .mi, .rm16, .imm8, .none, .none, &.{ 0x0f, 0xba }, 6, .none },
.{ .btr, .mi, .rm32, .imm8, .none, .none, &.{ 0x0f, 0xba }, 6, .none },
.{ .btr, .mi, .rm64, .imm8, .none, .none, &.{ 0x0f, 0xba }, 6, .long },
.{ .bts, .mr, .rm16, .r16, .none, .none, &.{ 0x0f, 0xab }, 0, .none },
.{ .bts, .mr, .rm32, .r32, .none, .none, &.{ 0x0f, 0xab }, 0, .none },
.{ .bts, .mr, .rm64, .r64, .none, .none, &.{ 0x0f, 0xab }, 0, .long },
.{ .bts, .mi, .rm16, .imm8, .none, .none, &.{ 0x0f, 0xba }, 5, .none },
.{ .bts, .mi, .rm32, .imm8, .none, .none, &.{ 0x0f, 0xba }, 5, .none },
.{ .bts, .mi, .rm64, .imm8, .none, .none, &.{ 0x0f, 0xba }, 5, .long },
// This is M encoding according to Intel, but D makes more sense here.
.{ .call, .d, .rel32, .none, .none, .none, &.{ 0xe8 }, 0, .none },
.{ .call, .m, .rm64, .none, .none, .none, &.{ 0xff }, 2, .none },


@@ -608,7 +608,6 @@ pub fn generateSymbol(
const payload_type = typed_value.ty.optionalChild(&opt_buf);
const is_pl = !typed_value.val.isNull();
const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
const offset = abi_size - (math.cast(usize, payload_type.abiSize(target)) orelse return error.Overflow);
if (!payload_type.hasRuntimeBits()) {
try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size);
@@ -639,8 +638,8 @@
return Result.ok;
}
const padding = abi_size - (math.cast(usize, payload_type.abiSize(target)) orelse return error.Overflow) - 1;
const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef);
try code.writer().writeByteNTimes(@boolToInt(is_pl), offset);
switch (try generateSymbol(bin_file, src_loc, .{
.ty = payload_type,
.val = value,
@@ -648,6 +647,8 @@
.ok => {},
.fail => |em| return Result{ .fail = em },
}
try code.writer().writeByte(@boolToInt(is_pl));
try code.writer().writeByteNTimes(0, padding);
return Result.ok;
},


@@ -14,7 +14,6 @@ pub const CustomDraw = DeleagateWithContext(fn (?OnConfirm) void);
test "simple test" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
var c: CustomDraw = undefined;
_ = c;


@@ -3,7 +3,6 @@ const std = @import("std");
const S = packed struct { a: u0 = 0 };
test {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@@ -329,7 +329,6 @@ test "inline call preserves tail call" {
test "inline call doesn't re-evaluate non generic struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const S = struct {
fn foo(f: struct { a: u8, b: u8 }) !void {


@@ -1206,7 +1206,6 @@ fn cast128Float(x: u128) f128 {
test "implicit cast from *[N]T to ?[*]T" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: ?[*]u16 = null;


@@ -451,7 +451,6 @@ test "optional error set is the same size as error set" {
}
test "nested catch" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@@ -130,7 +130,6 @@ test "if peer expressions inferred optional type" {
}
test "if-else expression with runtime condition result location is inferred optional" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@@ -29,7 +29,6 @@ test "optional type" {
}
test "test maybe object and get a pointer to the inner value" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -138,7 +137,6 @@ test "optional pointer to 0 bit type null value at runtime" {
}
test "if var maybe pointer" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@@ -91,7 +91,6 @@ test "address of unwrap optional" {
test "nested optional field in struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S2 = struct {
@@ -109,7 +108,6 @@ test "nested optional field in struct" {
test "equality compare optional with non-optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try test_cmp_optional_non_optional();
@@ -227,7 +225,6 @@ test "assigning to an unwrapped optional field in an inline loop" {
}
test "coerce an anon struct literal to optional struct" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -247,7 +244,6 @@ test "coerce an anon struct literal to optional struct" {
}
test "0-bit child type coerced to optional return ptr result location" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -299,7 +295,6 @@ test "0-bit child type coerced to optional" {
}
test "array of optional unaligned types" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -336,7 +331,6 @@ test "array of optional unaligned types" {
}
test "optional pointer to zero bit optional payload" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -450,7 +444,6 @@ test "Optional slice size is optimized" {
test "peer type resolution in nested if expressions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
const Thing = struct { n: i32 };
var a = false;


@@ -18,7 +18,6 @@ fn testReinterpretBytesAsInteger() !void {
}
test "reinterpret an array over multiple elements, with no well-defined layout" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@@ -1149,7 +1149,6 @@ test "anon init through error unions and optionals" {
}
test "anon init through optional" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1456,7 +1455,6 @@ test "struct has only one reference" {
test "no dependency loop on pointer to optional struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const S = struct {
const A = struct { b: B };
@@ -1509,7 +1507,6 @@ test "no dependency loop on optional field wrapped in generic function" {
}
test "optional field init with tuple" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO


@@ -263,7 +263,6 @@ test "initializing anon struct with mixed comptime-runtime fields" {
test "tuple in tuple passed to generic function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -283,7 +282,6 @@ test "tuple in tuple passed to generic function" {
test "coerce tuple to tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const T = std.meta.Tuple(&.{u8});
@@ -298,7 +296,6 @@ test "coerce tuple to tuple" {
test "tuple type with void field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const T = std.meta.Tuple(&[_]type{void});
const x = T{{}};
@@ -335,7 +332,6 @@ test "zero sized struct in tuple handled correctly" {
test "tuple type with void field and a runtime field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
const T = std.meta.Tuple(&[_]type{ usize, void });
var t: T = .{ 5, {} };


@@ -1227,7 +1227,6 @@ test "union tag is set when initiated as a temporary value at runtime" {
}
test "extern union most-aligned field is smaller" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@@ -341,7 +341,6 @@ test "else continue outer while" {
}
test "try terminating an infinite loop" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO