diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index dd093508b1..97e672b71f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -229,6 +229,14 @@ pub const MCValue = union(enum) { fn isRegister(mcv: MCValue) bool { return switch (mcv) { .register => true, + .register_offset => |reg_off| return reg_off.off == 0, + else => false, + }; + } + + fn isRegisterOffset(mcv: MCValue) bool { + return switch (mcv) { + .register, .register_offset => true, else => false, }; } @@ -1202,6 +1210,28 @@ fn asmRegisterMemory(self: *Self, tag: Mir.Inst.Tag, reg: Register, m: Memory) ! }); } +fn asmRegisterMemoryImmediate( + self: *Self, + tag: Mir.Inst.Tag, + reg: Register, + m: Memory, + imm: Immediate, +) !void { + _ = try self.addInst(.{ + .tag = tag, + .ops = switch (m) { + .sib => .rmi_sib, + .rip => .rmi_rip, + else => unreachable, + }, + .data = .{ .rix = .{ .r = reg, .i = @intCast(u8, imm.unsigned), .payload = switch (m) { + .sib => try self.addExtra(Mir.MemorySib.encode(m)), + .rip => try self.addExtra(Mir.MemoryRip.encode(m)), + else => unreachable, + } } }, + }); +} + fn asmMemoryRegister(self: *Self, tag: Mir.Inst.Tag, m: Memory, reg: Register) !void { _ = try self.addInst(.{ .tag = tag, @@ -1442,7 +1472,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .shl_sat => try self.airShlSat(inst), .slice => try self.airSlice(inst), - .sqrt, .sin, .cos, .tan, @@ -1451,14 +1480,14 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .log, .log2, .log10, - .fabs, .floor, .ceil, .round, .trunc_float, => try self.airUnaryMath(inst), - .neg => try self.airNeg(inst), + .sqrt => try self.airSqrt(inst), + .neg, .fabs => try self.airFloatSign(inst), .add_with_overflow => try self.airAddSubWithOverflow(inst), .sub_with_overflow => try self.airAddSubWithOverflow(inst), @@ -1944,7 +1973,7 @@ fn allocRegOrMemAdvanced(self: *Self, elem_ty: Type, inst: ?Air.Inst.Index, reg_ const ptr_bits = 
self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { - if (self.register_manager.tryAllocReg(inst, try self.regClassForType(elem_ty))) |reg| { + if (self.register_manager.tryAllocReg(inst, regClassForType(elem_ty))) |reg| { return MCValue{ .register = registerAlias(reg, abi_size) }; } } @@ -1954,14 +1983,9 @@ fn allocRegOrMemAdvanced(self: *Self, elem_ty: Type, inst: ?Air.Inst.Index, reg_ return .{ .load_frame = .{ .index = frame_index } }; } -fn regClassForType(self: *Self, ty: Type) !RegisterManager.RegisterBitSet { +fn regClassForType(ty: Type) RegisterManager.RegisterBitSet { return switch (ty.zigTypeTag()) { - .Vector => self.fail("TODO regClassForType for {}", .{ty.fmt(self.bin_file.options.module.?)}), - .Float => switch (ty.floatBits(self.target.*)) { - 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) sse else gp, - 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) sse else gp, - else => gp, - }, + .Float, .Vector => sse, else => gp, }; } @@ -2104,7 +2128,7 @@ pub fn spillRegisters(self: *Self, registers: []const Register) !void { /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocReg(null, try self.regClassForType(ty)); + const reg = try self.register_manager.allocReg(null, regClassForType(ty)); try self.genSetReg(reg, ty, mcv); return reg; } @@ -2119,7 +2143,7 @@ fn copyToRegisterWithInstTracking( ty: Type, mcv: MCValue, ) !MCValue { - const reg: Register = try self.register_manager.allocReg(reg_owner, try self.regClassForType(ty)); + const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty)); try self.genSetReg(reg, ty, mcv); return MCValue{ .register = reg }; } @@ -2152,8 +2176,7 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { if (dst_ty.floatBits(self.target.*) != 32 or src_ty.floatBits(self.target.*) != 64 or !Target.x86.featureSetHas(self.target.cpu.features, .sse2)) return self.fail("TODO implement airFptrunc from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }); const src_mcv = try self.resolveInst(ty_op.operand); @@ -2175,8 +2198,7 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { if (dst_ty.floatBits(self.target.*) != 64 or src_ty.floatBits(self.target.*) != 32 or !Target.x86.featureSetHas(self.target.cpu.features, .sse2)) return self.fail("TODO implement airFpext from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }); const src_mcv = try self.resolveInst(ty_op.operand); @@ -3502,17 +3524,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue { defer self.register_manager.unlockReg(offset_reg_lock); const addr_reg = try self.register_manager.allocReg(null, gp); - switch (slice_mcv) { - .load_frame => |frame_addr| try self.asmRegisterMemory( - 
.mov, - addr_reg.to64(), - Memory.sib(.qword, .{ - .base = .{ .frame = frame_addr.index }, - .disp = frame_addr.off, - }), - ), - else => return self.fail("TODO implement slice_elem_ptr when slice is {}", .{slice_mcv}), - } + try self.genSetReg(addr_reg, Type.usize, slice_mcv); // TODO we could allocate register here, but need to expect addr register and potentially // offset register. try self.genBinOpMir(.add, slice_ptr_field_type, .{ .register = addr_reg }, .{ @@ -4188,7 +4200,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none }); } -fn airNeg(self: *Self, inst: Air.Inst.Index) !void { +fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const ty = self.air.typeOf(un_op); const ty_bits = ty.floatBits(self.target.*); @@ -4231,16 +4243,46 @@ fn airNeg(self: *Self, inst: Air.Inst.Index) !void { const dst_lock = self.register_manager.lockReg(dst_mcv.register); defer if (dst_lock) |lock| self.register_manager.unlockReg(lock); + const tag = self.air.instructions.items(.tag)[inst]; try self.genBinOpMir(switch (ty_bits) { - 32 => .xorps, - 64 => .xorpd, - else => return self.fail("TODO implement airNeg for {}", .{ + // No point using an extra prefix byte for *pd which performs the same operation. 
+ 32, 64 => switch (tag) { + .neg => .xorps, + .fabs => .andnps, + else => unreachable, + }, + else => return self.fail("TODO implement airFloatSign for {}", .{ ty.fmt(self.bin_file.options.module.?), }), }, vec_ty, dst_mcv, sign_mcv); return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); } +fn airSqrt(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const ty = self.air.typeOf(un_op); + + const src_mcv = try self.resolveInst(un_op); + const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv)) + src_mcv + else + try self.copyToRegisterWithInstTracking(inst, ty, src_mcv); + + try self.genBinOpMir(switch (ty.zigTypeTag()) { + .Float => switch (ty.floatBits(self.target.*)) { + 32 => .sqrtss, + 64 => .sqrtsd, + else => return self.fail("TODO implement airSqrt for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + }, + else => return self.fail("TODO implement airSqrt for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + }, ty, dst_mcv, src_mcv); + return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); +} + fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; _ = un_op; @@ -4409,8 +4451,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.air.typeOf(ty_op.operand); const elem_size = elem_ty.abiSize(self.target.*); - const elem_rc = try self.regClassForType(elem_ty); - const ptr_rc = try self.regClassForType(ptr_ty); + const elem_rc = regClassForType(elem_ty); + const ptr_rc = regClassForType(ptr_ty); const ptr_mcv = try self.resolveInst(ty_op.operand); const dst_mcv = if (elem_size <= 8 and elem_rc.supersetOf(ptr_rc) and @@ -4782,10 +4824,21 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { - const ty_op = self.air.instructions.items(.data)[inst].ty_op; - _ = ty_op; - return 
self.fail("TODO implement airFieldParentPtr for {}", .{self.target.cpu.arch}); - //return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; + + const inst_ty = self.air.typeOfIndex(inst); + const parent_ty = inst_ty.childType(); + const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, self.target.*)); + + const src_mcv = try self.resolveInst(extra.field_ptr); + const dst_mcv = if (src_mcv.isRegisterOffset() and + self.reuseOperand(inst, extra.field_ptr, 0, src_mcv)) + src_mcv + else + try self.copyToRegisterWithInstTracking(inst, inst_ty, src_mcv); + const result = dst_mcv.offset(-field_offset); + return self.finishAir(inst, result, .{ extra.field_ptr, .none, .none }); } fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue { @@ -5219,8 +5272,7 @@ fn genMulDivBinOp( .mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2, .div_trunc, .div_floor, .div_exact, .rem, .mod => dst_abi_size != src_abi_size, } or src_abi_size > 8) return self.fail("TODO implement genMulDivBinOp from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }); const ty = if (dst_abi_size <= 8) dst_ty else src_ty; const abi_size = if (dst_abi_size <= 8) dst_abi_size else src_abi_size; @@ -5520,7 +5572,9 @@ fn genBinOp( }, lhs_ty, dst_mcv, src_mcv), .mul => try self.genBinOpMir(switch (lhs_ty.zigTypeTag()) { - else => return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?) 
}), + else => return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) .mulss @@ -5723,9 +5777,13 @@ fn genBinOp( .max => .maxsd, else => unreachable, }, - else => return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?) }), + else => return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), }, lhs_ty, dst_mcv, src_mcv), - else => return self.fail("TODO implement genBinOp for {s} {}", .{ @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?) }), + else => return self.fail("TODO implement genBinOp for {s} {}", .{ + @tagName(tag), lhs_ty.fmt(self.bin_file.options.module.?), + }), }, else => unreachable, @@ -5764,8 +5822,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s .Float => { if (!Target.x86.featureSetHas(self.target.cpu.features, .sse)) return self.fail("TODO genBinOpMir for {s} {} without sse", .{ - @tagName(mir_tag), - ty.fmt(self.bin_file.options.module.?), + @tagName(mir_tag), ty.fmt(self.bin_file.options.module.?), }); return self.asmRegisterRegister(mir_tag, dst_reg.to128(), src_reg.to128()); }, @@ -5863,7 +5920,12 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s .load_got, .load_tlv, => { - const addr_reg = try self.copyToTmpRegister(ty, src_mcv.address()); + var ptr_pl = Type.Payload.ElemType{ + .base = .{ .tag = .single_const_pointer }, + .data = ty, + }; + const ptr_ty = Type.initPayload(&ptr_pl.base); + const addr_reg = try self.copyToTmpRegister(ptr_ty, src_mcv.address()); return self.genBinOpMir(mir_tag, ty, dst_mcv, .{ .indirect = .{ .reg = addr_reg }, }); @@ -7545,10 +7607,11 @@ fn movMirTag(self: *Self, ty: Type) !Mir.Inst.Tag { return switch (ty.zigTypeTag()) { 
else => .mov, .Float => switch (ty.floatBits(self.target.*)) { - 16 => .mov, - 32 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse)) .movss else .mov, - 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) .movsd else .mov, - else => return self.fail("TODO movMirTag for {}", .{ + 16 => unreachable, // needs special handling + 32 => .movss, + 64 => .movsd, + 128 => .movaps, + else => return self.fail("TODO movMirTag from {}", .{ ty.fmt(self.bin_file.options.module.?), }), }, @@ -7657,8 +7720,17 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr }, .register => |src_reg| if (dst_reg.id() != src_reg.id()) try self.asmRegisterRegister( if ((dst_reg.class() == .floating_point) == (src_reg.class() == .floating_point)) - try self.movMirTag(ty) + switch (ty.zigTypeTag()) { + else => .mov, + .Float, .Vector => .movaps, + } else switch (abi_size) { + 2 => return try self.asmRegisterRegisterImmediate( + if (dst_reg.class() == .floating_point) .pinsrw else .pextrw, + registerAlias(dst_reg, abi_size), + registerAlias(src_reg, abi_size), + Immediate.u(0), + ), 4 => .movd, 8 => .movq, else => return self.fail( @@ -7669,18 +7741,12 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr registerAlias(dst_reg, abi_size), registerAlias(src_reg, abi_size), ), - .register_offset, .indirect, .load_frame, .lea_frame => try self.asmRegisterMemory( - switch (src_mcv) { - .register_offset => |reg_off| switch (reg_off.off) { - 0 => return self.genSetReg(dst_reg, ty, .{ .register = reg_off.reg }), - else => .lea, - }, - .indirect, .load_frame => try self.movMirTag(ty), - .lea_frame => .lea, - else => unreachable, - }, - registerAlias(dst_reg, abi_size), - Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (src_mcv) { + .register_offset, + .indirect, + .load_frame, + .lea_frame, + => { + const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (src_mcv) { .register_offset, .indirect => 
|reg_off| .{ .base = .{ .reg = reg_off.reg }, .disp = reg_off.off, @@ -7690,20 +7756,51 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr .disp = frame_addr.off, }, else => unreachable, - }), - ), + }); + if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) + try self.asmRegisterMemoryImmediate( + .pinsrw, + registerAlias(dst_reg, abi_size), + src_mem, + Immediate.u(0), + ) + else + try self.asmRegisterMemory( + switch (src_mcv) { + .register_offset => |reg_off| switch (reg_off.off) { + 0 => return self.genSetReg(dst_reg, ty, .{ .register = reg_off.reg }), + else => .lea, + }, + .indirect, .load_frame => try self.movMirTag(ty), + .lea_frame => .lea, + else => unreachable, + }, + registerAlias(dst_reg, abi_size), + src_mem, + ); + }, .memory, .load_direct, .load_got, .load_tlv => { switch (src_mcv) { - .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| - return self.asmRegisterMemory( - try self.movMirTag(ty), - registerAlias(dst_reg, abi_size), - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ - .base = .{ .reg = .ds }, - .disp = small_addr, - }), - ), - .load_direct => |sym_index| if (try self.movMirTag(ty) == .mov) { + .memory => |addr| if (math.cast(i32, @bitCast(i64, addr))) |small_addr| { + const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ + .base = .{ .reg = .ds }, + .disp = small_addr, + }); + return if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) + self.asmRegisterMemoryImmediate( + .pinsrw, + registerAlias(dst_reg, abi_size), + src_mem, + Immediate.u(0), + ) + else + self.asmRegisterMemory( + try self.movMirTag(ty), + registerAlias(dst_reg, abi_size), + src_mem, + ); + }, + .load_direct => |sym_index| if (!ty.isRuntimeFloat()) { const atom_index = try self.owner.getSymbolIndex(self); _ = try self.addInst(.{ .tag = .mov_linker, @@ -7724,11 +7821,22 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr const addr_lock = 
self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); - try self.asmRegisterMemory( - try self.movMirTag(ty), - registerAlias(dst_reg, abi_size), - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{ .reg = addr_reg } }), - ); + const src_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ + .base = .{ .reg = addr_reg }, + }); + if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) + try self.asmRegisterMemoryImmediate( + .pinsrw, + registerAlias(dst_reg, abi_size), + src_mem, + Immediate.u(0), + ) + else + try self.asmRegisterMemory( + try self.movMirTag(ty), + registerAlias(dst_reg, abi_size), + src_mem, + ); }, .lea_direct, .lea_got => |sym_index| { const atom_index = try self.owner.getSymbolIndex(self); @@ -7821,11 +7929,25 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal }, }, .eflags => |cc| try self.asmSetccMemory(Memory.sib(.byte, .{ .base = base, .disp = disp }), cc), - .register => |reg| try self.asmMemoryRegister( - try self.movMirTag(ty), - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }), - registerAlias(reg, abi_size), - ), + .register => |src_reg| { + const dst_mem = Memory.sib( + Memory.PtrSize.fromSize(abi_size), + .{ .base = base, .disp = disp }, + ); + if (ty.isRuntimeFloat() and ty.floatBits(self.target.*) == 16) + try self.asmMemoryRegisterImmediate( + .pextrw, + dst_mem, + registerAlias(src_reg, abi_size), + Immediate.u(0), + ) + else + try self.asmMemoryRegister( + try self.movMirTag(ty), + dst_mem, + registerAlias(src_reg, abi_size), + ); + }, .register_overflow => |ro| { try self.genSetMem( base, @@ -8028,8 +8150,8 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const src_ty = self.air.typeOf(ty_op.operand); const result = result: { - const dst_rc = try self.regClassForType(dst_ty); - const src_rc = try self.regClassForType(src_ty); + const dst_rc = regClassForType(dst_ty); + const src_rc = 
regClassForType(src_ty); const operand = try self.resolveInst(ty_op.operand); if (dst_rc.supersetOf(src_rc) and self.reuseOperand(inst, ty_op.operand, 0, operand)) break :result operand; @@ -8084,8 +8206,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { .unsigned => src_bits + 1, }, 32), 8) catch unreachable; if (src_size > 8) return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }); const src_mcv = try self.resolveInst(ty_op.operand); @@ -8098,7 +8219,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { if (src_bits < src_size * 8) try self.truncateRegister(src_ty, src_reg); - const dst_reg = try self.register_manager.allocReg(inst, try self.regClassForType(dst_ty)); + const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty)); const dst_mcv = MCValue{ .register = dst_reg }; const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg); defer self.register_manager.unlockReg(dst_lock); @@ -8108,19 +8229,16 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { .cvtsi2ss else return self.fail("TODO implement airIntToFloat from {} to {} without sse", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), 64 => if (Target.x86.featureSetHas(self.target.cpu.features, .sse2)) .cvtsi2sd else return self.fail("TODO implement airIntToFloat from {} to {} without sse2", .{ - src_ty.fmt(self.bin_file.options.module.?), - dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), else => return self.fail("TODO implement airIntToFloat from {} to {}", .{ - src_ty.fmt(self.bin_file.options.module.?), - 
dst_ty.fmt(self.bin_file.options.module.?), + src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?), }), }, dst_reg.to128(), registerAlias(src_reg, src_size)); @@ -9000,7 +9118,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; _ = extra; - return self.fail("TODO implement airAggregateInit for x86_64", .{}); + return self.fail("TODO implement airUnionInit for x86_64", .{}); //return self.finishAir(inst, result, .{ extra.init, .none, .none }); } diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 5cb7f7a2d9..944fe85458 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -268,23 +268,37 @@ pub const Mnemonic = enum { movd, // SSE addss, + andps, + andnps, cmpss, cvtsi2ss, divss, maxss, minss, - movss, + movaps, movss, movups, mulss, + orps, + pextrw, + pinsrw, + sqrtps, + sqrtss, subss, ucomiss, xorps, // SSE2 addsd, + andpd, + andnpd, //cmpsd, cvtsd2ss, cvtsi2sd, cvtss2sd, divsd, maxsd, minsd, + movapd, movq, //movd, movsd, + movupd, mulsd, + orpd, + sqrtpd, + sqrtsd, subsd, ucomisd, xorpd, diff --git a/src/arch/x86_64/Lower.zig b/src/arch/x86_64/Lower.zig index a961100687..4289cfaf2a 100644 --- a/src/arch/x86_64/Lower.zig +++ b/src/arch/x86_64/Lower.zig @@ -94,18 +94,29 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .xor, .addss, + .andnps, + .andps, .cmpss, .cvtsi2ss, .divss, .maxss, .minss, + .movaps, .movss, + .movups, .mulss, + .orps, + .pextrw, + .pinsrw, .roundss, + .sqrtps, + .sqrtss, .subss, .ucomiss, .xorps, .addsd, + .andnpd, + .andpd, .cmpsd, .cvtsd2ss, .cvtsi2sd, @@ -115,7 +126,10 @@ pub fn lowerMir(lower: *Lower, inst: Mir.Inst) Error![]const Instruction { .minsd, .movsd, .mulsd, + .orpd, .roundsd, + .sqrtpd, + .sqrtsd, .subsd, .ucomisd, .xorpd, @@ -188,6 +202,8 @@ fn imm(lower: Lower, ops: 
Mir.Inst.Ops, i: u32) Immediate { .mi_rip_u, .lock_mi_sib_u, .lock_mi_rip_u, + .rmi_sib, + .rmi_rip, .mri_sib, .mri_rip, => Immediate.u(i), @@ -202,6 +218,7 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory { return lower.mir.resolveFrameLoc(switch (ops) { .rm_sib, .rm_sib_cc, + .rmi_sib, .m_sib, .m_sib_cc, .mi_sib_u, @@ -217,6 +234,7 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory { .rm_rip, .rm_rip_cc, + .rmi_rip, .m_rip, .m_rip_cc, .mi_rip_u, @@ -311,6 +329,11 @@ fn mirGeneric(lower: *Lower, inst: Mir.Inst) Error!void { .{ .reg = inst.data.rx.r }, .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, }, + .rmi_sib, .rmi_rip => &.{ + .{ .reg = inst.data.rix.r }, + .{ .mem = lower.mem(inst.ops, inst.data.rix.payload) }, + .{ .imm = lower.imm(inst.ops, inst.data.rix.i) }, + }, .mr_sib, .lock_mr_sib, .mr_rip, .lock_mr_rip => &.{ .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) }, .{ .reg = inst.data.rx.r }, diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index c14338b13d..6b2db1b696 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -168,6 +168,10 @@ pub const Inst = struct { /// Add single precision floating point values addss, + /// Bitwise logical and of packed single precision floating-point values + andps, + /// Bitwise logical and not of packed single precision floating-point values + andnps, /// Compare scalar single-precision floating-point values cmpss, /// Convert doubleword integer to scalar single-precision floating-point value @@ -178,13 +182,27 @@ pub const Inst = struct { maxss, /// Return minimum single-precision floating-point value minss, + /// Move aligned packed single-precision floating-point values + movaps, /// Move scalar single-precision floating-point value movss, + /// Move unaligned packed single-precision floating-point values + movups, /// Multiply scalar single-precision floating-point values mulss, + /// Bitwise logical or of packed single precision floating-point 
values + orps, + /// Extract word + pextrw, + /// Insert word + pinsrw, /// Round scalar single-precision floating-point values roundss, + /// Square root of packed single precision floating-point values + sqrtps, + /// Square root of scalar single precision floating-point value + sqrtss, /// Subtract scalar single-precision floating-point values subss, /// Unordered compare scalar single-precision floating-point values ucomiss, @@ -192,6 +210,10 @@ xorps, /// Add double precision floating point values addsd, + /// Bitwise logical and not of packed double precision floating-point values + andnpd, + /// Bitwise logical and of packed double precision floating-point values + andpd, /// Compare scalar double-precision floating-point values cmpsd, /// Convert scalar double-precision floating-point value to scalar single-precision floating-point value @@ -210,8 +232,14 @@ movsd, /// Multiply scalar double-precision floating-point values mulsd, + /// Bitwise logical or of packed double precision floating-point values + orpd, /// Round scalar double-precision floating-point values roundsd, + /// Square root of packed double precision floating-point values + sqrtpd, + /// Square root of scalar double precision floating-point value + sqrtsd, /// Subtract scalar double-precision floating-point values subsd, /// Unordered compare scalar double-precision floating-point values @@ -326,6 +354,12 @@ pub const Inst = struct { /// Register, memory (RIP) operands with condition code (CC). /// Uses `rx_cc` payload. rm_rip_cc, + /// Register, memory (SIB), immediate (byte) operands. + /// Uses `rix` payload with extra data of type `MemorySib`. + rmi_sib, + /// Register, memory (RIP), immediate (byte) operands. + /// Uses `rix` payload with extra data of type `MemoryRip`. + rmi_rip, /// Single memory (SIB) operand. /// Uses `payload` with extra data of type `MemorySib`.
m_sib, diff --git a/src/arch/x86_64/encodings.zig b/src/arch/x86_64/encodings.zig index ac427c3633..f87a110e99 100644 --- a/src/arch/x86_64/encodings.zig +++ b/src/arch/x86_64/encodings.zig @@ -832,6 +832,10 @@ pub const table = [_]Entry{ // SSE .{ .addss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x58 }, 0, .sse }, + .{ .andnps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x55 }, 0, .sse }, + + .{ .andps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x54 }, 0, .sse }, + .{ .cmpss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0xf3, 0x0f, 0xc2 }, 0, .sse }, .{ .cvtsi2ss, .rm, &.{ .xmm, .rm32 }, &.{ 0xf3, 0x0f, 0x2a }, 0, .sse }, @@ -843,13 +847,24 @@ pub const table = [_]Entry{ .{ .minss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5d }, 0, .sse }, + .{ .movaps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x28 }, 0, .sse }, + .{ .movaps, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x29 }, 0, .sse }, + .{ .movss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .sse }, .{ .movss, .mr, &.{ .xmm_m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .sse }, + .{ .movups, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x10 }, 0, .sse }, + .{ .movups, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x0f, 0x11 }, 0, .sse }, + .{ .mulss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x59 }, 0, .sse }, + .{ .orps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .sse }, + .{ .subss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5c }, 0, .sse }, + .{ .sqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .sse }, + .{ .sqrtss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x51 }, 0, .sse }, + .{ .ucomiss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0x0f, 0x2e }, 0, .sse }, .{ .xorps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x57 }, 0, .sse }, @@ -857,6 +872,10 @@ pub const table = [_]Entry{ // SSE2 .{ .addsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x58 }, 0, .sse2 }, + .{ .andnpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x55 }, 0, .sse2 }, + + .{ .andpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x54 }, 0, .sse2 }, + .{ .cmpsd, .rmi, &.{ .xmm, 
.xmm_m64, .imm8 }, &.{ 0xf2, 0x0f, 0xc2 }, 0, .sse2 }, .{ .cvtsd2ss, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5a }, 0, .sse2 }, @@ -872,6 +891,9 @@ pub const table = [_]Entry{ .{ .minsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5d }, 0, .sse2 }, + .{ .movapd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x28 }, 0, .sse2 }, + .{ .movapd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x29 }, 0, .sse2 }, + .{ .movd, .rm, &.{ .xmm, .rm32 }, &.{ 0x66, 0x0f, 0x6e }, 0, .sse2 }, .{ .movd, .mr, &.{ .rm32, .xmm }, &.{ 0x66, 0x0f, 0x7e }, 0, .sse2 }, @@ -881,8 +903,20 @@ pub const table = [_]Entry{ .{ .movq, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0x7e }, 0, .sse2 }, .{ .movq, .mr, &.{ .xmm_m64, .xmm }, &.{ 0x66, 0x0f, 0xd6 }, 0, .sse2 }, + .{ .movupd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x10 }, 0, .sse2 }, + .{ .movupd, .mr, &.{ .xmm_m128, .xmm }, &.{ 0x66, 0x0f, 0x11 }, 0, .sse2 }, + .{ .mulsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x59 }, 0, .sse2 }, + .{ .orpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x56 }, 0, .sse2 }, + + .{ .pextrw, .rmi, &.{ .r16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .sse2 }, + + .{ .pinsrw, .rmi, &.{ .xmm, .rm16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .sse2 }, + + .{ .sqrtpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x51 }, 0, .sse2 }, + .{ .sqrtsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x51 }, 0, .sse2 }, + .{ .subsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5c }, 0, .sse2 }, .{ .movsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .sse2 }, @@ -893,6 +927,8 @@ pub const table = [_]Entry{ .{ .xorpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x57 }, 0, .sse2 }, // SSE4.1 + .{ .pextrw, .mri, &.{ .rm16, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x15 }, 0, .sse4_1 }, + .{ .roundss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .sse4_1 }, .{ .roundsd, .rmi, &.{ .xmm, .xmm_m64, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0b }, 0, .sse4_1 }, }; diff --git a/src/codegen.zig b/src/codegen.zig index
078feb409d..9d479b90cd 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -341,14 +341,20 @@ pub fn generateSymbol( } return Result.ok; }, - .variable => { - const decl = typed_value.val.castTag(.variable).?.data.owner_decl; - return lowerDeclRef(bin_file, src_loc, typed_value, decl, code, debug_output, reloc_info); - }, - .decl_ref => { - const decl = typed_value.val.castTag(.decl_ref).?.data; - return lowerDeclRef(bin_file, src_loc, typed_value, decl, code, debug_output, reloc_info); - }, + .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( + bin_file, + src_loc, + typed_value, + switch (tag) { + .variable => typed_value.val.castTag(.variable).?.data.owner_decl, + .decl_ref => typed_value.val.castTag(.decl_ref).?.data, + .decl_ref_mut => typed_value.val.castTag(.decl_ref_mut).?.data.decl_index, + else => unreachable, + }, + code, + debug_output, + reloc_info, + ), .slice => { const slice = typed_value.val.castTag(.slice).?.data; @@ -374,66 +380,7 @@ pub fn generateSymbol( return Result.ok; }, - .field_ptr => { - const field_ptr = typed_value.val.castTag(.field_ptr).?.data; - const container_ptr = field_ptr.container_ptr; - - switch (container_ptr.tag()) { - .decl_ref => { - const decl_index = container_ptr.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - const addend = blk: { - switch (decl.ty.zigTypeTag()) { - .Struct => { - const addend = decl.ty.structFieldOffset(field_ptr.field_index, target); - break :blk @intCast(u32, addend); - }, - .Pointer => { - assert(decl.ty.isSlice()); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const addend = switch (field_ptr.field_index) { - 0 => 0, - 1 => decl.ty.slicePtrFieldType(&buf).abiSize(target), - else => unreachable, - }; - break :blk @intCast(u32, addend); - }, - else => return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for pointer type value: '{s}'", - .{@tagName(typed_value.val.tag())}, - ), - }, - } - }; - 
return lowerDeclRef(bin_file, src_loc, typed_value, decl_index, code, debug_output, .{ - .parent_atom_index = reloc_info.parent_atom_index, - .addend = (reloc_info.addend orelse 0) + addend, - }); - }, - .field_ptr => { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty, - .val = container_ptr, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - return Result.ok; - }, - else => return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for pointer type value: '{s}'", - .{@tagName(typed_value.val.tag())}, - ), - }, - } - }, - .elem_ptr => return lowerParentPtr( + .field_ptr, .elem_ptr => return lowerParentPtr( bin_file, src_loc, typed_value, @@ -846,16 +793,12 @@ pub fn generateSymbol( }, else => unreachable, }, - else => |t| { - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for type '{s}'", - .{@tagName(t)}, - ), - }; - }, + else => |tag| return Result{ .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO implement generateSymbol for type '{s}'", + .{@tagName(tag)}, + ) }, } } @@ -871,6 +814,38 @@ fn lowerParentPtr( const target = bin_file.options.target; switch (parent_ptr.tag()) { + .field_ptr => { + const field_ptr = parent_ptr.castTag(.field_ptr).?.data; + return lowerParentPtr( + bin_file, + src_loc, + typed_value, + field_ptr.container_ptr, + code, + debug_output, + reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag()) { + .Pointer => offset: { + assert(field_ptr.container_ty.isSlice()); + var buf: Type.SlicePtrFieldTypeBuffer = undefined; + break :offset switch (field_ptr.field_index) { + 0 => 0, + 1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(target), + else => unreachable, + }; + }, + .Struct, .Union => field_ptr.container_ty.structFieldOffset( + field_ptr.field_index, + target, + ), + else => return Result{ 
.fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO implement lowerParentPtr for field_ptr with a container of type {}", + .{field_ptr.container_ty.fmt(bin_file.options.module.?)}, + ) }, + })), + ); + }, .elem_ptr => { const elem_ptr = parent_ptr.castTag(.elem_ptr).?.data; return lowerParentPtr( @@ -883,28 +858,26 @@ fn lowerParentPtr( reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(target))), ); }, - .decl_ref => { - const decl_index = parent_ptr.castTag(.decl_ref).?.data; - return lowerDeclRef( - bin_file, - src_loc, - typed_value, - decl_index, - code, - debug_output, - reloc_info, - ); - }, - else => |t| { - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement lowerParentPtr for type '{s}'", - .{@tagName(t)}, - ), - }; - }, + .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( + bin_file, + src_loc, + typed_value, + switch (tag) { + .variable => parent_ptr.castTag(.variable).?.data.owner_decl, + .decl_ref => parent_ptr.castTag(.decl_ref).?.data, + .decl_ref_mut => parent_ptr.castTag(.decl_ref_mut).?.data.decl_index, + else => unreachable, + }, + code, + debug_output, + reloc_info, + ), + else => |tag| return Result{ .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO implement lowerParentPtr for type '{s}'", + .{@tagName(tag)}, + ) }, } } @@ -1156,11 +1129,16 @@ pub fn genTypedValue( const target = bin_file.options.target; const ptr_bits = target.cpu.arch.ptrBitWidth(); - if (typed_value.val.castTag(.decl_ref)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data); - } - if (typed_value.val.castTag(.decl_ref_mut)) |payload| { - return genDeclRef(bin_file, src_loc, typed_value, payload.data.decl_index); + if (!typed_value.ty.isSlice()) { + if (typed_value.val.castTag(.variable)) |payload| { + return genDeclRef(bin_file, src_loc, typed_value, payload.data.owner_decl); + } + if (typed_value.val.castTag(.decl_ref)) 
|payload| { + return genDeclRef(bin_file, src_loc, typed_value, payload.data); + } + if (typed_value.val.castTag(.decl_ref_mut)) |payload| { + return genDeclRef(bin_file, src_loc, typed_value, payload.data.decl_index); + } } switch (typed_value.ty.zigTypeTag()) { diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 86fc61c2c9..6fdd309371 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -363,8 +363,6 @@ fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA { } test "take address of parameter" { - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -392,8 +390,6 @@ test "array 2D const double ptr" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO const rect_2d_vertexes = [_][1]f32{ @@ -407,8 +403,6 @@ test "array 2D const double ptr with offset" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; 
const rect_2d_vertexes = [_][2]f32{ @@ -422,8 +416,6 @@ test "array 3D const double ptr with offset" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO const rect_3d_vertexes = [_][2][2]f32{ diff --git a/test/behavior/bugs/13069.zig b/test/behavior/bugs/13069.zig index 1c2526ef2a..41c5906ee6 100644 --- a/test/behavior/bugs/13069.zig +++ b/test/behavior/bugs/13069.zig @@ -6,8 +6,6 @@ test { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO var opt_x: ?[3]f32 = [_]f32{0.0} ** 3; diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 3c1d26f284..039e0a3d17 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -95,9 +95,6 @@ test "comptime_int @intToFloat" { test "@intToFloat" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -636,8 +633,6 @@ test "vector casts" { } test "@floatCast cast down" { - if (builtin.zig_backend == 
.stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -688,7 +683,6 @@ test "peer cast: error set any anyerror" { } test "peer type resolution: error set supersets" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -718,7 +712,6 @@ test "peer type resolution: error set supersets" { } test "peer type resolution: disjoint error sets" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -748,7 +741,6 @@ test "peer type resolution: disjoint error sets" { } test "peer type resolution: error union and error set" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -782,7 +774,6 @@ test "peer type resolution: error union and error set" { } test "peer type resolution: error union after non-error" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index 26b941bcdc..5f0037f6dc 100644 --- a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -940,8 +940,6 @@ test "constant enum initialization with differing sizes" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO try test3_1(test3_foo); try test3_2(test3_bar); diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 3a10a288e3..b12fcc7afa 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -535,8 +535,6 @@ test "static eval list init" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO try expect(static_vec3.data[2] == 1.0); try expect(vec3(0.0, 0.0, 3.0).data[2] == 3.0); @@ -1185,7 +1183,6 @@ test "equality of pointers to comptime const" { } test "storing an array of type in a field" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index 
f713cd035c..b98d782da1 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -96,7 +96,6 @@ test "negative f128 floatToInt at compile-time" { } test "@sqrt" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -904,8 +903,6 @@ test "negation f16" { } test "negation f32" { - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -925,8 +922,6 @@ test "negation f32" { } test "negation f64" { - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1061,8 +1056,6 @@ test "nan negation f16" { } test "nan negation f32" { - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1082,8 +1075,6 @@ test "nan negation f32" { test "nan negation f64" { if 
(builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 71b7b36c21..4ff5e20378 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -338,8 +338,6 @@ test "function call with anon list literal" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -360,8 +358,6 @@ test "function call with anon list literal - 2D" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 0cec2d62aa..b3d82fd255 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -66,7 +66,6 @@ test "ignore lval with underscore (for loop)" { } test "basic for loop" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == 
.stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -306,7 +305,6 @@ test "1-based counter and ptr to array" { test "slice and two counters, one is offset and one is runtime" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const slice: []const u8 = "blah"; @@ -335,7 +333,6 @@ test "slice and two counters, one is offset and one is runtime" { test "two slices, one captured by-ref" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO var buf: [10]u8 = undefined; @@ -355,7 +352,6 @@ test "two slices, one captured by-ref" { test "raw pointer and slice" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO var buf: [10]u8 = undefined; diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index c5168e420b..e7c053e36c 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -59,8 +59,6 @@ test "fn with comptime args" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if 
(builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO try expect(gimmeTheBigOne(1234, 5678) == 5678); try expect(shouldCallSameInstance(34, 12) == 34); @@ -71,8 +69,6 @@ test "anytype params" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO try expect(max_i32(12, 34) == 34); try expect(max_f64(1.2, 3.4) == 3.4); @@ -250,7 +246,6 @@ test "function parameter is generic" { } test "generic function instantiation turns into comptime call" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 01b927b913..f9c9f43927 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -203,8 +203,6 @@ test "float equality" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const x: f64 = 0.012; const y: f64 = x + 1.0; diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index d7b93c56c0..e6a8553e8c 100644 --- 
a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -8,8 +8,6 @@ test "@max" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -56,8 +54,6 @@ test "@min" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index 0bd8388660..cfce97b550 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -206,8 +206,6 @@ test "allowzero pointer and slice" { } test "assign null directly to C pointer and test null equality" { - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -346,8 +344,6 @@ test "pointer sentinel with +inf" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return 
error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig index 6a5ae726ae..d52f7ea727 100644 --- a/test/behavior/ptrcast.zig +++ b/test/behavior/ptrcast.zig @@ -128,7 +128,6 @@ fn testReinterpretOverAlignedExternStructAsExternStruct() !void { test "lower reinterpreted comptime field ptr (with under-aligned fields)" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -152,7 +151,6 @@ test "lower reinterpreted comptime field ptr (with under-aligned fields)" { test "lower reinterpreted comptime field ptr" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/src.zig b/test/behavior/src.zig index e6b84e5d56..89a8e424aa 100644 --- a/test/behavior/src.zig +++ b/test/behavior/src.zig @@ -14,7 +14,6 @@ const builtin = @import("builtin"); const expect = std.testing.expect; test "@src" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend 
== .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index e533e34cc3..0ca7f70de1 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -744,8 +744,6 @@ var g_foo: S0 = S0.init(); test "packed struct with fp fields" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -1333,7 +1331,6 @@ test "under-aligned struct field" { } test "fieldParentPtr of a zero-bit field" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index 5e2d6d28c1..a32a762e04 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -230,8 +230,6 @@ test "switch prong with variable" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO try switchProngWithVarFn(SwitchProngWithVarEnum{ .One = 13 }); try switchProngWithVarFn(SwitchProngWithVarEnum{ .Two = 13.0 }); diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig index 11cc8b2dce..c1e5f40a46 100644 --- a/test/behavior/tuple.zig +++ 
b/test/behavior/tuple.zig @@ -209,7 +209,6 @@ test "initializing anon struct with explicit type" { } test "fieldParentPtr of tuple" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -220,7 +219,6 @@ test "fieldParentPtr of tuple" { } test "fieldParentPtr of anon struct" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/type_info.zig b/test/behavior/type_info.zig index 5a1ab7c2aa..ad6d5ac42f 100644 --- a/test/behavior/type_info.zig +++ b/test/behavior/type_info.zig @@ -159,7 +159,6 @@ fn testArray() !void { test "type info: error set, error union info, anyerror" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO try testErrorSet(); @@ -191,7 +190,6 @@ fn testErrorSet() !void { test "type info: error set single value" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const TestSet = error.One; @@ -205,7 +203,6 @@ test "type info: error set single value" { test "type info: error set merged" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if 
(builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const TestSet = error{ One, Two } || error{Three}; @@ -219,7 +216,6 @@ test "type info: error set merged" { } test "type info: enum info" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 3dd8919935..41842f5bc5 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -14,8 +14,6 @@ test "basic unions with floats" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO var foo = FooWithFloats{ .int = 1 }; try expect(foo.int == 1); @@ -31,8 +29,6 @@ test "init union with runtime value - floats" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO var foo: FooWithFloats = undefined; @@ -220,8 +216,6 @@ test "union with specified enum tag" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return 
error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO try doTest(); comptime try doTest(); @@ -231,8 +225,6 @@ test "packed union generates correctly aligned type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const U = packed union { f1: *const fn () error{TestUnexpectedResult}!void, @@ -448,7 +440,6 @@ const Foo1 = union(enum) { var glbl: Foo1 = undefined; test "global union with single field is correctly initialized" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO @@ -911,8 +902,6 @@ test "anonymous union literal syntax" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { const Number = union { @@ -1065,8 +1054,6 @@ test "containers with single-field enums" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO 
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const S = struct { const A = union(enum) { f1 }; @@ -1525,8 +1512,6 @@ test "reinterpreting enum value inside packed union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64 and - comptime !std.Target.x86.featureSetHasAll(builtin.cpu.features, .{ .sse, .sse2, .sse4_1 })) return error.SkipZigTest; // TODO const U = packed union { tag: enum { a, b }, diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 01c76310d7..2c55af5f85 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -133,7 +133,6 @@ test "vector bit operators" { } test "implicit cast vector to array" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -151,7 +150,6 @@ test "implicit cast vector to array" { } test "array to vector" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -321,7 +319,6 @@ test "load vector elements via comptime index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; 
// TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -343,7 +340,6 @@ test "store vector elements via comptime index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -371,7 +367,6 @@ test "load vector elements via runtime index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -393,7 +388,6 @@ test "store vector elements via runtime index" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void {