mirror of https://github.com/ziglang/zig.git, synced 2025-12-06 06:13:07 +00:00
x86_64: increase passing test coverage on windows
Now that codegen has no references to linker state, this is much easier. Closes #24153
parent ed37a1a33c
commit 1f98c98fff
@@ -127,23 +127,23 @@ fn win_probe_stack_only() void {
         },
         .x86_64 => {
             asm volatile (
-                \\ push %%rcx
-                \\ push %%rax
-                \\ cmp $0x1000,%%rax
-                \\ lea 24(%%rsp),%%rcx
+                \\ pushq %%rcx
+                \\ pushq %%rax
+                \\ cmpq $0x1000,%%rax
+                \\ leaq 24(%%rsp),%%rcx
                 \\ jb 1f
                 \\ 2:
-                \\ sub $0x1000,%%rcx
-                \\ test %%rcx,(%%rcx)
-                \\ sub $0x1000,%%rax
-                \\ cmp $0x1000,%%rax
+                \\ subq $0x1000,%%rcx
+                \\ testq %%rcx,(%%rcx)
+                \\ subq $0x1000,%%rax
+                \\ cmpq $0x1000,%%rax
                 \\ ja 2b
                 \\ 1:
-                \\ sub %%rax,%%rcx
-                \\ test %%rcx,(%%rcx)
-                \\ pop %%rax
-                \\ pop %%rcx
-                \\ ret
+                \\ subq %%rax,%%rcx
+                \\ testq %%rcx,(%%rcx)
+                \\ popq %%rax
+                \\ popq %%rcx
+                \\ retq
             );
         },
         .x86 => {
@@ -179,26 +179,26 @@ fn win_probe_stack_adjust_sp() void {
     switch (arch) {
         .x86_64 => {
             asm volatile (
-                \\ push %%rcx
-                \\ cmp $0x1000,%%rax
-                \\ lea 16(%%rsp),%%rcx
+                \\ pushq %%rcx
+                \\ cmpq $0x1000,%%rax
+                \\ leaq 16(%%rsp),%%rcx
                 \\ jb 1f
                 \\ 2:
-                \\ sub $0x1000,%%rcx
-                \\ test %%rcx,(%%rcx)
-                \\ sub $0x1000,%%rax
-                \\ cmp $0x1000,%%rax
+                \\ subq $0x1000,%%rcx
+                \\ testq %%rcx,(%%rcx)
+                \\ subq $0x1000,%%rax
+                \\ cmpq $0x1000,%%rax
                 \\ ja 2b
                 \\ 1:
-                \\ sub %%rax,%%rcx
-                \\ test %%rcx,(%%rcx)
+                \\ subq %%rax,%%rcx
+                \\ testq %%rcx,(%%rcx)
                 \\
-                \\ lea 8(%%rsp),%%rax
-                \\ mov %%rcx,%%rsp
-                \\ mov -8(%%rax),%%rcx
-                \\ push (%%rax)
-                \\ sub %%rsp,%%rax
-                \\ ret
+                \\ leaq 8(%%rsp),%%rax
+                \\ movq %%rcx,%%rsp
+                \\ movq -8(%%rax),%%rcx
+                \\ pushq (%%rax)
+                \\ subq %%rsp,%%rax
+                \\ retq
             );
         },
        .x86 => {
@@ -220,42 +220,61 @@ pub inline fn floatEpsAt(comptime T: type, x: T) T {
     }
 }
 
-/// Returns the value inf for floating point type T.
-pub inline fn inf(comptime T: type) T {
-    return reconstructFloat(T, floatExponentMax(T) + 1, mantissaOne(T));
+/// Returns the inf value for a floating point `Type`.
+pub inline fn inf(comptime Type: type) Type {
+    const RuntimeType = switch (Type) {
+        else => Type,
+        comptime_float => f128, // any float type will do
+    };
+    return reconstructFloat(RuntimeType, floatExponentMax(RuntimeType) + 1, mantissaOne(RuntimeType));
 }
 
-/// Returns the canonical quiet NaN representation for floating point type T.
-pub inline fn nan(comptime T: type) T {
+/// Returns the canonical quiet NaN representation for a floating point `Type`.
+pub inline fn nan(comptime Type: type) Type {
+    const RuntimeType = switch (Type) {
+        else => Type,
+        comptime_float => f128, // any float type will do
+    };
     return reconstructFloat(
-        T,
-        floatExponentMax(T) + 1,
-        mantissaOne(T) | 1 << (floatFractionalBits(T) - 1),
+        RuntimeType,
+        floatExponentMax(RuntimeType) + 1,
+        mantissaOne(RuntimeType) | 1 << (floatFractionalBits(RuntimeType) - 1),
     );
 }
 
-/// Returns a signalling NaN representation for floating point type T.
+/// Returns a signalling NaN representation for a floating point `Type`.
 ///
 /// TODO: LLVM is known to miscompile on some architectures to quiet NaN -
 /// this is tracked by https://github.com/ziglang/zig/issues/14366
-pub inline fn snan(comptime T: type) T {
+pub inline fn snan(comptime Type: type) Type {
+    const RuntimeType = switch (Type) {
+        else => Type,
+        comptime_float => f128, // any float type will do
+    };
     return reconstructFloat(
-        T,
-        floatExponentMax(T) + 1,
-        mantissaOne(T) | 1 << (floatFractionalBits(T) - 2),
+        RuntimeType,
+        floatExponentMax(RuntimeType) + 1,
+        mantissaOne(RuntimeType) | 1 << (floatFractionalBits(RuntimeType) - 2),
     );
 }
 
-test "float bits" {
-    inline for ([_]type{ f16, f32, f64, f80, f128, c_longdouble }) |T| {
-        // (1 +) for the sign bit, since it is separate from the other bits
-        const size = 1 + floatExponentBits(T) + floatMantissaBits(T);
-        try expect(@bitSizeOf(T) == size);
+fn floatBits(comptime Type: type) !void {
+    // (1 +) for the sign bit, since it is separate from the other bits
+    const size = 1 + floatExponentBits(Type) + floatMantissaBits(Type);
+    try expect(@bitSizeOf(Type) == size);
+    try expect(floatFractionalBits(Type) <= floatMantissaBits(Type));
 
-        // for machine epsilon, assert expmin <= -prec <= expmax
-        try expect(floatExponentMin(T) <= -floatFractionalBits(T));
-        try expect(-floatFractionalBits(T) <= floatExponentMax(T));
-    }
+    // for machine epsilon, assert expmin <= -prec <= expmax
+    try expect(floatExponentMin(Type) <= -floatFractionalBits(Type));
+    try expect(-floatFractionalBits(Type) <= floatExponentMax(Type));
+}
+
+test floatBits {
+    try floatBits(f16);
+    try floatBits(f32);
+    try floatBits(f64);
+    try floatBits(f80);
+    try floatBits(f128);
+    try floatBits(c_longdouble);
 }
 
 test inf {
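Note: the practical effect of the `RuntimeType` indirection above is that `inf`, `nan`, and `snan` now also accept `comptime_float`, computing through `f128` and coercing back at comptime. A minimal sketch of the new surface (illustrative test, not part of the commit):

    const std = @import("std");

    test "inf accepts comptime_float" {
        comptime {
            // Internally computed via f128, per the switch in the diff.
            const pos_inf = std.math.inf(comptime_float);
            std.debug.assert(pos_inf > std.math.floatMax(f128));
        }
    }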
@@ -4,20 +4,47 @@ const expect = std.testing.expect;
 
 /// Returns whether x is negative or negative 0.
 pub fn signbit(x: anytype) bool {
-    const T = @TypeOf(x);
-    const TBits = std.meta.Int(.unsigned, @typeInfo(T).float.bits);
-    return @as(TBits, @bitCast(x)) >> (@bitSizeOf(T) - 1) != 0;
+    return switch (@typeInfo(@TypeOf(x))) {
+        .int, .comptime_int => x,
+        .float => |float| @as(@Type(.{ .int = .{
+            .signedness = .signed,
+            .bits = float.bits,
+        } }), @bitCast(x)),
+        .comptime_float => @as(i128, @bitCast(@as(f128, x))), // any float type will do
+        else => @compileError("std.math.signbit does not support " ++ @typeName(@TypeOf(x))),
+    } < 0;
 }
 
 test signbit {
-    inline for ([_]type{ f16, f32, f64, f80, f128 }) |T| {
-        try expect(!signbit(@as(T, 0.0)));
-        try expect(!signbit(@as(T, 1.0)));
-        try expect(signbit(@as(T, -2.0)));
-        try expect(signbit(@as(T, -0.0)));
-        try expect(!signbit(math.inf(T)));
-        try expect(signbit(-math.inf(T)));
-        try expect(!signbit(math.nan(T)));
-        try expect(signbit(-math.nan(T)));
-    }
+    try testInts(i0);
+    try testInts(u0);
+    try testInts(i1);
+    try testInts(u1);
+    try testInts(i2);
+    try testInts(u2);
+
+    try testFloats(f16);
+    try testFloats(f32);
+    try testFloats(f64);
+    try testFloats(f80);
+    try testFloats(f128);
+    try testFloats(c_longdouble);
+    try testFloats(comptime_float);
+}
+
+fn testInts(comptime Type: type) !void {
+    try expect((std.math.minInt(Type) < 0) == signbit(@as(Type, std.math.minInt(Type))));
+    try expect(!signbit(@as(Type, 0)));
+    try expect(!signbit(@as(Type, std.math.maxInt(Type))));
+}
+
+fn testFloats(comptime Type: type) !void {
+    try expect(!signbit(@as(Type, 0.0)));
+    try expect(!signbit(@as(Type, 1.0)));
+    try expect(signbit(@as(Type, -2.0)));
+    try expect(signbit(@as(Type, -0.0)));
+    try expect(!signbit(math.inf(Type)));
+    try expect(signbit(-math.inf(Type)));
+    try expect(!signbit(math.nan(Type)));
+    try expect(signbit(-math.nan(Type)));
 }
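Note: with the switch above, `signbit` is no longer float-only — integers report their sign directly, and `comptime_float` goes through an `f128` bitcast. Hypothetical usage (a sketch mirroring the new tests):

    const std = @import("std");

    test "signbit on integers and comptime_float" {
        try std.testing.expect(std.math.signbit(@as(i8, -1)));
        try std.testing.expect(!std.math.signbit(@as(u8, 1)));
        try std.testing.expect(std.math.signbit(-0.0)); // comptime_float keeps its sign bit
    }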
@@ -7571,7 +7571,10 @@ const lfs64_abi = native_os == .linux and builtin.link_libc and (builtin.abi.isG
 /// If this happens the fix is to add the error code to the corresponding
 /// switch expression, possibly introduce a new error in the error set, and
 /// send a patch to Zig.
-pub const unexpected_error_tracing = builtin.zig_backend == .stage2_llvm and builtin.mode == .Debug;
+pub const unexpected_error_tracing = builtin.mode == .Debug and switch (builtin.zig_backend) {
+    .stage2_llvm, .stage2_x86_64 => true,
+    else => false,
+};
 
 pub const UnexpectedError = error{
     /// The Operating System returned an undocumented error code.
@@ -485,6 +485,9 @@ fn _start() callconv(.naked) noreturn {
 }
 
 fn WinStartup() callconv(.withStackAlign(.c, 1)) noreturn {
+    // Switch from the x87 fpu state set by windows to the state expected by the gnu abi.
+    if (builtin.abi == .gnu) asm volatile ("fninit");
+
     if (!builtin.single_threaded and !builtin.link_libc) {
         _ = @import("os/windows/tls.zig");
     }
@@ -495,6 +498,9 @@ fn WinStartup() callconv(.withStackAlign(.c, 1)) noreturn {
 }
 
 fn wWinMainCRTStartup() callconv(.withStackAlign(.c, 1)) noreturn {
+    // Switch from the x87 fpu state set by windows to the state expected by the gnu abi.
+    if (builtin.abi == .gnu) asm volatile ("fninit");
+
     if (!builtin.single_threaded and !builtin.link_libc) {
         _ = @import("os/windows/tls.zig");
    }
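Note on the `fninit` above: Windows initializes threads with the x87 control word set to 53-bit precision (0x027F), while the gnu ABI assumes the processor default of 64-bit precision (0x037F); `fninit` restores that default. A sketch for inspecting the control word (hypothetical x86-only helper, not in the commit):

    fn readX87ControlWord() u16 {
        var cw: u16 = undefined;
        asm volatile ("fnstcw %[cw]"
            : [cw] "=m" (cw),
        );
        return cw; // expected to read 0x037F after fninit
    }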
@@ -122,8 +122,10 @@ pub const Feature = enum {
 
     /// Legalize (shift lhs, (splat rhs)) -> (shift lhs, rhs)
     unsplat_shift_rhs,
-    /// Legalize reduce of a one element vector to a bitcast
+    /// Legalize reduce of a one element vector to a bitcast.
     reduce_one_elem_to_bitcast,
+    /// Legalize splat to a one element vector to a bitcast.
+    splat_one_elem_to_bitcast,
 
     /// Replace `intcast_safe` with an explicit safety check which `call`s the panic function on failure.
     /// Not compatible with `scalarize_intcast_safe`.
@@ -628,7 +630,17 @@ fn legalizeBody(l: *Legalize, body_start: usize, body_len: usize) Error!void {
                     else => {},
                 }
             },
-            .splat => {},
+            .splat => if (l.features.has(.splat_one_elem_to_bitcast)) {
+                const ty_op = l.air_instructions.items(.data)[@intFromEnum(inst)].ty_op;
+                switch (ty_op.ty.toType().vectorLen(zcu)) {
+                    0 => unreachable,
+                    1 => continue :inst l.replaceInst(inst, .bitcast, .{ .ty_op = .{
+                        .ty = ty_op.ty,
+                        .operand = ty_op.operand,
+                    } }),
+                    else => {},
+                }
+            },
             .shuffle_one => if (l.features.has(.scalarize_shuffle_one)) continue :inst try l.scalarize(inst, .shuffle_one),
             .shuffle_two => if (l.features.has(.scalarize_shuffle_two)) continue :inst try l.scalarize(inst, .shuffle_two),
             .select => if (l.features.has(.scalarize_select)) continue :inst try l.scalarize(inst, .select),
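Note: the new legalization relies on a `@Vector(1, T)` having the same representation as `T`, so a splat producing a one-element vector is just a bitcast. At the language level (a sketch, not compiler code):

    fn splatOne(x: u32) @Vector(1, u32) {
        // For len == 1 this is equivalent to `@as(@Vector(1, u32), @splat(x))`.
        return @bitCast(x);
    }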
@@ -5962,10 +5962,14 @@ fn airBr(func: *Func, inst: Air.Inst.Index) !void {
         if (first_br) break :result src_mcv;
 
         try func.getValue(block_tracking.short, br.block_inst);
-        // .long = .none to avoid merging operand and block result stack frames.
-        const current_tracking: InstTracking = .{ .long = .none, .short = src_mcv };
-        try current_tracking.materializeUnsafe(func, br.block_inst, block_tracking.*);
-        for (current_tracking.getRegs()) |src_reg| func.register_manager.freeReg(src_reg);
+        try InstTracking.materializeUnsafe(
+            // .long = .none to avoid merging operand and block result stack frames.
+            .{ .long = .none, .short = src_mcv },
+            func,
+            br.block_inst,
+            block_tracking.*,
+        );
+        try func.freeValue(src_mcv);
         break :result block_tracking.short;
     }
@@ -8192,8 +8196,11 @@ fn genTypedValue(func: *Func, val: Value) InnerError!MCValue {
     const lf = func.bin_file;
     const src_loc = func.src_loc;
 
-    const result = if (val.isUndef(pt.zcu))
-        try lf.lowerUav(pt, val.toIntern(), .none, src_loc)
+    const result: codegen.GenResult = if (val.isUndef(pt.zcu))
+        switch (try lf.lowerUav(pt, val.toIntern(), .none, src_loc)) {
+            .sym_index => |sym_index| .{ .mcv = .{ .load_symbol = sym_index } },
+            .fail => |em| .{ .fail = em },
+        }
     else
         try codegen.genTypedValue(lf, pt, src_loc, val, func.target);
     const mcv: MCValue = switch (result) {
File diff suppressed because it is too large
@@ -107,7 +107,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                     nav,
                     emit.lower.target,
                 )) {
-                    .mcv => |mcv| mcv.lea_symbol,
+                    .sym_index => |sym_index| sym_index,
                     .fail => |em| {
                         assert(emit.lower.err_msg == null);
                         emit.lower.err_msg = em;
@@ -151,7 +151,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                     Type.fromInterned(uav.orig_ty).ptrAlignment(emit.pt.zcu),
                     emit.lower.src_loc,
                 )) {
-                    .mcv => |mcv| mcv.load_symbol,
+                    .sym_index => |sym_index| sym_index,
                     .fail => |em| {
                         assert(emit.lower.err_msg == null);
                         emit.lower.err_msg = em;
@@ -186,7 +186,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                 else if (emit.bin_file.cast(.macho)) |macho_file|
                     try macho_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, null)
                 else if (emit.bin_file.cast(.coff)) |coff_file|
-                    link.File.Coff.global_symbol_bit | try coff_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, null)
+                    try coff_file.getGlobalSymbol(extern_func.toSlice(&emit.lower.mir).?, "compiler_rt")
                 else
                     return emit.fail("external symbols unimplemented for {s}", .{@tagName(emit.bin_file.tag)}),
                 .is_extern = true,
@@ -548,10 +548,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                     => return emit.fail("unable to codegen: {s}", .{@errorName(err)}),
                     else => |e| return e,
                 }) {
-                    .mcv => |mcv| switch (mcv) {
-                        else => unreachable,
-                        .load_direct, .load_symbol => |sym_index| sym_index,
-                    },
+                    .sym_index => |sym_index| sym_index,
                     .fail => |em| {
                         assert(emit.lower.err_msg == null);
                         emit.lower.err_msg = em;
@@ -564,10 +561,7 @@ pub fn emitMir(emit: *Emit) Error!void {
                     Type.fromInterned(uav.orig_ty).ptrAlignment(emit.pt.zcu),
                     emit.lower.src_loc,
                 )) {
-                    .mcv => |mcv| switch (mcv) {
-                        else => unreachable,
-                        .load_direct, .load_symbol => |sym_index| sym_index,
-                    },
+                    .sym_index => |sym_index| sym_index,
                     .fail => |em| {
                         assert(emit.lower.err_msg == null);
                         emit.lower.err_msg = em;
@@ -598,7 +598,7 @@ pub const Op = enum {
             .rax => .rax,
             .cl => .cl,
             .dx => .dx,
-            else => switch (reg.bitSize()) {
+            else => switch (reg.size().bitSize(target)) {
                 8 => .r8,
                 16 => .r16,
                 32 => .r32,
@@ -615,7 +615,7 @@ pub const Op = enum {
             .mmx => .mm,
             .sse => switch (reg) {
                 .xmm0 => .xmm0,
-                else => switch (reg.bitSize()) {
+                else => switch (reg.size().bitSize(target)) {
                     128 => .xmm,
                     256 => .ymm,
                     else => unreachable,
@@ -12,6 +12,8 @@ extra: []const u32,
 string_bytes: []const u8,
 locals: []const Local,
 table: []const Inst.Index,
+/// Optional data which, when present, can be used to accelerate encoding speed.
+memoized_encodings: []const u0 = &.{},
 frame_locs: std.MultiArrayList(FrameLoc).Slice,
 
 pub const Inst = struct {
@@ -1963,6 +1965,7 @@ pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
     gpa.free(mir.string_bytes);
     gpa.free(mir.locals);
     gpa.free(mir.table);
+    gpa.free(mir.memoized_encodings);
     mir.frame_locs.deinit(gpa);
     mir.* = undefined;
 }
@@ -1,20 +1,86 @@
 pub const Class = enum {
+    /// INTEGER: This class consists of integral types that fit into one of the general
+    /// purpose registers.
     integer,
+    /// SSE: The class consists of types that fit into a vector register.
     sse,
+    /// SSEUP: The class consists of types that fit into a vector register and can be passed
+    /// and returned in the upper bytes of it.
     sseup,
+    /// X87, X87UP: These classes consist of types that will be returned via the
+    /// x87 FPU.
     x87,
+    /// The 15-bit exponent, 1-bit sign, and 6 bytes of padding of an `f80`.
     x87up,
-    complex_x87,
-    memory,
+    /// NO_CLASS: This class is used as initializer in the algorithms. It will be used for
+    /// padding and empty structures and unions.
     none,
+    /// MEMORY: This class consists of types that will be passed and returned in mem-
+    /// ory via the stack.
+    memory,
+    /// Win64 passes 128-bit integers as `Class.memory` but returns them as `Class.sse`.
     win_i128,
+    /// A `Class.sse` containing one `f32`.
     float,
+    /// A `Class.sse` containing two `f32`s.
     float_combine,
+    /// Clang passes each vector element in a separate `Class.integer`, but returns as `Class.memory`.
     integer_per_element,
 
-    fn isX87(class: Class) bool {
+    pub const one_integer: [8]Class = .{
+        .integer, .none, .none, .none,
+        .none, .none, .none, .none,
+    };
+    pub const two_integers: [8]Class = .{
+        .integer, .integer, .none, .none,
+        .none, .none, .none, .none,
+    };
+    pub const three_integers: [8]Class = .{
+        .integer, .integer, .integer, .none,
+        .none, .none, .none, .none,
+    };
+    pub const four_integers: [8]Class = .{
+        .integer, .integer, .integer, .integer,
+        .none, .none, .none, .none,
+    };
+    pub const len_integers: [8]Class = .{
+        .integer_per_element, .none, .none, .none,
+        .none, .none, .none, .none,
+    };
+
+    pub const @"f16" = @"f64";
+    pub const @"f32": [8]Class = .{
+        .float, .none, .none, .none,
+        .none, .none, .none, .none,
+    };
+    pub const @"f64": [8]Class = .{
+        .sse, .none, .none, .none,
+        .none, .none, .none, .none,
+    };
+    pub const @"f80": [8]Class = .{
+        .x87, .x87up, .none, .none,
+        .none, .none, .none, .none,
+    };
+    pub const @"f128": [8]Class = .{
+        .sse, .sseup, .none, .none,
+        .none, .none, .none, .none,
+    };
+
+    /// COMPLEX_X87: This class consists of types that will be returned via the x87
+    /// FPU.
+    pub const complex_x87: [8]Class = .{
+        .x87, .x87up, .x87, .x87up,
+        .none, .none, .none, .none,
+    };
+
+    pub const stack: [8]Class = .{
+        .memory, .none, .none, .none,
+        .none, .none, .none, .none,
+    };
+
+    pub fn isX87(class: Class) bool {
         return switch (class) {
-            .x87, .x87up, .complex_x87 => true,
+            .x87, .x87up => true,
             else => false,
         };
     }
@@ -44,7 +110,7 @@ pub const Class = enum {
     }
 };
 
-pub fn classifyWindows(ty: Type, zcu: *Zcu) Class {
+pub fn classifyWindows(ty: Type, zcu: *Zcu, target: *const std.Target) Class {
     // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
     // "There's a strict one-to-one correspondence between a function call's arguments
     // and the registers used for those arguments. Any argument that doesn't fit in 8
@@ -53,7 +119,7 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu, target: *const std.Target) Class {
     // "All floating point operations are done using the 16 XMM registers."
     // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
     // as if they were integers of the same size."
-    switch (ty.zigTypeTag(zcu)) {
+    return switch (ty.zigTypeTag(zcu)) {
         .pointer,
         .int,
         .bool,
@@ -70,19 +136,23 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu, target: *const std.Target) Class {
         .frame,
         => switch (ty.abiSize(zcu)) {
             0 => unreachable,
-            1, 2, 4, 8 => return .integer,
+            1, 2, 4, 8 => .integer,
             else => switch (ty.zigTypeTag(zcu)) {
-                .int => return .win_i128,
-                .@"struct", .@"union" => if (ty.containerLayout(zcu) == .@"packed") {
-                    return .win_i128;
-                } else {
-                    return .memory;
-                },
-                else => return .memory,
+                .int => .win_i128,
+                .@"struct", .@"union" => if (ty.containerLayout(zcu) == .@"packed")
+                    .win_i128
+                else
+                    .memory,
+                else => .memory,
             },
         },
 
-        .float, .vector => return .sse,
+        .float => switch (ty.floatBits(target)) {
+            16, 32, 64, 128 => .sse,
+            80 => .memory,
+            else => unreachable,
+        },
+        .vector => .sse,
 
         .type,
         .comptime_float,
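Note on the new `.float` arm: the Microsoft x64 convention passes floats in XMM registers, which works for the 16/32/64/128-bit widths, but `f80` (x87 extended precision, a type MSVC does not have) cannot ride in one, hence the `.memory` classification — which is also why `classifyWindows` now needs `target` to query float widths. An illustrative width check (plain Zig, no compiler internals assumed):

    test "float widths behind the Win64 classification" {
        const expect = @import("std").testing.expect;
        try expect(@bitSizeOf(f16) == 16 and @bitSizeOf(f128) == 128); // XMM-sized -> .sse
        try expect(@bitSizeOf(f80) == 80); // no XMM class -> .memory
    }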
@@ -93,171 +163,109 @@ pub fn classifyWindows(ty: Type, zcu: *Zcu, target: *const std.Target) Class {
         .@"opaque",
         .enum_literal,
         => unreachable,
-    }
+    };
 }
 
-pub const Context = enum { ret, arg, field, other };
+pub const Context = enum { ret, arg, other };
 
 /// There are a maximum of 8 possible return slots. Returned values are in
 /// the beginning of the array; unused slots are filled with .none.
 pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Context) [8]Class {
-    const memory_class = [_]Class{
-        .memory, .none, .none, .none,
-        .none, .none, .none, .none,
-    };
-    var result = [1]Class{.none} ** 8;
     switch (ty.zigTypeTag(zcu)) {
         .pointer => switch (ty.ptrSize(zcu)) {
-            .slice => {
-                result[0] = .integer;
-                result[1] = .integer;
-                return result;
-            },
-            else => {
-                result[0] = .integer;
-                return result;
-            },
+            .slice => return Class.two_integers,
+            else => return Class.one_integer,
         },
         .int, .@"enum", .error_set => {
             const bits = ty.intInfo(zcu).bits;
-            if (bits <= 64) {
-                result[0] = .integer;
-                return result;
-            }
-            if (bits <= 128) {
-                result[0] = .integer;
-                result[1] = .integer;
-                return result;
-            }
-            if (bits <= 192) {
-                result[0] = .integer;
-                result[1] = .integer;
-                result[2] = .integer;
-                return result;
-            }
-            if (bits <= 256) {
-                result[0] = .integer;
-                result[1] = .integer;
-                result[2] = .integer;
-                result[3] = .integer;
-                return result;
-            }
-            return memory_class;
-        },
-        .bool, .void, .noreturn => {
-            result[0] = .integer;
-            return result;
+            if (bits <= 64 * 1) return Class.one_integer;
+            if (bits <= 64 * 2) return Class.two_integers;
+            if (bits <= 64 * 3) return Class.three_integers;
+            if (bits <= 64 * 4) return Class.four_integers;
+            return Class.stack;
         },
+        .bool, .void, .noreturn => return Class.one_integer,
         .float => switch (ty.floatBits(target)) {
             16 => {
-                if (ctx == .field) {
-                    result[0] = .memory;
-                } else {
-                    // TODO clang doesn't allow __fp16 as .ret or .arg
-                    result[0] = .sse;
-                }
-                return result;
-            },
-            32 => {
-                result[0] = .float;
-                return result;
-            },
-            64 => {
-                result[0] = .sse;
-                return result;
-            },
-            128 => {
-                // "Arguments of types __float128, _Decimal128 and __m128 are
-                // split into two halves. The least significant ones belong
-                // to class SSE, the most significant one to class SSEUP."
-                result[0] = .sse;
-                result[1] = .sseup;
-                return result;
-            },
-            80 => {
-                // "The 64-bit mantissa of arguments of type long double
-                // belongs to classX87, the 16-bit exponent plus 6 bytes
-                // of padding belongs to class X87UP."
-                result[0] = .x87;
-                result[1] = .x87up;
-                return result;
+                if (ctx == .other) return Class.stack;
+                // TODO clang doesn't allow __fp16 as .ret or .arg
+                return Class.f16;
             },
+            32 => return Class.f32,
+            64 => return Class.f64,
+            // "Arguments of types __float128, _Decimal128 and __m128 are
+            // split into two halves. The least significant ones belong
+            // to class SSE, the most significant one to class SSEUP."
+            128 => return Class.f128,
+            // "The 64-bit mantissa of arguments of type long double
+            // belongs to class X87, the 16-bit exponent plus 6 bytes
+            // of padding belongs to class X87UP."
+            80 => return Class.f80,
             else => unreachable,
         },
         .vector => {
             const elem_ty = ty.childType(zcu);
             const bits = elem_ty.bitSize(zcu) * ty.arrayLen(zcu);
             if (elem_ty.toIntern() == .bool_type) {
-                if (bits <= 32) return .{
-                    .integer, .none, .none, .none,
-                    .none, .none, .none, .none,
-                };
-                if (bits <= 64) return .{
-                    .sse, .none, .none, .none,
-                    .none, .none, .none, .none,
-                };
-                if (ctx == .arg) {
-                    if (bits <= 128) return .{
-                        .integer_per_element, .none, .none, .none,
-                        .none, .none, .none, .none,
-                    };
-                    if (bits <= 256 and target.cpu.has(.x86, .avx)) return .{
-                        .integer_per_element, .none, .none, .none,
-                        .none, .none, .none, .none,
-                    };
-                    if (bits <= 512 and target.cpu.has(.x86, .avx512f)) return .{
-                        .integer_per_element, .none, .none, .none,
-                        .none, .none, .none, .none,
-                    };
-                }
-                return memory_class;
+                if (bits <= 32) return Class.one_integer;
+                if (bits <= 64) return Class.f64;
+                if (ctx == .other) return Class.stack;
+                if (bits <= 128) return Class.len_integers;
+                if (bits <= 256 and target.cpu.has(.x86, .avx)) return Class.len_integers;
+                if (bits <= 512 and target.cpu.has(.x86, .avx512f)) return Class.len_integers;
+                return Class.stack;
             }
-            if (bits <= 64) return .{
+            if (elem_ty.isRuntimeFloat() and elem_ty.floatBits(target) == 80) {
+                if (bits <= 80 * 1) return Class.f80;
+                if (bits <= 80 * 2) return Class.complex_x87;
+                return Class.stack;
+            }
+            if (bits <= 64 * 1) return .{
                 .sse, .none, .none, .none,
                 .none, .none, .none, .none,
             };
-            if (bits <= 128) return .{
+            if (bits <= 64 * 2) return .{
                 .sse, .sseup, .none, .none,
                 .none, .none, .none, .none,
             };
-            if (ctx == .arg and !target.cpu.has(.x86, .avx)) return memory_class;
-            if (bits <= 192) return .{
+            if (ctx == .arg and !target.cpu.has(.x86, .avx)) return Class.stack;
+            if (bits <= 64 * 3) return .{
                 .sse, .sseup, .sseup, .none,
                 .none, .none, .none, .none,
             };
-            if (bits <= 256) return .{
+            if (bits <= 64 * 4) return .{
                 .sse, .sseup, .sseup, .sseup,
                 .none, .none, .none, .none,
             };
-            if (ctx == .arg and !target.cpu.has(.x86, .avx512f)) return memory_class;
-            if (bits <= 320) return .{
+            if (ctx == .arg and !target.cpu.has(.x86, .avx512f)) return Class.stack;
+            if (bits <= 64 * 5) return .{
                 .sse, .sseup, .sseup, .sseup,
                 .sseup, .none, .none, .none,
             };
-            if (bits <= 384) return .{
+            if (bits <= 64 * 6) return .{
                 .sse, .sseup, .sseup, .sseup,
                 .sseup, .sseup, .none, .none,
             };
-            if (bits <= 448) return .{
+            if (bits <= 64 * 7) return .{
                 .sse, .sseup, .sseup, .sseup,
                 .sseup, .sseup, .sseup, .none,
             };
-            if (bits <= 512 or (ctx == .ret and bits <= @as(u64, if (target.cpu.has(.x86, .avx512f))
-                2048
+            if (bits <= 64 * 8 or (ctx == .ret and bits <= @as(u64, if (target.cpu.has(.x86, .avx512f))
+                64 * 32
             else if (target.cpu.has(.x86, .avx))
-                1024
+                64 * 16
             else
-                512))) return .{
+                64 * 8))) return .{
                 .sse, .sseup, .sseup, .sseup,
                 .sseup, .sseup, .sseup, .sseup,
             };
-            return memory_class;
+            return Class.stack;
         },
         .optional => {
             if (ty.optionalReprIsPayload(zcu)) {
                 return classifySystemV(ty.optionalChild(zcu), zcu, target, ctx);
             }
-            return memory_class;
+            return Class.stack;
         },
         .@"struct", .@"union" => {
             // "If the size of an object is larger than eight eightbytes, or
@@ -269,15 +277,14 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Cont
                 .auto => unreachable,
                 .@"extern" => {},
                 .@"packed" => {
-                    assert(ty_size <= 16);
-                    result[0] = .integer;
-                    if (ty_size > 8) result[1] = .integer;
-                    return result;
+                    if (ty_size <= 8) return Class.one_integer;
+                    if (ty_size <= 16) return Class.two_integers;
+                    unreachable; // frontend should not have allowed this type as extern
                 },
             }
-            if (ty_size > 64)
-                return memory_class;
+            if (ty_size > 64) return Class.stack;
 
+            var result: [8]Class = @splat(.none);
             _ = if (zcu.typeToStruct(ty)) |loaded_struct|
                 classifySystemVStruct(&result, 0, loaded_struct, zcu, target)
             else if (zcu.typeToUnion(ty)) |loaded_union|
@@ -290,15 +297,15 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Cont
             // "If one of the classes is MEMORY, the whole argument is passed in memory"
             // "If X87UP is not preceded by X87, the whole argument is passed in memory."
             for (result, 0..) |class, i| switch (class) {
-                .memory => return memory_class,
-                .x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
+                .memory => return Class.stack,
+                .x87up => if (i == 0 or result[i - 1] != .x87) return Class.stack,
                 else => continue,
             };
             // "If the size of the aggregate exceeds two eightbytes and the first eight-
             // byte isn’t SSE or any other eightbyte isn’t SSEUP, the whole argument
             // is passed in memory."
             if (ty_size > 16 and (result[0] != .sse or
-                std.mem.indexOfNone(Class, result[1..], &.{ .sseup, .none }) != null)) return memory_class;
+                std.mem.indexOfNone(Class, result[1..], &.{ .sseup, .none }) != null)) return Class.stack;
 
             // "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
             for (&result, 0..) |*item, i| {
@@ -311,16 +318,9 @@ pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Cont
         },
         .array => {
             const ty_size = ty.abiSize(zcu);
-            if (ty_size <= 8) {
-                result[0] = .integer;
-                return result;
-            }
-            if (ty_size <= 16) {
-                result[0] = .integer;
-                result[1] = .integer;
-                return result;
-            }
-            return memory_class;
+            if (ty_size <= 8) return Class.one_integer;
+            if (ty_size <= 16) return Class.two_integers;
+            return Class.stack;
         },
         else => unreachable,
     }
@@ -363,7 +363,7 @@ fn classifySystemVStruct(
             .@"packed" => {},
         }
     }
-    const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
+    const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .other), .none);
     for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
         result_class.* = result_class.combineSystemV(field_class);
     byte_offset += field_ty.abiSize(zcu);
@@ -406,7 +406,7 @@ fn classifySystemVUnion(
             .@"packed" => {},
         }
     }
-    const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .field), .none);
+    const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .other), .none);
     for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
         result_class.* = result_class.combineSystemV(field_class);
 }
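Note: the named `[8]Class` constants introduced above encode common classification results — e.g. a Zig slice (pointer + length) is two INTEGER eightbytes (`Class.two_integers`), and an `f80` is `.{ .x87, .x87up, ... }` (`Class.f80`). A standalone mimic of the idea (field names copied from the diff; not the compiler's actual module):

    const std = @import("std");

    const Class = enum {
        integer, sse, sseup, x87, x87up, none, memory, win_i128,
        float, float_combine, integer_per_element,

        pub const two_integers: [8]Class = .{
            .integer, .integer, .none, .none,
            .none, .none, .none, .none,
        };
    };

    test "a slice classifies as two INTEGER eightbytes" {
        try std.testing.expectEqual(Class.integer, Class.two_integers[0]);
        try std.testing.expectEqual(Class.integer, Class.two_integers[1]);
        try std.testing.expectEqual(Class.none, Class.two_integers[2]);
    }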
@@ -465,25 +465,25 @@ pub const Register = enum(u8) {
         return @intCast(@intFromEnum(reg) - base);
     }
 
-    pub fn bitSize(reg: Register) u10 {
+    pub fn size(reg: Register) Memory.Size {
        return switch (@intFromEnum(reg)) {
            // zig fmt: off
-            @intFromEnum(Register.rax) ... @intFromEnum(Register.r15) => 64,
-            @intFromEnum(Register.eax) ... @intFromEnum(Register.r15d) => 32,
-            @intFromEnum(Register.ax) ... @intFromEnum(Register.r15w) => 16,
-            @intFromEnum(Register.al) ... @intFromEnum(Register.r15b) => 8,
-            @intFromEnum(Register.ah) ... @intFromEnum(Register.bh) => 8,
+            @intFromEnum(Register.rax) ... @intFromEnum(Register.r15) => .qword,
+            @intFromEnum(Register.eax) ... @intFromEnum(Register.r15d) => .dword,
+            @intFromEnum(Register.ax) ... @intFromEnum(Register.r15w) => .word,
+            @intFromEnum(Register.al) ... @intFromEnum(Register.r15b) => .byte,
+            @intFromEnum(Register.ah) ... @intFromEnum(Register.bh) => .byte,
 
-            @intFromEnum(Register.zmm0) ... @intFromEnum(Register.zmm15) => 512,
-            @intFromEnum(Register.ymm0) ... @intFromEnum(Register.ymm15) => 256,
-            @intFromEnum(Register.xmm0) ... @intFromEnum(Register.xmm15) => 128,
-            @intFromEnum(Register.mm0) ... @intFromEnum(Register.mm7) => 64,
-            @intFromEnum(Register.st0) ... @intFromEnum(Register.st7) => 80,
+            @intFromEnum(Register.zmm0) ... @intFromEnum(Register.zmm15) => .zword,
+            @intFromEnum(Register.ymm0) ... @intFromEnum(Register.ymm15) => .yword,
+            @intFromEnum(Register.xmm0) ... @intFromEnum(Register.xmm15) => .xword,
+            @intFromEnum(Register.mm0) ... @intFromEnum(Register.mm7) => .qword,
+            @intFromEnum(Register.st0) ... @intFromEnum(Register.st7) => .tbyte,
 
-            @intFromEnum(Register.es) ... @intFromEnum(Register.gs) => 16,
+            @intFromEnum(Register.es) ... @intFromEnum(Register.gs) => .word,
 
-            @intFromEnum(Register.cr0) ... @intFromEnum(Register.cr15) => 64,
-            @intFromEnum(Register.dr0) ... @intFromEnum(Register.dr15) => 64,
+            @intFromEnum(Register.cr0) ... @intFromEnum(Register.cr15) => .gpr,
+            @intFromEnum(Register.dr0) ... @intFromEnum(Register.dr15) => .gpr,
 
             else => unreachable,
             // zig fmt: on
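Note: the refactor replaces raw `bitSize` numbers with `Memory.Size` tags, so callers now go through `reg.size().bitSize(target)`. A tiny mimic of the mapping (illustrative only; the real `bitSize` takes a target so that tags like `.gpr` can resolve to the pointer width):

    const std = @import("std");

    const Size = enum {
        byte, word, dword, qword, xword, yword, zword, tbyte,

        fn bitSize(s: Size) u10 {
            return switch (s) {
                .byte => 8, .word => 16, .dword => 32, .qword => 64,
                .xword => 128, .yword => 256, .zword => 512, .tbyte => 80,
            };
        }
    };

    test "size tags round-trip to the old bit counts" {
        try std.testing.expectEqual(@as(u10, 64), Size.qword.bitSize());
        try std.testing.expectEqual(@as(u10, 80), Size.tbyte.bitSize());
    }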
@@ -549,8 +549,8 @@ pub const Register = enum(u8) {
         };
     }
 
-    pub fn toSize(reg: Register, size: Memory.Size, target: *const std.Target) Register {
-        return switch (size) {
+    pub fn toSize(reg: Register, new_size: Memory.Size, target: *const std.Target) Register {
+        return switch (new_size) {
             .none => unreachable,
             .ptr => reg.toBitSize(target.ptrBitWidth()),
             .gpr => switch (target.cpu.arch) {
193 src/codegen.zig
@@ -810,7 +810,7 @@ fn lowerUavRef(
 
     const uav_align = ip.indexToKey(uav.orig_ty).ptr_type.flags.alignment;
     switch (try lf.lowerUav(pt, uav_val, uav_align, src_loc)) {
-        .mcv => {},
+        .sym_index => {},
         .fail => |em| std.debug.panic("TODO rework lowerUav. internal error: {s}", .{em.msg}),
     }
@@ -920,6 +920,90 @@ pub const LinkerLoad = struct {
     sym_index: u32,
 };
 
+pub const SymbolResult = union(enum) { sym_index: u32, fail: *ErrorMsg };
+
+pub fn genNavRef(
+    lf: *link.File,
+    pt: Zcu.PerThread,
+    src_loc: Zcu.LazySrcLoc,
+    nav_index: InternPool.Nav.Index,
+    target: *const std.Target,
+) CodeGenError!SymbolResult {
+    const zcu = pt.zcu;
+    const ip = &zcu.intern_pool;
+    const nav = ip.getNav(nav_index);
+    log.debug("genNavRef({})", .{nav.fqn.fmt(ip)});
+
+    const lib_name, const linkage, const is_threadlocal = if (nav.getExtern(ip)) |e|
+        .{ e.lib_name, e.linkage, e.is_threadlocal and zcu.comp.config.any_non_single_threaded }
+    else
+        .{ .none, .internal, false };
+    if (lf.cast(.elf)) |elf_file| {
+        const zo = elf_file.zigObjectPtr().?;
+        switch (linkage) {
+            .internal => {
+                const sym_index = try zo.getOrCreateMetadataForNav(zcu, nav_index);
+                if (is_threadlocal) zo.symbol(sym_index).flags.is_tls = true;
+                return .{ .sym_index = sym_index };
+            },
+            .strong, .weak => {
+                const sym_index = try elf_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
+                switch (linkage) {
+                    .internal => unreachable,
+                    .strong => {},
+                    .weak => zo.symbol(sym_index).flags.weak = true,
+                    .link_once => unreachable,
+                }
+                if (is_threadlocal) zo.symbol(sym_index).flags.is_tls = true;
+                return .{ .sym_index = sym_index };
+            },
+            .link_once => unreachable,
+        }
+    } else if (lf.cast(.macho)) |macho_file| {
+        const zo = macho_file.getZigObject().?;
+        switch (linkage) {
+            .internal => {
+                const sym_index = try zo.getOrCreateMetadataForNav(macho_file, nav_index);
+                if (is_threadlocal) zo.symbols.items[sym_index].flags.tlv = true;
+                return .{ .sym_index = sym_index };
+            },
+            .strong, .weak => {
+                const sym_index = try macho_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
+                switch (linkage) {
+                    .internal => unreachable,
+                    .strong => {},
+                    .weak => zo.symbols.items[sym_index].flags.weak = true,
+                    .link_once => unreachable,
+                }
+                if (is_threadlocal) zo.symbols.items[sym_index].flags.tlv = true;
+                return .{ .sym_index = sym_index };
+            },
+            .link_once => unreachable,
+        }
+    } else if (lf.cast(.coff)) |coff_file| {
+        // TODO audit this
+        switch (linkage) {
+            .internal => {
+                const atom_index = try coff_file.getOrCreateAtomForNav(nav_index);
+                const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
+                return .{ .sym_index = sym_index };
+            },
+            .strong, .weak => {
+                const global_index = try coff_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
+                try coff_file.need_got_table.put(zcu.gpa, global_index, {}); // needs GOT
+                return .{ .sym_index = global_index };
+            },
+            .link_once => unreachable,
+        }
+    } else if (lf.cast(.plan9)) |p9| {
+        return .{ .sym_index = try p9.seeNav(pt, nav_index) };
+    } else {
+        const msg = try ErrorMsg.create(zcu.gpa, src_loc, "TODO genNavRef for target {}", .{target});
+        return .{ .fail = msg };
+    }
+}
+
+/// deprecated legacy type
 pub const GenResult = union(enum) {
     mcv: MCValue,
     fail: *ErrorMsg,
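Note: the new `SymbolResult` is the crux of the commit message — the linker interface now hands back only a symbol index (or an error), and each backend decides how to turn that into a machine value. A condensed sketch of the consuming pattern (simplified types, names borrowed from the diff):

    const std = @import("std");

    const ErrorMsg = struct { msg: []const u8 };
    const SymbolResult = union(enum) { sym_index: u32, fail: *ErrorMsg };
    const MCValue = union(enum) { load_symbol: u32, lea_symbol: u32 };

    fn toLeaSymbol(res: SymbolResult) error{CodegenFail}!MCValue {
        return switch (res) {
            .sym_index => |sym_index| .{ .lea_symbol = sym_index },
            .fail => error.CodegenFail, // the real callers stash the *ErrorMsg first
        };
    }

    test toLeaSymbol {
        const mcv = try toLeaSymbol(.{ .sym_index = 42 });
        try std.testing.expectEqual(@as(u32, 42), mcv.lea_symbol);
    }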
@@ -951,89 +1035,6 @@ pub const GenResult = union(enum) {
     };
 };
 
-pub fn genNavRef(
-    lf: *link.File,
-    pt: Zcu.PerThread,
-    src_loc: Zcu.LazySrcLoc,
-    nav_index: InternPool.Nav.Index,
-    target: *const std.Target,
-) CodeGenError!GenResult {
-    const zcu = pt.zcu;
-    const ip = &zcu.intern_pool;
-    const nav = ip.getNav(nav_index);
-    log.debug("genNavRef({})", .{nav.fqn.fmt(ip)});
-
-    const lib_name, const linkage, const is_threadlocal = if (nav.getExtern(ip)) |e|
-        .{ e.lib_name, e.linkage, e.is_threadlocal and zcu.comp.config.any_non_single_threaded }
-    else
-        .{ .none, .internal, false };
-    if (lf.cast(.elf)) |elf_file| {
-        const zo = elf_file.zigObjectPtr().?;
-        switch (linkage) {
-            .internal => {
-                const sym_index = try zo.getOrCreateMetadataForNav(zcu, nav_index);
-                if (is_threadlocal) zo.symbol(sym_index).flags.is_tls = true;
-                return .{ .mcv = .{ .lea_symbol = sym_index } };
-            },
-            .strong, .weak => {
-                const sym_index = try elf_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
-                switch (linkage) {
-                    .internal => unreachable,
-                    .strong => {},
-                    .weak => zo.symbol(sym_index).flags.weak = true,
-                    .link_once => unreachable,
-                }
-                if (is_threadlocal) zo.symbol(sym_index).flags.is_tls = true;
-                return .{ .mcv = .{ .lea_symbol = sym_index } };
-            },
-            .link_once => unreachable,
-        }
-    } else if (lf.cast(.macho)) |macho_file| {
-        const zo = macho_file.getZigObject().?;
-        switch (linkage) {
-            .internal => {
-                const sym_index = try zo.getOrCreateMetadataForNav(macho_file, nav_index);
-                if (is_threadlocal) zo.symbols.items[sym_index].flags.tlv = true;
-                return .{ .mcv = .{ .lea_symbol = sym_index } };
-            },
-            .strong, .weak => {
-                const sym_index = try macho_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
-                switch (linkage) {
-                    .internal => unreachable,
-                    .strong => {},
-                    .weak => zo.symbols.items[sym_index].flags.weak = true,
-                    .link_once => unreachable,
-                }
-                if (is_threadlocal) zo.symbols.items[sym_index].flags.tlv = true;
-                return .{ .mcv = .{ .lea_symbol = sym_index } };
-            },
-            .link_once => unreachable,
-        }
-    } else if (lf.cast(.coff)) |coff_file| {
-        // TODO audit this
-        switch (linkage) {
-            .internal => {
-                const atom_index = try coff_file.getOrCreateAtomForNav(nav_index);
-                const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
-                return .{ .mcv = .{ .lea_symbol = sym_index } };
-            },
-            .strong, .weak => {
-                const global_index = try coff_file.getGlobalSymbol(nav.name.toSlice(ip), lib_name.toSlice(ip));
-                try coff_file.need_got_table.put(zcu.gpa, global_index, {}); // needs GOT
-                return .{ .mcv = .{ .lea_symbol = global_index } };
-            },
-            .link_once => unreachable,
-        }
-    } else if (lf.cast(.plan9)) |p9| {
-        const atom_index = try p9.seeNav(pt, nav_index);
-        const atom = p9.getAtom(atom_index);
-        return .{ .mcv = .{ .memory = atom.getOffsetTableAddress(p9) } };
-    } else {
-        const msg = try ErrorMsg.create(zcu.gpa, src_loc, "TODO genNavRef for target {}", .{target});
-        return .{ .fail = msg };
-    }
-}
-
+/// deprecated legacy code path
 pub fn genTypedValue(
     lf: *link.File,
@@ -1042,30 +1043,28 @@ pub fn genTypedValue(
     val: Value,
     target: *const std.Target,
 ) CodeGenError!GenResult {
-    return switch (try lowerValue(pt, val, target)) {
+    const res = try lowerValue(pt, val, target);
+    return switch (res) {
         .none => .{ .mcv = .none },
         .undef => .{ .mcv = .undef },
         .immediate => |imm| .{ .mcv = .{ .immediate = imm } },
-        .lea_nav => |nav| genNavRef(lf, pt, src_loc, nav, target),
-        .lea_uav => |uav| switch (try lf.lowerUav(
+        .lea_nav => |nav| switch (try genNavRef(lf, pt, src_loc, nav, target)) {
+            .sym_index => |sym_index| .{ .mcv = .{ .lea_symbol = sym_index } },
+            .fail => |em| .{ .fail = em },
+        },
+        .load_uav, .lea_uav => |uav| switch (try lf.lowerUav(
             pt,
             uav.val,
             Type.fromInterned(uav.orig_ty).ptrAlignment(pt.zcu),
             src_loc,
         )) {
-            .mcv => |mcv| .{ .mcv = switch (mcv) {
+            .sym_index => |sym_index| .{ .mcv = switch (res) {
                 else => unreachable,
-                .load_direct => |sym_index| .{ .lea_direct = sym_index },
-                .load_symbol => |sym_index| .{ .lea_symbol = sym_index },
+                .load_uav => .{ .load_symbol = sym_index },
+                .lea_uav => .{ .lea_symbol = sym_index },
             } },
             .fail => |em| .{ .fail = em },
         },
-        .load_uav => |uav| lf.lowerUav(
-            pt,
-            uav.val,
-            Type.fromInterned(uav.orig_ty).ptrAlignment(pt.zcu),
-            src_loc,
-        ),
     };
 }
@@ -1076,8 +1075,8 @@ const LowerResult = union(enum) {
     /// such as ARM, the immediate will never exceed 32-bits.
     immediate: u64,
     lea_nav: InternPool.Nav.Index,
-    lea_uav: InternPool.Key.Ptr.BaseAddr.Uav,
     load_uav: InternPool.Key.Ptr.BaseAddr.Uav,
+    lea_uav: InternPool.Key.Ptr.BaseAddr.Uav,
 };
 
 pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allocator.Error!LowerResult {
@@ -12115,7 +12115,7 @@ fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: *const st
     return switch (fn_info.cc) {
         .auto => returnTypeByRef(zcu, target, return_type),
         .x86_64_sysv => firstParamSRetSystemV(return_type, zcu, target),
-        .x86_64_win => x86_64_abi.classifyWindows(return_type, zcu) == .memory,
+        .x86_64_win => x86_64_abi.classifyWindows(return_type, zcu, target) == .memory,
         .x86_sysv, .x86_win => isByRef(return_type, zcu),
         .x86_stdcall => !isScalar(zcu, return_type),
         .wasm_mvp => wasm_c_abi.classifyType(return_type, zcu) == .indirect,
@@ -12215,7 +12215,7 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu
 fn lowerWin64FnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Builder.Type {
     const zcu = o.pt.zcu;
     const return_type = Type.fromInterned(fn_info.return_type);
-    switch (x86_64_abi.classifyWindows(return_type, zcu)) {
+    switch (x86_64_abi.classifyWindows(return_type, zcu, zcu.getTarget())) {
         .integer => {
             if (isScalar(zcu, return_type)) {
                 return o.lowerType(return_type);
@@ -12239,7 +12239,6 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
         return o.lowerType(return_type);
     }
     const classes = x86_64_abi.classifySystemV(return_type, zcu, zcu.getTarget(), .ret);
-    if (classes[0] == .memory) return .void;
     var types_index: u32 = 0;
     var types_buffer: [8]Builder.Type = undefined;
     for (classes) |class| {
@@ -12274,15 +12273,9 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
                 types_index += 1;
             },
             .x87up => continue,
-            .complex_x87 => {
-                @panic("TODO");
-            },
-            .memory => unreachable, // handled above
-            .win_i128 => unreachable, // windows only
             .none => break,
-            .integer_per_element => {
-                @panic("TODO");
-            },
+            .memory, .integer_per_element => return .void,
+            .win_i128 => unreachable, // windows only
         }
     }
     const first_non_integer = std.mem.indexOfNone(x86_64_abi.Class, &classes, &.{.integer});
@@ -12492,7 +12485,7 @@ const ParamTypeIterator = struct {
 
     fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering {
         const zcu = it.object.pt.zcu;
-        switch (x86_64_abi.classifyWindows(ty, zcu)) {
+        switch (x86_64_abi.classifyWindows(ty, zcu, zcu.getTarget())) {
             .integer => {
                 if (isScalar(zcu, ty)) {
                     it.zig_index += 1;
@@ -12573,12 +12566,9 @@ const ParamTypeIterator = struct {
                     return .byref;
                 },
                 .x87up => unreachable,
-                .complex_x87 => {
-                    @panic("TODO");
-                },
-                .none => break,
                 .memory => unreachable, // handled above
                 .win_i128 => unreachable, // windows only
+                .none => break,
                 .integer_per_element => {
                     @panic("TODO");
                 },
@@ -920,7 +920,7 @@ pub const File = struct {
         decl_val: InternPool.Index,
         decl_align: InternPool.Alignment,
         src_loc: Zcu.LazySrcLoc,
-    ) !codegen.GenResult {
+    ) !codegen.SymbolResult {
         assert(base.comp.zcu.?.llvm_object == null);
         switch (base.tag) {
             .lld => unreachable,
@@ -752,7 +752,7 @@ fn shrinkAtom(coff: *Coff, atom_index: Atom.Index, new_block_size: u32) void {
     // capacity, insert a free list node for it.
 }
 
-fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8) !void {
+fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8, resolve_relocs: bool) !void {
     const atom = coff.getAtom(atom_index);
     const sym = atom.getSymbol(coff);
     const section = coff.sections.get(@intFromEnum(sym.section_number) - 1);
@@ -774,11 +774,13 @@ fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8, resolve_relocs: bo
     var relocs = std.ArrayList(*Relocation).init(gpa);
     defer relocs.deinit();
 
-    if (coff.relocs.getPtr(atom_index)) |rels| {
-        try relocs.ensureTotalCapacityPrecise(rels.items.len);
-        for (rels.items) |*reloc| {
-            if (reloc.isResolvable(coff) and reloc.dirty) {
-                relocs.appendAssumeCapacity(reloc);
+    if (resolve_relocs) {
+        if (coff.relocs.getPtr(atom_index)) |rels| {
+            try relocs.ensureTotalCapacityPrecise(rels.items.len);
+            for (rels.items) |*reloc| {
+                if (reloc.isResolvable(coff) and reloc.dirty) {
+                    relocs.appendAssumeCapacity(reloc);
+                }
             }
         }
     }
@@ -812,12 +814,15 @@ fn writeAtom(coff: *Coff, atom_index: Atom.Index, code: []u8, resolve_relocs: bo
         }
     }
 
-    coff.resolveRelocs(atom_index, relocs.items, code, coff.image_base);
+    if (resolve_relocs) {
+        coff.resolveRelocs(atom_index, relocs.items, code, coff.image_base);
+    }
     try coff.pwriteAll(code, file_offset);
 
-    // Now we can mark the relocs as resolved.
-    while (relocs.pop()) |reloc| {
-        reloc.dirty = false;
+    if (resolve_relocs) {
+        // Now we can mark the relocs as resolved.
+        while (relocs.pop()) |reloc| {
+            reloc.dirty = false;
+        }
     }
 }
@@ -914,6 +919,7 @@ fn writeOffsetTableEntry(coff: *Coff, index: usize) !void {
 }
 
 fn markRelocsDirtyByTarget(coff: *Coff, target: SymbolWithLoc) void {
+    if (!coff.base.comp.incremental) return;
     // TODO: reverse-lookup might come in handy here
     for (coff.relocs.values()) |*relocs| {
         for (relocs.items) |*reloc| {
@@ -924,6 +930,7 @@ fn markRelocsDirtyByTarget(coff: *Coff, target: SymbolWithLoc) void {
 }
 
 fn markRelocsDirtyByAddress(coff: *Coff, addr: u32) void {
+    if (!coff.base.comp.incremental) return;
     const got_moved = blk: {
         const sect_id = coff.got_section_index orelse break :blk false;
         break :blk coff.sections.items(.header)[sect_id].virtual_address >= addr;
@@ -1129,7 +1136,7 @@ fn lowerConst(
     log.debug("allocated atom for {s} at 0x{x}", .{ name, atom.getSymbol(coff).value });
     log.debug("  (required alignment 0x{x})", .{required_alignment});
 
-    try coff.writeAtom(atom_index, code);
+    try coff.writeAtom(atom_index, code, coff.base.comp.incremental);
 
     return .{ .ok = atom_index };
 }
@@ -1212,8 +1219,7 @@ fn updateLazySymbolAtom(
     });
     defer gpa.free(name);
 
-    const atom = coff.getAtomPtr(atom_index);
-    const local_sym_index = atom.getSymbolIndex().?;
+    const local_sym_index = coff.getAtomPtr(atom_index).getSymbolIndex().?;
 
     const src = Type.fromInterned(sym.ty).srcLocOrNull(zcu) orelse Zcu.LazySrcLoc.unneeded;
     try codegen.generateLazySymbol(
@@ -1228,12 +1234,13 @@ fn updateLazySymbolAtom(
     );
     const code = code_buffer.items;
 
-    const code_len: u32 = @intCast(code.len);
+    const atom = coff.getAtomPtr(atom_index);
     const symbol = atom.getSymbolPtr(coff);
     try coff.setSymbolName(symbol, name);
     symbol.section_number = @enumFromInt(section_index + 1);
     symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };
 
+    const code_len: u32 = @intCast(code.len);
     const vaddr = try coff.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0));
     errdefer coff.freeAtom(atom_index);
@@ -1244,7 +1251,7 @@ fn updateLazySymbolAtom(
     symbol.value = vaddr;
 
     try coff.addGotEntry(.{ .sym_index = local_sym_index });
-    try coff.writeAtom(atom_index, code);
+    try coff.writeAtom(atom_index, code, coff.base.comp.incremental);
 }
 
 pub fn getOrCreateAtomForLazySymbol(
@@ -1392,7 +1399,7 @@ fn updateNavCode(
         };
     }
 
-    coff.writeAtom(atom_index, code) catch |err| switch (err) {
+    coff.writeAtom(atom_index, code, coff.base.comp.incremental) catch |err| switch (err) {
         error.OutOfMemory => return error.OutOfMemory,
         else => |e| return coff.base.cgFail(nav_index, "failed to write atom: {s}", .{@errorName(e)}),
     };
@@ -1430,7 +1437,7 @@ pub fn updateExports(
             const first_exp = export_indices[0].ptr(zcu);
             const res = try coff.lowerUav(pt, uav, .none, first_exp.src);
             switch (res) {
-                .mcv => {},
+                .sym_index => {},
                 .fail => |em| {
                     // TODO maybe it's enough to return an error here and let Module.processExportsInner
                     // handle the error?
@@ -1677,7 +1684,7 @@ fn flushInner(coff: *Coff, arena: Allocator, tid: Zcu.PerThread.Id) !void {
             const amt = try coff.base.file.?.preadAll(code.items, file_offset);
             if (amt != code.items.len) return error.InputOutput;
 
-            try coff.writeAtom(atom_index, code.items);
+            try coff.writeAtom(atom_index, code.items, true);
         }
 
         // Update GOT if it got moved in memory.
@@ -1770,7 +1777,7 @@ pub fn lowerUav(
     uav: InternPool.Index,
     explicit_alignment: InternPool.Alignment,
     src_loc: Zcu.LazySrcLoc,
-) !codegen.GenResult {
+) !codegen.SymbolResult {
     const zcu = pt.zcu;
     const gpa = zcu.gpa;
     const val = Value.fromInterned(uav);
@@ -1782,7 +1789,7 @@ pub fn lowerUav(
         const atom = coff.getAtom(metadata.atom);
         const existing_addr = atom.getSymbol(coff).value;
         if (uav_alignment.check(existing_addr))
-            return .{ .mcv = .{ .load_symbol = atom.getSymbolIndex().? } };
+            return .{ .sym_index = atom.getSymbolIndex().? };
     }
 
     var name_buf: [32]u8 = undefined;
@@ -1813,9 +1820,7 @@ pub fn lowerUav(
         .atom = atom_index,
         .section = coff.rdata_section_index.?,
     });
-    return .{ .mcv = .{
-        .load_symbol = coff.getAtom(atom_index).getSymbolIndex().?,
-    } };
+    return .{ .sym_index = coff.getAtom(atom_index).getSymbolIndex().? };
 }
 
 pub fn getUavVAddr(
@@ -2479,11 +2484,6 @@ const GetOrPutGlobalPtrResult = struct {
     value_ptr: *SymbolWithLoc,
 };
 
-/// Used only for disambiguating local from global at relocation level.
-/// TODO this must go away.
-pub const global_symbol_bit: u32 = 0x80000000;
-pub const global_symbol_mask: u32 = 0x7fffffff;
-
 /// Return pointer to the global entry for `name` if one exists.
 /// Puts a new global entry for `name` if one doesn't exist, and
 /// returns a pointer to it.
@@ -473,7 +473,7 @@ pub fn lowerUav(
     uav: InternPool.Index,
     explicit_alignment: InternPool.Alignment,
     src_loc: Zcu.LazySrcLoc,
-) !codegen.GenResult {
+) !codegen.SymbolResult {
     return self.zigObjectPtr().?.lowerUav(self, pt, uav, explicit_alignment, src_loc);
 }
@@ -997,7 +997,7 @@ pub fn lowerUav(
     uav: InternPool.Index,
     explicit_alignment: InternPool.Alignment,
     src_loc: Zcu.LazySrcLoc,
-) !codegen.GenResult {
+) !codegen.SymbolResult {
     const zcu = pt.zcu;
     const gpa = zcu.gpa;
     const val = Value.fromInterned(uav);
@@ -1010,7 +1010,7 @@ pub fn lowerUav(
         const sym = self.symbol(metadata.symbol_index);
         const existing_alignment = sym.atom(elf_file).?.alignment;
         if (uav_alignment.order(existing_alignment).compare(.lte))
-            return .{ .mcv = .{ .load_symbol = metadata.symbol_index } };
+            return .{ .sym_index = metadata.symbol_index };
    }
 
     const osec = if (self.data_relro_index) |sym_index|
@@ -1047,12 +1047,11 @@ pub fn lowerUav(
             .{@errorName(e)},
         ) },
     };
-    const sym_index = switch (res) {
-        .ok => |sym_index| sym_index,
-        .fail => |em| return .{ .fail = em },
-    };
-    try self.uavs.put(gpa, uav, .{ .symbol_index = sym_index, .allocated = true });
-    return .{ .mcv = .{ .load_symbol = sym_index } };
+    switch (res) {
+        .sym_index => |sym_index| try self.uavs.put(gpa, uav, .{ .symbol_index = sym_index, .allocated = true }),
+        .fail => {},
+    }
+    return res;
 }
 
 pub fn getOrCreateMetadataForLazySymbol(
@@ -1692,11 +1691,6 @@ fn updateLazySymbol(
     try elf_file.pwriteAll(code, atom_ptr.offset(elf_file));
 }
 
-const LowerConstResult = union(enum) {
-    ok: Symbol.Index,
-    fail: *Zcu.ErrorMsg,
-};
-
 fn lowerConst(
     self: *ZigObject,
     elf_file: *Elf,
@ -1706,7 +1700,7 @@ fn lowerConst(
|
||||
required_alignment: InternPool.Alignment,
|
||||
output_section_index: u32,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
) !LowerConstResult {
|
||||
) !codegen.SymbolResult {
|
||||
const gpa = pt.zcu.gpa;
|
||||
|
||||
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
|
||||
@ -1740,7 +1734,7 @@ fn lowerConst(
|
||||
|
||||
try elf_file.pwriteAll(code, atom_ptr.offset(elf_file));
|
||||
|
||||
return .{ .ok = sym_index };
|
||||
return .{ .sym_index = sym_index };
|
||||
}
|
||||
|
||||
pub fn updateExports(
|
||||
@ -1764,7 +1758,7 @@ pub fn updateExports(
|
||||
const first_exp = export_indices[0].ptr(zcu);
|
||||
const res = try self.lowerUav(elf_file, pt, uav, .none, first_exp.src);
|
||||
switch (res) {
|
||||
.mcv => {},
|
||||
.sym_index => {},
|
||||
.fail => |em| {
|
||||
// TODO maybe it's enough to return an error here and let Zcu.processExportsInner
|
||||
// handle the error?
|
||||
|
||||
@ -3092,7 +3092,7 @@ pub fn lowerUav(
|
||||
uav: InternPool.Index,
|
||||
explicit_alignment: InternPool.Alignment,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
) !codegen.GenResult {
|
||||
) !codegen.SymbolResult {
|
||||
return self.getZigObject().?.lowerUav(self, pt, uav, explicit_alignment, src_loc);
|
||||
}
|
||||
|
||||
|
||||
@ -704,7 +704,7 @@ pub fn lowerUav(
|
||||
uav: InternPool.Index,
|
||||
explicit_alignment: Atom.Alignment,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
) !codegen.GenResult {
|
||||
) !codegen.SymbolResult {
|
||||
const zcu = pt.zcu;
|
||||
const gpa = zcu.gpa;
|
||||
const val = Value.fromInterned(uav);
|
||||
@ -716,7 +716,7 @@ pub fn lowerUav(
|
||||
const sym = self.symbols.items[metadata.symbol_index];
|
||||
const existing_alignment = sym.getAtom(macho_file).?.alignment;
|
||||
if (uav_alignment.order(existing_alignment).compare(.lte))
|
||||
return .{ .mcv = .{ .load_symbol = sym.nlist_idx } };
|
||||
return .{ .sym_index = metadata.symbol_index };
|
||||
}
|
||||
|
||||
var name_buf: [32]u8 = undefined;
|
||||
@ -740,14 +740,11 @@ pub fn lowerUav(
|
||||
.{@errorName(e)},
|
||||
) },
|
||||
};
|
||||
const sym_index = switch (res) {
|
||||
.ok => |sym_index| sym_index,
|
||||
.fail => |em| return .{ .fail = em },
|
||||
};
|
||||
try self.uavs.put(gpa, uav, .{ .symbol_index = sym_index });
|
||||
return .{ .mcv = .{
|
||||
.load_symbol = self.symbols.items[sym_index].nlist_idx,
|
||||
} };
|
||||
switch (res) {
|
||||
.sym_index => |sym_index| try self.uavs.put(gpa, uav, .{ .symbol_index = sym_index }),
|
||||
.fail => {},
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
fn freeNavMetadata(self: *ZigObject, macho_file: *MachO, sym_index: Symbol.Index) void {
|
||||
@ -1187,11 +1184,6 @@ fn getNavOutputSection(
|
||||
return macho_file.zig_data_sect_index.?;
|
||||
}
|
||||
|
||||
const LowerConstResult = union(enum) {
|
||||
ok: Symbol.Index,
|
||||
fail: *Zcu.ErrorMsg,
|
||||
};
|
||||
|
||||
fn lowerConst(
|
||||
self: *ZigObject,
|
||||
macho_file: *MachO,
|
||||
@ -1201,7 +1193,7 @@ fn lowerConst(
|
||||
required_alignment: Atom.Alignment,
|
||||
output_section_index: u8,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
) !LowerConstResult {
|
||||
) !codegen.SymbolResult {
|
||||
const gpa = macho_file.base.comp.gpa;
|
||||
|
||||
var code_buffer: std.ArrayListUnmanaged(u8) = .empty;
|
||||
@ -1241,7 +1233,7 @@ fn lowerConst(
|
||||
const file_offset = sect.offset + atom.value;
|
||||
try macho_file.pwriteAll(code, file_offset);
|
||||
|
||||
return .{ .ok = sym_index };
|
||||
return .{ .sym_index = sym_index };
|
||||
}
|
||||
|
||||
pub fn updateExports(
|
||||
@ -1265,7 +1257,7 @@ pub fn updateExports(
|
||||
const first_exp = export_indices[0].ptr(zcu);
|
||||
const res = try self.lowerUav(macho_file, pt, uav, .none, first_exp.src);
|
||||
switch (res) {
|
||||
.mcv => {},
|
||||
.sym_index => {},
|
||||
.fail => |em| {
|
||||
// TODO maybe it's enough to return an error here and let Zcu.processExportsInner
|
||||
// handle the error?
|
||||
|
||||
@ -1358,7 +1358,7 @@ pub fn lowerUav(
|
||||
uav: InternPool.Index,
|
||||
explicit_alignment: InternPool.Alignment,
|
||||
src_loc: Zcu.LazySrcLoc,
|
||||
) !codegen.GenResult {
|
||||
) !codegen.SymbolResult {
|
||||
_ = explicit_alignment;
|
||||
// example:
|
||||
// const ty = mod.intern_pool.typeOf(decl_val).toType();
|
||||
@ -1370,7 +1370,7 @@ pub fn lowerUav(
|
||||
// ...
|
||||
const gpa = self.base.comp.gpa;
|
||||
const gop = try self.uavs.getOrPut(gpa, uav);
|
||||
if (gop.found_existing) return .{ .mcv = .{ .load_direct = gop.value_ptr.* } };
|
||||
if (gop.found_existing) return .{ .sym_index = gop.value_ptr.* };
|
||||
const val = Value.fromInterned(uav);
|
||||
const name = try std.fmt.allocPrint(gpa, "__anon_{d}", .{@intFromEnum(uav)});
|
||||
|
||||
@ -1395,7 +1395,7 @@ pub fn lowerUav(
|
||||
.value = undefined,
|
||||
.name = name,
|
||||
};
|
||||
return .{ .mcv = .{ .load_direct = index } };
|
||||
return .{ .sym_index = index };
|
||||
}
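
The Plan9 path above uses the same cache-then-lower shape through `getOrPut`: a hit returns the stored index directly, a miss lowers the value and records the fresh index. The idiom in isolation, with a toy map (the types here are illustrative, not Plan9's):

    const std = @import("std");

    // Returns the cached index for `key`, or records `next_index` on first sight.
    fn internIndex(
        map: *std.AutoHashMapUnmanaged(u64, u32),
        gpa: std.mem.Allocator,
        key: u64,
        next_index: u32,
    ) !u32 {
        const gop = try map.getOrPut(gpa, key);
        if (gop.found_existing) return gop.value_ptr.*; // cache hit: reuse
        gop.value_ptr.* = next_index; // miss: remember the freshly assigned index
        return next_index;
    }
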

pub fn getUavVAddr(self: *Plan9, uav: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {

@ -156,7 +156,6 @@ test "@abs floats" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

try comptime testAbsFloats(f16);
if (builtin.zig_backend != .stage2_riscv64) try testAbsFloats(f16);
@ -341,7 +340,6 @@ test "@abs float vectors" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

@setEvalBranchQuota(2000);
try comptime testAbsFloatVectors(f16, 1);

@ -302,19 +302,6 @@ test "array mult operator" {
try expect(mem.eql(u8, "ab" ** 5, "ababababab"));
}

const OpaqueA = opaque {};
const OpaqueB = opaque {};

test "opaque types" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

try expect(*OpaqueA != *OpaqueB);

try expect(mem.eql(u8, @typeName(OpaqueA), "behavior.basic.OpaqueA"));
try expect(mem.eql(u8, @typeName(OpaqueB), "behavior.basic.OpaqueB"));
}

const global_a: i32 = 1234;
const global_b: *const i32 = &global_a;
const global_c: *const f32 = @as(*const f32, @ptrCast(global_b));
@ -447,6 +434,7 @@ fn f2(x: bool) []const u8 {
return (if (x) &fA else &fB)();
}

const OpaqueA = opaque {};
test "variable is allowed to be a pointer to an opaque type" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -1199,7 +1187,6 @@ test "arrays and vectors with big integers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

@ -166,7 +166,6 @@ test "Saturating Shift Left" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

const S = struct {
fn shlSat(x: anytype, y: std.math.Log2Int(@TypeOf(x))) @TypeOf(x) {

@ -302,7 +302,6 @@ test "@bitCast packed struct of floats" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

const Foo = packed struct {
@ -342,7 +341,6 @@ test "comptime @bitCast packed struct to int and back" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

const S = packed struct {
void: void = {},
@ -426,7 +424,6 @@ test "bitcast nan float does not modify signaling bit" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

const snan_u16: u16 = 0x7D00;

@ -126,7 +126,6 @@ test "@floatFromInt(f80)" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

const S = struct {
@ -1406,7 +1405,6 @@ test "cast f16 to wider types" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

const S = struct {
@ -1427,7 +1425,6 @@ test "cast f128 to narrower types" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

const S = struct {
@ -1521,7 +1518,7 @@ test "coerce between pointers of compatible differently-named floats" {
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows and !builtin.link_libc) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) {
@ -1727,7 +1724,6 @@ test "peer type resolution: float and comptime-known fixed-width integer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

const i: u8 = 100;
var f: f32 = 1.234;
@ -2477,7 +2473,6 @@ test "@floatCast on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

const S = struct {
@ -2569,7 +2564,6 @@ test "@floatFromInt on vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

const S = struct {
@ -2626,8 +2620,6 @@ test "@intFromBool on vector" {
}

test "numeric coercions with undefined" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

const from: i32 = undefined;
var to: f32 = from;
to = @floatFromInt(from);
@ -2648,7 +2640,6 @@ test "@as does not corrupt values with incompatible representations" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

const x: f32 = @as(f16, blk: {
if (false) {

@ -526,7 +526,6 @@ test "runtime 128 bit integer division" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

var a: u128 = 152313999999999991610955792383;

@ -40,7 +40,7 @@ export fn testPackedStuff(a: *const PackedStruct, b: *const PackedUnion) void {
}

test "export function alias" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

_ = struct {

@ -16,9 +16,6 @@ fn epsForType(comptime T: type) T {
test "add f16" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;

try testAdd(f16);
try comptime testAdd(f16);
}
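
The remaining churn is in the behavior tests: the blanket non-ELF/Mach-O skip is replaced with a narrower gate, so the tests now run on Windows/COFF unless the CPU lacks the feature the backend needs. For the f16 tests that feature is F16C, the half/single-precision convert instructions. The gate in isolation (test name hypothetical, body elided):

    const builtin = @import("builtin");

    test "hypothetical f16 operation" {
        if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
            !comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
        // ... f16 arithmetic under test ...
    }
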
@ -31,7 +28,6 @@ test "add f32/f64" {
}

test "add f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

@ -52,7 +48,6 @@ fn testAdd(comptime T: type) !void {
}

test "sub f16" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

try testSub(f16);
@ -67,7 +62,6 @@ test "sub f32/f64" {
}

test "sub f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

@ -88,7 +82,6 @@ fn testSub(comptime T: type) !void {
}

test "mul f16" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

try testMul(f16);
@ -103,7 +96,6 @@ test "mul f32/f64" {
}

test "mul f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

@ -128,9 +120,6 @@ test "cmp f16" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234

if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;

try testCmp(f16);
try comptime testCmp(f16);
}
@ -158,7 +147,6 @@ test "cmp f128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

try testCmp(f128);
@ -171,8 +159,8 @@ test "cmp f80/c_longdouble" {
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;

try testCmp(f80);
try comptime testCmp(f80);
@ -242,7 +230,6 @@ test "vector cmp f16" {
if (builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

try testCmpVector(f16);
try comptime testCmpVector(f16);
@ -256,7 +243,6 @@ test "vector cmp f32" {
if (builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

try testCmpVector(f32);
try comptime testCmpVector(f32);
@ -269,7 +255,6 @@ test "vector cmp f64" {
if (builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

try testCmpVector(f64);
try comptime testCmpVector(f64);
@ -285,7 +270,6 @@ test "vector cmp f128" {
if (builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

try testCmpVector(f128);
try comptime testCmpVector(f128);
@ -297,7 +281,7 @@ test "vector cmp f80/c_longdouble" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;

try testCmpVector(f80);
try comptime testCmpVector(f80);
@ -344,9 +328,6 @@ test "different sized float comparisons" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;

try testDifferentSizedFloatComparisons();
try comptime testDifferentSizedFloatComparisons();
}
@ -395,9 +376,6 @@ test "@sqrt f16" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;

try testSqrt(f16);
try comptime testSqrt(f16);
}
@ -418,9 +396,9 @@ test "@sqrt f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;

if (builtin.os.tag == .freebsd) {
// TODO https://github.com/ziglang/zig/issues/10875
@ -527,7 +505,6 @@ test "@sin f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -539,7 +516,6 @@ test "@sin f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -553,9 +529,9 @@ test "@sin f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;

try testSin(f80);
comptime try testSin(f80);
@ -581,7 +557,6 @@ test "@sin with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -603,7 +578,6 @@ test "@cos f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -615,7 +589,6 @@ test "@cos f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -629,9 +602,9 @@ test "@cos f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;

try testCos(f80);
try comptime testCos(f80);
@ -657,7 +630,6 @@ test "@cos with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -679,7 +651,6 @@ test "@tan f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -691,7 +662,6 @@ test "@tan f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -705,9 +675,9 @@ test "@tan f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;

try testTan(f80);
try comptime testTan(f80);
@ -733,7 +703,6 @@ test "@tan with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -755,7 +724,6 @@ test "@exp f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -767,7 +735,6 @@ test "@exp f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -781,9 +748,9 @@ test "@exp f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;

try testExp(f80);
try comptime testExp(f80);
@ -813,7 +780,6 @@ test "@exp with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -835,7 +801,6 @@ test "@exp2 f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -847,7 +812,6 @@ test "@exp2 f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -861,9 +825,9 @@ test "@exp2 f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;

try testExp2(f80);
try comptime testExp2(f80);
@ -888,7 +852,6 @@ test "@exp2 with @vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -910,7 +873,6 @@ test "@log f16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -922,7 +884,6 @@ test "@log f32/f64" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -936,9 +897,9 @@ test "@log f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;

try testLog(f80);
try comptime testLog(f80);
@ -964,7 +925,6 @@ test "@log with @vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -983,7 +943,6 @@ test "@log2 f16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -995,7 +954,6 @@ test "@log2 f32/f64" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -1009,9 +967,9 @@ test "@log2 f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;

try testLog2(f80);
try comptime testLog2(f80);
@ -1042,7 +1000,6 @@ test "@log2 with vectors" {
if (builtin.zig_backend == .stage2_llvm and
builtin.cpu.arch == .aarch64 and
builtin.os.tag == .windows) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;

try testLog2WithVectors();
try comptime testLog2WithVectors();
@ -1062,7 +1019,6 @@ test "@log10 f16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -1074,7 +1030,6 @@ test "@log10 f32/f64" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -1088,9 +1043,9 @@ test "@log10 f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;

try testLog10(f80);
try comptime testLog10(f80);
@ -1115,7 +1070,6 @@ test "@log10 with vectors" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

@ -1139,9 +1093,6 @@ test "@abs f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;

try testFabs(f16);
try comptime testFabs(f16);
}
@ -1162,9 +1113,9 @@ test "@abs f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;

try testFabs(f80);
try comptime testFabs(f80);
@ -1262,7 +1213,6 @@ test "@floor f16" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

try testFloor(f16);
@ -1275,9 +1225,6 @@ test "@floor f32/f64" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;

try testFloor(f32);
try comptime testFloor(f32);
try testFloor(f64);
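
The rounding tests (`@floor`, `@ceil`, `@trunc`) use the same template with `.sse4_1` instead, presumably because the backend lowers these operations through the ROUNDSS/ROUNDSD family introduced with SSE4.1; without that feature the tests skip rather than fail. The same gate with the feature swapped (test name hypothetical):

    const builtin = @import("builtin");

    test "hypothetical rounding operation" {
        if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
            !comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
        // ... @floor/@ceil/@trunc under test ...
    }
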
|
||||
@ -1342,9 +1289,6 @@ test "@floor with vectors" {
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
|
||||
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
|
||||
|
||||
try testFloorWithVectors();
|
||||
try comptime testFloorWithVectors();
|
||||
}
|
||||
@ -1363,7 +1307,6 @@ test "@ceil f16" {
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
|
||||
try testCeil(f16);
|
||||
@ -1376,9 +1319,6 @@ test "@ceil f32/f64" {
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
|
||||
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
|
||||
|
||||
try testCeil(f32);
|
||||
try comptime testCeil(f32);
|
||||
try testCeil(f64);
|
||||
@ -1443,9 +1383,6 @@ test "@ceil with vectors" {
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
|
||||
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
|
||||
|
||||
try testCeilWithVectors();
|
||||
try comptime testCeilWithVectors();
|
||||
}
|
||||
@ -1464,7 +1401,6 @@ test "@trunc f16" {
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
|
||||
try testTrunc(f16);
|
||||
@ -1477,9 +1413,6 @@ test "@trunc f32/f64" {
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
|
||||
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
|
||||
|
||||
try testTrunc(f32);
|
||||
try comptime testTrunc(f32);
|
||||
try testTrunc(f64);
|
||||
@ -1491,9 +1424,9 @@ test "@trunc f80/f128/c_longdouble" {
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
|
||||
|
||||
if (builtin.zig_backend == .stage2_llvm and builtin.os.tag == .windows) {
|
||||
// https://github.com/ziglang/zig/issues/12602
|
||||
@ -1544,9 +1477,6 @@ test "@trunc with vectors" {
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
|
||||
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
|
||||
|
||||
try testTruncWithVectors();
|
||||
try comptime testTruncWithVectors();
|
||||
}
|
||||
@ -1566,9 +1496,7 @@ test "neg f16" {
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
|
||||
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
|
||||
|
||||
if (builtin.os.tag == .freebsd) {
|
||||
// TODO file issue to track this failure
|
||||
@ -1597,8 +1525,8 @@ test "neg f80/f128/c_longdouble" {
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
|
||||
|
||||
try testNeg(f80);
|
||||
try comptime testNeg(f80);
|
||||
@ -1704,7 +1632,6 @@ test "comptime fixed-width float zero divided by zero produces NaN" {
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
|
||||
inline for (.{ f16, f32, f64, f80, f128 }) |F| {
|
||||
|
||||
@ -587,7 +587,6 @@ fn StructCapture(comptime T: type) type {
|
||||
}
|
||||
|
||||
test "call generic function that uses capture from function declaration's scope" {
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
|
||||
|
||||
|
||||
@ -65,7 +65,6 @@ test "@clz" {
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
|
||||
|
||||
try testClz();
|
||||
@ -474,9 +473,6 @@ test "division" {
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
|
||||
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
|
||||
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
|
||||
|
||||
try testIntDivision();
|
||||
try comptime testIntDivision();
|
||||
|
||||
@ -589,7 +585,6 @@ fn testFloatDivision() !void {
|
||||
}
|
||||
|
||||
test "large integer division" {
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
|
||||
@ -615,7 +610,6 @@ test "division half-precision floats" {
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
|
||||
|
||||
try testDivisionFP16();
|
||||
@ -757,7 +751,6 @@ test "f128" {
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
|
||||
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try test_f128();
@ -843,7 +836,6 @@ test "128-bit multiplication" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -1052,7 +1044,6 @@ test "@mulWithOverflow bitsize 128 bits" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testMulWithOverflow(u128, 3, 0x5555555555555555_5555555555555555, 0xffffffffffffffff_ffffffffffffffff, 0);
try testMulWithOverflow(u128, 3, 0x5555555555555555_5555555555555556, 2, 1);
@ -1078,7 +1069,6 @@ test "@mulWithOverflow bitsize 256 bits" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
{
const const_lhs: u256 = 8035709466408580321693645878924206181189;
@ -1475,7 +1465,6 @@ test "float remainder division using @rem" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime frem(f16);
@ -1560,7 +1549,6 @@ test "@round f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testRound(f16, 12.0);
@ -1571,7 +1559,6 @@ test "@round f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testRound(f64, 12.0);
@ -1591,7 +1578,6 @@ test "@round f80" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testRound(f80, 12.0);
@ -1604,7 +1590,6 @@ test "@round f128" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testRound(f128, 12.0);
@ -1624,7 +1609,6 @@ test "vector integer addition" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1646,7 +1630,6 @@ test "NaN comparison" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
@ -1665,7 +1648,6 @@ test "NaN comparison f80" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try testNanEqNan(f80);
@ -1722,7 +1704,7 @@ test "signed zeros are represented properly" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1900,7 +1882,6 @@ test "partially-runtime integer vector division would be illegal if vector eleme
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
var lhs: @Vector(2, i8) = .{ -128, 5 };
const rhs: @Vector(2, i8) = .{ 1, -1 };
@ -1930,9 +1911,6 @@ test "float vector division of comptime zero by runtime nan is nan" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .sse4_1)) return error.SkipZigTest;
const ct_zero: @Vector(1, f32) = .{0};
var rt_nan: @Vector(1, f32) = .{math.nan(f32)};
@ -122,7 +122,6 @@ test "@min/max for floats" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest(comptime T: type) !void {
@ -9,7 +9,6 @@ test "memmove and memset intrinsics" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testMemmoveMemset();
try comptime testMemmoveMemset();
@ -39,7 +38,6 @@ test "@memmove with both operands single-ptr-to-array, one is null-terminated" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testMemmoveBothSinglePtrArrayOneIsNullTerminated();
try comptime testMemmoveBothSinglePtrArrayOneIsNullTerminated();
@ -85,7 +83,6 @@ test "@memmove dest many pointer" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testMemmoveDestManyPtr();
try comptime testMemmoveDestManyPtr();
@ -129,7 +126,6 @@ test "@memmove slice" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testMemmoveSlice();
try comptime testMemmoveSlice();
@ -9,9 +9,6 @@ test "@mulAdd" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .fma)) return error.SkipZigTest;
try comptime testMulAdd();
try testMulAdd();
}
@ -37,7 +34,6 @@ test "@mulAdd f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -59,7 +55,6 @@ test "@mulAdd f80" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testMulAdd80();
@ -80,7 +75,6 @@ test "@mulAdd f128" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime testMulAdd128();
@ -113,7 +107,6 @@ test "vector f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -142,9 +135,6 @@ test "vector f32" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .fma)) return error.SkipZigTest;
try comptime vector32();
try vector32();
}
@ -170,9 +160,6 @@ test "vector f64" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .fma)) return error.SkipZigTest;
try comptime vector64();
try vector64();
}
@ -196,7 +183,6 @@ test "vector f80" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime vector80();
@ -223,7 +209,6 @@ test "vector f128" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
try comptime vector128();
@ -15,7 +15,7 @@ test "call extern function defined with conflicting type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@import("conflicting_externs/a.zig").issue529(null);
@ -59,7 +59,6 @@ fn testNullPtrsEql() !void {
}
test "optional with zero-bit type" {
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
@ -661,7 +661,6 @@ test "nested packed struct field access test" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const Vec2 = packed struct {
@ -58,7 +58,6 @@ test "saturating add 128bit" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -132,7 +131,6 @@ test "saturating subtraction 128bit" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -233,7 +231,6 @@ test "saturating multiplication <= 32 bits" {
test "saturating mul i64, i128" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testSatMul(i64, 0, maxInt(i64), 0);
try testSatMul(i64, 0, minInt(i64), 0);
@ -266,7 +263,6 @@ test "saturating multiplication" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/9660
@ -304,7 +300,6 @@ test "saturating shift-left" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -349,7 +344,6 @@ test "saturating shift-left large rhs" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
{
var lhs: u8 = undefined;
@ -368,7 +362,6 @@ test "saturating shl uses the LHS type" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const lhs_const: u8 = 1;
var lhs_var: u8 = 1;
@ -561,7 +561,6 @@ test "packed struct with non-ABI-aligned field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = packed struct {
x: u9,
@ -397,7 +397,6 @@ test "tuple of struct concatenation and coercion to array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -282,7 +282,6 @@ test "cast union to tag type of union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
try testCastUnionToTag();
try comptime testCastUnionToTag();
@ -2262,7 +2261,6 @@ test "signed enum tag with negative value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const Enum = enum(i8) {
a = -1,
@ -80,7 +80,6 @@ test "vector int operators" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -104,7 +103,6 @@ test "vector float operators" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
// Triggers an assertion with LLVM 18:
@ -260,9 +258,6 @@ test "array to vector with element type coercion" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff and
!comptime builtin.cpu.has(.x86, .f16c)) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
var foo: f16 = 3.14;
@ -301,7 +296,6 @@ test "tuple to vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -365,7 +359,6 @@ test "vector @splat" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn testForT(comptime N: comptime_int, v: anytype) !void {
@ -567,7 +560,6 @@ test "vector division operators" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTestDiv(comptime T: type, x: @Vector(4, T), y: @Vector(4, T)) !void {
@ -718,7 +710,6 @@ test "vector shift operators" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTestShift(x: anytype, y: anytype) !void {
@ -793,7 +784,6 @@ test "vector reduce operation" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21091
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isSPARC()) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23719
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn testReduce(comptime op: std.builtin.ReduceOp, x: anytype, expected: anytype) !void {
@ -1047,7 +1037,6 @@ test "saturating shift-left" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1072,7 +1061,6 @@ test "multiplication-assignment operator with an array operand" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@ -1332,7 +1320,6 @@ test "zero multiplicand" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
const zeros = @Vector(2, u32){ 0.0, 0.0 };
var ones = @Vector(2, u32){ 1.0, 1.0 };
@ -1395,7 +1382,6 @@ test "load packed vector element" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: @Vector(2, u15) = .{ 1, 4 };
@ -1426,7 +1412,6 @@ test "store to vector in slice" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
var v = [_]@Vector(3, f32){
.{ 1, 1, 1 },
@ -44,7 +44,6 @@ test "float widening" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var a: f16 = 12.34;
@ -65,7 +64,6 @@ test "float widening f16 to f128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var x: f16 = 12.34;
@ -5,8 +5,6 @@ test {
if (builtin.zig_backend != .stage2_x86_64) return error.SkipZigTest;
// MachO linker does not support executables this big.
if (builtin.object_format == .macho) return error.SkipZigTest;
// COFF linker does not support the new backend.
if (builtin.object_format == .coff) return error.SkipZigTest;
_ = @import("x86_64/access.zig");
_ = @import("x86_64/binary.zig");
_ = @import("x86_64/cast.zig");
@ -14,6 +14,7 @@ const Log2Int = math.Log2Int;
const math = @import("math.zig");
const nan = math.nan;
const Scalar = math.Scalar;
const select = math.select;
const sign = math.sign;
const splat = math.splat;
const Sse = math.Sse;
@ -84,6 +85,12 @@ fn binary(comptime op: anytype, comptime opts: struct { compare: Compare = .rela
imm_rhs,
);
}
fn testBools() !void {
try testArgs(bool, false, false);
try testArgs(bool, false, true);
try testArgs(bool, true, false);
try testArgs(bool, true, true);
}
fn testInts() !void {
try testArgs(i1, 0x0, -0x1);
try testArgs(u1, 0x1, 0x1);
@ -1881,6 +1888,23 @@ fn binary(comptime op: anytype, comptime opts: struct { compare: Compare = .rela
try testArgs(f128, nan(f128), inf(f128));
try testArgs(f128, nan(f128), nan(f128));
}
fn testBoolVectors() !void {
try testArgs(@Vector(1, bool), .{
false,
}, .{
true,
});
try testArgs(@Vector(2, bool), .{
false, true,
}, .{
true, false,
});
try testArgs(@Vector(4, bool), .{
false, false, true, true,
}, .{
false, true, false, true,
});
}
fn testIntVectors() !void {
try testArgs(@Vector(1, i1), .{
0x0,
@ -5033,8 +5057,7 @@ inline fn addSafe(comptime Type: type, lhs: Type, rhs: Type) AddOneBit(Type) {
test addSafe {
const test_add_safe = binary(addSafe, .{});
try test_add_safe.testInts();
try test_add_safe.testFloats();
try test_add_safe.testFloatVectors();
try test_add_safe.testIntVectors();
}
inline fn addWrap(comptime Type: type, lhs: Type, rhs: Type) Type {
@ -5046,13 +5069,13 @@ test addWrap {
try test_add_wrap.testIntVectors();
}
inline fn addSat(comptime Type: type, lhs: Type, rhs: Type) Type {
inline fn addSaturate(comptime Type: type, lhs: Type, rhs: Type) Type {
return lhs +| rhs;
}
test addSat {
const test_add_sat = binary(addSat, .{});
try test_add_sat.testInts();
try test_add_sat.testIntVectors();
test addSaturate {
const test_add_saturate = binary(addSaturate, .{});
try test_add_saturate.testInts();
try test_add_saturate.testIntVectors();
}
inline fn subUnsafe(comptime Type: type, lhs: Type, rhs: Type) AddOneBit(Type) {
@ -5088,8 +5111,7 @@ inline fn subSafe(comptime Type: type, lhs: Type, rhs: Type) AddOneBit(Type) {
test subSafe {
const test_sub_safe = binary(subSafe, .{});
try test_sub_safe.testInts();
try test_sub_safe.testFloats();
try test_sub_safe.testFloatVectors();
try test_sub_safe.testIntVectors();
}
inline fn subWrap(comptime Type: type, lhs: Type, rhs: Type) Type {
@ -5101,13 +5123,13 @@ test subWrap {
try test_sub_wrap.testIntVectors();
}
inline fn subSat(comptime Type: type, lhs: Type, rhs: Type) Type {
inline fn subSaturate(comptime Type: type, lhs: Type, rhs: Type) Type {
return lhs -| rhs;
}
test subSat {
const test_sub_sat = binary(subSat, .{});
try test_sub_sat.testInts();
try test_sub_sat.testIntVectors();
test subSaturate {
const test_sub_saturate = binary(subSaturate, .{});
try test_sub_saturate.testInts();
try test_sub_saturate.testIntVectors();
}
inline fn mulUnsafe(comptime Type: type, lhs: Type, rhs: Type) DoubleBits(Type) {
@ -5118,6 +5140,8 @@ test mulUnsafe {
const test_mul_unsafe = binary(mulUnsafe, .{});
try test_mul_unsafe.testInts();
try test_mul_unsafe.testIntVectors();
try test_mul_unsafe.testFloats();
try test_mul_unsafe.testFloatVectors();
}
inline fn mulSafe(comptime Type: type, lhs: Type, rhs: Type) DoubleBits(Type) {
@ -5127,6 +5151,7 @@ inline fn mulSafe(comptime Type: type, lhs: Type, rhs: Type) DoubleBits(Type) {
test mulSafe {
const test_mul_safe = binary(mulSafe, .{});
try test_mul_safe.testInts();
try test_mul_safe.testIntVectors();
}
inline fn mulWrap(comptime Type: type, lhs: Type, rhs: Type) Type {
@ -5138,16 +5163,16 @@ test mulWrap {
try test_mul_wrap.testIntVectors();
}
inline fn mulSat(comptime Type: type, lhs: Type, rhs: Type) Type {
inline fn mulSaturate(comptime Type: type, lhs: Type, rhs: Type) Type {
return lhs *| rhs;
}
test mulSat {
const test_mul_sat = binary(mulSat, .{});
try test_mul_sat.testInts();
try test_mul_sat.testIntVectors();
test mulSaturate {
const test_mul_saturate = binary(mulSaturate, .{});
try test_mul_saturate.testInts();
try test_mul_saturate.testIntVectors();
}
inline fn multiply(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs * rhs) {
inline fn multiply(comptime Type: type, lhs: Type, rhs: Type) Type {
return lhs * rhs;
}
test multiply {
@ -5156,7 +5181,7 @@ test multiply {
try test_multiply.testFloatVectors();
}
inline fn divide(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs / rhs) {
inline fn divide(comptime Type: type, lhs: Type, rhs: Type) Type {
return lhs / rhs;
}
test divide {
@ -5165,29 +5190,49 @@ test divide {
try test_divide.testFloatVectors();
}
inline fn divTrunc(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@divTrunc(lhs, rhs)) {
inline fn divTruncUnoptimized(comptime Type: type, lhs: Type, rhs: Type) Type {
return @divTrunc(lhs, rhs);
}
test divTrunc {
const test_div_trunc = binary(divTrunc, .{ .compare = .approx_int });
try test_div_trunc.testInts();
try test_div_trunc.testIntVectors();
try test_div_trunc.testFloats();
try test_div_trunc.testFloatVectors();
test divTruncUnoptimized {
const test_div_trunc_unoptimized = binary(divTruncUnoptimized, .{ .compare = .approx_int });
try test_div_trunc_unoptimized.testInts();
try test_div_trunc_unoptimized.testIntVectors();
try test_div_trunc_unoptimized.testFloats();
try test_div_trunc_unoptimized.testFloatVectors();
}
inline fn divFloor(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@divFloor(lhs, rhs)) {
inline fn divTruncOptimized(comptime Type: type, lhs: Type, rhs: Type) Type {
@setFloatMode(.optimized);
return @divTrunc(lhs, select(@abs(rhs) > splat(Type, 0.0), rhs, splat(Type, 1.0)));
}
test divTruncOptimized {
const test_div_trunc_optimized = binary(divTruncOptimized, .{ .compare = .approx_int });
try test_div_trunc_optimized.testFloats();
try test_div_trunc_optimized.testFloatVectors();
}
inline fn divFloorUnoptimized(comptime Type: type, lhs: Type, rhs: Type) Type {
return @divFloor(lhs, rhs);
}
test divFloor {
const test_div_floor = binary(divFloor, .{ .compare = .approx_int });
try test_div_floor.testInts();
try test_div_floor.testIntVectors();
try test_div_floor.testFloats();
try test_div_floor.testFloatVectors();
test divFloorUnoptimized {
const test_div_floor_unoptimized = binary(divFloorUnoptimized, .{ .compare = .approx_int });
try test_div_floor_unoptimized.testInts();
try test_div_floor_unoptimized.testIntVectors();
try test_div_floor_unoptimized.testFloats();
try test_div_floor_unoptimized.testFloatVectors();
}
inline fn rem(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@rem(lhs, rhs)) {
inline fn divFloorOptimized(comptime Type: type, lhs: Type, rhs: Type) Type {
@setFloatMode(.optimized);
return @divFloor(lhs, select(@abs(rhs) > splat(Type, 0.0), rhs, splat(Type, 1.0)));
}
test divFloorOptimized {
const test_div_floor_optimized = binary(divFloorOptimized, .{ .compare = .approx_int });
try test_div_floor_optimized.testFloats();
try test_div_floor_optimized.testFloatVectors();
}
inline fn rem(comptime Type: type, lhs: Type, rhs: Type) Type {
return @rem(lhs, rhs);
}
test rem {
@ -5198,7 +5243,7 @@ test rem {
try test_rem.testFloatVectors();
}
inline fn mod(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@mod(lhs, rhs)) {
inline fn mod(comptime Type: type, lhs: Type, rhs: Type) Type {
// workaround llvm backend bugs
if (@inComptime() and @typeInfo(Scalar(Type)) == .float) {
const scalarMod = struct {
@ -5219,6 +5264,7 @@ inline fn mod(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@mod(lhs, rhs))
return @mod(lhs, rhs);
}
test mod {
if (@import("builtin").object_format == .coff) return error.SkipZigTest;
const test_mod = binary(mod, .{});
try test_mod.testInts();
try test_mod.testIntVectors();
@ -5286,7 +5332,7 @@ test shlWithOverflow {
try test_shl_with_overflow.testIntVectors();
}
inline fn equal(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs == rhs) {
inline fn equal(comptime Type: type, lhs: Type, rhs: Type) ChangeScalar(Type, bool) {
return lhs == rhs;
}
test equal {
@ -5297,7 +5343,7 @@ test equal {
try test_equal.testFloatVectors();
}
inline fn notEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs != rhs) {
inline fn notEqual(comptime Type: type, lhs: Type, rhs: Type) ChangeScalar(Type, bool) {
return lhs != rhs;
}
test notEqual {
@ -5308,7 +5354,7 @@ test notEqual {
try test_not_equal.testFloatVectors();
}
inline fn lessThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs < rhs) {
inline fn lessThan(comptime Type: type, lhs: Type, rhs: Type) ChangeScalar(Type, bool) {
return lhs < rhs;
}
test lessThan {
@ -5319,7 +5365,7 @@ test lessThan {
try test_less_than.testFloatVectors();
}
inline fn lessThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs <= rhs) {
inline fn lessThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) ChangeScalar(Type, bool) {
return lhs <= rhs;
}
test lessThanOrEqual {
@ -5330,7 +5376,7 @@ test lessThanOrEqual {
try test_less_than_or_equal.testFloatVectors();
}
inline fn greaterThan(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs > rhs) {
inline fn greaterThan(comptime Type: type, lhs: Type, rhs: Type) ChangeScalar(Type, bool) {
return lhs > rhs;
}
test greaterThan {
@ -5341,7 +5387,7 @@ test greaterThan {
try test_greater_than.testFloatVectors();
}
inline fn greaterThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs >= rhs) {
inline fn greaterThanOrEqual(comptime Type: type, lhs: Type, rhs: Type) ChangeScalar(Type, bool) {
return lhs >= rhs;
}
test greaterThanOrEqual {
@ -5352,20 +5398,24 @@ test greaterThanOrEqual {
try test_greater_than_or_equal.testFloatVectors();
}
inline fn bitAnd(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs & rhs) {
inline fn bitAnd(comptime Type: type, lhs: Type, rhs: Type) Type {
return lhs & rhs;
}
test bitAnd {
const test_bit_and = binary(bitAnd, .{});
try test_bit_and.testBools();
try test_bit_and.testBoolVectors();
try test_bit_and.testInts();
try test_bit_and.testIntVectors();
}
inline fn bitOr(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs | rhs) {
inline fn bitOr(comptime Type: type, lhs: Type, rhs: Type) Type {
return lhs | rhs;
}
test bitOr {
const test_bit_or = binary(bitOr, .{});
try test_bit_or.testBools();
try test_bit_or.testBoolVectors();
try test_bit_or.testInts();
try test_bit_or.testIntVectors();
}
@ -5417,7 +5467,7 @@ test shlExactUnsafe {
try test_shl_exact_unsafe.testIntVectors();
}
inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type {
inline fn shlSaturate(comptime Type: type, lhs: Type, rhs: Type) Type {
// workaround https://github.com/ziglang/zig/issues/23034
if (@inComptime()) {
// workaround https://github.com/ziglang/zig/issues/23139
@ -5427,17 +5477,19 @@ inline fn shlSat(comptime Type: type, lhs: Type, rhs: Type) Type {
@setRuntimeSafety(false);
return lhs <<| @abs(rhs);
}
test shlSat {
const test_shl_sat = binary(shlSat, .{});
try test_shl_sat.testInts();
try test_shl_sat.testIntVectors();
test shlSaturate {
const test_shl_saturate = binary(shlSaturate, .{});
try test_shl_saturate.testInts();
try test_shl_saturate.testIntVectors();
}
inline fn bitXor(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(lhs ^ rhs) {
inline fn bitXor(comptime Type: type, lhs: Type, rhs: Type) Type {
return lhs ^ rhs;
}
test bitXor {
const test_bit_xor = binary(bitXor, .{});
try test_bit_xor.testBools();
try test_bit_xor.testBoolVectors();
try test_bit_xor.testInts();
try test_bit_xor.testIntVectors();
}
@ -5516,7 +5568,7 @@ test reduceXorNotEqual {
try test_reduce_xor_not_equal.testFloatVectors();
}
inline fn mulAdd(comptime Type: type, lhs: Type, rhs: Type) @TypeOf(@mulAdd(Type, lhs, rhs, rhs)) {
inline fn mulAdd(comptime Type: type, lhs: Type, rhs: Type) Type {
return @mulAdd(Type, lhs, rhs, rhs);
}
test mulAdd {
@ -113,6 +113,51 @@ pub fn build(b: *std.Build) void {
.cpu_arch = .x86_64,
.cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64_v4 },
},
.{
.cpu_arch = .x86_64,
.cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64 },
.os_tag = .windows,
.abi = .none,
},
.{
.cpu_arch = .x86_64,
.cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64 },
.cpu_features_add = std.Target.x86.featureSet(&.{.ssse3}),
.os_tag = .windows,
.abi = .none,
},
.{
.cpu_arch = .x86_64,
.cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64_v2 },
.os_tag = .windows,
.abi = .none,
},
.{
.cpu_arch = .x86_64,
.cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64_v3 },
.os_tag = .windows,
.abi = .none,
},
.{
.cpu_arch = .x86_64,
.cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64 },
.os_tag = .windows,
.abi = .gnu,
},
.{
.cpu_arch = .x86_64,
.cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64_v2 },
.os_tag = .windows,
.abi = .gnu,
},
.{
.cpu_arch = .x86_64,
.cpu_model = .{ .explicit = &std.Target.x86.cpu.x86_64_v3 },
.os_tag = .windows,
.abi = .gnu,
},
}) |query| {
const target = b.resolveTargetQuery(query);
const triple = query.zigTriple(b.allocator) catch @panic("OOM");
@ -14546,13 +14546,24 @@ test floatCast {
try test_float_cast.testFloatVectors();
}
inline fn intFromFloat(comptime Result: type, comptime Type: type, rhs: Type, comptime _: Type) Result {
inline fn intFromFloatUnsafe(comptime Result: type, comptime Type: type, rhs: Type, comptime _: Type) Result {
@setRuntimeSafety(false);
return @intFromFloat(rhs);
}
test intFromFloat {
const test_int_from_float = cast(intFromFloat, .{ .compare = .strict });
try test_int_from_float.testIntsFromFloats();
try test_int_from_float.testIntVectorsFromFloatVectors();
test intFromFloatUnsafe {
const test_int_from_float_unsafe = cast(intFromFloatUnsafe, .{ .compare = .strict });
try test_int_from_float_unsafe.testIntsFromFloats();
try test_int_from_float_unsafe.testIntVectorsFromFloatVectors();
}
inline fn intFromFloatSafe(comptime Result: type, comptime Type: type, rhs: Type, comptime _: Type) Result {
@setRuntimeSafety(true);
return @intFromFloat(rhs);
}
test intFromFloatSafe {
const test_int_from_float_safe = cast(intFromFloatSafe, .{ .compare = .strict });
try test_int_from_float_safe.testIntsFromFloats();
try test_int_from_float_safe.testIntVectorsFromFloatVectors();
}
inline fn floatFromInt(comptime Result: type, comptime Type: type, rhs: Type, comptime _: Type) Result {
@ -35,10 +35,14 @@ pub fn ChangeScalar(comptime Type: type, comptime NewScalar: type) type {
};
}
pub fn AsSignedness(comptime Type: type, comptime signedness: std.builtin.Signedness) type {
return ChangeScalar(Type, @Type(.{ .int = .{
.signedness = signedness,
.bits = @typeInfo(Scalar(Type)).int.bits,
} }));
return switch (@typeInfo(Scalar(Type))) {
.int => |int| ChangeScalar(Type, @Type(.{ .int = .{
.signedness = signedness,
.bits = int.bits,
} })),
.float => Type,
else => @compileError(@typeName(Type)),
};
}
pub fn AddOneBit(comptime Type: type) type {
return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
@ -56,7 +60,10 @@ pub fn DoubleBits(comptime Type: type) type {
}
pub fn RoundBitsUp(comptime Type: type, comptime multiple: u16) type {
return ChangeScalar(Type, switch (@typeInfo(Scalar(Type))) {
.int => |int| @Type(.{ .int = .{ .signedness = int.signedness, .bits = std.mem.alignForward(u16, int.bits, multiple) } }),
.int => |int| @Type(.{ .int = .{
.signedness = int.signedness,
.bits = std.mem.alignForward(u16, int.bits, multiple),
} }),
.float => Scalar(Type),
else => @compileError(@typeName(Type)),
});
@ -67,61 +74,30 @@ pub fn Log2Int(comptime Type: type) type {
pub fn Log2IntCeil(comptime Type: type) type {
return ChangeScalar(Type, math.Log2IntCeil(Scalar(Type)));
}
// inline to avoid a runtime `@splat`
pub inline fn splat(comptime Type: type, scalar: Scalar(Type)) Type {
pub fn splat(comptime Type: type, scalar: Scalar(Type)) Type {
return switch (@typeInfo(Type)) {
else => scalar,
.vector => @splat(scalar),
};
}
// inline to avoid a runtime `@select`
inline fn select(cond: anytype, lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {
pub fn sign(rhs: anytype) ChangeScalar(@TypeOf(rhs), bool) {
const Int = ChangeScalar(@TypeOf(rhs), switch (@typeInfo(Scalar(@TypeOf(rhs)))) {
.int, .comptime_int => Scalar(@TypeOf(rhs)),
.float => |float| @Type(.{ .int = .{
.signedness = .signed,
.bits = float.bits,
} }),
else => @compileError(@typeName(@TypeOf(rhs))),
});
return @as(Int, @bitCast(rhs)) < splat(Int, 0);
}
pub fn select(cond: anytype, lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {
return switch (@typeInfo(@TypeOf(cond))) {
.bool => if (cond) lhs else rhs,
.vector => @select(Scalar(@TypeOf(lhs)), cond, lhs, rhs),
else => @compileError(@typeName(@TypeOf(cond))),
};
}
pub fn sign(rhs: anytype) ChangeScalar(@TypeOf(rhs), bool) {
const ScalarInt = @Type(.{ .int = .{
.signedness = .unsigned,
.bits = @bitSizeOf(Scalar(@TypeOf(rhs))),
} });
const VectorInt = ChangeScalar(@TypeOf(rhs), ScalarInt);
return @as(VectorInt, @bitCast(rhs)) & splat(VectorInt, @as(ScalarInt, 1) << @bitSizeOf(ScalarInt) - 1) != splat(VectorInt, 0);
}
fn boolAnd(lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {
switch (@typeInfo(@TypeOf(lhs))) {
.bool => return lhs and rhs,
.vector => |vector| switch (vector.child) {
bool => {
const Bits = @Type(.{ .int = .{ .signedness = .unsigned, .bits = vector.len } });
const lhs_bits: Bits = @bitCast(lhs);
const rhs_bits: Bits = @bitCast(rhs);
return @bitCast(lhs_bits & rhs_bits);
},
else => {},
},
else => {},
}
@compileError("unsupported boolAnd type: " ++ @typeName(@TypeOf(lhs)));
}
fn boolOr(lhs: anytype, rhs: @TypeOf(lhs)) @TypeOf(lhs) {
switch (@typeInfo(@TypeOf(lhs))) {
.bool => return lhs or rhs,
.vector => |vector| switch (vector.child) {
bool => {
const Bits = @Type(.{ .int = .{ .signedness = .unsigned, .bits = vector.len } });
const lhs_bits: Bits = @bitCast(lhs);
const rhs_bits: Bits = @bitCast(rhs);
return @bitCast(lhs_bits | rhs_bits);
},
else => {},
},
else => {},
}
@compileError("unsupported boolOr type: " ++ @typeName(@TypeOf(lhs)));
}
pub const Compare = enum { strict, relaxed, approx, approx_int, approx_or_overflow };
// noinline for a more helpful stack trace
@ -131,9 +107,9 @@ pub noinline fn checkExpected(expected: anytype, actual: @TypeOf(expected), comp
else => expected != actual,
.float => switch (compare) {
.strict, .relaxed => {
const unequal = boolAnd(expected != actual, boolOr(expected == expected, actual == actual));
const unequal = (expected != actual) & ((expected == expected) | (actual == actual));
break :unexpected switch (compare) {
.strict => boolOr(unequal, sign(expected) != sign(actual)),
.strict => unequal | (sign(expected) != sign(actual)),
.relaxed => unequal,
.approx, .approx_int, .approx_or_overflow => comptime unreachable,
};
@ -156,10 +132,10 @@ pub noinline fn checkExpected(expected: anytype, actual: @TypeOf(expected), comp
break :unexpected switch (compare) {
.strict, .relaxed => comptime unreachable,
.approx, .approx_int => approx_unequal,
.approx_or_overflow => boolAnd(approx_unequal, boolOr(boolAnd(
@abs(expected) != splat(Expected, inf(Expected)),
@abs(actual) != splat(Expected, inf(Expected)),
), sign(expected) != sign(actual))),
.approx_or_overflow => approx_unequal &
(((@abs(expected) != splat(Expected, inf(Expected))) &
(@abs(actual) != splat(Expected, inf(Expected)))) |
(sign(expected) != sign(actual))),
};
},
},
@ -1,9 +1,11 @@
const AsSignedness = math.AsSignedness;
const checkExpected = math.checkExpected;
const Compare = math.Compare;
const fmax = math.fmax;
const fmin = math.fmin;
const Gpr = math.Gpr;
const inf = math.inf;
const Log2IntCeil = math.Log2IntCeil;
const math = @import("math.zig");
const nan = math.nan;
const RoundBitsUp = math.RoundBitsUp;
@ -56,6 +58,10 @@ fn unary(comptime op: anytype, comptime opts: struct {
f128 => libc_name ++ "q",
else => break :libc,
},
.library_name = switch (@import("builtin").object_format) {
else => null,
.coff => "compiler_rt",
},
});
switch (@typeInfo(Type)) {
else => break :expected libc_func(imm_arg),
@ -98,6 +104,10 @@ fn unary(comptime op: anytype, comptime opts: struct {
imm_arg,
);
}
fn testBools() !void {
try testArgs(bool, false);
try testArgs(bool, true);
}
fn testIntTypes() !void {
try testArgs(i1, undefined);
try testArgs(u1, undefined);
@ -4804,16 +4814,27 @@ fn unary(comptime op: anytype, comptime opts: struct {
};
}
inline fn bitNot(comptime Type: type, rhs: Type) @TypeOf(~rhs) {
inline fn boolNot(comptime Type: type, rhs: Type) Type {
return !rhs;
}
test boolNot {
const test_bool_not = unary(boolNot, .{});
try test_bool_not.testBools();
try test_bool_not.testBoolVectors();
}
inline fn bitNot(comptime Type: type, rhs: Type) Type {
return ~rhs;
}
test bitNot {
const test_bit_not = unary(bitNot, .{});
try test_bit_not.testBools();
try test_bit_not.testBoolVectors();
|
||||
try test_bit_not.testInts();
|
||||
try test_bit_not.testIntVectors();
|
||||
}
|
||||
|
||||
inline fn clz(comptime Type: type, rhs: Type) @TypeOf(@clz(rhs)) {
|
||||
inline fn clz(comptime Type: type, rhs: Type) Log2IntCeil(Type) {
|
||||
return @clz(rhs);
|
||||
}
|
||||
test clz {
|
||||
@ -4822,7 +4843,7 @@ test clz {
|
||||
try test_clz.testIntVectors();
|
||||
}
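
Spelling the return type as `Log2IntCeil(Type)` instead of `@TypeOf(@clz(rhs))` documents what `@clz` actually returns: for a `w`-bit integer the count ranges over `0...w`, which needs `ceil(log2(w + 1))` bits. A sketch of an equivalent mapping via `std.math.IntFittingRange` (the local `math.zig` helper is presumed to also handle vector types):

    const std = @import("std");

    fn Log2IntCeilSketch(comptime T: type) type {
        // u8 has counts 0 through 8 inclusive, so the result needs 4 bits.
        return std.math.IntFittingRange(0, @bitSizeOf(T));
    }

    test Log2IntCeilSketch {
        try std.testing.expectEqual(u4, Log2IntCeilSketch(u8));
        try std.testing.expectEqual(@TypeOf(@clz(@as(u8, 0))), Log2IntCeilSketch(u8));
    }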

inline fn ctz(comptime Type: type, rhs: Type) @TypeOf(@ctz(rhs)) {
inline fn ctz(comptime Type: type, rhs: Type) Log2IntCeil(Type) {
    return @ctz(rhs);
}
test ctz {
@@ -4831,7 +4852,7 @@ test ctz {
    try test_ctz.testIntVectors();
}

inline fn popCount(comptime Type: type, rhs: Type) @TypeOf(@popCount(rhs)) {
inline fn popCount(comptime Type: type, rhs: Type) Log2IntCeil(Type) {
    return @popCount(rhs);
}
test popCount {
@@ -4849,7 +4870,7 @@ test byteSwap {
    try test_byte_swap.testIntVectors();
}

inline fn bitReverse(comptime Type: type, rhs: Type) @TypeOf(@bitReverse(rhs)) {
inline fn bitReverse(comptime Type: type, rhs: Type) Type {
    return @bitReverse(rhs);
}
test bitReverse {
@@ -4858,7 +4879,7 @@ test bitReverse {
    try test_bit_reverse.testIntVectors();
}

inline fn sqrt(comptime Type: type, rhs: Type) @TypeOf(@sqrt(rhs)) {
inline fn sqrt(comptime Type: type, rhs: Type) Type {
    return @sqrt(rhs);
}
test sqrt {
@@ -4867,7 +4888,7 @@ test sqrt {
    try test_sqrt.testFloatVectors();
}

inline fn sin(comptime Type: type, rhs: Type) @TypeOf(@sin(rhs)) {
inline fn sin(comptime Type: type, rhs: Type) Type {
    return @sin(rhs);
}
test sin {
@@ -4876,7 +4897,7 @@ test sin {
    try test_sin.testFloatVectors();
}

inline fn cos(comptime Type: type, rhs: Type) @TypeOf(@cos(rhs)) {
inline fn cos(comptime Type: type, rhs: Type) Type {
    return @cos(rhs);
}
test cos {
@@ -4885,7 +4906,7 @@ test cos {
    try test_cos.testFloatVectors();
}

inline fn tan(comptime Type: type, rhs: Type) @TypeOf(@tan(rhs)) {
inline fn tan(comptime Type: type, rhs: Type) Type {
    return @tan(rhs);
}
test tan {
@@ -4894,7 +4915,7 @@ test tan {
    try test_tan.testFloatVectors();
}

inline fn exp(comptime Type: type, rhs: Type) @TypeOf(@exp(rhs)) {
inline fn exp(comptime Type: type, rhs: Type) Type {
    return @exp(rhs);
}
test exp {
@@ -4903,7 +4924,7 @@ test exp {
    try test_exp.testFloatVectors();
}

inline fn exp2(comptime Type: type, rhs: Type) @TypeOf(@exp2(rhs)) {
inline fn exp2(comptime Type: type, rhs: Type) Type {
    return @exp2(rhs);
}
test exp2 {
@@ -4912,7 +4933,7 @@ test exp2 {
    try test_exp2.testFloatVectors();
}

inline fn log(comptime Type: type, rhs: Type) @TypeOf(@log(rhs)) {
inline fn log(comptime Type: type, rhs: Type) Type {
    return @log(rhs);
}
test log {
@@ -4921,7 +4942,7 @@ test log {
    try test_log.testFloatVectors();
}

inline fn log2(comptime Type: type, rhs: Type) @TypeOf(@log2(rhs)) {
inline fn log2(comptime Type: type, rhs: Type) Type {
    return @log2(rhs);
}
test log2 {
@@ -4930,7 +4951,7 @@ test log2 {
    try test_log2.testFloatVectors();
}

inline fn log10(comptime Type: type, rhs: Type) @TypeOf(@log10(rhs)) {
inline fn log10(comptime Type: type, rhs: Type) Type {
    return @log10(rhs);
}
test log10 {
@@ -4939,7 +4960,7 @@ test log10 {
    try test_log10.testFloatVectors();
}

inline fn abs(comptime Type: type, rhs: Type) @TypeOf(@abs(rhs)) {
inline fn abs(comptime Type: type, rhs: Type) AsSignedness(Type, .unsigned) {
    return @abs(rhs);
}
test abs {
@@ -4950,7 +4971,7 @@ test abs {
    try test_abs.testFloatVectors();
}
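
The `abs` wrapper's return type is now `AsSignedness(Type, .unsigned)` because `@abs` maps `iN` to `uN` while leaving float types alone, so the wrapper must do the same. A hedged sketch of an AsSignedness-style helper (the real `math.zig` version presumably also covers vectors):

    const std = @import("std");

    fn AsUnsignedSketch(comptime T: type) type {
        return switch (@typeInfo(T)) {
            .int => |info| @Type(.{ .int = .{ .signedness = .unsigned, .bits = info.bits } }),
            else => T, // floats keep their type
        };
    }

    test AsUnsignedSketch {
        try std.testing.expectEqual(u8, AsUnsignedSketch(i8));
        try std.testing.expectEqual(@TypeOf(@abs(@as(i8, -128))), AsUnsignedSketch(i8));
        try std.testing.expectEqual(f32, AsUnsignedSketch(f32));
    }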

inline fn floor(comptime Type: type, rhs: Type) @TypeOf(@floor(rhs)) {
inline fn floor(comptime Type: type, rhs: Type) Type {
    return @floor(rhs);
}
test floor {
@@ -4959,7 +4980,7 @@ test floor {
    try test_floor.testFloatVectors();
}

inline fn ceil(comptime Type: type, rhs: Type) @TypeOf(@ceil(rhs)) {
inline fn ceil(comptime Type: type, rhs: Type) Type {
    return @ceil(rhs);
}
test ceil {
@@ -4968,7 +4989,7 @@ test ceil {
    try test_ceil.testFloatVectors();
}

inline fn round(comptime Type: type, rhs: Type) @TypeOf(@round(rhs)) {
inline fn round(comptime Type: type, rhs: Type) Type {
    return @round(rhs);
}
test round {
@@ -4977,7 +4998,7 @@ test round {
    try test_round.testFloatVectors();
}

inline fn trunc(comptime Type: type, rhs: Type) @TypeOf(@trunc(rhs)) {
inline fn trunc(comptime Type: type, rhs: Type) Type {
    return @trunc(rhs);
}
test trunc {
@@ -4986,7 +5007,7 @@ test trunc {
    try test_trunc.testFloatVectors();
}

inline fn negate(comptime Type: type, rhs: Type) @TypeOf(-rhs) {
inline fn negate(comptime Type: type, rhs: Type) Type {
    return -rhs;
}
test negate {
@@ -5098,40 +5119,40 @@ test reduceXor {
    try test_reduce_xor.testIntVectors();
}

inline fn reduceMin(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
inline fn reduceMinUnoptimized(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
    return @reduce(.Min, rhs);
}
test reduceMin {
    const test_reduce_min = unary(reduceMin, .{});
    try test_reduce_min.testIntVectors();
    try test_reduce_min.testFloatVectors();
test reduceMinUnoptimized {
    const test_reduce_min_unoptimized = unary(reduceMinUnoptimized, .{});
    try test_reduce_min_unoptimized.testIntVectors();
    try test_reduce_min_unoptimized.testFloatVectors();
}

inline fn reduceMax(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
inline fn reduceMaxUnoptimized(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
    return @reduce(.Max, rhs);
}
test reduceMax {
    const test_reduce_max = unary(reduceMax, .{});
    try test_reduce_max.testIntVectors();
    try test_reduce_max.testFloatVectors();
test reduceMaxUnoptimized {
    const test_reduce_max_unoptimized = unary(reduceMaxUnoptimized, .{});
    try test_reduce_max_unoptimized.testIntVectors();
    try test_reduce_max_unoptimized.testFloatVectors();
}

inline fn reduceAdd(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
inline fn reduceAddUnoptimized(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
    return @reduce(.Add, rhs);
}
test reduceAdd {
    const test_reduce_add = unary(reduceAdd, .{});
    try test_reduce_add.testIntVectors();
    try test_reduce_add.testFloatVectors();
test reduceAddUnoptimized {
    const test_reduce_add_unoptimized = unary(reduceAddUnoptimized, .{});
    try test_reduce_add_unoptimized.testIntVectors();
    try test_reduce_add_unoptimized.testFloatVectors();
}

inline fn reduceMul(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
inline fn reduceMulUnoptimized(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {
    return @reduce(.Mul, rhs);
}
test reduceMul {
    const test_reduce_mul = unary(reduceMul, .{});
    try test_reduce_mul.testIntVectors();
    try test_reduce_mul.testFloatVectors();
test reduceMulUnoptimized {
    const test_reduce_mul_unoptimized = unary(reduceMulUnoptimized, .{});
    try test_reduce_mul_unoptimized.testIntVectors();
    try test_reduce_mul_unoptimized.testFloatVectors();
}
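
The reduce tests are renamed to `...Unoptimized`, with `...Optimized` counterparts following (the next fragment shows the first of them); both families wrap `@reduce`, which folds a vector into a scalar with the given operator. A minimal sketch of the builtin being exercised:

    const std = @import("std");

    test "@reduce folds vectors to scalars" {
        const v: @Vector(4, i32) = .{ 3, -1, 4, 1 };
        try std.testing.expectEqual(@as(i32, -1), @reduce(.Min, v));
        try std.testing.expectEqual(@as(i32, 4), @reduce(.Max, v));
        try std.testing.expectEqual(@as(i32, 7), @reduce(.Add, v));
    }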

inline fn reduceMinOptimized(comptime Type: type, rhs: Type) @typeInfo(Type).vector.child {

@@ -1558,6 +1558,15 @@ const test_targets = blk: {
        .use_llvm = false,
        .use_lld = false,
    },
    .{
        .target = .{
            .cpu_arch = .x86_64,
            .os_tag = .windows,
            .abi = .gnu,
        },
        .use_llvm = false,
        .use_lld = false,
    },
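
This hunk is the payoff of the commit: x86_64-windows-gnu is registered as a test target with `use_llvm = false` and `use_lld = false`, i.e. the self-hosted backend and self-hosted linker. A hedged sketch of the equivalent standalone `std.Target.Query`:

    const std = @import("std");

    test "x86_64-windows-gnu target query" {
        const query: std.Target.Query = .{
            .cpu_arch = .x86_64,
            .os_tag = .windows,
            .abi = .gnu,
        };
        // With explicit fields set, this is a cross query, not the native target.
        try std.testing.expect(!query.isNative());
    }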
    .{
        .target = .{
            .cpu_arch = .x86_64,