Merge pull request #21933 from kcbanner/comptime_nan_comparison

Fix float vector comparisons with signed zero and NaN, add test coverage
This commit is contained in:
Andrew Kelley 2025-03-09 15:06:25 -04:00 committed by GitHub
commit 539f3effd3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 139 additions and 3 deletions

View File

@ -38029,6 +38029,11 @@ fn compareScalar(
const pt = sema.pt;
const coerced_lhs = try pt.getCoerced(lhs, ty);
const coerced_rhs = try pt.getCoerced(rhs, ty);
// Equality comparisons of signed zero and NaN need to use floating point semantics
if (coerced_lhs.isFloat(pt.zcu) or coerced_rhs.isFloat(pt.zcu))
return Value.compareHeteroSema(coerced_lhs, op, coerced_rhs, pt);
switch (op) {
.eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty),
.neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)),

View File

@ -1132,6 +1132,8 @@ pub fn compareHeteroAdvanced(
else => {},
}
}
if (lhs.isNan(zcu) or rhs.isNan(zcu)) return op == .neq;
return (try orderAdvanced(lhs, rhs, strat, zcu, tid)).compare(op);
}

View File

@ -132,13 +132,20 @@ test "cmp f16" {
try comptime testCmp(f16);
}
test "cmp f32/f64" {
test "cmp f32" {
    // Skip configurations that cannot run scalar f32 comparisons yet.
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
    if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
    try testCmp(f32);
    try comptime testCmp(f32);
}
test "cmp f64" {
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
    if (builtin.cpu.arch.isArm() and builtin.target.abi.float() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234
    // f32 coverage lives in the dedicated "cmp f32" test; running it again
    // here (leftover from the old combined "cmp f32/f64" test) was redundant.
    try testCmp(f64);
    try comptime testCmp(f64);
}
@ -224,6 +231,98 @@ fn testCmp(comptime T: type) !void {
}
}
test "vector cmp f16" {
    // Duplicate stage2_wasm skip removed — the same check appeared twice.
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
    if (builtin.cpu.arch.isArm()) return error.SkipZigTest;
    if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest;
    try testCmpVector(f16);
    try comptime testCmpVector(f16);
}
test "vector cmp f32" {
    // Backends that cannot lower vector float comparisons yet.
    switch (builtin.zig_backend) {
        .stage2_wasm, .stage2_sparc64 => return error.SkipZigTest, // TODO
        .stage2_x86_64, .stage2_riscv64 => return error.SkipZigTest,
        else => {},
    }
    if (builtin.cpu.arch.isArm() or builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest;
    try testCmpVector(f32);
    try comptime testCmpVector(f32);
}
test "vector cmp f64" {
    // Backends that cannot lower vector float comparisons yet.
    switch (builtin.zig_backend) {
        .stage2_wasm, .stage2_sparc64 => return error.SkipZigTest, // TODO
        .stage2_x86_64, .stage2_riscv64 => return error.SkipZigTest,
        else => {},
    }
    if (builtin.cpu.arch.isArm() or builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest;
    try testCmpVector(f64);
    try comptime testCmpVector(f64);
}
test "vector cmp f128" {
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
    // The blanket isArm() skip subsumes the old `stage2_c and isArm()` check,
    // which was unreachable duplication and has been removed.
    if (builtin.cpu.arch.isArm()) return error.SkipZigTest;
    if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest;
    try testCmpVector(f128);
    try comptime testCmpVector(f128);
}
// Disabled unconditionally: vector comparisons for f80 / c_longdouble are not
// exercised anywhere in this file. The reason is not visible here —
// TODO(review): confirm the blocker and link a tracking issue before enabling.
test "vector cmp f80/c_longdouble" {
    if (true) return error.SkipZigTest;
    try testCmpVector(f80);
    try comptime testCmpVector(f80);
    try testCmpVector(c_longdouble);
    try comptime testCmpVector(c_longdouble);
}
/// Exhaustively compares every pair of edge-case values of float type `T` as
/// 4-lane vectors, checking all six comparison operators against an expected
/// total order in which -0.0 and +0.0 compare equal and NaN is unordered
/// (only `!=` is true when either side is NaN).
fn testCmpVector(comptime T: type) !void {
    const nan_index = 5; // position of math.nan(T) in `edges` below
    var edges = [_]T{
        -math.inf(T),
        -math.floatMax(T),
        -math.floatMin(T),
        -math.floatTrueMin(T),
        -0.0,
        math.nan(T),
        0.0,
        math.floatTrueMin(T),
        math.floatMin(T),
        math.floatMax(T),
        math.inf(T),
    };
    _ = &edges; // keep the values runtime-known for the non-comptime call
    for (edges, 0..) |b, bi| {
        const vb: @Vector(4, T) = @splat(b);
        for (edges, 0..) |a, ai| {
            // No ordered comparison holds when either operand is NaN.
            const comparable = ai != nan_index and bi != nan_index;
            // Map array index to order rank: -0.0 (index 4) and +0.0
            // (index 6) must share a rank, so indices past NaN shift by 2.
            const rank_a = if (ai < nan_index) ai else ai - 2;
            const rank_b = if (bi < nan_index) bi else bi - 2;
            const va: @Vector(4, T) = @splat(a);
            try expect(@reduce(.And, va == vb) == (comparable and rank_a == rank_b));
            try expect(@reduce(.And, va != vb) == !(comparable and rank_a == rank_b));
            try expect(@reduce(.And, va < vb) == (comparable and rank_a < rank_b));
            try expect(@reduce(.And, va > vb) == (comparable and rank_a > rank_b));
            try expect(@reduce(.And, va <= vb) == (comparable and rank_a <= rank_b));
            try expect(@reduce(.And, va >= vb) == (comparable and rank_a >= rank_b));
        }
    }
}
test "different sized float comparisons" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@ -1703,3 +1802,33 @@ test "optimized float mode" {
try expect(S.optimized(small) == small);
try expect(S.strict(small) == tiny);
}
/// Returns a struct type parameterized by the comptime value `x`.
/// Comptime call memoization decides whether two calls yield the same type,
/// which is what the memoization tests below rely on.
fn MakeType(comptime x: anytype) type {
    const T = @TypeOf(x);
    return struct {
        fn get() T {
            return x;
        }
    };
}
// Two quiet NaNs that differ only in their payload bits (0xffc00000 vs
// 0xffe00000): numerically both compare as NaN, but bit-for-bit they are
// distinct values — exactly the distinction the memoization tests need.
const nan_a: f32 = @bitCast(@as(u32, 0xffc00000));
const nan_b: f32 = @bitCast(@as(u32, 0xffe00000));
fn testMemoization() !void {
    // Bit-identical comptime float arguments must memoize to the same type;
    // a different NaN payload must produce a distinct type.
    const A = MakeType(nan_a);
    const B = MakeType(nan_b);
    try expect(A == MakeType(nan_a));
    try expect(B == MakeType(nan_b));
    try expect(A != B);
}
fn testVectorMemoization(comptime T: type) !void {
    // Vector analogue: memoization must key on the bit pattern of every lane.
    const splat_a: T = @splat(nan_a);
    const splat_b: T = @splat(nan_b);
    const A = MakeType(splat_a);
    const B = MakeType(splat_b);
    try expect(A == MakeType(splat_a));
    try expect(B == MakeType(splat_b));
    try expect(A != B);
}
// nan_a and nan_b are both NaN and therefore compare unequal to each other
// (and to themselves) under float semantics; memoization must instead key on
// the exact bit pattern so MakeType(nan_a) and MakeType(nan_b) stay distinct.
test "comptime calls are only memoized when float arguments are bit-for-bit equal" {
    try comptime testMemoization();
    try comptime testVectorMemoization(@Vector(4, f32));
}