Merge pull request #11847 from ziglang/better-libcompiler_rt

Jakub Konka 2022-06-20 00:26:39 +02:00 committed by GitHub
commit 74442f3503
194 changed files with 4566 additions and 3184 deletions

@@ -480,8 +480,15 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/sort.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/absv.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/addXf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/absvdi2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/absvsi2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/absvti2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/adddf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/addf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/addo.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/addsf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/addtf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/addxf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/arm.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/atomics.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/aulldiv.zig"
@@ -490,7 +497,12 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/ceil.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/clear_cache.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmp.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/compareXf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmpdf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmpsf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmptf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmpxf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/common.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/comparef.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/cos.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/count0bits.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/divdf3.zig"
@@ -501,25 +513,101 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/emutls.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/exp.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/exp2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendXfYf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/extend_f80.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/extenddftf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/extenddfxf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendhfsf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendhftf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendhfxf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendsfdf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendsftf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendsfxf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendxftf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fabs.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixXfYi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatXiYf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixdfdi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixdfsi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixdfti.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixhfdi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixhfsi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixhfti.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixsfdi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixsfsi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixsfti.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixtfdi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixtfsi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixtfti.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsdfdi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsdfsi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsdfti.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunshfdi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunshfsi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunshfti.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunssfdi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunssfsi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunssfti.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunstfdi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunstfsi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunstfti.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsxfdi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsxfsi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsxfti.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfdi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfsi.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfti.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/float_to_int.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdidf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdihf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdisf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatditf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdixf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsidf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsihf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsisf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsitf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsixf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattidf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattihf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattisf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattitf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattixf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundidf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundihf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundisf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunditf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundixf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsidf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsihf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsisf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsitf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsixf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntidf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntihf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntisf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntitf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntixf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floor.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fma.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fmax.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fmin.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fmod.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/gedf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/gesf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/getf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/gexf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/int.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/int_to_float.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log10.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/modti3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulXf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/muldf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/muldi3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulo.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulsf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/multf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/multi3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulxf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/negXf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/negXi2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/negv.zig"
@@ -533,19 +621,34 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/shift.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/sin.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/sincos.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/sparc.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/sqrt.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/stack_probe.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/subdf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/subo.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/subsf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/subtf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/subxf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/tan.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/trig.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunc.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncXfYf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunc_f80.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncdfhf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncdfsf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncf.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncsfhf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunctfdf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunctfhf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunctfsf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunctfxf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncxfdf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncxfhf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncxfsf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivmod.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivmodti4.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivti3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/umodti3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/unorddf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/unordsf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/unordtf2.zig"
"${CMAKE_SOURCE_DIR}/lib/std/start.zig"
"${CMAKE_SOURCE_DIR}/lib/std/std.zig"
"${CMAKE_SOURCE_DIR}/lib/std/target.zig"

@@ -75,7 +75,7 @@ release/bin/zig build test-translate-c -Denable-macos-sdk
release/bin/zig build test-run-translated-c -Denable-macos-sdk
release/bin/zig build docs -Denable-macos-sdk
release/bin/zig build test-fmt -Denable-macos-sdk
release/bin/zig build test-cases -Denable-macos-sdk
release/bin/zig build test-cases -Denable-macos-sdk -Dsingle-threaded
if [ "${BUILD_REASON}" != "PullRequest" ]; then
mv ../LICENSE release/

File diff suppressed because it is too large.

@@ -1,8 +1,6 @@
// absv - absolute oVerflow
// * @panic, if value can not be represented
// - absvXi4_generic for unoptimized version
inline fn absvXi(comptime ST: type, a: ST) ST {
/// absv - absolute oVerflow
/// * @panic if value can not be represented
pub inline fn absv(comptime ST: type, a: ST) ST {
const UT = switch (ST) {
i32 => u32,
i64 => u64,
@@ -21,18 +19,6 @@ inline fn absvXi(comptime ST: type, a: ST) ST {
return x;
}
pub fn __absvsi2(a: i32) callconv(.C) i32 {
return absvXi(i32, a);
}
pub fn __absvdi2(a: i64) callconv(.C) i64 {
return absvXi(i64, a);
}
pub fn __absvti2(a: i128) callconv(.C) i128 {
return absvXi(i128, a);
}
test {
_ = @import("absvsi2_test.zig");
_ = @import("absvdi2_test.zig");

@@ -0,0 +1,12 @@
const common = @import("./common.zig");
const absv = @import("./absv.zig").absv;
pub const panic = common.panic;
comptime {
@export(__absvdi2, .{ .name = "__absvdi2", .linkage = common.linkage });
}
pub fn __absvdi2(a: i64) callconv(.C) i64 {
return absv(i64, a);
}
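
A minimal usage sketch, not part of the diff, showing how the thin wrapper just above is exercised: the generic absv helper does the work and, per its doc comment, @panics when the magnitude is not representable.

const std = @import("std");
const __absvdi2 = @import("absvdi2.zig").__absvdi2;

test "absvdi2 returns the magnitude" {
    try std.testing.expectEqual(@as(i64, 10), __absvdi2(-10));
    try std.testing.expectEqual(@as(i64, 10), __absvdi2(10));
    // __absvdi2(std.math.minInt(i64)) would @panic: |minInt(i64)| is not representable in i64.
}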

@@ -1,8 +1,9 @@
const absv = @import("absv.zig");
const testing = @import("std").testing;
const __absvdi2 = @import("absvdi2.zig").__absvdi2;
fn test__absvdi2(a: i64, expected: i64) !void {
var result = absv.__absvdi2(a);
var result = __absvdi2(a);
try testing.expectEqual(expected, result);
}

@@ -0,0 +1,12 @@
const common = @import("./common.zig");
const absv = @import("./absv.zig").absv;
pub const panic = common.panic;
comptime {
@export(__absvsi2, .{ .name = "__absvsi2", .linkage = common.linkage });
}
pub fn __absvsi2(a: i32) callconv(.C) i32 {
return absv(i32, a);
}

@@ -1,8 +1,9 @@
const absv = @import("absv.zig");
const testing = @import("std").testing;
const __absvsi2 = @import("absvsi2.zig").__absvsi2;
fn test__absvsi2(a: i32, expected: i32) !void {
var result = absv.__absvsi2(a);
var result = __absvsi2(a);
try testing.expectEqual(expected, result);
}

@@ -0,0 +1,12 @@
const common = @import("./common.zig");
const absv = @import("./absv.zig").absv;
pub const panic = common.panic;
comptime {
@export(__absvti2, .{ .name = "__absvti2", .linkage = common.linkage });
}
pub fn __absvti2(a: i128) callconv(.C) i128 {
return absv(i128, a);
}

@@ -1,8 +1,9 @@
const absv = @import("absv.zig");
const testing = @import("std").testing;
const __absvti2 = @import("absvti2.zig").__absvti2;
fn test__absvti2(a: i128, expected: i128) !void {
var result = absv.__absvti2(a);
var result = __absvti2(a);
try testing.expectEqual(expected, result);
}

@@ -0,0 +1,20 @@
const common = @import("./common.zig");
const addf3 = @import("./addf3.zig").addf3;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = common.linkage });
} else {
@export(__adddf3, .{ .name = "__adddf3", .linkage = common.linkage });
}
}
fn __adddf3(a: f64, b: f64) callconv(.C) f64 {
return addf3(f64, a, b);
}
fn __aeabi_dadd(a: f64, b: f64) callconv(.AAPCS) f64 {
return addf3(f64, a, b);
}

@@ -1,84 +1,12 @@
// Ported from:
//
// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/lib/builtins/fp_add_impl.inc
const std = @import("std");
const math = std.math;
const builtin = @import("builtin");
const compiler_rt = @import("../compiler_rt.zig");
const common = @import("./common.zig");
const normalize = common.normalize;
pub fn __addsf3(a: f32, b: f32) callconv(.C) f32 {
return addXf3(f32, a, b);
}
pub fn __adddf3(a: f64, b: f64) callconv(.C) f64 {
return addXf3(f64, a, b);
}
pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
return addXf3(f80, a, b);
}
pub fn __subxf3(a: f80, b: f80) callconv(.C) f80 {
var b_rep = std.math.break_f80(b);
b_rep.exp ^= 0x8000;
return __addxf3(a, std.math.make_f80(b_rep));
}
pub fn __addtf3(a: f128, b: f128) callconv(.C) f128 {
return addXf3(f128, a, b);
}
pub fn __subsf3(a: f32, b: f32) callconv(.C) f32 {
const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31));
return addXf3(f32, a, neg_b);
}
pub fn __subdf3(a: f64, b: f64) callconv(.C) f64 {
const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
return addXf3(f64, a, neg_b);
}
pub fn __subtf3(a: f128, b: f128) callconv(.C) f128 {
const neg_b = @bitCast(f128, @bitCast(u128, b) ^ (@as(u128, 1) << 127));
return addXf3(f128, a, neg_b);
}
pub fn __aeabi_fadd(a: f32, b: f32) callconv(.AAPCS) f32 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __addsf3, .{ a, b });
}
pub fn __aeabi_dadd(a: f64, b: f64) callconv(.AAPCS) f64 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __adddf3, .{ a, b });
}
pub fn __aeabi_fsub(a: f32, b: f32) callconv(.AAPCS) f32 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __subsf3, .{ a, b });
}
pub fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __subdf3, .{ a, b });
}
// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(.unsigned, bits);
const S = std.meta.Int(.unsigned, bits - @clz(Z, @as(Z, bits) - 1));
const fractionalBits = math.floatFractionalBits(T);
const integerBit = @as(Z, 1) << fractionalBits;
const shift = @clz(std.meta.Int(.unsigned, bits), significand.*) - @clz(Z, integerBit);
significand.* <<= @intCast(S, shift);
return @as(i32, 1) - shift;
}
// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
fn addXf3(comptime T: type, a: T, b: T) T {
/// Ported from:
///
/// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/lib/builtins/fp_add_impl.inc
pub inline fn addf3(comptime T: type, a: T, b: T) T {
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(.unsigned, bits);
const S = std.meta.Int(.unsigned, bits - @clz(Z, @as(Z, bits) - 1));
@@ -240,5 +168,5 @@ fn addXf3(comptime T: type, a: T, b: T) T {
}
test {
_ = @import("addXf3_test.zig");
_ = @import("addf3_test.zig");
}
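
The removed __sub*f3 wrappers implement subtraction by flipping the sign bit of b and reusing the add path. A hedged sketch of that identity against the new generic addf3, written as an illustrative test rather than part of the diff:

const std = @import("std");
const addf3 = @import("./addf3.zig").addf3;

// a - b == a + (-b); negation is a single sign-bit flip on the IEEE-754 representation.
fn subViaAdd(a: f64, b: f64) f64 {
    const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
    return addf3(f64, a, neg_b);
}

test "subtraction via sign-flipped addition" {
    try std.testing.expectEqual(@as(f64, 1.5), subViaAdd(2.0, 0.5));
}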

@@ -7,7 +7,9 @@ const std = @import("std");
const math = std.math;
const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64);
const __addtf3 = @import("addXf3.zig").__addtf3;
const __addtf3 = @import("addtf3.zig").__addtf3;
const __addxf3 = @import("addxf3.zig").__addxf3;
const __subtf3 = @import("subtf3.zig").__subtf3;
fn test__addtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
const x = __addtf3(a, b);
@@ -48,8 +50,6 @@ test "addtf3" {
try test__addtf3(0x1.edcba52449872455634654321fp-1, 0x1.23456734245345543849abcdefp+5, 0x40042afc95c8b579, 0x61e58dd6c51eb77c);
}
const __subtf3 = @import("addXf3.zig").__subtf3;
fn test__subtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
const x = __subtf3(a, b);
@@ -87,7 +87,6 @@ test "subtf3" {
try test__subtf3(0x1.ee9d7c52354a6936ab8d7654321fp-1, 0x1.234567829a3bcdef5678ade36734p+5, 0xc0041b8af1915166, 0xa44a7bca780a166c);
}
const __addxf3 = @import("addXf3.zig").__addxf3;
const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1)));
fn test__addxf3(a: f80, b: f80, expected: u80) !void {

@@ -1,4 +1,14 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
pub const panic = @import("common.zig").panic;
comptime {
@export(__addosi4, .{ .name = "__addosi4", .linkage = linkage });
@export(__addodi4, .{ .name = "__addodi4", .linkage = linkage });
@export(__addoti4, .{ .name = "__addoti4", .linkage = linkage });
}
// addo - add overflow
// * return a+%b.

@@ -0,0 +1,20 @@
const common = @import("./common.zig");
const addf3 = @import("./addf3.zig").addf3;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = common.linkage });
} else {
@export(__addsf3, .{ .name = "__addsf3", .linkage = common.linkage });
}
}
fn __addsf3(a: f32, b: f32) callconv(.C) f32 {
return addf3(f32, a, b);
}
fn __aeabi_fadd(a: f32, b: f32) callconv(.AAPCS) f32 {
return addf3(f32, a, b);
}

@@ -0,0 +1,26 @@
const common = @import("./common.zig");
const addf3 = @import("./addf3.zig").addf3;
pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
@export(__addkf3, .{ .name = "__addkf3", .linkage = common.linkage });
} else if (common.want_sparc_abi) {
@export(_Qp_add, .{ .name = "_Qp_add", .linkage = common.linkage });
} else {
@export(__addtf3, .{ .name = "__addtf3", .linkage = common.linkage });
}
}
pub fn __addtf3(a: f128, b: f128) callconv(.C) f128 {
return addf3(f128, a, b);
}
fn __addkf3(a: f128, b: f128) callconv(.C) f128 {
return addf3(f128, a, b);
}
fn _Qp_add(c: *f128, a: *f128, b: *f128) callconv(.C) void {
c.* = addf3(f128, a.*, b.*);
}

@@ -0,0 +1,12 @@
const common = @import("./common.zig");
const addf3 = @import("./addf3.zig").addf3;
pub const panic = common.panic;
comptime {
@export(__addxf3, .{ .name = "__addxf3", .linkage = common.linkage });
}
pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
return addf3(f80, a, b);
}

@@ -1,5 +1,46 @@
// ARM specific builtins
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (!builtin.is_test) {
if (arch.isARM() or arch.isThumb()) {
@export(__aeabi_unwind_cpp_pr0, .{ .name = "__aeabi_unwind_cpp_pr0", .linkage = common.linkage });
@export(__aeabi_unwind_cpp_pr1, .{ .name = "__aeabi_unwind_cpp_pr1", .linkage = common.linkage });
@export(__aeabi_unwind_cpp_pr2, .{ .name = "__aeabi_unwind_cpp_pr2", .linkage = common.linkage });
@export(__aeabi_ldivmod, .{ .name = "__aeabi_ldivmod", .linkage = common.linkage });
@export(__aeabi_uldivmod, .{ .name = "__aeabi_uldivmod", .linkage = common.linkage });
@export(__aeabi_idivmod, .{ .name = "__aeabi_idivmod", .linkage = common.linkage });
@export(__aeabi_uidivmod, .{ .name = "__aeabi_uidivmod", .linkage = common.linkage });
@export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy", .linkage = common.linkage });
@export(__aeabi_memcpy4, .{ .name = "__aeabi_memcpy4", .linkage = common.linkage });
@export(__aeabi_memcpy8, .{ .name = "__aeabi_memcpy8", .linkage = common.linkage });
@export(__aeabi_memmove, .{ .name = "__aeabi_memmove", .linkage = common.linkage });
@export(__aeabi_memmove4, .{ .name = "__aeabi_memmove4", .linkage = common.linkage });
@export(__aeabi_memmove8, .{ .name = "__aeabi_memmove8", .linkage = common.linkage });
@export(__aeabi_memset, .{ .name = "__aeabi_memset", .linkage = common.linkage });
@export(__aeabi_memset4, .{ .name = "__aeabi_memset4", .linkage = common.linkage });
@export(__aeabi_memset8, .{ .name = "__aeabi_memset8", .linkage = common.linkage });
@export(__aeabi_memclr, .{ .name = "__aeabi_memclr", .linkage = common.linkage });
@export(__aeabi_memclr4, .{ .name = "__aeabi_memclr4", .linkage = common.linkage });
@export(__aeabi_memclr8, .{ .name = "__aeabi_memclr8", .linkage = common.linkage });
if (builtin.os.tag == .linux) {
@export(__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = common.linkage });
}
}
}
}
const __divmodsi4 = @import("int.zig").__divmodsi4;
const __udivmodsi4 = @import("int.zig").__udivmodsi4;
@@ -14,11 +55,27 @@ pub fn __aeabi_memcpy(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memcpy(dest, src, n);
}
pub fn __aeabi_memcpy4(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memcpy(dest, src, n);
}
pub fn __aeabi_memcpy8(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memcpy(dest, src, n);
}
pub fn __aeabi_memmove(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memmove(dest, src, n);
}
pub fn __aeabi_memmove4(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memmove(dest, src, n);
}
pub fn __aeabi_memmove8(dest: [*]u8, src: [*]u8, n: usize) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memmove(dest, src, n);
}
pub fn __aeabi_memset(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void {
@setRuntimeSafety(false);
@@ -26,16 +83,32 @@ pub fn __aeabi_memset(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void {
// two arguments swapped
_ = memset(dest, c, n);
}
pub fn __aeabi_memset4(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memset(dest, c, n);
}
pub fn __aeabi_memset8(dest: [*]u8, n: usize, c: u8) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memset(dest, c, n);
}
pub fn __aeabi_memclr(dest: [*]u8, n: usize) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memset(dest, 0, n);
}
pub fn __aeabi_memclr4(dest: [*]u8, n: usize) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memset(dest, 0, n);
}
pub fn __aeabi_memclr8(dest: [*]u8, n: usize) callconv(.AAPCS) void {
@setRuntimeSafety(false);
_ = memset(dest, 0, n);
}
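
A hedged illustration, not part of the diff, of the "two arguments swapped" remark above: the AEABI helpers take (dest, n, c) while a memset-style routine takes (dest, c, n). Local stand-in functions are used here instead of the real exported symbols.

const std = @import("std");

fn memsetLike(dest: [*]u8, c: u8, n: usize) void {
    var i: usize = 0;
    while (i < n) : (i += 1) dest[i] = c;
}

fn aeabiMemsetLike(dest: [*]u8, n: usize, c: u8) void {
    // same operation, parameters re-ordered as in __aeabi_memset
    memsetLike(dest, c, n);
}

test "AEABI memset argument order" {
    var buf = [_]u8{0} ** 4;
    aeabiMemsetLike(&buf, buf.len, 0xAA);
    const expected = [_]u8{0xAA} ** 4;
    try std.testing.expectEqualSlices(u8, &expected, &buf);
}
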
// Dummy functions to avoid errors during the linking phase
pub fn __aeabi_unwind_cpp_pr0() callconv(.C) void {}
pub fn __aeabi_unwind_cpp_pr1() callconv(.C) void {}
pub fn __aeabi_unwind_cpp_pr2() callconv(.C) void {}
pub fn __aeabi_unwind_cpp_pr0() callconv(.AAPCS) void {}
pub fn __aeabi_unwind_cpp_pr1() callconv(.AAPCS) void {}
pub fn __aeabi_unwind_cpp_pr2() callconv(.AAPCS) void {}
// This function can only clobber r0 according to the ABI
pub fn __aeabi_read_tp() callconv(.Naked) void {

@@ -2,8 +2,8 @@ const std = @import("std");
const builtin = @import("builtin");
const cpu = builtin.cpu;
const arch = cpu.arch;
const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
pub const panic = @import("common.zig").panic;
// This parameter is true iff the target architecture supports the bare minimum
// to implement the atomic load/store intrinsics.

@@ -1,7 +1,20 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const abi = builtin.abi;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (arch == .i386 and abi == .msvc) {
// Don't let LLVM apply the stdcall name mangling on those MSVC builtins
@export(_alldiv, .{ .name = "\x01__alldiv", .linkage = common.linkage });
@export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = common.linkage });
}
}
pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 {
@setRuntimeSafety(builtin.is_test);
const s_a = a >> (64 - 1);
const s_b = b >> (64 - 1);

@@ -1,7 +1,20 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const abi = builtin.abi;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (arch == .i386 and abi == .msvc) {
// Don't let LLVM apply the stdcall name mangling on those MSVC builtins
@export(_allrem, .{ .name = "\x01__allrem", .linkage = common.linkage });
@export(_aullrem, .{ .name = "\x01__aullrem", .linkage = common.linkage });
}
}
pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 {
@setRuntimeSafety(builtin.is_test);
const s_a = a >> (64 - 1);
const s_b = b >> (64 - 1);

@@ -1,5 +1,14 @@
const std = @import("std");
const builtin = @import("builtin");
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
@export(__bswapsi2, .{ .name = "__bswapsi2", .linkage = common.linkage });
@export(__bswapdi2, .{ .name = "__bswapdi2", .linkage = common.linkage });
@export(__bswapti2, .{ .name = "__bswapti2", .linkage = common.linkage });
}
// bswap - byteswap
// - bswapXi2 for unoptimized big and little endian
@@ -12,7 +21,6 @@ const builtin = @import("builtin");
// 00 00 00 ff << 3*8 (rightmost byte)
inline fn bswapXi2(comptime T: type, a: T) T {
@setRuntimeSafety(builtin.is_test);
switch (@bitSizeOf(T)) {
32 => {
// zig fmt: off

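A small worked example, not part of the diff, of the byte shuffling the comment above describes, using a local helper rather than the exported symbols:

const std = @import("std");

// 0xaabbccdd -> 0xddccbbaa: each byte is masked out and shifted to its mirrored position.
fn bswap32(x: u32) u32 {
    return ((x & 0x000000ff) << 24) |
        ((x & 0x0000ff00) << 8) |
        ((x & 0x00ff0000) >> 8) |
        ((x & 0xff000000) >> 24);
}

test "byte swap" {
    try std.testing.expectEqual(@as(u32, 0xddccbbaa), bswap32(0xaabbccdd));
}
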
@@ -1,12 +1,27 @@
// Ported from musl, which is licensed under the MIT license:
// https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
//
// https://git.musl-libc.org/cgit/musl/tree/src/math/ceilf.c
// https://git.musl-libc.org/cgit/musl/tree/src/math/ceil.c
//! Ported from musl, which is MIT licensed.
//! https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
//!
//! https://git.musl-libc.org/cgit/musl/tree/src/math/ceilf.c
//! https://git.musl-libc.org/cgit/musl/tree/src/math/ceil.c
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const math = std.math;
const expect = std.testing.expect;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
@export(__ceilh, .{ .name = "__ceilh", .linkage = common.linkage });
@export(ceilf, .{ .name = "ceilf", .linkage = common.linkage });
@export(ceil, .{ .name = "ceil", .linkage = common.linkage });
@export(__ceilx, .{ .name = "__ceilx", .linkage = common.linkage });
const ceilq_sym_name = if (common.want_ppc_abi) "ceilf128" else "ceilq";
@export(ceilq, .{ .name = ceilq_sym_name, .linkage = common.linkage });
@export(ceill, .{ .name = "ceill", .linkage = common.linkage });
}
pub fn __ceilh(x: f16) callconv(.C) f16 {
// TODO: more efficient implementation

@@ -2,6 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const os = builtin.os.tag;
pub const panic = @import("common.zig").panic;
// Ported from llvm-project d32170dbd5b0d54436537b6b75beaf44324e0c28
@@ -10,7 +11,13 @@ const os = builtin.os.tag;
// It is expected to invalidate the instruction cache for the
// specified range.
pub fn clear_cache(start: usize, end: usize) callconv(.C) void {
comptime {
if (builtin.zig_backend != .stage2_llvm) {
_ = clear_cache;
}
}
fn clear_cache(start: usize, end: usize) callconv(.C) void {
const x86 = switch (arch) {
.i386, .x86_64 => true,
else => false,

@@ -1,5 +1,18 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
@export(__cmpsi2, .{ .name = "__cmpsi2", .linkage = common.linkage });
@export(__cmpdi2, .{ .name = "__cmpdi2", .linkage = common.linkage });
@export(__cmpti2, .{ .name = "__cmpti2", .linkage = common.linkage });
@export(__ucmpsi2, .{ .name = "__ucmpsi2", .linkage = common.linkage });
@export(__ucmpdi2, .{ .name = "__ucmpdi2", .linkage = common.linkage });
@export(__ucmpti2, .{ .name = "__ucmpti2", .linkage = common.linkage });
}
// cmp - signed compare
// - cmpXi2_generic for unoptimized little and big endian
@@ -12,7 +25,6 @@ const builtin = @import("builtin");
// a > b => 2
inline fn XcmpXi2(comptime T: type, a: T, b: T) i32 {
@setRuntimeSafety(builtin.is_test);
var cmp1: i32 = 0;
var cmp2: i32 = 0;
if (a > b)

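A hedged sketch, not part of the diff, of the return convention documented above (a < b => 0, a == b => 1, a > b => 2), using a local helper instead of the exported __cmpXi2/__ucmpXi2 symbols:

const std = @import("std");

fn cmpConvention(a: i64, b: i64) i32 {
    if (a < b) return 0;
    if (a == b) return 1;
    return 2;
}

test "tri-state compare convention" {
    try std.testing.expectEqual(@as(i32, 0), cmpConvention(-1, 1));
    try std.testing.expectEqual(@as(i32, 1), cmpConvention(5, 5));
    try std.testing.expectEqual(@as(i32, 2), cmpConvention(2, -2));
}
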
@@ -0,0 +1,68 @@
///! The quoted behavior definitions are from
///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
const common = @import("./common.zig");
const comparef = @import("./comparef.zig");
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = common.linkage });
@export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = common.linkage });
@export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = common.linkage });
} else {
@export(__eqdf2, .{ .name = "__eqdf2", .linkage = common.linkage });
@export(__nedf2, .{ .name = "__nedf2", .linkage = common.linkage });
@export(__ledf2, .{ .name = "__ledf2", .linkage = common.linkage });
@export(__cmpdf2, .{ .name = "__cmpdf2", .linkage = common.linkage });
@export(__ltdf2, .{ .name = "__ltdf2", .linkage = common.linkage });
}
}
/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
/// if a is greater than b, they return 1; and if a and b are equal they return 0.
/// If either argument is NaN they return 1..."
///
/// Note that this matches the definition of `__ledf2`, `__eqdf2`, `__nedf2`, `__cmpdf2`,
/// and `__ltdf2`.
fn __cmpdf2(a: f64, b: f64) callconv(.C) i32 {
return @enumToInt(comparef.cmpf2(f64, comparef.LE, a, b));
}
/// "These functions return a value less than or equal to zero if neither argument is NaN,
/// and a is less than or equal to b."
pub fn __ledf2(a: f64, b: f64) callconv(.C) i32 {
return __cmpdf2(a, b);
}
/// "These functions return zero if neither argument is NaN, and a and b are equal."
/// Note that due to some kind of historical accident, __eqdf2 and __nedf2 are defined
/// to have the same return value.
pub fn __eqdf2(a: f64, b: f64) callconv(.C) i32 {
return __cmpdf2(a, b);
}
/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
/// Note that due to some kind of historical accident, __eqdf2 and __nedf2 are defined
/// to have the same return value.
pub fn __nedf2(a: f64, b: f64) callconv(.C) i32 {
return __cmpdf2(a, b);
}
/// "These functions return a value less than zero if neither argument is NaN, and a
/// is strictly less than b."
pub fn __ltdf2(a: f64, b: f64) callconv(.C) i32 {
return __cmpdf2(a, b);
}
fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.AAPCS) i32 {
return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) == .Equal);
}
fn __aeabi_dcmplt(a: f64, b: f64) callconv(.AAPCS) i32 {
return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) == .Less);
}
fn __aeabi_dcmple(a: f64, b: f64) callconv(.AAPCS) i32 {
return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) != .Greater);
}
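
A hedged usage sketch, not part of the diff, of how compiler-emitted comparisons map onto these routines: a < b lowers to __ltdf2(a, b) < 0 and a == b to __eqdf2(a, b) == 0, and both come out false for NaN operands because the routines return 1 in the unordered case.

const std = @import("std");
const __ltdf2 = @import("./cmpdf2.zig").__ltdf2;
const __eqdf2 = @import("./cmpdf2.zig").__eqdf2;

test "soft-float compare lowering" {
    try std.testing.expect(__ltdf2(1.0, 2.0) < 0);
    try std.testing.expect(__eqdf2(2.0, 2.0) == 0);
    const nan = std.math.nan(f64);
    try std.testing.expect(!(__ltdf2(nan, 1.0) < 0)); // unordered => 1
}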

@@ -0,0 +1,68 @@
///! The quoted behavior definitions are from
///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
const common = @import("./common.zig");
const comparef = @import("./comparef.zig");
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = common.linkage });
@export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = common.linkage });
@export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = common.linkage });
} else {
@export(__eqsf2, .{ .name = "__eqsf2", .linkage = common.linkage });
@export(__nesf2, .{ .name = "__nesf2", .linkage = common.linkage });
@export(__lesf2, .{ .name = "__lesf2", .linkage = common.linkage });
@export(__cmpsf2, .{ .name = "__cmpsf2", .linkage = common.linkage });
@export(__ltsf2, .{ .name = "__ltsf2", .linkage = common.linkage });
}
}
/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
/// if a is greater than b, they return 1; and if a and b are equal they return 0.
/// If either argument is NaN they return 1..."
///
/// Note that this matches the definition of `__lesf2`, `__eqsf2`, `__nesf2`, `__cmpsf2`,
/// and `__ltsf2`.
fn __cmpsf2(a: f32, b: f32) callconv(.C) i32 {
return @enumToInt(comparef.cmpf2(f32, comparef.LE, a, b));
}
/// "These functions return a value less than or equal to zero if neither argument is NaN,
/// and a is less than or equal to b."
pub fn __lesf2(a: f32, b: f32) callconv(.C) i32 {
return __cmpsf2(a, b);
}
/// "These functions return zero if neither argument is NaN, and a and b are equal."
/// Note that due to some kind of historical accident, __eqsf2 and __nesf2 are defined
/// to have the same return value.
pub fn __eqsf2(a: f32, b: f32) callconv(.C) i32 {
return __cmpsf2(a, b);
}
/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
/// Note that due to some kind of historical accident, __eqsf2 and __nesf2 are defined
/// to have the same return value.
pub fn __nesf2(a: f32, b: f32) callconv(.C) i32 {
return __cmpsf2(a, b);
}
/// "These functions return a value less than zero if neither argument is NaN, and a
/// is strictly less than b."
pub fn __ltsf2(a: f32, b: f32) callconv(.C) i32 {
return __cmpsf2(a, b);
}
fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.AAPCS) i32 {
return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) == .Equal);
}
fn __aeabi_fcmplt(a: f32, b: f32) callconv(.AAPCS) i32 {
return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) == .Less);
}
fn __aeabi_fcmple(a: f32, b: f32) callconv(.AAPCS) i32 {
return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) != .Greater);
}

lib/compiler_rt/cmptf2.zig
@@ -0,0 +1,122 @@
///! The quoted behavior definitions are from
///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
const common = @import("./common.zig");
const comparef = @import("./comparef.zig");
pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
@export(__eqkf2, .{ .name = "__eqkf2", .linkage = common.linkage });
@export(__nekf2, .{ .name = "__nekf2", .linkage = common.linkage });
@export(__ltkf2, .{ .name = "__ltkf2", .linkage = common.linkage });
@export(__lekf2, .{ .name = "__lekf2", .linkage = common.linkage });
} else if (common.want_sparc_abi) {
@export(_Qp_cmp, .{ .name = "_Qp_cmp", .linkage = common.linkage });
@export(_Qp_feq, .{ .name = "_Qp_feq", .linkage = common.linkage });
@export(_Qp_fne, .{ .name = "_Qp_fne", .linkage = common.linkage });
@export(_Qp_flt, .{ .name = "_Qp_flt", .linkage = common.linkage });
@export(_Qp_fle, .{ .name = "_Qp_fle", .linkage = common.linkage });
@export(_Qp_fgt, .{ .name = "_Qp_fgt", .linkage = common.linkage });
@export(_Qp_fge, .{ .name = "_Qp_fge", .linkage = common.linkage });
} else {
@export(__eqtf2, .{ .name = "__eqtf2", .linkage = common.linkage });
@export(__netf2, .{ .name = "__netf2", .linkage = common.linkage });
@export(__letf2, .{ .name = "__letf2", .linkage = common.linkage });
@export(__cmptf2, .{ .name = "__cmptf2", .linkage = common.linkage });
@export(__lttf2, .{ .name = "__lttf2", .linkage = common.linkage });
}
}
/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
/// if a is greater than b, they return 1; and if a and b are equal they return 0.
/// If either argument is NaN they return 1..."
///
/// Note that this matches the definition of `__letf2`, `__eqtf2`, `__netf2`, `__cmptf2`,
/// and `__lttf2`.
fn __cmptf2(a: f128, b: f128) callconv(.C) i32 {
return @enumToInt(comparef.cmpf2(f128, comparef.LE, a, b));
}
/// "These functions return a value less than or equal to zero if neither argument is NaN,
/// and a is less than or equal to b."
fn __letf2(a: f128, b: f128) callconv(.C) i32 {
return __cmptf2(a, b);
}
/// "These functions return zero if neither argument is NaN, and a and b are equal."
/// Note that due to some kind of historical accident, __eqtf2 and __netf2 are defined
/// to have the same return value.
fn __eqtf2(a: f128, b: f128) callconv(.C) i32 {
return __cmptf2(a, b);
}
/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
/// Note that due to some kind of historical accident, __eqtf2 and __netf2 are defined
/// to have the same return value.
fn __netf2(a: f128, b: f128) callconv(.C) i32 {
return __cmptf2(a, b);
}
/// "These functions return a value less than zero if neither argument is NaN, and a
/// is strictly less than b."
fn __lttf2(a: f128, b: f128) callconv(.C) i32 {
return __cmptf2(a, b);
}
fn __eqkf2(a: f128, b: f128) callconv(.C) i32 {
return __cmptf2(a, b);
}
fn __nekf2(a: f128, b: f128) callconv(.C) i32 {
return __cmptf2(a, b);
}
fn __ltkf2(a: f128, b: f128) callconv(.C) i32 {
return __cmptf2(a, b);
}
fn __lekf2(a: f128, b: f128) callconv(.C) i32 {
return __cmptf2(a, b);
}
const SparcFCMP = enum(i32) {
Equal = 0,
Less = 1,
Greater = 2,
Unordered = 3,
};
fn _Qp_cmp(a: *const f128, b: *const f128) callconv(.C) i32 {
return @enumToInt(comparef.cmpf2(f128, SparcFCMP, a.*, b.*));
}
fn _Qp_feq(a: *const f128, b: *const f128) callconv(.C) bool {
return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) == .Equal;
}
fn _Qp_fne(a: *const f128, b: *const f128) callconv(.C) bool {
return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) != .Equal;
}
fn _Qp_flt(a: *const f128, b: *const f128) callconv(.C) bool {
return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) == .Less;
}
fn _Qp_fgt(a: *const f128, b: *const f128) callconv(.C) bool {
return @intToEnum(SparcFCMP, _Qp_cmp(a, b)) == .Greater;
}
fn _Qp_fge(a: *const f128, b: *const f128) callconv(.C) bool {
return switch (@intToEnum(SparcFCMP, _Qp_cmp(a, b))) {
.Equal, .Greater => true,
.Less, .Unordered => false,
};
}
fn _Qp_fle(a: *const f128, b: *const f128) callconv(.C) bool {
return switch (@intToEnum(SparcFCMP, _Qp_cmp(a, b))) {
.Equal, .Less => true,
.Greater, .Unordered => false,
};
}

@@ -0,0 +1,50 @@
///! The quoted behavior definitions are from
///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
const common = @import("./common.zig");
const comparef = @import("./comparef.zig");
pub const panic = common.panic;
comptime {
@export(__eqxf2, .{ .name = "__eqxf2", .linkage = common.linkage });
@export(__nexf2, .{ .name = "__nexf2", .linkage = common.linkage });
@export(__lexf2, .{ .name = "__lexf2", .linkage = common.linkage });
@export(__cmpxf2, .{ .name = "__cmpxf2", .linkage = common.linkage });
@export(__ltxf2, .{ .name = "__ltxf2", .linkage = common.linkage });
}
/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
/// if a is greater than b, they return 1; and if a and b are equal they return 0.
/// If either argument is NaN they return 1..."
///
/// Note that this matches the definition of `__lexf2`, `__eqxf2`, `__nexf2`, `__cmpxf2`,
/// and `__ltxf2`.
fn __cmpxf2(a: f80, b: f80) callconv(.C) i32 {
return @enumToInt(comparef.cmp_f80(comparef.LE, a, b));
}
/// "These functions return a value less than or equal to zero if neither argument is NaN,
/// and a is less than or equal to b."
fn __lexf2(a: f80, b: f80) callconv(.C) i32 {
return __cmpxf2(a, b);
}
/// "These functions return zero if neither argument is NaN, and a and b are equal."
/// Note that due to some kind of historical accident, __eqxf2 and __nexf2 are defined
/// to have the same return value.
fn __eqxf2(a: f80, b: f80) callconv(.C) i32 {
return __cmpxf2(a, b);
}
/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
/// Note that due to some kind of historical accident, __eqxf2 and __nexf2 are defined
/// to have the same return value.
fn __nexf2(a: f80, b: f80) callconv(.C) i32 {
return __cmpxf2(a, b);
}
/// "These functions return a value less than zero if neither argument is NaN, and a
/// is strictly less than b."
fn __ltxf2(a: f80, b: f80) callconv(.C) i32 {
return __cmpxf2(a, b);
}

lib/compiler_rt/common.zig
@@ -0,0 +1,190 @@
const std = @import("std");
const builtin = @import("builtin");
pub const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
pub const want_aeabi = switch (builtin.abi) {
.eabi,
.eabihf,
.musleabi,
.musleabihf,
.gnueabi,
.gnueabihf,
=> switch (builtin.cpu.arch) {
.arm, .armeb, .thumb, .thumbeb => true,
else => false,
},
else => false,
};
pub const want_ppc_abi = builtin.cpu.arch.isPPC() or builtin.cpu.arch.isPPC64();
/// This governs whether to use these symbol names for f16/f32 conversions
/// rather than the standard names:
/// * __gnu_f2h_ieee
/// * __gnu_h2f_ieee
/// Known correct configurations:
/// x86_64-freestanding-none => true
/// x86_64-linux-none => true
/// x86_64-linux-gnu => true
/// x86_64-linux-musl => true
/// x86_64-linux-eabi => true
/// arm-linux-musleabihf => true
/// arm-linux-gnueabihf => true
/// arm-linux-eabihf => false
/// wasm32-wasi-musl => false
/// wasm32-freestanding-none => false
/// x86_64-windows-gnu => true
/// x86_64-windows-msvc => true
/// any-macos-any => false
pub const gnu_f16_abi = switch (builtin.cpu.arch) {
.wasm32, .wasm64 => false,
.arm, .armeb, .thumb, .thumbeb => switch (builtin.abi) {
.eabi, .eabihf => false,
else => true,
},
else => !builtin.os.tag.isDarwin(),
};
pub const want_sparc_abi = builtin.cpu.arch.isSPARC();
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test compiler-rt.
pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
_ = error_return_trace;
if (builtin.is_test) {
@setCold(true);
std.debug.panic("{s}", .{msg});
} else {
unreachable;
}
}
/// AArch64 is the only ABI (at the moment) to support f16 arguments without the
/// need for extending them to wider fp types.
/// TODO remove this; do this type selection in the language rather than
/// here in compiler-rt.
pub const F16T = if (builtin.cpu.arch.isAARCH64()) f16 else u16;
pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
switch (Z) {
u16 => {
// 16x16 --> 32 bit multiply
const product = @as(u32, a) * @as(u32, b);
hi.* = @intCast(u16, product >> 16);
lo.* = @truncate(u16, product);
},
u32 => {
// 32x32 --> 64 bit multiply
const product = @as(u64, a) * @as(u64, b);
hi.* = @truncate(u32, product >> 32);
lo.* = @truncate(u32, product);
},
u64 => {
const S = struct {
fn loWord(x: u64) u64 {
return @truncate(u32, x);
}
fn hiWord(x: u64) u64 {
return @truncate(u32, x >> 32);
}
};
// 64x64 -> 128 wide multiply for platforms that don't have such an operation;
// many 64-bit platforms have this operation, but they tend to have hardware
// floating-point, so we don't bother with a special case for them here.
// Each of the component 32x32 -> 64 products
const plolo: u64 = S.loWord(a) * S.loWord(b);
const plohi: u64 = S.loWord(a) * S.hiWord(b);
const philo: u64 = S.hiWord(a) * S.loWord(b);
const phihi: u64 = S.hiWord(a) * S.hiWord(b);
// Sum terms that contribute to lo in a way that allows us to get the carry
const r0: u64 = S.loWord(plolo);
const r1: u64 = S.hiWord(plolo) +% S.loWord(plohi) +% S.loWord(philo);
lo.* = r0 +% (r1 << 32);
// Sum terms contributing to hi with the carry from lo
hi.* = S.hiWord(plohi) +% S.hiWord(philo) +% S.hiWord(r1) +% phihi;
},
u128 => {
const Word_LoMask = @as(u64, 0x00000000ffffffff);
const Word_HiMask = @as(u64, 0xffffffff00000000);
const Word_FullMask = @as(u64, 0xffffffffffffffff);
const S = struct {
fn Word_1(x: u128) u64 {
return @truncate(u32, x >> 96);
}
fn Word_2(x: u128) u64 {
return @truncate(u32, x >> 64);
}
fn Word_3(x: u128) u64 {
return @truncate(u32, x >> 32);
}
fn Word_4(x: u128) u64 {
return @truncate(u32, x);
}
};
// 128x128 -> 256 wide multiply for platforms that don't have such an operation;
// many 64-bit platforms have this operation, but they tend to have hardware
// floating-point, so we don't bother with a special case for them here.
const product11: u64 = S.Word_1(a) * S.Word_1(b);
const product12: u64 = S.Word_1(a) * S.Word_2(b);
const product13: u64 = S.Word_1(a) * S.Word_3(b);
const product14: u64 = S.Word_1(a) * S.Word_4(b);
const product21: u64 = S.Word_2(a) * S.Word_1(b);
const product22: u64 = S.Word_2(a) * S.Word_2(b);
const product23: u64 = S.Word_2(a) * S.Word_3(b);
const product24: u64 = S.Word_2(a) * S.Word_4(b);
const product31: u64 = S.Word_3(a) * S.Word_1(b);
const product32: u64 = S.Word_3(a) * S.Word_2(b);
const product33: u64 = S.Word_3(a) * S.Word_3(b);
const product34: u64 = S.Word_3(a) * S.Word_4(b);
const product41: u64 = S.Word_4(a) * S.Word_1(b);
const product42: u64 = S.Word_4(a) * S.Word_2(b);
const product43: u64 = S.Word_4(a) * S.Word_3(b);
const product44: u64 = S.Word_4(a) * S.Word_4(b);
const sum0: u128 = @as(u128, product44);
const sum1: u128 = @as(u128, product34) +%
@as(u128, product43);
const sum2: u128 = @as(u128, product24) +%
@as(u128, product33) +%
@as(u128, product42);
const sum3: u128 = @as(u128, product14) +%
@as(u128, product23) +%
@as(u128, product32) +%
@as(u128, product41);
const sum4: u128 = @as(u128, product13) +%
@as(u128, product22) +%
@as(u128, product31);
const sum5: u128 = @as(u128, product12) +%
@as(u128, product21);
const sum6: u128 = @as(u128, product11);
const r0: u128 = (sum0 & Word_FullMask) +%
((sum1 & Word_LoMask) << 32);
const r1: u128 = (sum0 >> 64) +%
((sum1 >> 32) & Word_FullMask) +%
(sum2 & Word_FullMask) +%
((sum3 << 32) & Word_HiMask);
lo.* = r0 +% (r1 << 64);
hi.* = (r1 >> 64) +%
(sum1 >> 96) +%
(sum2 >> 64) +%
(sum3 >> 32) +%
sum4 +%
(sum5 << 32) +%
(sum6 << 64);
},
else => @compileError("unsupported"),
}
}
pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const integerBit = @as(Z, 1) << std.math.floatFractionalBits(T);
const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
significand.* <<= @intCast(std.math.Log2Int(Z), shift);
return @as(i32, 1) - shift;
}
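
A hedged test sketch, not part of the diff, cross-checking wideMultiply against Zig's native widening multiply through u128:

const std = @import("std");
const common = @import("./common.zig");

test "wideMultiply agrees with a native widening multiply" {
    var hi: u64 = undefined;
    var lo: u64 = undefined;
    common.wideMultiply(u64, std.math.maxInt(u64), 3, &hi, &lo);
    const product = @as(u128, std.math.maxInt(u64)) * 3;
    try std.testing.expectEqual(@truncate(u64, product >> 64), hi);
    try std.testing.expectEqual(@truncate(u64, product), lo);
}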

@@ -1,328 +0,0 @@
// Ported from:
//
// https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/comparesf2.c
const std = @import("std");
const builtin = @import("builtin");
const LE = enum(i32) {
Less = -1,
Equal = 0,
Greater = 1,
const Unordered: LE = .Greater;
};
const GE = enum(i32) {
Less = -1,
Equal = 0,
Greater = 1,
const Unordered: GE = .Less;
};
pub inline fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
@setRuntimeSafety(builtin.is_test);
const bits = @typeInfo(T).Float.bits;
const srep_t = std.meta.Int(.signed, bits);
const rep_t = std.meta.Int(.unsigned, bits);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const absMask = signBit - 1;
const infT = comptime std.math.inf(T);
const infRep = @bitCast(rep_t, infT);
const aInt = @bitCast(srep_t, a);
const bInt = @bitCast(srep_t, b);
const aAbs = @bitCast(rep_t, aInt) & absMask;
const bAbs = @bitCast(rep_t, bInt) & absMask;
// If either a or b is NaN, they are unordered.
if (aAbs > infRep or bAbs > infRep) return RT.Unordered;
// If a and b are both zeros, they are equal.
if ((aAbs | bAbs) == 0) return .Equal;
// If at least one of a and b is positive, we get the same result comparing
// a and b as signed integers as we would with a floating-point compare.
if ((aInt & bInt) >= 0) {
if (aInt < bInt) {
return .Less;
} else if (aInt == bInt) {
return .Equal;
} else return .Greater;
} else {
// Otherwise, both are negative, so we need to flip the sense of the
// comparison to get the correct result. (This assumes a twos- or ones-
// complement integer representation; if integers are represented in a
// sign-magnitude representation, then this flip is incorrect).
if (aInt > bInt) {
return .Less;
} else if (aInt == bInt) {
return .Equal;
} else return .Greater;
}
}
pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 {
@setRuntimeSafety(builtin.is_test);
const rep_t = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const absMask = signBit - 1;
const infRep = @bitCast(rep_t, std.math.inf(T));
const aAbs: rep_t = @bitCast(rep_t, a) & absMask;
const bAbs: rep_t = @bitCast(rep_t, b) & absMask;
return @boolToInt(aAbs > infRep or bAbs > infRep);
}
// Comparison between f32
pub fn __lesf2(a: f32, b: f32) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
const float = cmp(f32, LE, a, b);
return @bitCast(i32, float);
}
pub fn __gesf2(a: f32, b: f32) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
const float = cmp(f32, GE, a, b);
return @bitCast(i32, float);
}
pub fn __eqsf2(a: f32, b: f32) callconv(.C) i32 {
return __lesf2(a, b);
}
pub fn __ltsf2(a: f32, b: f32) callconv(.C) i32 {
return __lesf2(a, b);
}
pub fn __nesf2(a: f32, b: f32) callconv(.C) i32 {
return __lesf2(a, b);
}
pub fn __gtsf2(a: f32, b: f32) callconv(.C) i32 {
return __gesf2(a, b);
}
// Comparison between f64
pub fn __ledf2(a: f64, b: f64) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
const float = cmp(f64, LE, a, b);
return @bitCast(i32, float);
}
pub fn __gedf2(a: f64, b: f64) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
const float = cmp(f64, GE, a, b);
return @bitCast(i32, float);
}
pub fn __eqdf2(a: f64, b: f64) callconv(.C) i32 {
return __ledf2(a, b);
}
pub fn __ltdf2(a: f64, b: f64) callconv(.C) i32 {
return __ledf2(a, b);
}
pub fn __nedf2(a: f64, b: f64) callconv(.C) i32 {
return __ledf2(a, b);
}
pub fn __gtdf2(a: f64, b: f64) callconv(.C) i32 {
return __gedf2(a, b);
}
// Comparison between f80
pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
const a_rep = std.math.break_f80(a);
const b_rep = std.math.break_f80(b);
const sig_bits = std.math.floatMantissaBits(f80);
const int_bit = 0x8000000000000000;
const sign_bit = 0x8000;
const special_exp = 0x7FFF;
// If either a or b is NaN, they are unordered.
if ((a_rep.exp & special_exp == special_exp and a_rep.fraction ^ int_bit != 0) or
(b_rep.exp & special_exp == special_exp and b_rep.fraction ^ int_bit != 0))
return RT.Unordered;
// If a and b are both zeros, they are equal.
if ((a_rep.fraction | b_rep.fraction) | ((a_rep.exp | b_rep.exp) & special_exp) == 0)
return .Equal;
if (@boolToInt(a_rep.exp == b_rep.exp) & @boolToInt(a_rep.fraction == b_rep.fraction) != 0) {
return .Equal;
} else if (a_rep.exp & sign_bit != b_rep.exp & sign_bit) {
// signs are different
if (@bitCast(i16, a_rep.exp) < @bitCast(i16, b_rep.exp)) {
return .Less;
} else {
return .Greater;
}
} else {
const a_fraction = a_rep.fraction | (@as(u80, a_rep.exp) << sig_bits);
const b_fraction = b_rep.fraction | (@as(u80, b_rep.exp) << sig_bits);
if (a_fraction < b_fraction) {
return .Less;
} else {
return .Greater;
}
}
}
pub fn __lexf2(a: f80, b: f80) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
const float = cmp_f80(LE, a, b);
return @bitCast(i32, float);
}
pub fn __gexf2(a: f80, b: f80) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
const float = cmp_f80(GE, a, b);
return @bitCast(i32, float);
}
pub fn __eqxf2(a: f80, b: f80) callconv(.C) i32 {
return __lexf2(a, b);
}
pub fn __ltxf2(a: f80, b: f80) callconv(.C) i32 {
return __lexf2(a, b);
}
pub fn __nexf2(a: f80, b: f80) callconv(.C) i32 {
return __lexf2(a, b);
}
pub fn __gtxf2(a: f80, b: f80) callconv(.C) i32 {
return __gexf2(a, b);
}
// Comparison between f128
pub fn __letf2(a: f128, b: f128) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
const float = cmp(f128, LE, a, b);
return @bitCast(i32, float);
}
pub fn __getf2(a: f128, b: f128) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
const float = cmp(f128, GE, a, b);
return @bitCast(i32, float);
}
pub fn __eqtf2(a: f128, b: f128) callconv(.C) i32 {
return __letf2(a, b);
}
pub fn __lttf2(a: f128, b: f128) callconv(.C) i32 {
return __letf2(a, b);
}
pub fn __netf2(a: f128, b: f128) callconv(.C) i32 {
return __letf2(a, b);
}
pub fn __gttf2(a: f128, b: f128) callconv(.C) i32 {
return __getf2(a, b);
}
// Unordered comparison between f32/f64/f128
pub fn __unordsf2(a: f32, b: f32) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
return unordcmp(f32, a, b);
}
pub fn __unorddf2(a: f64, b: f64) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
return unordcmp(f64, a, b);
}
pub fn __unordtf2(a: f128, b: f128) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
return unordcmp(f128, a, b);
}
// ARM EABI intrinsics
pub fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.AAPCS) i32 {
@setRuntimeSafety(false);
return @boolToInt(@call(.{ .modifier = .always_inline }, __eqsf2, .{ a, b }) == 0);
}
pub fn __aeabi_fcmplt(a: f32, b: f32) callconv(.AAPCS) i32 {
@setRuntimeSafety(false);
return @boolToInt(@call(.{ .modifier = .always_inline }, __ltsf2, .{ a, b }) < 0);
}
pub fn __aeabi_fcmple(a: f32, b: f32) callconv(.AAPCS) i32 {
@setRuntimeSafety(false);
return @boolToInt(@call(.{ .modifier = .always_inline }, __lesf2, .{ a, b }) <= 0);
}
pub fn __aeabi_fcmpge(a: f32, b: f32) callconv(.AAPCS) i32 {
@setRuntimeSafety(false);
return @boolToInt(@call(.{ .modifier = .always_inline }, __gesf2, .{ a, b }) >= 0);
}
pub fn __aeabi_fcmpgt(a: f32, b: f32) callconv(.AAPCS) i32 {
@setRuntimeSafety(false);
return @boolToInt(@call(.{ .modifier = .always_inline }, __gtsf2, .{ a, b }) > 0);
}
pub fn __aeabi_fcmpun(a: f32, b: f32) callconv(.AAPCS) i32 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __unordsf2, .{ a, b });
}
pub fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.AAPCS) i32 {
@setRuntimeSafety(false);
return @boolToInt(@call(.{ .modifier = .always_inline }, __eqdf2, .{ a, b }) == 0);
}
pub fn __aeabi_dcmplt(a: f64, b: f64) callconv(.AAPCS) i32 {
@setRuntimeSafety(false);
return @boolToInt(@call(.{ .modifier = .always_inline }, __ltdf2, .{ a, b }) < 0);
}
pub fn __aeabi_dcmple(a: f64, b: f64) callconv(.AAPCS) i32 {
@setRuntimeSafety(false);
return @boolToInt(@call(.{ .modifier = .always_inline }, __ledf2, .{ a, b }) <= 0);
}
pub fn __aeabi_dcmpge(a: f64, b: f64) callconv(.AAPCS) i32 {
@setRuntimeSafety(false);
return @boolToInt(@call(.{ .modifier = .always_inline }, __gedf2, .{ a, b }) >= 0);
}
pub fn __aeabi_dcmpgt(a: f64, b: f64) callconv(.AAPCS) i32 {
@setRuntimeSafety(false);
return @boolToInt(@call(.{ .modifier = .always_inline }, __gtdf2, .{ a, b }) > 0);
}
pub fn __aeabi_dcmpun(a: f64, b: f64) callconv(.AAPCS) i32 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __unorddf2, .{ a, b });
}
test "comparesf2" {
_ = @import("comparesf2_test.zig");
}
test "comparedf2" {
_ = @import("comparedf2_test.zig");
}

@@ -6,7 +6,15 @@ const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const comparedf2 = @import("compareXf2.zig");
const __eqdf2 = @import("./cmpdf2.zig").__eqdf2;
const __ledf2 = @import("./cmpdf2.zig").__ledf2;
const __ltdf2 = @import("./cmpdf2.zig").__ltdf2;
const __nedf2 = @import("./cmpdf2.zig").__nedf2;
const __gedf2 = @import("./gedf2.zig").__gedf2;
const __gtdf2 = @import("./gedf2.zig").__gtdf2;
const __unorddf2 = @import("./unorddf2.zig").__unorddf2;
const TestVector = struct {
a: f64,
@@ -21,25 +29,25 @@
};
fn test__cmpdf2(vector: TestVector) bool {
if (comparedf2.__eqdf2(vector.a, vector.b) != vector.eqReference) {
if (__eqdf2(vector.a, vector.b) != vector.eqReference) {
return false;
}
if (comparedf2.__gedf2(vector.a, vector.b) != vector.geReference) {
if (__gedf2(vector.a, vector.b) != vector.geReference) {
return false;
}
if (comparedf2.__gtdf2(vector.a, vector.b) != vector.gtReference) {
if (__gtdf2(vector.a, vector.b) != vector.gtReference) {
return false;
}
if (comparedf2.__ledf2(vector.a, vector.b) != vector.leReference) {
if (__ledf2(vector.a, vector.b) != vector.leReference) {
return false;
}
if (comparedf2.__ltdf2(vector.a, vector.b) != vector.ltReference) {
if (__ltdf2(vector.a, vector.b) != vector.ltReference) {
return false;
}
if (comparedf2.__nedf2(vector.a, vector.b) != vector.neReference) {
if (__nedf2(vector.a, vector.b) != vector.neReference) {
return false;
}
if (comparedf2.__unorddf2(vector.a, vector.b) != vector.unReference) {
if (__unorddf2(vector.a, vector.b) != vector.unReference) {
return false;
}
return true;

View File

@ -0,0 +1,118 @@
const std = @import("std");
pub const LE = enum(i32) {
Less = -1,
Equal = 0,
Greater = 1,
const Unordered: LE = .Greater;
};
pub const GE = enum(i32) {
Less = -1,
Equal = 0,
Greater = 1,
const Unordered: GE = .Less;
};
pub inline fn cmpf2(comptime T: type, comptime RT: type, a: T, b: T) RT {
const bits = @typeInfo(T).Float.bits;
const srep_t = std.meta.Int(.signed, bits);
const rep_t = std.meta.Int(.unsigned, bits);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const absMask = signBit - 1;
const infT = comptime std.math.inf(T);
const infRep = @bitCast(rep_t, infT);
const aInt = @bitCast(srep_t, a);
const bInt = @bitCast(srep_t, b);
const aAbs = @bitCast(rep_t, aInt) & absMask;
const bAbs = @bitCast(rep_t, bInt) & absMask;
// If either a or b is NaN, they are unordered.
if (aAbs > infRep or bAbs > infRep) return RT.Unordered;
// If a and b are both zeros, they are equal.
if ((aAbs | bAbs) == 0) return .Equal;
// If at least one of a and b is positive, we get the same result comparing
// a and b as signed integers as we would with a floating-point compare.
if ((aInt & bInt) >= 0) {
if (aInt < bInt) {
return .Less;
} else if (aInt == bInt) {
return .Equal;
} else return .Greater;
} else {
// Otherwise, both are negative, so we need to flip the sense of the
// comparison to get the correct result. (This assumes a twos- or ones-
// complement integer representation; if integers are represented in a
// sign-magnitude representation, then this flip is incorrect).
if (aInt > bInt) {
return .Less;
} else if (aInt == bInt) {
return .Equal;
} else return .Greater;
}
}
pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
const a_rep = std.math.break_f80(a);
const b_rep = std.math.break_f80(b);
const sig_bits = std.math.floatMantissaBits(f80);
const int_bit = 0x8000000000000000;
const sign_bit = 0x8000;
const special_exp = 0x7FFF;
// If either a or b is NaN, they are unordered.
if ((a_rep.exp & special_exp == special_exp and a_rep.fraction ^ int_bit != 0) or
(b_rep.exp & special_exp == special_exp and b_rep.fraction ^ int_bit != 0))
return RT.Unordered;
// If a and b are both zeros, they are equal.
if ((a_rep.fraction | b_rep.fraction) | ((a_rep.exp | b_rep.exp) & special_exp) == 0)
return .Equal;
if (@boolToInt(a_rep.exp == b_rep.exp) & @boolToInt(a_rep.fraction == b_rep.fraction) != 0) {
return .Equal;
} else if (a_rep.exp & sign_bit != b_rep.exp & sign_bit) {
// signs are different
if (@bitCast(i16, a_rep.exp) < @bitCast(i16, b_rep.exp)) {
return .Less;
} else {
return .Greater;
}
} else {
const a_fraction = a_rep.fraction | (@as(u80, a_rep.exp) << sig_bits);
const b_fraction = b_rep.fraction | (@as(u80, b_rep.exp) << sig_bits);
if (a_fraction < b_fraction) {
return .Less;
} else {
return .Greater;
}
}
}
pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 {
const rep_t = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const absMask = signBit - 1;
const infRep = @bitCast(rep_t, std.math.inf(T));
const aAbs: rep_t = @bitCast(rep_t, a) & absMask;
const bAbs: rep_t = @bitCast(rep_t, b) & absMask;
return @boolToInt(aAbs > infRep or bAbs > infRep);
}
test {
_ = @import("comparesf2_test.zig");
_ = @import("comparedf2_test.zig");
}
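A short illustration of the signed-integer ordering trick that cmpf2 relies on above. This is a hypothetical sketch, not part of the commit; it reuses the std import already at the top of this file and the two-argument @bitCast builtin used throughout this code.
test "bit-pattern ordering matches float ordering for non-negative operands" {
    // When at least one operand is non-negative, ordering the raw IEEE-754
    // bit patterns as signed integers agrees with the floating-point ordering,
    // which is exactly the fast path cmpf2 takes.
    const a: f64 = 1.5;
    const b: f64 = 2.25;
    try std.testing.expect((a < b) == (@bitCast(i64, a) < @bitCast(i64, b)));
}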

View File

@ -6,7 +6,15 @@ const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const comparesf2 = @import("compareXf2.zig");
const __eqsf2 = @import("./cmpsf2.zig").__eqsf2;
const __lesf2 = @import("./cmpsf2.zig").__lesf2;
const __ltsf2 = @import("./cmpsf2.zig").__ltsf2;
const __nesf2 = @import("./cmpsf2.zig").__nesf2;
const __gesf2 = @import("./gesf2.zig").__gesf2;
const __gtsf2 = @import("./gesf2.zig").__gtsf2;
const __unordsf2 = @import("./unordsf2.zig").__unordsf2;
const TestVector = struct {
a: f32,
@ -21,25 +29,25 @@ const TestVector = struct {
};
fn test__cmpsf2(vector: TestVector) bool {
if (comparesf2.__eqsf2(vector.a, vector.b) != vector.eqReference) {
if (__eqsf2(vector.a, vector.b) != vector.eqReference) {
return false;
}
if (comparesf2.__gesf2(vector.a, vector.b) != vector.geReference) {
if (__gesf2(vector.a, vector.b) != vector.geReference) {
return false;
}
if (comparesf2.__gtsf2(vector.a, vector.b) != vector.gtReference) {
if (__gtsf2(vector.a, vector.b) != vector.gtReference) {
return false;
}
if (comparesf2.__lesf2(vector.a, vector.b) != vector.leReference) {
if (__lesf2(vector.a, vector.b) != vector.leReference) {
return false;
}
if (comparesf2.__ltsf2(vector.a, vector.b) != vector.ltReference) {
if (__ltsf2(vector.a, vector.b) != vector.ltReference) {
return false;
}
if (comparesf2.__nesf2(vector.a, vector.b) != vector.neReference) {
if (__nesf2(vector.a, vector.b) != vector.neReference) {
return false;
}
if (comparesf2.__unordsf2(vector.a, vector.b) != vector.unReference) {
if (__unordsf2(vector.a, vector.b) != vector.unReference) {
return false;
}
return true;

View File

@ -1,11 +1,26 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const math = std.math;
const expect = std.testing.expect;
const common = @import("common.zig");
pub const panic = common.panic;
const trig = @import("trig.zig");
const rem_pio2 = @import("rem_pio2.zig").rem_pio2;
const rem_pio2f = @import("rem_pio2f.zig").rem_pio2f;
comptime {
@export(__cosh, .{ .name = "__cosh", .linkage = common.linkage });
@export(cosf, .{ .name = "cosf", .linkage = common.linkage });
@export(cos, .{ .name = "cos", .linkage = common.linkage });
@export(__cosx, .{ .name = "__cosx", .linkage = common.linkage });
const cosq_sym_name = if (common.want_ppc_abi) "cosf128" else "cosq";
@export(cosq, .{ .name = cosq_sym_name, .linkage = common.linkage });
@export(cosl, .{ .name = "cosl", .linkage = common.linkage });
}
pub fn __cosh(a: f16) callconv(.C) f16 {
// TODO: more efficient implementation
return @floatCast(f16, cosf(a));

View File

@ -1,5 +1,21 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
@export(__clzsi2, .{ .name = "__clzsi2", .linkage = common.linkage });
@export(__clzdi2, .{ .name = "__clzdi2", .linkage = common.linkage });
@export(__clzti2, .{ .name = "__clzti2", .linkage = common.linkage });
@export(__ctzsi2, .{ .name = "__ctzsi2", .linkage = common.linkage });
@export(__ctzdi2, .{ .name = "__ctzdi2", .linkage = common.linkage });
@export(__ctzti2, .{ .name = "__ctzti2", .linkage = common.linkage });
@export(__ffssi2, .{ .name = "__ffssi2", .linkage = common.linkage });
@export(__ffsdi2, .{ .name = "__ffsdi2", .linkage = common.linkage });
@export(__ffsti2, .{ .name = "__ffsti2", .linkage = common.linkage });
}
// clz - count leading zeroes
// - clzXi2 for unoptimized little and big endian
@ -15,8 +31,6 @@ const builtin = @import("builtin");
// - ffsXi2 for unoptimized little and big endian
inline fn clzXi2(comptime T: type, a: T) i32 {
@setRuntimeSafety(builtin.is_test);
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
@ -154,8 +168,6 @@ pub fn __clzti2(a: i128) callconv(.C) i32 {
}
inline fn ctzXi2(comptime T: type, a: T) i32 {
@setRuntimeSafety(builtin.is_test);
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
@ -191,8 +203,6 @@ pub fn __ctzti2(a: i128) callconv(.C) i32 {
}
inline fn ffsXi2(comptime T: type, a: T) i32 {
@setRuntimeSafety(builtin.is_test);
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
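For readers unfamiliar with these libgcc-style builtins, the following sketch spells out the semantics the exports above follow. It is illustrative only, not part of the commit, and assumes the __clzsi2, __ctzsi2 and __ffssi2 signatures shown in this file.
test "count0bits semantics (sketch)" {
    // __clzsi2 counts leading zeroes, __ctzsi2 counts trailing zeroes, and
    // __ffssi2 returns one plus the index of the least significant set bit
    // (with 0 mapping to 0, per the libgcc convention).
    try std.testing.expect(__clzsi2(1) == 31);
    try std.testing.expect(__ctzsi2(8) == 3);
    try std.testing.expect(__ffssi2(8) == 4);
}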

View File

@ -1,12 +1,35 @@
// Ported from:
//
// https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/divdf3.c
//! Ported from:
//!
//! https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/divdf3.c
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const is_test = builtin.is_test;
const common = @import("common.zig");
const normalize = common.normalize;
const wideMultiply = common.wideMultiply;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_ddiv, .{ .name = "__aeabi_ddiv", .linkage = common.linkage });
} else {
@export(__divdf3, .{ .name = "__divdf3", .linkage = common.linkage });
}
}
pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
@setRuntimeSafety(builtin.is_test);
return div(a, b);
}
fn __aeabi_ddiv(a: f64, b: f64) callconv(.AAPCS) f64 {
return div(a, b);
}
inline fn div(a: f64, b: f64) f64 {
const Z = std.meta.Int(.unsigned, 64);
const SignedZ = std.meta.Int(.signed, 64);
@ -202,130 +225,6 @@ pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
}
}
pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
@setRuntimeSafety(builtin.is_test);
switch (Z) {
u32 => {
// 32x32 --> 64 bit multiply
const product = @as(u64, a) * @as(u64, b);
hi.* = @truncate(u32, product >> 32);
lo.* = @truncate(u32, product);
},
u64 => {
const S = struct {
fn loWord(x: u64) u64 {
return @truncate(u32, x);
}
fn hiWord(x: u64) u64 {
return @truncate(u32, x >> 32);
}
};
// 64x64 -> 128 wide multiply for platforms that don't have such an operation;
// many 64-bit platforms have this operation, but they tend to have hardware
// floating-point, so we don't bother with a special case for them here.
// Each of the component 32x32 -> 64 products
const plolo: u64 = S.loWord(a) * S.loWord(b);
const plohi: u64 = S.loWord(a) * S.hiWord(b);
const philo: u64 = S.hiWord(a) * S.loWord(b);
const phihi: u64 = S.hiWord(a) * S.hiWord(b);
// Sum terms that contribute to lo in a way that allows us to get the carry
const r0: u64 = S.loWord(plolo);
const r1: u64 = S.hiWord(plolo) +% S.loWord(plohi) +% S.loWord(philo);
lo.* = r0 +% (r1 << 32);
// Sum terms contributing to hi with the carry from lo
hi.* = S.hiWord(plohi) +% S.hiWord(philo) +% S.hiWord(r1) +% phihi;
},
u128 => {
const Word_LoMask = @as(u64, 0x00000000ffffffff);
const Word_HiMask = @as(u64, 0xffffffff00000000);
const Word_FullMask = @as(u64, 0xffffffffffffffff);
const S = struct {
fn Word_1(x: u128) u64 {
return @truncate(u32, x >> 96);
}
fn Word_2(x: u128) u64 {
return @truncate(u32, x >> 64);
}
fn Word_3(x: u128) u64 {
return @truncate(u32, x >> 32);
}
fn Word_4(x: u128) u64 {
return @truncate(u32, x);
}
};
// 128x128 -> 256 wide multiply for platforms that don't have such an operation;
// many 64-bit platforms have this operation, but they tend to have hardware
// floating-point, so we don't bother with a special case for them here.
const product11: u64 = S.Word_1(a) * S.Word_1(b);
const product12: u64 = S.Word_1(a) * S.Word_2(b);
const product13: u64 = S.Word_1(a) * S.Word_3(b);
const product14: u64 = S.Word_1(a) * S.Word_4(b);
const product21: u64 = S.Word_2(a) * S.Word_1(b);
const product22: u64 = S.Word_2(a) * S.Word_2(b);
const product23: u64 = S.Word_2(a) * S.Word_3(b);
const product24: u64 = S.Word_2(a) * S.Word_4(b);
const product31: u64 = S.Word_3(a) * S.Word_1(b);
const product32: u64 = S.Word_3(a) * S.Word_2(b);
const product33: u64 = S.Word_3(a) * S.Word_3(b);
const product34: u64 = S.Word_3(a) * S.Word_4(b);
const product41: u64 = S.Word_4(a) * S.Word_1(b);
const product42: u64 = S.Word_4(a) * S.Word_2(b);
const product43: u64 = S.Word_4(a) * S.Word_3(b);
const product44: u64 = S.Word_4(a) * S.Word_4(b);
const sum0: u128 = @as(u128, product44);
const sum1: u128 = @as(u128, product34) +%
@as(u128, product43);
const sum2: u128 = @as(u128, product24) +%
@as(u128, product33) +%
@as(u128, product42);
const sum3: u128 = @as(u128, product14) +%
@as(u128, product23) +%
@as(u128, product32) +%
@as(u128, product41);
const sum4: u128 = @as(u128, product13) +%
@as(u128, product22) +%
@as(u128, product31);
const sum5: u128 = @as(u128, product12) +%
@as(u128, product21);
const sum6: u128 = @as(u128, product11);
const r0: u128 = (sum0 & Word_FullMask) +%
((sum1 & Word_LoMask) << 32);
const r1: u128 = (sum0 >> 64) +%
((sum1 >> 32) & Word_FullMask) +%
(sum2 & Word_FullMask) +%
((sum3 << 32) & Word_HiMask);
lo.* = r0 +% (r1 << 64);
hi.* = (r1 >> 64) +%
(sum1 >> 96) +%
(sum2 >> 64) +%
(sum3 >> 32) +%
sum4 +%
(sum5 << 32) +%
(sum6 << 64);
},
else => @compileError("unsupported"),
}
}
pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const integerBit = @as(Z, 1) << std.math.floatFractionalBits(T);
const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
significand.* <<= @intCast(std.math.Log2Int(Z), shift);
return @as(i32, 1) - shift;
}
pub fn __aeabi_ddiv(a: f64, b: f64) callconv(.AAPCS) f64 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __divdf3, .{ a, b });
}
test {
_ = @import("divdf3_test.zig");
}
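Since wideMultiply now lives in common.zig, a quick sanity check against a native u128 product could look like the sketch below. It is an assumed example, not part of the commit, and it presumes the (comptime Z, a, b, *hi, *lo) signature of the routine removed from this file is unchanged.
test "wideMultiply(u64) agrees with a native u128 product (sketch)" {
    var hi: u64 = undefined;
    var lo: u64 = undefined;
    const x: u64 = 0x0123456789abcdef;
    const y: u64 = 0xfedcba9876543210;
    wideMultiply(u64, x, y, &hi, &lo);
    const product = @as(u128, x) * @as(u128, y);
    try std.testing.expect(hi == @truncate(u64, product >> 64));
    try std.testing.expect(lo == @truncate(u64, product));
}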

View File

@ -1,12 +1,33 @@
// Ported from:
//
// https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/divsf3.c
//! Ported from:
//!
//! https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/divsf3.c
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const common = @import("common.zig");
const normalize = common.normalize;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_fdiv, .{ .name = "__aeabi_fdiv", .linkage = common.linkage });
} else {
@export(__divsf3, .{ .name = "__divsf3", .linkage = common.linkage });
}
}
pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
@setRuntimeSafety(builtin.is_test);
return div(a, b);
}
fn __aeabi_fdiv(a: f32, b: f32) callconv(.AAPCS) f32 {
return div(a, b);
}
inline fn div(a: f32, b: f32) f32 {
const Z = std.meta.Int(.unsigned, 32);
const significandBits = std.math.floatMantissaBits(f32);
@ -184,22 +205,6 @@ pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
}
}
fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
const shift = @clz(Z, significand.*) - @clz(Z, implicitBit);
significand.* <<= @intCast(std.math.Log2Int(Z), shift);
return 1 - shift;
}
pub fn __aeabi_fdiv(a: f32, b: f32) callconv(.AAPCS) f32 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __divsf3, .{ a, b });
}
test {
_ = @import("divsf3_test.zig");
}
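Both the C and AEABI entry points now route through the single inline div helper, so exercising the C symbol covers the shared path. A minimal sketch, not part of the commit:
test "__divsf3 exact division (sketch)" {
    // 1.0 / 2.0 is exactly representable in f32, so the results must be exact.
    try std.testing.expect(__divsf3(1.0, 2.0) == 0.5);
    try std.testing.expect(__divsf3(-1.0, 2.0) == -0.5);
}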

View File

@ -1,11 +1,35 @@
const std = @import("std");
const builtin = @import("builtin");
const normalize = @import("divdf3.zig").normalize;
const wideMultiply = @import("divdf3.zig").wideMultiply;
const common = @import("common.zig");
const normalize = common.normalize;
const wideMultiply = common.wideMultiply;
pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
@export(__divkf3, .{ .name = "__divkf3", .linkage = common.linkage });
} else if (common.want_sparc_abi) {
@export(_Qp_div, .{ .name = "_Qp_div", .linkage = common.linkage });
} else {
@export(__divtf3, .{ .name = "__divtf3", .linkage = common.linkage });
}
}
pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
@setRuntimeSafety(builtin.is_test);
return div(a, b);
}
fn __divkf3(a: f128, b: f128) callconv(.C) f128 {
return div(a, b);
}
fn _Qp_div(c: *f128, a: *const f128, b: *const f128) callconv(.C) void {
c.* = div(a.*, b.*);
}
inline fn div(a: f128, b: f128) f128 {
const Z = std.meta.Int(.unsigned, 128);
const significandBits = std.math.floatMantissaBits(f128);

View File

@ -1,9 +1,43 @@
const udivmod = @import("udivmod.zig").udivmod;
const std = @import("std");
const builtin = @import("builtin");
const udivmod = @import("udivmod.zig").udivmod;
const arch = builtin.cpu.arch;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (builtin.os.tag == .windows) {
switch (arch) {
.i386 => {
@export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
},
.x86_64 => {
// The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
// that LLVM expects compiler-rt to have.
@export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = common.linkage });
},
else => {},
}
if (arch.isAARCH64()) {
@export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
}
} else {
@export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
}
}
pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);
return div(a, b);
}
const v128 = @import("std").meta.Vector(2, u64);
fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
return @bitCast(v128, div(@bitCast(i128, a), @bitCast(i128, b)));
}
inline fn div(a: i128, b: i128) i128 {
const s_a = a >> (128 - 1);
const s_b = b >> (128 - 1);
@ -15,14 +49,6 @@ pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
return (@bitCast(i128, r) ^ s) -% s;
}
const v128 = @import("std").meta.Vector(2, u64);
pub fn __divti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
return @bitCast(v128, @call(.{ .modifier = .always_inline }, __divti3, .{
@bitCast(i128, a),
@bitCast(i128, b),
}));
}
test {
_ = @import("divti3_test.zig");
}
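The inline div above applies the usual branchless sign fix-up: s is either 0 or all ones, and (x ^ s) -% s negates x only in the all-ones case. A standalone illustration, not part of the commit:
test "branchless conditional negate used by div (sketch)" {
    const x: i128 = 42;
    const negate: i128 = -1; // all ones: flip the sign
    const keep: i128 = 0; // zero: leave the value unchanged
    try std.testing.expect(((x ^ negate) -% negate) == -42);
    try std.testing.expect(((x ^ keep) -% keep) == 42);
}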

View File

@ -1,10 +1,18 @@
const std = @import("std");
const builtin = @import("builtin");
const normalize = @import("divdf3.zig").normalize;
const wideMultiply = @import("divdf3.zig").wideMultiply;
const arch = builtin.cpu.arch;
const common = @import("common.zig");
const normalize = common.normalize;
const wideMultiply = common.wideMultiply;
pub const panic = common.panic;
comptime {
@export(__divxf3, .{ .name = "__divxf3", .linkage = common.linkage });
}
pub fn __divxf3(a: f80, b: f80) callconv(.C) f80 {
@setRuntimeSafety(builtin.is_test);
const T = f80;
const Z = std.meta.Int(.unsigned, @bitSizeOf(T));

View File

@ -1,22 +1,26 @@
// __emutls_get_address specific builtin
//
// derived work from LLVM Compiler Infrastructure - release 8.0 (MIT)
// https://github.com/llvm-mirror/compiler-rt/blob/release_80/lib/builtins/emutls.c
//
//! __emutls_get_address specific builtin
//!
//! derived work from LLVM Compiler Infrastructure - release 8.0 (MIT)
//! https://github.com/llvm-mirror/compiler-rt/blob/release_80/lib/builtins/emutls.c
const std = @import("std");
const builtin = @import("builtin");
const common = @import("common.zig");
const abort = std.os.abort;
const assert = std.debug.assert;
const expect = std.testing.expect;
// defined in C as:
// typedef unsigned int gcc_word __attribute__((mode(word)));
/// defined in C as:
/// typedef unsigned int gcc_word __attribute__((mode(word)));
const gcc_word = usize;
pub const panic = common.panic;
comptime {
assert(builtin.link_libc);
if (builtin.link_libc and builtin.os.tag == .openbsd) {
@export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = common.linkage });
}
}
/// public entrypoint for generated code using EmulatedTLS
@ -319,6 +323,8 @@ const emutls_control = extern struct {
};
test "simple_allocator" {
if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
var data1: *[64]u8 = simple_allocator.alloc([64]u8);
defer simple_allocator.free(data1);
for (data1) |*c| {
@ -333,6 +339,8 @@ test "simple_allocator" {
}
test "__emutls_get_address zeroed" {
if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
var ctl = emutls_control.init(usize, null);
try expect(ctl.object.index == 0);
@ -352,6 +360,8 @@ test "__emutls_get_address zeroed" {
}
test "__emutls_get_address with default_value" {
if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
var value: usize = 5678; // default value
var ctl = emutls_control.init(usize, &value);
try expect(ctl.object.index == 0);
@ -370,6 +380,8 @@ test "__emutls_get_address with default_value" {
}
test "test default_value with differents sizes" {
if (!builtin.link_libc or builtin.os.tag != .openbsd) return error.SkipZigTest;
const testType = struct {
fn _testType(comptime T: type, value: T) !void {
var def: T = value;

View File

@ -5,8 +5,23 @@
// https://git.musl-libc.org/cgit/musl/tree/src/math/exp.c
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const math = std.math;
const expect = std.testing.expect;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
@export(__exph, .{ .name = "__exph", .linkage = common.linkage });
@export(expf, .{ .name = "expf", .linkage = common.linkage });
@export(exp, .{ .name = "exp", .linkage = common.linkage });
@export(__expx, .{ .name = "__expx", .linkage = common.linkage });
const expq_sym_name = if (common.want_ppc_abi) "expf128" else "expq";
@export(expq, .{ .name = expq_sym_name, .linkage = common.linkage });
@export(expl, .{ .name = "expl", .linkage = common.linkage });
}
pub fn __exph(a: f16) callconv(.C) f16 {
// TODO: more efficient implementation

View File

@ -5,8 +5,23 @@
// https://git.musl-libc.org/cgit/musl/tree/src/math/exp2.c
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const math = std.math;
const expect = std.testing.expect;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
@export(__exp2h, .{ .name = "__exp2h", .linkage = common.linkage });
@export(exp2f, .{ .name = "exp2f", .linkage = common.linkage });
@export(exp2, .{ .name = "exp2", .linkage = common.linkage });
@export(__exp2x, .{ .name = "__exp2x", .linkage = common.linkage });
const exp2q_sym_name = if (common.want_ppc_abi) "exp2f128" else "exp2q";
@export(exp2q, .{ .name = exp2q_sym_name, .linkage = common.linkage });
@export(exp2l, .{ .name = "exp2l", .linkage = common.linkage });
}
pub fn __exp2h(x: f16) callconv(.C) f16 {
// TODO: more efficient implementation

View File

@ -1,131 +0,0 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const native_arch = builtin.cpu.arch;
// AArch64 is the only ABI (at the moment) to support f16 arguments without the
// need for extending them to wider fp types.
pub const F16T = if (native_arch.isAARCH64()) f16 else u16;
pub fn __extendhfxf2(a: F16T) callconv(.C) f80 {
return extendF80(f16, @bitCast(u16, a));
}
pub fn __extendsfxf2(a: f32) callconv(.C) f80 {
return extendF80(f32, @bitCast(u32, a));
}
pub fn __extenddfxf2(a: f64) callconv(.C) f80 {
return extendF80(f64, @bitCast(u64, a));
}
inline fn extendF80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) f80 {
@setRuntimeSafety(builtin.is_test);
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
const src_sig_bits = std.math.floatMantissaBits(src_t);
const dst_int_bit = 0x8000000000000000;
const dst_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
const dst_exp_bias = 16383;
const src_bits = @bitSizeOf(src_t);
const src_exp_bits = src_bits - src_sig_bits - 1;
const src_inf_exp = (1 << src_exp_bits) - 1;
const src_exp_bias = src_inf_exp >> 1;
const src_min_normal = 1 << src_sig_bits;
const src_inf = src_inf_exp << src_sig_bits;
const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
const src_abs_mask = src_sign_mask - 1;
const src_qnan = 1 << (src_sig_bits - 1);
const src_nan_code = src_qnan - 1;
var dst: std.math.F80 = undefined;
// Break a into a sign and representation of the absolute value
const a_abs = a & src_abs_mask;
const sign: u16 = if (a & src_sign_mask != 0) 0x8000 else 0;
if (a_abs -% src_min_normal < src_inf - src_min_normal) {
// a is a normal number.
// Extend to the destination type by shifting the significand and
// exponent into the proper position and rebiasing the exponent.
dst.exp = @intCast(u16, a_abs >> src_sig_bits);
dst.exp += dst_exp_bias - src_exp_bias;
dst.fraction = @as(u64, a_abs) << (dst_sig_bits - src_sig_bits);
dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
} else if (a_abs >= src_inf) {
// a is NaN or infinity.
// Conjure the result by beginning with infinity, then setting the qNaN
// bit (if needed) and right-aligning the rest of the trailing NaN
// payload field.
dst.exp = 0x7fff;
dst.fraction = dst_int_bit;
dst.fraction |= @as(u64, a_abs & src_qnan) << (dst_sig_bits - src_sig_bits);
dst.fraction |= @as(u64, a_abs & src_nan_code) << (dst_sig_bits - src_sig_bits);
} else if (a_abs != 0) {
// a is denormal.
// renormalize the significand and clear the leading bit, then insert
// the correct adjusted exponent in the destination type.
const scale: u16 = @clz(src_rep_t, a_abs) -
@clz(src_rep_t, @as(src_rep_t, src_min_normal));
dst.fraction = @as(u64, a_abs) << @intCast(u6, dst_sig_bits - src_sig_bits + scale);
dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
dst.exp = @truncate(u16, a_abs >> @intCast(u4, src_sig_bits - scale));
dst.exp ^= 1;
dst.exp |= dst_exp_bias - src_exp_bias - scale + 1;
} else {
// a is zero.
dst.exp = 0;
dst.fraction = 0;
}
dst.exp |= sign;
return std.math.make_f80(dst);
}
pub fn __extendxftf2(a: f80) callconv(.C) f128 {
@setRuntimeSafety(builtin.is_test);
const src_int_bit: u64 = 0x8000000000000000;
const src_sig_mask = ~src_int_bit;
const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
const dst_sig_bits = std.math.floatMantissaBits(f128);
const dst_bits = @bitSizeOf(f128);
const dst_min_normal = @as(u128, 1) << dst_sig_bits;
// Break a into a sign and representation of the absolute value
var a_rep = std.math.break_f80(a);
const sign = a_rep.exp & 0x8000;
a_rep.exp &= 0x7FFF;
var abs_result: u128 = undefined;
if (a_rep.exp == 0 and a_rep.fraction == 0) {
// zero
abs_result = 0;
} else if (a_rep.exp == 0x7FFF) {
// a is nan or infinite
abs_result = @as(u128, a_rep.fraction) << (dst_sig_bits - src_sig_bits);
abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
} else if (a_rep.fraction & src_int_bit != 0) {
// a is a normal value
abs_result = @as(u128, a_rep.fraction & src_sig_mask) << (dst_sig_bits - src_sig_bits);
abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
} else {
// a is denormal
// renormalize the significand and clear the leading bit and integer part,
// then insert the correct adjusted exponent in the destination type.
const scale: u32 = @clz(u64, a_rep.fraction);
abs_result = @as(u128, a_rep.fraction) << @intCast(u7, dst_sig_bits - src_sig_bits + scale + 1);
abs_result ^= dst_min_normal;
abs_result |= @as(u128, scale + 1) << dst_sig_bits;
}
// Apply the signbit to (dst_t)abs(a).
const result: u128 align(@alignOf(f128)) = abs_result | @as(u128, sign) << (dst_bits - 16);
return @bitCast(f128, result);
}

View File

@ -0,0 +1,26 @@
const common = @import("./common.zig");
const extendf = @import("./extendf.zig").extendf;
pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
@export(__extenddfkf2, .{ .name = "__extenddfkf2", .linkage = common.linkage });
} else if (common.want_sparc_abi) {
@export(_Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = common.linkage });
} else {
@export(__extenddftf2, .{ .name = "__extenddftf2", .linkage = common.linkage });
}
}
pub fn __extenddftf2(a: f64) callconv(.C) f128 {
return extendf(f128, f64, @bitCast(u64, a));
}
fn __extenddfkf2(a: f64) callconv(.C) f128 {
return extendf(f128, f64, @bitCast(u64, a));
}
fn _Qp_dtoq(c: *f128, a: f64) callconv(.C) void {
c.* = extendf(f128, f64, @bitCast(u64, a));
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const extend_f80 = @import("./extendf.zig").extend_f80;
pub const panic = common.panic;
comptime {
@export(__extenddfxf2, .{ .name = "__extenddfxf2", .linkage = common.linkage });
}
fn __extenddfxf2(a: f64) callconv(.C) f80 {
return extend_f80(f64, @bitCast(u64, a));
}

View File

@ -1,45 +1,10 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const native_arch = builtin.cpu.arch;
pub fn __extendsfdf2(a: f32) callconv(.C) f64 {
return extendXfYf2(f64, f32, @bitCast(u32, a));
}
pub fn __extenddftf2(a: f64) callconv(.C) f128 {
return extendXfYf2(f128, f64, @bitCast(u64, a));
}
pub fn __extendsftf2(a: f32) callconv(.C) f128 {
return extendXfYf2(f128, f32, @bitCast(u32, a));
}
// AArch64 is the only ABI (at the moment) to support f16 arguments without the
// need for extending them to wider fp types.
pub const F16T = if (native_arch.isAARCH64()) f16 else u16;
pub fn __extendhfsf2(a: F16T) callconv(.C) f32 {
return extendXfYf2(f32, f16, @bitCast(u16, a));
}
pub fn __extendhftf2(a: F16T) callconv(.C) f128 {
return extendXfYf2(f128, f16, @bitCast(u16, a));
}
pub fn __aeabi_h2f(arg: u16) callconv(.AAPCS) f32 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f32, f16, arg });
}
pub fn __aeabi_f2d(arg: f32) callconv(.AAPCS) f64 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f64, f32, @bitCast(u32, arg) });
}
inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) dst_t {
@setRuntimeSafety(builtin.is_test);
pub inline fn extendf(
comptime dst_t: type,
comptime src_t: type,
a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits),
) dst_t {
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
@ -107,6 +72,71 @@ inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.In
return @bitCast(dst_t, result);
}
test {
_ = @import("extendXfYf2_test.zig");
pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) f80 {
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
const src_sig_bits = std.math.floatMantissaBits(src_t);
const dst_int_bit = 0x8000000000000000;
const dst_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
const dst_exp_bias = 16383;
const src_bits = @bitSizeOf(src_t);
const src_exp_bits = src_bits - src_sig_bits - 1;
const src_inf_exp = (1 << src_exp_bits) - 1;
const src_exp_bias = src_inf_exp >> 1;
const src_min_normal = 1 << src_sig_bits;
const src_inf = src_inf_exp << src_sig_bits;
const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
const src_abs_mask = src_sign_mask - 1;
const src_qnan = 1 << (src_sig_bits - 1);
const src_nan_code = src_qnan - 1;
var dst: std.math.F80 = undefined;
// Break a into a sign and representation of the absolute value
const a_abs = a & src_abs_mask;
const sign: u16 = if (a & src_sign_mask != 0) 0x8000 else 0;
if (a_abs -% src_min_normal < src_inf - src_min_normal) {
// a is a normal number.
// Extend to the destination type by shifting the significand and
// exponent into the proper position and rebiasing the exponent.
dst.exp = @intCast(u16, a_abs >> src_sig_bits);
dst.exp += dst_exp_bias - src_exp_bias;
dst.fraction = @as(u64, a_abs) << (dst_sig_bits - src_sig_bits);
dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
} else if (a_abs >= src_inf) {
// a is NaN or infinity.
// Conjure the result by beginning with infinity, then setting the qNaN
// bit (if needed) and right-aligning the rest of the trailing NaN
// payload field.
dst.exp = 0x7fff;
dst.fraction = dst_int_bit;
dst.fraction |= @as(u64, a_abs & src_qnan) << (dst_sig_bits - src_sig_bits);
dst.fraction |= @as(u64, a_abs & src_nan_code) << (dst_sig_bits - src_sig_bits);
} else if (a_abs != 0) {
// a is denormal.
// renormalize the significand and clear the leading bit, then insert
// the correct adjusted exponent in the destination type.
const scale: u16 = @clz(src_rep_t, a_abs) -
@clz(src_rep_t, @as(src_rep_t, src_min_normal));
dst.fraction = @as(u64, a_abs) << @intCast(u6, dst_sig_bits - src_sig_bits + scale);
dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
dst.exp = @truncate(u16, a_abs >> @intCast(u4, src_sig_bits - scale));
dst.exp ^= 1;
dst.exp |= dst_exp_bias - src_exp_bias - scale + 1;
} else {
// a is zero.
dst.exp = 0;
dst.fraction = 0;
}
dst.exp |= sign;
return std.math.make_f80(dst);
}
test {
_ = @import("extendf_test.zig");
}
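Because widening a float is exact, the generic helper can be spot-checked against Zig's own float coercion. A hypothetical sketch, not part of the commit, reusing the imports at the top of this file:
test "extendf(f64, f32) matches native widening (sketch)" {
    const x: f32 = 1234.5678;
    const widened = extendf(f64, f32, @bitCast(u32, x));
    try std.testing.expect(widened == @as(f64, x));
}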

View File

@ -1,22 +1,22 @@
const builtin = @import("builtin");
const __extendhfsf2 = @import("extendXfYf2.zig").__extendhfsf2;
const __extendhftf2 = @import("extendXfYf2.zig").__extendhftf2;
const __extendsftf2 = @import("extendXfYf2.zig").__extendsftf2;
const __extenddftf2 = @import("extendXfYf2.zig").__extenddftf2;
const F16T = @import("extendXfYf2.zig").F16T;
const __extendhfsf2 = @import("extendhfsf2.zig").__extendhfsf2;
const __extendhftf2 = @import("extendhftf2.zig").__extendhftf2;
const __extendsftf2 = @import("extendsftf2.zig").__extendsftf2;
const __extenddftf2 = @import("extenddftf2.zig").__extenddftf2;
const F16T = @import("./common.zig").F16T;
fn test__extenddftf2(a: f64, expectedHi: u64, expectedLo: u64) !void {
fn test__extenddftf2(a: f64, expected_hi: u64, expected_lo: u64) !void {
const x = __extenddftf2(a);
const rep = @bitCast(u128, x);
const hi = @intCast(u64, rep >> 64);
const lo = @truncate(u64, rep);
if (hi == expectedHi and lo == expectedLo)
if (hi == expected_hi and lo == expected_lo)
return;
// test other possible NaN representation (signaling NaN)
if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
((hi & 0xffffffffffff) > 0 or lo > 0))
{
@ -43,18 +43,18 @@ fn test__extendhfsf2(a: u16, expected: u32) !void {
return error.TestFailure;
}
fn test__extendsftf2(a: f32, expectedHi: u64, expectedLo: u64) !void {
fn test__extendsftf2(a: f32, expected_hi: u64, expected_lo: u64) !void {
const x = __extendsftf2(a);
const rep = @bitCast(u128, x);
const hi = @intCast(u64, rep >> 64);
const lo = @truncate(u64, rep);
if (hi == expectedHi and lo == expectedLo)
if (hi == expected_hi and lo == expected_lo)
return;
// test other possible NaN representation (signaling NaN)
if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
((hi & 0xffffffffffff) > 0 or lo > 0))
{
@ -159,18 +159,18 @@ fn makeInf32() f32 {
return @bitCast(f32, @as(u32, 0x7f800000));
}
fn test__extendhftf2(a: u16, expectedHi: u64, expectedLo: u64) !void {
fn test__extendhftf2(a: u16, expected_hi: u64, expected_lo: u64) !void {
const x = __extendhftf2(@bitCast(F16T, a));
const rep = @bitCast(u128, x);
const hi = @intCast(u64, rep >> 64);
const lo = @truncate(u64, rep);
if (hi == expectedHi and lo == expectedLo)
if (hi == expected_hi and lo == expected_lo)
return;
// test other possible NaN representation (signaling NaN)
if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
((hi & 0xffffffffffff) > 0 or lo > 0))
{

View File

@ -0,0 +1,26 @@
const common = @import("./common.zig");
const extendf = @import("./extendf.zig").extendf;
pub const panic = common.panic;
comptime {
if (common.gnu_f16_abi) {
@export(__gnu_h2f_ieee, .{ .name = "__gnu_h2f_ieee", .linkage = common.linkage });
} else if (common.want_aeabi) {
@export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = common.linkage });
} else {
@export(__extendhfsf2, .{ .name = "__extendhfsf2", .linkage = common.linkage });
}
}
pub fn __extendhfsf2(a: common.F16T) callconv(.C) f32 {
return extendf(f32, f16, @bitCast(u16, a));
}
fn __gnu_h2f_ieee(a: common.F16T) callconv(.C) f32 {
return extendf(f32, f16, @bitCast(u16, a));
}
fn __aeabi_h2f(a: u16) callconv(.AAPCS) f32 {
return extendf(f32, f16, @bitCast(u16, a));
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const extendf = @import("./extendf.zig").extendf;
pub const panic = common.panic;
comptime {
@export(__extendhftf2, .{ .name = "__extendhftf2", .linkage = common.linkage });
}
pub fn __extendhftf2(a: common.F16T) callconv(.C) f128 {
return extendf(f128, f16, @bitCast(u16, a));
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const extend_f80 = @import("./extendf.zig").extend_f80;
pub const panic = common.panic;
comptime {
@export(__extendhfxf2, .{ .name = "__extendhfxf2", .linkage = common.linkage });
}
fn __extendhfxf2(a: common.F16T) callconv(.C) f80 {
return extend_f80(f16, @bitCast(u16, a));
}

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const extendf = @import("./extendf.zig").extendf;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = common.linkage });
} else {
@export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = common.linkage });
}
}
fn __extendsfdf2(a: f32) callconv(.C) f64 {
return extendf(f64, f32, @bitCast(u32, a));
}
fn __aeabi_f2d(a: f32) callconv(.AAPCS) f64 {
return extendf(f64, f32, @bitCast(u32, a));
}

View File

@ -0,0 +1,26 @@
const common = @import("./common.zig");
const extendf = @import("./extendf.zig").extendf;
pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
@export(__extendsfkf2, .{ .name = "__extendsfkf2", .linkage = common.linkage });
} else if (common.want_sparc_abi) {
@export(_Qp_stoq, .{ .name = "_Qp_stoq", .linkage = common.linkage });
} else {
@export(__extendsftf2, .{ .name = "__extendsftf2", .linkage = common.linkage });
}
}
pub fn __extendsftf2(a: f32) callconv(.C) f128 {
return extendf(f128, f32, @bitCast(u32, a));
}
fn __extendsfkf2(a: f32) callconv(.C) f128 {
return extendf(f128, f32, @bitCast(u32, a));
}
fn _Qp_stoq(c: *f128, a: f32) callconv(.C) void {
c.* = extendf(f128, f32, @bitCast(u32, a));
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const extend_f80 = @import("./extendf.zig").extend_f80;
pub const panic = common.panic;
comptime {
@export(__extendsfxf2, .{ .name = "__extendsfxf2", .linkage = common.linkage });
}
fn __extendsfxf2(a: f32) callconv(.C) f80 {
return extend_f80(f32, @bitCast(u32, a));
}

View File

@ -0,0 +1,50 @@
const std = @import("std");
const common = @import("./common.zig");
pub const panic = common.panic;
comptime {
@export(__extendxftf2, .{ .name = "__extendxftf2", .linkage = common.linkage });
}
fn __extendxftf2(a: f80) callconv(.C) f128 {
const src_int_bit: u64 = 0x8000000000000000;
const src_sig_mask = ~src_int_bit;
const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
const dst_sig_bits = std.math.floatMantissaBits(f128);
const dst_bits = @bitSizeOf(f128);
const dst_min_normal = @as(u128, 1) << dst_sig_bits;
// Break a into a sign and representation of the absolute value
var a_rep = std.math.break_f80(a);
const sign = a_rep.exp & 0x8000;
a_rep.exp &= 0x7FFF;
var abs_result: u128 = undefined;
if (a_rep.exp == 0 and a_rep.fraction == 0) {
// zero
abs_result = 0;
} else if (a_rep.exp == 0x7FFF) {
// a is nan or infinite
abs_result = @as(u128, a_rep.fraction) << (dst_sig_bits - src_sig_bits);
abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
} else if (a_rep.fraction & src_int_bit != 0) {
// a is a normal value
abs_result = @as(u128, a_rep.fraction & src_sig_mask) << (dst_sig_bits - src_sig_bits);
abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
} else {
// a is denormal
// renormalize the significand and clear the leading bit and integer part,
// then insert the correct adjusted exponent in the destination type.
const scale: u32 = @clz(u64, a_rep.fraction);
abs_result = @as(u128, a_rep.fraction) << @intCast(u7, dst_sig_bits - src_sig_bits + scale + 1);
abs_result ^= dst_min_normal;
abs_result |= @as(u128, scale + 1) << dst_sig_bits;
}
// Apply the signbit to (dst_t)abs(a).
const result: u128 align(@alignOf(f128)) = abs_result | @as(u128, sign) << (dst_bits - 16);
return @bitCast(f128, result);
}

View File

@ -1,4 +1,19 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
@export(__fabsh, .{ .name = "__fabsh", .linkage = common.linkage });
@export(fabsf, .{ .name = "fabsf", .linkage = common.linkage });
@export(fabs, .{ .name = "fabs", .linkage = common.linkage });
@export(__fabsx, .{ .name = "__fabsx", .linkage = common.linkage });
const fabsq_sym_name = if (common.want_ppc_abi) "fabsf128" else "fabsq";
@export(fabsq, .{ .name = fabsq_sym_name, .linkage = common.linkage });
@export(fabsl, .{ .name = "fabsl", .linkage = common.linkage });
}
pub fn __fabsh(a: f16) callconv(.C) f16 {
return generic_fabs(a);

View File

@ -1,224 +0,0 @@
const std = @import("std");
const math = std.math;
const Log2Int = math.Log2Int;
const is_test = @import("builtin").is_test;
pub inline fn fixXfYi(comptime I: type, a: anytype) I {
@setRuntimeSafety(is_test);
const F = @TypeOf(a);
const float_bits = @typeInfo(F).Float.bits;
const int_bits = @typeInfo(I).Int.bits;
const rep_t = std.meta.Int(.unsigned, float_bits);
const sig_bits = math.floatMantissaBits(F);
const exp_bits = math.floatExponentBits(F);
const fractional_bits = math.floatFractionalBits(F);
const implicit_bit = if (F != f80) (@as(rep_t, 1) << sig_bits) else 0;
const max_exp = (1 << (exp_bits - 1));
const exp_bias = max_exp - 1;
const sig_mask = (@as(rep_t, 1) << sig_bits) - 1;
// Break a into sign, exponent, significand
const a_rep: rep_t = @bitCast(rep_t, a);
const negative = (a_rep >> (float_bits - 1)) != 0;
const exponent = @intCast(i32, (a_rep << 1) >> (sig_bits + 1)) - exp_bias;
const significand: rep_t = (a_rep & sig_mask) | implicit_bit;
// If the exponent is negative, the result rounds to zero.
if (exponent < 0) return 0;
// If the value is too large for the integer type, saturate.
switch (@typeInfo(I).Int.signedness) {
.unsigned => {
if (negative) return 0;
if (@intCast(c_uint, exponent) >= @minimum(int_bits, max_exp)) return math.maxInt(I);
},
.signed => if (@intCast(c_uint, exponent) >= @minimum(int_bits - 1, max_exp)) {
return if (negative) math.minInt(I) else math.maxInt(I);
},
}
// If 0 <= exponent < sig_bits, right shift to get the result.
// Otherwise, shift left.
var result: I = undefined;
if (exponent < fractional_bits) {
result = @intCast(I, significand >> @intCast(Log2Int(rep_t), fractional_bits - exponent));
} else {
result = @intCast(I, significand) << @intCast(Log2Int(I), exponent - fractional_bits);
}
if ((@typeInfo(I).Int.signedness == .signed) and negative)
return ~result +% 1;
return result;
}
// Conversion from f16
pub fn __fixhfsi(a: f16) callconv(.C) i32 {
return fixXfYi(i32, a);
}
pub fn __fixunshfsi(a: f16) callconv(.C) u32 {
return fixXfYi(u32, a);
}
pub fn __fixhfdi(a: f16) callconv(.C) i64 {
return fixXfYi(i64, a);
}
pub fn __fixunshfdi(a: f16) callconv(.C) u64 {
return fixXfYi(u64, a);
}
pub fn __fixhfti(a: f16) callconv(.C) i128 {
return fixXfYi(i128, a);
}
pub fn __fixunshfti(a: f16) callconv(.C) u128 {
return fixXfYi(u128, a);
}
// Conversion from f32
pub fn __fixsfsi(a: f32) callconv(.C) i32 {
return fixXfYi(i32, a);
}
pub fn __fixunssfsi(a: f32) callconv(.C) u32 {
return fixXfYi(u32, a);
}
pub fn __fixsfdi(a: f32) callconv(.C) i64 {
return fixXfYi(i64, a);
}
pub fn __fixunssfdi(a: f32) callconv(.C) u64 {
return fixXfYi(u64, a);
}
pub fn __fixsfti(a: f32) callconv(.C) i128 {
return fixXfYi(i128, a);
}
pub fn __fixunssfti(a: f32) callconv(.C) u128 {
return fixXfYi(u128, a);
}
// Conversion from f64
pub fn __fixdfsi(a: f64) callconv(.C) i32 {
return fixXfYi(i32, a);
}
pub fn __fixunsdfsi(a: f64) callconv(.C) u32 {
return fixXfYi(u32, a);
}
pub fn __fixdfdi(a: f64) callconv(.C) i64 {
return fixXfYi(i64, a);
}
pub fn __fixunsdfdi(a: f64) callconv(.C) u64 {
return fixXfYi(u64, a);
}
pub fn __fixdfti(a: f64) callconv(.C) i128 {
return fixXfYi(i128, a);
}
pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
return fixXfYi(u128, a);
}
// Conversion from f80
pub fn __fixxfsi(a: f80) callconv(.C) i32 {
return fixXfYi(i32, a);
}
pub fn __fixunsxfsi(a: f80) callconv(.C) u32 {
return fixXfYi(u32, a);
}
pub fn __fixxfdi(a: f80) callconv(.C) i64 {
return fixXfYi(i64, a);
}
pub fn __fixunsxfdi(a: f80) callconv(.C) u64 {
return fixXfYi(u64, a);
}
pub fn __fixxfti(a: f80) callconv(.C) i128 {
return fixXfYi(i128, a);
}
pub fn __fixunsxfti(a: f80) callconv(.C) u128 {
return fixXfYi(u128, a);
}
// Conversion from f128
pub fn __fixtfsi(a: f128) callconv(.C) i32 {
return fixXfYi(i32, a);
}
pub fn __fixunstfsi(a: f128) callconv(.C) u32 {
return fixXfYi(u32, a);
}
pub fn __fixtfdi(a: f128) callconv(.C) i64 {
return fixXfYi(i64, a);
}
pub fn __fixunstfdi(a: f128) callconv(.C) u64 {
return fixXfYi(u64, a);
}
pub fn __fixtfti(a: f128) callconv(.C) i128 {
return fixXfYi(i128, a);
}
pub fn __fixunstfti(a: f128) callconv(.C) u128 {
return fixXfYi(u128, a);
}
// Conversion from f32
pub fn __aeabi_f2iz(a: f32) callconv(.AAPCS) i32 {
return fixXfYi(i32, a);
}
pub fn __aeabi_f2uiz(a: f32) callconv(.AAPCS) u32 {
return fixXfYi(u32, a);
}
pub fn __aeabi_f2lz(a: f32) callconv(.AAPCS) i64 {
return fixXfYi(i64, a);
}
pub fn __aeabi_f2ulz(a: f32) callconv(.AAPCS) u64 {
return fixXfYi(u64, a);
}
// Conversion from f64
pub fn __aeabi_d2iz(a: f64) callconv(.AAPCS) i32 {
return fixXfYi(i32, a);
}
pub fn __aeabi_d2uiz(a: f64) callconv(.AAPCS) u32 {
return fixXfYi(u32, a);
}
pub fn __aeabi_d2lz(a: f64) callconv(.AAPCS) i64 {
return fixXfYi(i64, a);
}
pub fn __aeabi_d2ulz(a: f64) callconv(.AAPCS) u64 {
return fixXfYi(u64, a);
}
test {
_ = @import("fixXfYi_test.zig");
}
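The new per-symbol fix*.zig files delegate to floatToInt in float_to_int.zig; assuming it carries over the saturating behaviour of the routine deleted here, a contract sketch (illustrative only, not part of the commit) would be:
const std = @import("std");
const floatToInt = @import("float_to_int.zig").floatToInt;
test "floatToInt saturates out-of-range inputs (sketch)" {
    // Negative inputs clamp to 0 for unsigned targets; inputs beyond the
    // integer range clamp to maxInt/minInt rather than invoking UB.
    try std.testing.expect(floatToInt(u32, @as(f64, -1.0)) == 0);
    try std.testing.expect(floatToInt(i32, @as(f64, 3.0e10)) == std.math.maxInt(i32));
    try std.testing.expect(floatToInt(i32, @as(f64, -3.0e10)) == std.math.minInt(i32));
}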

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = common.linkage });
} else {
@export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = common.linkage });
}
}
pub fn __fixdfdi(a: f64) callconv(.C) i64 {
return floatToInt(i64, a);
}
fn __aeabi_d2lz(a: f64) callconv(.AAPCS) i64 {
return floatToInt(i64, a);
}

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = common.linkage });
} else {
@export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = common.linkage });
}
}
pub fn __fixdfsi(a: f64) callconv(.C) i32 {
return floatToInt(i32, a);
}
fn __aeabi_d2iz(a: f64) callconv(.AAPCS) i32 {
return floatToInt(i32, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixdfti, .{ .name = "__fixdfti", .linkage = common.linkage });
}
pub fn __fixdfti(a: f64) callconv(.C) i128 {
return floatToInt(i128, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixhfdi, .{ .name = "__fixhfdi", .linkage = common.linkage });
}
fn __fixhfdi(a: f16) callconv(.C) i64 {
return floatToInt(i64, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixhfsi, .{ .name = "__fixhfsi", .linkage = common.linkage });
}
fn __fixhfsi(a: f16) callconv(.C) i32 {
return floatToInt(i32, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixhfti, .{ .name = "__fixhfti", .linkage = common.linkage });
}
fn __fixhfti(a: f16) callconv(.C) i128 {
return floatToInt(i128, a);
}

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = common.linkage });
} else {
@export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = common.linkage });
}
}
pub fn __fixsfdi(a: f32) callconv(.C) i64 {
return floatToInt(i64, a);
}
fn __aeabi_f2lz(a: f32) callconv(.AAPCS) i64 {
return floatToInt(i64, a);
}

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = common.linkage });
} else {
@export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = common.linkage });
}
}
pub fn __fixsfsi(a: f32) callconv(.C) i32 {
return floatToInt(i32, a);
}
fn __aeabi_f2iz(a: f32) callconv(.AAPCS) i32 {
return floatToInt(i32, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixsfti, .{ .name = "__fixsfti", .linkage = common.linkage });
}
pub fn __fixsfti(a: f32) callconv(.C) i128 {
return floatToInt(i128, a);
}

View File

@ -0,0 +1,26 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
@export(__fixkfdi, .{ .name = "__fixkfdi", .linkage = common.linkage });
} else if (common.want_sparc_abi) {
@export(_Qp_qtox, .{ .name = "_Qp_qtox", .linkage = common.linkage });
} else {
@export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = common.linkage });
}
}
pub fn __fixtfdi(a: f128) callconv(.C) i64 {
return floatToInt(i64, a);
}
fn __fixkfdi(a: f128) callconv(.C) i64 {
return floatToInt(i64, a);
}
fn _Qp_qtox(a: *const f128) callconv(.C) i64 {
return floatToInt(i64, a.*);
}

View File

@ -0,0 +1,26 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
@export(__fixkfsi, .{ .name = "__fixkfsi", .linkage = common.linkage });
} else if (common.want_sparc_abi) {
@export(_Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = common.linkage });
} else {
@export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = common.linkage });
}
}
pub fn __fixtfsi(a: f128) callconv(.C) i32 {
return floatToInt(i32, a);
}
fn __fixkfsi(a: f128) callconv(.C) i32 {
return floatToInt(i32, a);
}
fn _Qp_qtoi(a: *const f128) callconv(.C) i32 {
return floatToInt(i32, a.*);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixtfti, .{ .name = "__fixtfti", .linkage = common.linkage });
}
pub fn __fixtfti(a: f128) callconv(.C) i128 {
return floatToInt(i128, a);
}

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = common.linkage });
} else {
@export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = common.linkage });
}
}
pub fn __fixunsdfdi(a: f64) callconv(.C) u64 {
return floatToInt(u64, a);
}
fn __aeabi_d2ulz(a: f64) callconv(.AAPCS) u64 {
return floatToInt(u64, a);
}

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = common.linkage });
} else {
@export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = common.linkage });
}
}
pub fn __fixunsdfsi(a: f64) callconv(.C) u32 {
return floatToInt(u32, a);
}
fn __aeabi_d2uiz(a: f64) callconv(.AAPCS) u32 {
return floatToInt(u32, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = common.linkage });
}
pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
return floatToInt(u128, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixunshfdi, .{ .name = "__fixunshfdi", .linkage = common.linkage });
}
fn __fixunshfdi(a: f16) callconv(.C) u64 {
return floatToInt(u64, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixunshfsi, .{ .name = "__fixunshfsi", .linkage = common.linkage });
}
fn __fixunshfsi(a: f16) callconv(.C) u32 {
return floatToInt(u32, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixunshfti, .{ .name = "__fixunshfti", .linkage = common.linkage });
}
pub fn __fixunshfti(a: f16) callconv(.C) u128 {
return floatToInt(u128, a);
}

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = common.linkage });
} else {
@export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = common.linkage });
}
}
pub fn __fixunssfdi(a: f32) callconv(.C) u64 {
return floatToInt(u64, a);
}
fn __aeabi_f2ulz(a: f32) callconv(.AAPCS) u64 {
return floatToInt(u64, a);
}

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = common.linkage });
} else {
@export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = common.linkage });
}
}
pub fn __fixunssfsi(a: f32) callconv(.C) u32 {
return floatToInt(u32, a);
}
fn __aeabi_f2uiz(a: f32) callconv(.AAPCS) u32 {
return floatToInt(u32, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = common.linkage });
}
pub fn __fixunssfti(a: f32) callconv(.C) u128 {
return floatToInt(u128, a);
}

View File

@ -0,0 +1,26 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
@export(__fixunskfdi, .{ .name = "__fixunskfdi", .linkage = common.linkage });
} else if (common.want_sparc_abi) {
@export(_Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = common.linkage });
} else {
@export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = common.linkage });
}
}
pub fn __fixunstfdi(a: f128) callconv(.C) u64 {
return floatToInt(u64, a);
}
fn __fixunskfdi(a: f128) callconv(.C) u64 {
return floatToInt(u64, a);
}
fn _Qp_qtoux(a: *const f128) callconv(.C) u64 {
return floatToInt(u64, a.*);
}

View File

@ -0,0 +1,26 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
@export(__fixunskfsi, .{ .name = "__fixunskfsi", .linkage = common.linkage });
} else if (common.want_sparc_abi) {
@export(_Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = common.linkage });
} else {
@export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = common.linkage });
}
}
pub fn __fixunstfsi(a: f128) callconv(.C) u32 {
return floatToInt(u32, a);
}
fn __fixunskfsi(a: f128) callconv(.C) u32 {
return floatToInt(u32, a);
}
fn _Qp_qtoui(a: *const f128) callconv(.C) u32 {
return floatToInt(u32, a.*);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = common.linkage });
}
pub fn __fixunstfti(a: f128) callconv(.C) u128 {
return floatToInt(u128, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;

pub const panic = common.panic;

comptime {
    @export(__fixunsxfdi, .{ .name = "__fixunsxfdi", .linkage = common.linkage });
}

fn __fixunsxfdi(a: f80) callconv(.C) u64 {
    return floatToInt(u64, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;

pub const panic = common.panic;

comptime {
    @export(__fixunsxfsi, .{ .name = "__fixunsxfsi", .linkage = common.linkage });
}

fn __fixunsxfsi(a: f80) callconv(.C) u32 {
    return floatToInt(u32, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;

pub const panic = common.panic;

comptime {
    @export(__fixunsxfti, .{ .name = "__fixunsxfti", .linkage = common.linkage });
}

pub fn __fixunsxfti(a: f80) callconv(.C) u128 {
    return floatToInt(u128, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;

pub const panic = common.panic;

comptime {
    @export(__fixxfdi, .{ .name = "__fixxfdi", .linkage = common.linkage });
}

fn __fixxfdi(a: f80) callconv(.C) i64 {
    return floatToInt(i64, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;

pub const panic = common.panic;

comptime {
    @export(__fixxfsi, .{ .name = "__fixxfsi", .linkage = common.linkage });
}

fn __fixxfsi(a: f80) callconv(.C) i32 {
    return floatToInt(i32, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;

pub const panic = common.panic;

comptime {
    @export(__fixxfti, .{ .name = "__fixxfti", .linkage = common.linkage });
}

fn __fixxfti(a: f80) callconv(.C) i128 {
    return floatToInt(i128, a);
}

View File

@ -1,222 +0,0 @@
const builtin = @import("builtin");
const is_test = builtin.is_test;
const std = @import("std");
const math = std.math;
const expect = std.testing.expect;

pub fn floatXiYf(comptime T: type, x: anytype) T {
    @setRuntimeSafety(is_test);

    if (x == 0) return 0;

    // Various constants whose values follow from the type parameters.
    // Any reasonable optimizer will fold and propagate all of these.
    const Z = std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(x)));
    const uT = std.meta.Int(.unsigned, @bitSizeOf(T));
    const inf = math.inf(T);
    const float_bits = @bitSizeOf(T);
    const int_bits = @bitSizeOf(@TypeOf(x));
    const exp_bits = math.floatExponentBits(T);
    const fractional_bits = math.floatFractionalBits(T);
    const exp_bias = math.maxInt(std.meta.Int(.unsigned, exp_bits - 1));
    const implicit_bit = if (T != f80) @as(uT, 1) << fractional_bits else 0;
    const max_exp = exp_bias;

    // Sign
    var abs_val = math.absCast(x);
    const sign_bit = if (x < 0) @as(uT, 1) << (float_bits - 1) else 0;
    var result: uT = sign_bit;

    // Compute significand
    var exp = int_bits - @clz(Z, abs_val) - 1;
    if (int_bits <= fractional_bits or exp <= fractional_bits) {
        const shift_amt = fractional_bits - @intCast(math.Log2Int(uT), exp);
        // Shift up result to line up with the significand - no rounding required
        result = (@intCast(uT, abs_val) << shift_amt);
        result ^= implicit_bit; // Remove implicit integer bit
    } else {
        var shift_amt = @intCast(math.Log2Int(Z), exp - fractional_bits);
        const exact_tie: bool = @ctz(Z, abs_val) == shift_amt - 1;
        // Shift down result and remove implicit integer bit
        result = @intCast(uT, (abs_val >> (shift_amt - 1))) ^ (implicit_bit << 1);
        // Round result, including round-to-even for exact ties
        result = ((result + 1) >> 1) & ~@as(uT, @boolToInt(exact_tie));
    }

    // Compute exponent
    if ((int_bits > max_exp) and (exp > max_exp)) // If exponent too large, overflow to infinity
        return @bitCast(T, sign_bit | @bitCast(uT, inf));
    result += (@as(uT, exp) + exp_bias) << math.floatMantissaBits(T);
    // If the result included a carry, we need to restore the explicit integer bit
    if (T == f80) result |= 1 << fractional_bits;
    return @bitCast(T, sign_bit | result);
}

// Conversion to f16
pub fn __floatsihf(a: i32) callconv(.C) f16 {
    return floatXiYf(f16, a);
}
pub fn __floatunsihf(a: u32) callconv(.C) f16 {
    return floatXiYf(f16, a);
}
pub fn __floatdihf(a: i64) callconv(.C) f16 {
    return floatXiYf(f16, a);
}
pub fn __floatundihf(a: u64) callconv(.C) f16 {
    return floatXiYf(f16, a);
}
pub fn __floattihf(a: i128) callconv(.C) f16 {
    return floatXiYf(f16, a);
}
pub fn __floatuntihf(a: u128) callconv(.C) f16 {
    return floatXiYf(f16, a);
}

// Conversion to f32
pub fn __floatsisf(a: i32) callconv(.C) f32 {
    return floatXiYf(f32, a);
}
pub fn __floatunsisf(a: u32) callconv(.C) f32 {
    return floatXiYf(f32, a);
}
pub fn __floatdisf(a: i64) callconv(.C) f32 {
    return floatXiYf(f32, a);
}
pub fn __floatundisf(a: u64) callconv(.C) f32 {
    return floatXiYf(f32, a);
}
pub fn __floattisf(a: i128) callconv(.C) f32 {
    return floatXiYf(f32, a);
}
pub fn __floatuntisf(a: u128) callconv(.C) f32 {
    return floatXiYf(f32, a);
}

// Conversion to f64
pub fn __floatsidf(a: i32) callconv(.C) f64 {
    return floatXiYf(f64, a);
}
pub fn __floatunsidf(a: u32) callconv(.C) f64 {
    return floatXiYf(f64, a);
}
pub fn __floatdidf(a: i64) callconv(.C) f64 {
    return floatXiYf(f64, a);
}
pub fn __floatundidf(a: u64) callconv(.C) f64 {
    return floatXiYf(f64, a);
}
pub fn __floattidf(a: i128) callconv(.C) f64 {
    return floatXiYf(f64, a);
}
pub fn __floatuntidf(a: u128) callconv(.C) f64 {
    return floatXiYf(f64, a);
}

// Conversion to f80
pub fn __floatsixf(a: i32) callconv(.C) f80 {
    return floatXiYf(f80, a);
}
pub fn __floatunsixf(a: u32) callconv(.C) f80 {
    return floatXiYf(f80, a);
}
pub fn __floatdixf(a: i64) callconv(.C) f80 {
    return floatXiYf(f80, a);
}
pub fn __floatundixf(a: u64) callconv(.C) f80 {
    return floatXiYf(f80, a);
}
pub fn __floattixf(a: i128) callconv(.C) f80 {
    return floatXiYf(f80, a);
}
pub fn __floatuntixf(a: u128) callconv(.C) f80 {
    return floatXiYf(f80, a);
}

// Conversion to f128
pub fn __floatsitf(a: i32) callconv(.C) f128 {
    return floatXiYf(f128, a);
}
pub fn __floatunsitf(a: u32) callconv(.C) f128 {
    return floatXiYf(f128, a);
}
pub fn __floatditf(a: i64) callconv(.C) f128 {
    return floatXiYf(f128, a);
}
pub fn __floatunditf(a: u64) callconv(.C) f128 {
    return floatXiYf(f128, a);
}
pub fn __floattitf(a: i128) callconv(.C) f128 {
    return floatXiYf(f128, a);
}
pub fn __floatuntitf(a: u128) callconv(.C) f128 {
    return floatXiYf(f128, a);
}

// Conversion to f32
pub fn __aeabi_ui2f(arg: u32) callconv(.AAPCS) f32 {
    return floatXiYf(f32, arg);
}
pub fn __aeabi_i2f(arg: i32) callconv(.AAPCS) f32 {
    return floatXiYf(f32, arg);
}
pub fn __aeabi_ul2f(arg: u64) callconv(.AAPCS) f32 {
    return floatXiYf(f32, arg);
}
pub fn __aeabi_l2f(arg: i64) callconv(.AAPCS) f32 {
    return floatXiYf(f32, arg);
}

// Conversion to f64
pub fn __aeabi_ui2d(arg: u32) callconv(.AAPCS) f64 {
    return floatXiYf(f64, arg);
}
pub fn __aeabi_i2d(arg: i32) callconv(.AAPCS) f64 {
    return floatXiYf(f64, arg);
}
pub fn __aeabi_ul2d(arg: u64) callconv(.AAPCS) f64 {
    return floatXiYf(f64, arg);
}
pub fn __aeabi_l2d(arg: i64) callconv(.AAPCS) f64 {
    return floatXiYf(f64, arg);
}

test {
    _ = @import("floatXiYf_test.zig");
}
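The tie-rounding step above ("round-to-even for exact ties") is easiest to see with an integer that lands exactly halfway between two representable f32 values. A worked sketch, observed through @intToFloat, which follows the same IEEE round-to-nearest-even rule:

const std = @import("std");

test "u64 -> f32: exact ties round to the even mantissa" {
    var x: u64 = 33554434; // 2^25 + 2, exactly halfway between 2^25 and 2^25 + 4
    try std.testing.expectEqual(@as(f32, 33554432.0), @intToFloat(f32, x));
    x = 33554438; // halfway between 2^25 + 4 and 2^25 + 8; the even neighbour wins
    try std.testing.expectEqual(@as(f32, 33554440.0), @intToFloat(f32, x));
}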

View File

@ -0,0 +1,55 @@
const Int = @import("std").meta.Int;
const math = @import("std").math;
const Log2Int = math.Log2Int;

pub inline fn floatToInt(comptime I: type, a: anytype) I {
    const F = @TypeOf(a);
    const float_bits = @typeInfo(F).Float.bits;
    const int_bits = @typeInfo(I).Int.bits;
    const rep_t = Int(.unsigned, float_bits);
    const sig_bits = math.floatMantissaBits(F);
    const exp_bits = math.floatExponentBits(F);
    const fractional_bits = math.floatFractionalBits(F);
    const implicit_bit = if (F != f80) (@as(rep_t, 1) << sig_bits) else 0;
    const max_exp = (1 << (exp_bits - 1));
    const exp_bias = max_exp - 1;
    const sig_mask = (@as(rep_t, 1) << sig_bits) - 1;

    // Break a into sign, exponent, significand
    const a_rep: rep_t = @bitCast(rep_t, a);
    const negative = (a_rep >> (float_bits - 1)) != 0;
    const exponent = @intCast(i32, (a_rep << 1) >> (sig_bits + 1)) - exp_bias;
    const significand: rep_t = (a_rep & sig_mask) | implicit_bit;

    // If the exponent is negative, the result rounds to zero.
    if (exponent < 0) return 0;

    // If the value is too large for the integer type, saturate.
    switch (@typeInfo(I).Int.signedness) {
        .unsigned => {
            if (negative) return 0;
            if (@intCast(c_uint, exponent) >= @minimum(int_bits, max_exp)) return math.maxInt(I);
        },
        .signed => if (@intCast(c_uint, exponent) >= @minimum(int_bits - 1, max_exp)) {
            return if (negative) math.minInt(I) else math.maxInt(I);
        },
    }

    // If 0 <= exponent < sig_bits, right shift to get the result.
    // Otherwise, shift left.
    var result: I = undefined;
    if (exponent < fractional_bits) {
        result = @intCast(I, significand >> @intCast(Log2Int(rep_t), fractional_bits - exponent));
    } else {
        result = @intCast(I, significand) << @intCast(Log2Int(I), exponent - fractional_bits);
    }

    if ((@typeInfo(I).Int.signedness == .signed) and negative)
        return ~result +% 1;
    return result;
}

test {
    _ = @import("float_to_int_test.zig");
}
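floatToInt rounds toward zero and saturates values that do not fit the destination type; negative inputs with an unsigned destination become zero. A minimal sketch of those semantics, assuming the file above is importable as "float_to_int.zig" from the test's location:

const std = @import("std");
const math = std.math;
const floatToInt = @import("float_to_int.zig").floatToInt;

test "truncate toward zero, saturate on overflow" {
    try std.testing.expectEqual(@as(u32, 1), floatToInt(u32, @as(f32, 1.9)));
    try std.testing.expectEqual(@as(u32, 0), floatToInt(u32, @as(f32, -0.5)));
    try std.testing.expectEqual(@as(i32, math.maxInt(i32)), floatToInt(i32, @as(f64, 1e18)));
    try std.testing.expectEqual(@as(i32, math.minInt(i32)), floatToInt(i32, @as(f64, -1e18)));
}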

View File

@ -1,31 +1,33 @@
const std = @import("std");
const testing = std.testing;
const math = std.math;
const fixXfYi = @import("fixXfYi.zig").fixXfYi;
const __fixunshfti = @import("fixunshfti.zig").__fixunshfti;
const __fixunsxfti = @import("fixunsxfti.zig").__fixunsxfti;
// Conversion from f32
const __fixsfsi = @import("fixXfYi.zig").__fixsfsi;
const __fixunssfsi = @import("fixXfYi.zig").__fixunssfsi;
const __fixsfdi = @import("fixXfYi.zig").__fixsfdi;
const __fixunssfdi = @import("fixXfYi.zig").__fixunssfdi;
const __fixsfti = @import("fixXfYi.zig").__fixsfti;
const __fixunssfti = @import("fixXfYi.zig").__fixunssfti;
const __fixsfsi = @import("fixsfsi.zig").__fixsfsi;
const __fixunssfsi = @import("fixunssfsi.zig").__fixunssfsi;
const __fixsfdi = @import("fixsfdi.zig").__fixsfdi;
const __fixunssfdi = @import("fixunssfdi.zig").__fixunssfdi;
const __fixsfti = @import("fixsfti.zig").__fixsfti;
const __fixunssfti = @import("fixunssfti.zig").__fixunssfti;
// Conversion from f64
const __fixdfsi = @import("fixXfYi.zig").__fixdfsi;
const __fixunsdfsi = @import("fixXfYi.zig").__fixunsdfsi;
const __fixdfdi = @import("fixXfYi.zig").__fixdfdi;
const __fixunsdfdi = @import("fixXfYi.zig").__fixunsdfdi;
const __fixdfti = @import("fixXfYi.zig").__fixdfti;
const __fixunsdfti = @import("fixXfYi.zig").__fixunsdfti;
const __fixdfsi = @import("fixdfsi.zig").__fixdfsi;
const __fixunsdfsi = @import("fixunsdfsi.zig").__fixunsdfsi;
const __fixdfdi = @import("fixdfdi.zig").__fixdfdi;
const __fixunsdfdi = @import("fixunsdfdi.zig").__fixunsdfdi;
const __fixdfti = @import("fixdfti.zig").__fixdfti;
const __fixunsdfti = @import("fixunsdfti.zig").__fixunsdfti;
// Conversion from f128
const __fixtfsi = @import("fixXfYi.zig").__fixtfsi;
const __fixunstfsi = @import("fixXfYi.zig").__fixunstfsi;
const __fixtfdi = @import("fixXfYi.zig").__fixtfdi;
const __fixunstfdi = @import("fixXfYi.zig").__fixunstfdi;
const __fixtfti = @import("fixXfYi.zig").__fixtfti;
const __fixunstfti = @import("fixXfYi.zig").__fixunstfti;
const __fixtfsi = @import("fixtfsi.zig").__fixtfsi;
const __fixunstfsi = @import("fixunstfsi.zig").__fixunstfsi;
const __fixtfdi = @import("fixtfdi.zig").__fixtfdi;
const __fixunstfdi = @import("fixunstfdi.zig").__fixunstfdi;
const __fixtfti = @import("fixtfti.zig").__fixtfti;
const __fixunstfti = @import("fixunstfti.zig").__fixunstfti;
fn test__fixsfsi(a: f32, expected: i32) !void {
    const x = __fixsfsi(a);
@ -927,21 +929,21 @@ test "fixunstfti" {
}

fn test__fixunshfti(a: f16, expected: u128) !void {
    const x = fixXfYi(u128, a);
    const x = __fixunshfti(a);
    try testing.expect(x == expected);
}

test "fixXfYi for f16" {
test "fixunshfti for f16" {
    try test__fixunshfti(math.inf(f16), math.maxInt(u128));
    try test__fixunshfti(math.floatMax(f16), 65504);
}

fn test__fixunsxfti(a: f80, expected: u128) !void {
    const x = fixXfYi(u128, a);
    const x = __fixunsxfti(a);
    try testing.expect(x == expected);
}

test "fixXfYi for f80" {
test "fixunsxfti for f80" {
    try test__fixunsxfti(math.inf(f80), math.maxInt(u128));
    try test__fixunsxfti(math.floatMax(f80), math.maxInt(u128));
    try test__fixunsxfti(math.maxInt(u64), math.maxInt(u64));

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;

pub const panic = common.panic;

comptime {
    if (common.want_aeabi) {
        @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = common.linkage });
    } else {
        @export(__floatdidf, .{ .name = "__floatdidf", .linkage = common.linkage });
    }
}

pub fn __floatdidf(a: i64) callconv(.C) f64 {
    return intToFloat(f64, a);
}

fn __aeabi_l2d(a: i64) callconv(.AAPCS) f64 {
    return intToFloat(f64, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;

pub const panic = common.panic;

comptime {
    @export(__floatdihf, .{ .name = "__floatdihf", .linkage = common.linkage });
}

fn __floatdihf(a: i64) callconv(.C) f16 {
    return intToFloat(f16, a);
}

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;

pub const panic = common.panic;

comptime {
    if (common.want_aeabi) {
        @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = common.linkage });
    } else {
        @export(__floatdisf, .{ .name = "__floatdisf", .linkage = common.linkage });
    }
}

pub fn __floatdisf(a: i64) callconv(.C) f32 {
    return intToFloat(f32, a);
}

fn __aeabi_l2f(a: i64) callconv(.AAPCS) f32 {
    return intToFloat(f32, a);
}

View File

@ -0,0 +1,26 @@
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;

pub const panic = common.panic;

comptime {
    if (common.want_ppc_abi) {
        @export(__floatdikf, .{ .name = "__floatdikf", .linkage = common.linkage });
    } else if (common.want_sparc_abi) {
        @export(_Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = common.linkage });
    } else {
        @export(__floatditf, .{ .name = "__floatditf", .linkage = common.linkage });
    }
}

pub fn __floatditf(a: i64) callconv(.C) f128 {
    return intToFloat(f128, a);
}

fn __floatdikf(a: i64) callconv(.C) f128 {
    return intToFloat(f128, a);
}

fn _Qp_xtoq(c: *f128, a: i64) callconv(.C) void {
    c.* = intToFloat(f128, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;

pub const panic = common.panic;

comptime {
    @export(__floatdixf, .{ .name = "__floatdixf", .linkage = common.linkage });
}

fn __floatdixf(a: i64) callconv(.C) f80 {
    return intToFloat(f80, a);
}

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;

pub const panic = common.panic;

comptime {
    if (common.want_aeabi) {
        @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = common.linkage });
    } else {
        @export(__floatsidf, .{ .name = "__floatsidf", .linkage = common.linkage });
    }
}

pub fn __floatsidf(a: i32) callconv(.C) f64 {
    return intToFloat(f64, a);
}

fn __aeabi_i2d(a: i32) callconv(.AAPCS) f64 {
    return intToFloat(f64, a);
}

View File

@ -0,0 +1,12 @@
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;

pub const panic = common.panic;

comptime {
    @export(__floatsihf, .{ .name = "__floatsihf", .linkage = common.linkage });
}

fn __floatsihf(a: i32) callconv(.C) f16 {
    return intToFloat(f16, a);
}

View File

@ -0,0 +1,20 @@
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;

pub const panic = common.panic;

comptime {
    if (common.want_aeabi) {
        @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = common.linkage });
    } else {
        @export(__floatsisf, .{ .name = "__floatsisf", .linkage = common.linkage });
    }
}

pub fn __floatsisf(a: i32) callconv(.C) f32 {
    return intToFloat(f32, a);
}

fn __aeabi_i2f(a: i32) callconv(.AAPCS) f32 {
    return intToFloat(f32, a);
}

View File

@ -0,0 +1,26 @@
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;

pub const panic = common.panic;

comptime {
    if (common.want_ppc_abi) {
        @export(__floatsikf, .{ .name = "__floatsikf", .linkage = common.linkage });
    } else if (common.want_sparc_abi) {
        @export(_Qp_itoq, .{ .name = "_Qp_itoq", .linkage = common.linkage });
    } else {
        @export(__floatsitf, .{ .name = "__floatsitf", .linkage = common.linkage });
    }
}

pub fn __floatsitf(a: i32) callconv(.C) f128 {
    return intToFloat(f128, a);
}

fn __floatsikf(a: i32) callconv(.C) f128 {
    return intToFloat(f128, a);
}

fn _Qp_itoq(c: *f128, a: i32) callconv(.C) void {
    c.* = intToFloat(f128, a);
}
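_Qp_itoq above shows the other half of the SPARC soft-quad convention: the f128 result is written through an out pointer rather than returned by value. A sketch of that shape, assuming intToFloat is importable from int_to_float.zig:

const std = @import("std");
const intToFloat = @import("int_to_float.zig").intToFloat;

test "sketch: result delivered through an out pointer, as in _Qp_itoq" {
    var out: f128 = undefined;
    const c: *f128 = &out;
    c.* = intToFloat(f128, @as(i32, -7)); // mirrors the body of _Qp_itoq
    try std.testing.expectEqual(@as(f128, -7.0), out);
}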

Some files were not shown because too many files have changed in this diff.