zig/lib/compiler_rt/truncXfYf2.zig

const std = @import("std");
const builtin = @import("builtin");
const native_arch = builtin.cpu.arch;
// AArch64 is the only ABI (at the moment) to support f16 arguments without the
// need for extending them to wider fp types.
// TODO remove this; do this type selection in the language rather than
// here in compiler-rt.
pub const F16T = if (native_arch.isAARCH64()) f16 else u16;
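// Note (clarifying the above): on AArch64 the functions below therefore
// return a true f16; on every other target the caller receives the raw
// IEEE-754 binary16 bit pattern in a u16.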
pub fn __truncsfhf2(a: f32) callconv(.C) F16T {
    return @bitCast(F16T, truncXfYf2(f16, f32, a));
}

pub fn __truncdfhf2(a: f64) callconv(.C) F16T {
    return @bitCast(F16T, truncXfYf2(f16, f64, a));
}

pub fn __trunctfhf2(a: f128) callconv(.C) F16T {
    return @bitCast(F16T, truncXfYf2(f16, f128, a));
}

pub fn __trunctfsf2(a: f128) callconv(.C) f32 {
    return truncXfYf2(f32, f128, a);
}

pub fn __trunctfdf2(a: f128) callconv(.C) f64 {
    return truncXfYf2(f64, f128, a);
}

pub fn __truncdfsf2(a: f64) callconv(.C) f32 {
    return truncXfYf2(f32, f64, a);
}

pub fn __aeabi_d2f(a: f64) callconv(.AAPCS) f32 {
    @setRuntimeSafety(false);
    return truncXfYf2(f32, f64, a);
}

pub fn __aeabi_d2h(a: f64) callconv(.AAPCS) u16 {
    @setRuntimeSafety(false);
    return @bitCast(F16T, truncXfYf2(f16, f64, a));
}

pub fn __aeabi_f2h(a: f32) callconv(.AAPCS) u16 {
    @setRuntimeSafety(false);
    return @bitCast(F16T, truncXfYf2(f16, f32, a));
}
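
/// Truncate `a` from the wider float type `src_t` to the narrower `dst_t`
/// with round-to-nearest-even semantics, overflowing to infinity,
/// truncating NaN payloads, and underflowing to subnormals or zero.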
inline fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
    const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
    const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
    const srcSigBits = std.math.floatMantissaBits(src_t);
    const dstSigBits = std.math.floatMantissaBits(dst_t);
    const SrcShift = std.math.Log2Int(src_rep_t);

    // Various constants whose values follow from the type parameters.
    // Any reasonable optimizer will fold and propagate all of these.
    const srcBits = @typeInfo(src_t).Float.bits;
    const srcExpBits = srcBits - srcSigBits - 1;
    const srcInfExp = (1 << srcExpBits) - 1;
    const srcExpBias = srcInfExp >> 1;
    const srcMinNormal = 1 << srcSigBits;
    const srcSignificandMask = srcMinNormal - 1;
    const srcInfinity = srcInfExp << srcSigBits;
    const srcSignMask = 1 << (srcSigBits + srcExpBits);
    const srcAbsMask = srcSignMask - 1;
    const roundMask = (1 << (srcSigBits - dstSigBits)) - 1;
    const halfway = 1 << (srcSigBits - dstSigBits - 1);
    const srcQNaN = 1 << (srcSigBits - 1);
    const srcNaNCode = srcQNaN - 1;

    const dstBits = @typeInfo(dst_t).Float.bits;
    const dstExpBits = dstBits - dstSigBits - 1;
    const dstInfExp = (1 << dstExpBits) - 1;
    const dstExpBias = dstInfExp >> 1;
    const underflowExponent = srcExpBias + 1 - dstExpBias;
    const overflowExponent = srcExpBias + dstInfExp - dstExpBias;
    const underflow = underflowExponent << srcSigBits;
    const overflow = overflowExponent << srcSigBits;
    const dstQNaN = 1 << (dstSigBits - 1);
    const dstNaNCode = dstQNaN - 1;
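    // Worked example (illustrative), for f64 -> f32: srcSigBits = 52 and
    // dstSigBits = 23, so roundMask = (1 << 29) - 1 and halfway = 1 << 28;
    // srcExpBias = 1023, dstExpBias = 127, and overflowExponent = 1151.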
    // Break a into a sign and representation of the absolute value.
    const aRep: src_rep_t = @bitCast(src_rep_t, a);
    const aAbs: src_rep_t = aRep & srcAbsMask;
    const sign: src_rep_t = aRep & srcSignMask;
    var absResult: dst_rep_t = undefined;
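    // The wrapping subtractions below collapse the two-sided range check
    // `underflow <= aAbs < overflow` into a single unsigned comparison.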
    if (aAbs -% underflow < aAbs -% overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format. We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        absResult = @truncate(dst_rep_t, aAbs >> (srcSigBits - dstSigBits));
        absResult -%= @as(dst_rep_t, srcExpBias - dstExpBias) << dstSigBits;
        const roundBits: src_rep_t = aAbs & roundMask;
        if (roundBits > halfway) {
            // Round to nearest
            absResult += 1;
        } else if (roundBits == halfway) {
            // Ties to even
            absResult += absResult & 1;
        }
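        // Example (illustrative): 1.0 + 0x1p-24 as f64 hits roundBits ==
        // halfway when truncating to f32; the tie breaks toward the even
        // significand, so the result is exactly 1.0.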
    } else if (aAbs > srcInfinity) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits;
        absResult |= dstQNaN;
        absResult |= @intCast(dst_rep_t, ((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode);
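        // Only the high payload bits survive the shift above; the low
        // (srcSigBits - dstSigBits) bits of the NaN payload are discarded.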
    } else if (aAbs >= overflow) {
        // a overflows to infinity.
        absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits;
    } else {
        // a underflows on conversion to the destination type or is an exact
        // zero. The result may be a denormal or zero. Extract the exponent
        // to get the shift amount for the denormalization.
        const aExp = @intCast(u32, aAbs >> srcSigBits);
        const shift = @intCast(u32, srcExpBias - dstExpBias - aExp + 1);
        const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal;
        // Right shift by the denormalization amount with sticky.
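        // Example (illustrative): truncating 0x1p-140 from f64 to f32 uses
        // shift == 14; the implicit leading bit moves into the stored
        // significand, the sticky bit stays zero, and the result is the
        // exact f32 subnormal 0x1p-140.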
        if (shift > srcSigBits) {
            absResult = 0;
        } else {
            const sticky: src_rep_t = @boolToInt(significand << @intCast(SrcShift, srcBits - shift) != 0);
            const denormalizedSignificand: src_rep_t = significand >> @intCast(SrcShift, shift) | sticky;
            absResult = @intCast(dst_rep_t, denormalizedSignificand >> (srcSigBits - dstSigBits));
            const roundBits: src_rep_t = denormalizedSignificand & roundMask;
            if (roundBits > halfway) {
                // Round to nearest
                absResult += 1;
            } else if (roundBits == halfway) {
                // Ties to even
                absResult += absResult & 1;
            }
        }
    }

    const result: dst_rep_t align(@alignOf(dst_t)) = absResult |
        @truncate(dst_rep_t, sign >> @intCast(SrcShift, srcBits - dstBits));
    return @bitCast(dst_t, result);
}
test {
    _ = @import("truncXfYf2_test.zig");
}
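
// A minimal illustrative sketch (not part of the original test suite, which
// lives in truncXfYf2_test.zig): spot-checks of one exported entry point
// covering the normal, tie-rounding, and overflow paths.
test "truncXfYf2 sketch" {
    // Normal range: exactly representable values convert exactly.
    try std.testing.expectEqual(@as(f32, 1.0), __truncdfsf2(1.0));
    // A tie exactly halfway between two f32 values rounds to the even
    // significand, i.e. back down to 1.0.
    try std.testing.expectEqual(@as(f32, 1.0), __truncdfsf2(1.0 + 0x1p-24));
    // Values beyond the destination's finite range overflow to infinity.
    try std.testing.expect(std.math.isInf(__truncdfsf2(std.math.f64_max)));
}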