compiler-rt: Fix __floatunditf

This builtin converts a u64 into an f128, not a u128 into an f128.

Fixes crashes that occurred only on AArch64 systems.
This commit is contained in:
LemonBoy 2020-03-27 20:10:59 +01:00
parent aec4967f36
commit 0512be227c
2 changed files with 6 additions and 6 deletions

View File

@@ -2,7 +2,7 @@ const builtin = @import("builtin");
const is_test = builtin.is_test;
const std = @import("std");
pub fn __floatunditf(a: u128) callconv(.C) f128 {
pub fn __floatunditf(a: u64) callconv(.C) f128 {
@setRuntimeSafety(is_test);
if (a == 0) {
@@ -14,11 +14,11 @@ pub fn __floatunditf(a: u128) callconv(.C) f128 {
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
const implicit_bit = 1 << mantissa_bits;
const exp = (u128.bit_count - 1) - @clz(u128, a);
const shift = mantissa_bits - @intCast(u7, exp);
const exp: u128 = (u64.bit_count - 1) - @clz(u64, a);
const shift: u7 = mantissa_bits - @intCast(u7, exp);
var result: u128 align(16) = (a << shift) ^ implicit_bit;
result += (@intCast(u128, exp) + exponent_bias) << mantissa_bits;
var result: u128 = (@intCast(u128, a) << shift) ^ implicit_bit;
result += (exp + exponent_bias) << mantissa_bits;
return @bitCast(f128, result);
}

View File

@@ -1,6 +1,6 @@
const __floatunditf = @import("floatunditf.zig").__floatunditf;
fn test__floatunditf(a: u128, expected_hi: u64, expected_lo: u64) void {
fn test__floatunditf(a: u64, expected_hi: u64, expected_lo: u64) void {
const x = __floatunditf(a);
const x_repr = @bitCast(u128, x);