Merge pull request #12071 from topolarity/windows-abi-change

compiler_rt: Update Windows ABI for float<->int conversion routines
Andrew Kelley, 2022-07-10 23:53:14 -04:00 (committed via GitHub)
commit 76c89a3de9
30 changed files with 334 additions and 207 deletions
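All of the compiler_rt changes below follow the same pattern, condensed here in a short sketch (illustrative only: the symbol name mirrors the real __fixdfti routine, but the body uses a plain @floatToInt instead of compiler_rt's floatToInt helper). On x86_64-windows, LLVM emits the 128-bit "ti" libcalls as if they took and returned @Vector(2, u64), so the symbol is exported with that signature and the value is bitcast to or from i128.

const builtin = @import("builtin");

// Same condition as the new `want_windows_v2u64_abi` flag added to common.zig.
const want_windows_v2u64_abi = builtin.os.tag == .windows and builtin.cpu.arch == .x86_64;
const v2u64 = @Vector(2, u64);

comptime {
    // Export whichever wrapper matches the calling convention LLVM expects
    // for this target, under the single symbol name "__fixdfti".
    if (want_windows_v2u64_abi) {
        @export(fixdfti_windows_x86_64, .{ .name = "__fixdfti", .linkage = .Strong });
    } else {
        @export(fixdfti, .{ .name = "__fixdfti", .linkage = .Strong });
    }
}

fn fixdfti(a: f64) callconv(.C) i128 {
    return @floatToInt(i128, a); // simplified: the real routine goes through compiler_rt's floatToInt helper
}

fn fixdfti_windows_x86_64(a: f64) callconv(.C) v2u64 {
    // Identical computation; only the return type (and hence the ABI) differs.
    return @bitCast(v2u64, fixdfti(a));
}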

View File

@ -17,6 +17,10 @@ pub const want_aeabi = switch (builtin.abi) {
};
pub const want_ppc_abi = builtin.cpu.arch.isPPC() or builtin.cpu.arch.isPPC64();
// Libcalls that involve u128 on Windows x86-64 are expected by LLVM to use the
// calling convention of @Vector(2, u64), rather than what's standard.
pub const want_windows_v2u64_abi = builtin.os.tag == .windows and builtin.cpu.arch == .x86_64;
/// This governs whether to use these symbol names for f16/f32 conversions
/// rather than the standard names:
/// * __gnu_f2h_ieee
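The Windows wrappers are pure reinterpretations: i128 and @Vector(2, u64) share the same 128-bit in-memory representation, so @bitCast between them is lossless and the only observable difference is the calling convention. A minimal sanity check (not part of this PR), written with the two-argument @bitCast used throughout this change:

const std = @import("std");

test "i128 round-trips through @Vector(2, u64)" {
    const v2u64 = @Vector(2, u64);
    const x: i128 = -0x0123456789abcdef_fedcba9876543210;
    const v = @bitCast(v2u64, x);
    try std.testing.expectEqual(x, @bitCast(i128, v));
}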

View File

@ -1,12 +1,23 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixdfti, .{ .name = "__fixdfti", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__fixdfti_windows_x86_64, .{ .name = "__fixdfti", .linkage = common.linkage });
} else {
@export(__fixdfti, .{ .name = "__fixdfti", .linkage = common.linkage });
}
}
pub fn __fixdfti(a: f64) callconv(.C) i128 {
return floatToInt(i128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixdfti_windows_x86_64(a: f64) callconv(.C) v2u64 {
return @bitCast(v2u64, floatToInt(i128, a));
}

View File

@ -1,12 +1,23 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixhfti, .{ .name = "__fixhfti", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__fixhfti_windows_x86_64, .{ .name = "__fixhfti", .linkage = common.linkage });
} else {
@export(__fixhfti, .{ .name = "__fixhfti", .linkage = common.linkage });
}
}
fn __fixhfti(a: f16) callconv(.C) i128 {
pub fn __fixhfti(a: f16) callconv(.C) i128 {
return floatToInt(i128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixhfti_windows_x86_64(a: f16) callconv(.C) v2u64 {
return @bitCast(v2u64, floatToInt(i128, a));
}

View File

@ -1,12 +1,23 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixsfti, .{ .name = "__fixsfti", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__fixsfti_windows_x86_64, .{ .name = "__fixsfti", .linkage = common.linkage });
} else {
@export(__fixsfti, .{ .name = "__fixsfti", .linkage = common.linkage });
}
}
pub fn __fixsfti(a: f32) callconv(.C) i128 {
return floatToInt(i128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixsfti_windows_x86_64(a: f32) callconv(.C) v2u64 {
return @bitCast(v2u64, floatToInt(i128, a));
}

View File

@ -1,12 +1,23 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixtfti, .{ .name = "__fixtfti", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__fixtfti_windows_x86_64, .{ .name = "__fixtfti", .linkage = common.linkage });
} else {
@export(__fixtfti, .{ .name = "__fixtfti", .linkage = common.linkage });
}
}
pub fn __fixtfti(a: f128) callconv(.C) i128 {
return floatToInt(i128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixtfti_windows_x86_64(a: f128) callconv(.C) v2u64 {
return @bitCast(v2u64, floatToInt(i128, a));
}

View File

@ -1,12 +1,23 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__fixunsdfti_windows_x86_64, .{ .name = "__fixunsdfti", .linkage = common.linkage });
} else {
@export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = common.linkage });
}
}
pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
return floatToInt(u128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixunsdfti_windows_x86_64(a: f64) callconv(.C) v2u64 {
return @bitCast(v2u64, floatToInt(u128, a));
}

View File

@ -1,12 +1,23 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixunshfti, .{ .name = "__fixunshfti", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__fixunshfti_windows_x86_64, .{ .name = "__fixunshfti", .linkage = common.linkage });
} else {
@export(__fixunshfti, .{ .name = "__fixunshfti", .linkage = common.linkage });
}
}
pub fn __fixunshfti(a: f16) callconv(.C) u128 {
return floatToInt(u128, a);
}
const v2u64 = @import("std").meta.Vector(2, u64);
fn __fixunshfti_windows_x86_64(a: f16) callconv(.C) v2u64 {
return @bitCast(v2u64, floatToInt(u128, a));
}

View File

@ -1,12 +1,23 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__fixunssfti_windows_x86_64, .{ .name = "__fixunssfti", .linkage = common.linkage });
} else {
@export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = common.linkage });
}
}
pub fn __fixunssfti(a: f32) callconv(.C) u128 {
return floatToInt(u128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixunssfti_windows_x86_64(a: f32) callconv(.C) v2u64 {
return @bitCast(v2u64, floatToInt(u128, a));
}

View File

@ -1,12 +1,23 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__fixunstfti_windows_x86_64, .{ .name = "__fixunstfti", .linkage = common.linkage });
} else {
@export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = common.linkage });
}
}
pub fn __fixunstfti(a: f128) callconv(.C) u128 {
return floatToInt(u128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixunstfti_windows_x86_64(a: f128) callconv(.C) v2u64 {
return @bitCast(v2u64, floatToInt(u128, a));
}

View File

@ -1,12 +1,23 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixunsxfti, .{ .name = "__fixunsxfti", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__fixunsxfti_windows_x86_64, .{ .name = "__fixunsxfti", .linkage = common.linkage });
} else {
@export(__fixunsxfti, .{ .name = "__fixunsxfti", .linkage = common.linkage });
}
}
pub fn __fixunsxfti(a: f80) callconv(.C) u128 {
return floatToInt(u128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixunsxfti_windows_x86_64(a: f80) callconv(.C) v2u64 {
return @bitCast(v2u64, floatToInt(u128, a));
}

View File

@ -1,12 +1,23 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const floatToInt = @import("./float_to_int.zig").floatToInt;
pub const panic = common.panic;
comptime {
@export(__fixxfti, .{ .name = "__fixxfti", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__fixxfti_windows_x86_64, .{ .name = "__fixxfti", .linkage = common.linkage });
} else {
@export(__fixxfti, .{ .name = "__fixxfti", .linkage = common.linkage });
}
}
fn __fixxfti(a: f80) callconv(.C) i128 {
pub fn __fixxfti(a: f80) callconv(.C) i128 {
return floatToInt(i128, a);
}
const v2u64 = @Vector(2, u64);
fn __fixxfti_windows_x86_64(a: f80) callconv(.C) v2u64 {
return @bitCast(v2u64, floatToInt(i128, a));
}

View File

@ -1,12 +1,21 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
@export(__floattidf, .{ .name = "__floattidf", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__floattidf_windows_x86_64, .{ .name = "__floattidf", .linkage = common.linkage });
} else {
@export(__floattidf, .{ .name = "__floattidf", .linkage = common.linkage });
}
}
pub fn __floattidf(a: i128) callconv(.C) f64 {
return intToFloat(f64, a);
}
fn __floattidf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f64 {
return intToFloat(f64, @bitCast(i128, a));
}

View File

@ -1,12 +1,21 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
@export(__floattihf, .{ .name = "__floattihf", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__floattihf_windows_x86_64, .{ .name = "__floattihf", .linkage = common.linkage });
} else {
@export(__floattihf, .{ .name = "__floattihf", .linkage = common.linkage });
}
}
fn __floattihf(a: i128) callconv(.C) f16 {
pub fn __floattihf(a: i128) callconv(.C) f16 {
return intToFloat(f16, a);
}
fn __floattihf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f16 {
return intToFloat(f16, @bitCast(i128, a));
}

View File

@ -1,12 +1,21 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
@export(__floattisf, .{ .name = "__floattisf", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__floattisf_windows_x86_64, .{ .name = "__floattisf", .linkage = common.linkage });
} else {
@export(__floattisf, .{ .name = "__floattisf", .linkage = common.linkage });
}
}
pub fn __floattisf(a: i128) callconv(.C) f32 {
return intToFloat(f32, a);
}
fn __floattisf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f32 {
return intToFloat(f32, @bitCast(i128, a));
}

View File

@ -1,12 +1,21 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
@export(__floattitf, .{ .name = "__floattitf", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__floattitf_windows_x86_64, .{ .name = "__floattitf", .linkage = common.linkage });
} else {
@export(__floattitf, .{ .name = "__floattitf", .linkage = common.linkage });
}
}
pub fn __floattitf(a: i128) callconv(.C) f128 {
return intToFloat(f128, a);
}
fn __floattitf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f128 {
return intToFloat(f128, @bitCast(i128, a));
}

View File

@ -1,12 +1,21 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
@export(__floattixf, .{ .name = "__floattixf", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__floattixf_windows_x86_64, .{ .name = "__floattixf", .linkage = common.linkage });
} else {
@export(__floattixf, .{ .name = "__floattixf", .linkage = common.linkage });
}
}
fn __floattixf(a: i128) callconv(.C) f80 {
pub fn __floattixf(a: i128) callconv(.C) f80 {
return intToFloat(f80, a);
}
fn __floattixf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f80 {
return intToFloat(f80, @bitCast(i128, a));
}

View File

@ -1,12 +1,21 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
@export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__floatuntidf_windows_x86_64, .{ .name = "__floatuntidf", .linkage = common.linkage });
} else {
@export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = common.linkage });
}
}
pub fn __floatuntidf(a: u128) callconv(.C) f64 {
return intToFloat(f64, a);
}
fn __floatuntidf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f64 {
return intToFloat(f64, @bitCast(u128, a));
}

View File

@ -1,12 +1,21 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
@export(__floatuntihf, .{ .name = "__floatuntihf", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__floatuntihf_windows_x86_64, .{ .name = "__floatuntihf", .linkage = common.linkage });
} else {
@export(__floatuntihf, .{ .name = "__floatuntihf", .linkage = common.linkage });
}
}
fn __floatuntihf(a: u128) callconv(.C) f16 {
pub fn __floatuntihf(a: u128) callconv(.C) f16 {
return intToFloat(f16, a);
}
fn __floatuntihf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f16 {
return intToFloat(f16, @bitCast(u128, a));
}

View File

@ -1,12 +1,21 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
@export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__floatuntisf_windows_x86_64, .{ .name = "__floatuntisf", .linkage = common.linkage });
} else {
@export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = common.linkage });
}
}
pub fn __floatuntisf(a: u128) callconv(.C) f32 {
return intToFloat(f32, a);
}
fn __floatuntisf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f32 {
return intToFloat(f32, @bitCast(u128, a));
}

View File

@ -1,13 +1,16 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
if (common.want_ppc_abi) {
@export(__floatuntikf, .{ .name = "__floatuntikf", .linkage = common.linkage });
const symbol_name = if (common.want_ppc_abi) "__floatuntikf" else "__floatuntitf";
if (common.want_windows_v2u64_abi) {
@export(__floatuntitf_windows_x86_64, .{ .name = symbol_name, .linkage = common.linkage });
} else {
@export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = common.linkage });
@export(__floatuntitf, .{ .name = symbol_name, .linkage = common.linkage });
}
}
@ -15,6 +18,6 @@ pub fn __floatuntitf(a: u128) callconv(.C) f128 {
return intToFloat(f128, a);
}
fn __floatuntikf(a: u128) callconv(.C) f128 {
return intToFloat(f128, a);
fn __floatuntitf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f128 {
return intToFloat(f128, @bitCast(u128, a));
}

View File

@ -1,12 +1,21 @@
const builtin = @import("builtin");
const common = @import("./common.zig");
const intToFloat = @import("./int_to_float.zig").intToFloat;
pub const panic = common.panic;
comptime {
@export(__floatuntixf, .{ .name = "__floatuntixf", .linkage = common.linkage });
if (common.want_windows_v2u64_abi) {
@export(__floatuntixf_windows_x86_64, .{ .name = "__floatuntixf", .linkage = common.linkage });
} else {
@export(__floatuntixf, .{ .name = "__floatuntixf", .linkage = common.linkage });
}
}
pub fn __floatuntixf(a: u128) callconv(.C) f80 {
return intToFloat(f80, a);
}
fn __floatuntixf_windows_x86_64(a: @Vector(2, u64)) callconv(.C) f80 {
return intToFloat(f80, @bitCast(u128, a));
}

View File

@ -5,27 +5,13 @@
const std = @import("std");
const builtin = @import("builtin");
const udivmod = @import("udivmod.zig").udivmod;
const arch = builtin.cpu.arch;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (builtin.os.tag == .windows) {
switch (arch) {
.i386 => {
@export(__modti3, .{ .name = "__modti3", .linkage = common.linkage });
},
.x86_64 => {
// The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
// that LLVM expects compiler-rt to have.
@export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = common.linkage });
},
else => {},
}
if (arch.isAARCH64()) {
@export(__modti3, .{ .name = "__modti3", .linkage = common.linkage });
}
if (common.want_windows_v2u64_abi) {
@export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = common.linkage });
} else {
@export(__modti3, .{ .name = "__modti3", .linkage = common.linkage });
}
@ -35,10 +21,10 @@ pub fn __modti3(a: i128, b: i128) callconv(.C) i128 {
return mod(a, b);
}
const v128 = @import("std").meta.Vector(2, u64);
const v2u64 = @Vector(2, u64);
fn __modti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
return @bitCast(v128, mod(@bitCast(i128, a), @bitCast(i128, b)));
fn __modti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
return @bitCast(v2u64, mod(@bitCast(i128, a), @bitCast(i128, b)));
}
inline fn mod(a: i128, b: i128) i128 {

View File

@ -4,25 +4,14 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const native_endian = builtin.cpu.arch.endian();
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (builtin.os.tag == .windows) {
switch (arch) {
.i386 => {
@export(__multi3, .{ .name = "__multi3", .linkage = common.linkage });
},
.x86_64 => {
// The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
// that LLVM expects compiler-rt to have.
@export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = common.linkage });
},
else => {},
}
if (common.want_windows_v2u64_abi) {
@export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = common.linkage });
} else {
@export(__multi3, .{ .name = "__multi3", .linkage = common.linkage });
}
@ -32,10 +21,10 @@ pub fn __multi3(a: i128, b: i128) callconv(.C) i128 {
return mul(a, b);
}
const v128 = @Vector(2, u64);
const v2u64 = @Vector(2, u64);
fn __multi3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
return @bitCast(v128, mul(@bitCast(i128, a), @bitCast(i128, b)));
fn __multi3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
return @bitCast(v2u64, mul(@bitCast(i128, a), @bitCast(i128, b)));
}
inline fn mul(a: i128, b: i128) i128 {

View File

@ -1,24 +1,13 @@
const std = @import("std");
const builtin = @import("builtin");
const udivmod = @import("udivmod.zig").udivmod;
const arch = builtin.cpu.arch;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (builtin.os.tag == .windows) {
switch (arch) {
.i386 => {
@export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = common.linkage });
},
.x86_64 => {
// The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
// that LLVM expects compiler-rt to have.
@export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = common.linkage });
},
else => {},
}
if (common.want_windows_v2u64_abi) {
@export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = common.linkage });
} else {
@export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = common.linkage });
}
@ -28,10 +17,10 @@ pub fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) callconv(.C) u128 {
return udivmod(u128, a, b, maybe_rem);
}
const v128 = std.meta.Vector(2, u64);
const v2u64 = @Vector(2, u64);
fn __udivmodti4_windows_x86_64(a: v128, b: v128, maybe_rem: ?*u128) callconv(.C) v128 {
return @bitCast(v128, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), maybe_rem));
fn __udivmodti4_windows_x86_64(a: v2u64, b: v2u64, maybe_rem: ?*u128) callconv(.C) v2u64 {
return @bitCast(v2u64, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), maybe_rem));
}
test {

View File

@ -1,27 +1,13 @@
const std = @import("std");
const builtin = @import("builtin");
const udivmod = @import("udivmod.zig").udivmod;
const arch = builtin.cpu.arch;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (builtin.os.tag == .windows) {
switch (arch) {
.i386 => {
@export(__udivti3, .{ .name = "__udivti3", .linkage = common.linkage });
},
.x86_64 => {
// The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
// that LLVM expects compiler-rt to have.
@export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = common.linkage });
},
else => {},
}
if (arch.isAARCH64()) {
@export(__udivti3, .{ .name = "__udivti3", .linkage = common.linkage });
}
if (common.want_windows_v2u64_abi) {
@export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = common.linkage });
} else {
@export(__udivti3, .{ .name = "__udivti3", .linkage = common.linkage });
}
@ -31,8 +17,8 @@ pub fn __udivti3(a: u128, b: u128) callconv(.C) u128 {
return udivmod(u128, a, b, null);
}
const v128 = std.meta.Vector(2, u64);
const v2u64 = @Vector(2, u64);
fn __udivti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
return @bitCast(v128, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), null));
fn __udivti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
return @bitCast(v2u64, udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), null));
}

View File

@ -1,27 +1,13 @@
const std = @import("std");
const builtin = @import("builtin");
const udivmod = @import("udivmod.zig").udivmod;
const arch = builtin.cpu.arch;
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (builtin.os.tag == .windows) {
switch (arch) {
.i386 => {
@export(__umodti3, .{ .name = "__umodti3", .linkage = common.linkage });
},
.x86_64 => {
// The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
// that LLVM expects compiler-rt to have.
@export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = common.linkage });
},
else => {},
}
if (arch.isAARCH64()) {
@export(__umodti3, .{ .name = "__umodti3", .linkage = common.linkage });
}
if (common.want_windows_v2u64_abi) {
@export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = common.linkage });
} else {
@export(__umodti3, .{ .name = "__umodti3", .linkage = common.linkage });
}
@ -33,10 +19,10 @@ pub fn __umodti3(a: u128, b: u128) callconv(.C) u128 {
return r;
}
const v128 = std.meta.Vector(2, u64);
const v2u64 = @Vector(2, u64);
fn __umodti3_windows_x86_64(a: v128, b: v128) callconv(.C) v128 {
fn __umodti3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
var r: u128 = undefined;
_ = udivmod(u128, @bitCast(u128, a), @bitCast(u128, b), &r);
return @bitCast(v128, r);
return @bitCast(v2u64, r);
}

View File

@ -2284,10 +2284,6 @@ test "float.hexadecimal.precision" {
}
test "float.decimal" {
if (builtin.zig_backend == .stage1 and builtin.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/12063
return error.SkipZigTest;
}
try expectFmt("f64: 152314000000000000000000000000", "f64: {d}", .{@as(f64, 1.52314e+29)});
try expectFmt("f32: 0", "f32: {d}", .{@as(f32, 0.0)});
try expectFmt("f32: 0", "f32: {d:.0}", .{@as(f32, 0.0)});
@ -2311,10 +2307,6 @@ test "float.decimal" {
}
test "float.libc.sanity" {
if (builtin.zig_backend == .stage1 and builtin.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/12063
return error.SkipZigTest;
}
try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 916964781)))});
try expectFmt("f64: 0.00001", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 925353389)))});
try expectFmt("f64: 0.10000", "f64: {d:.5}", .{@as(f64, @bitCast(f32, @as(u32, 1036831278)))});

View File

@ -4802,7 +4802,7 @@ pub const FuncGen = struct {
const operand_bits = @intCast(u16, operand_scalar_ty.bitSize(target));
const rt_int_bits = compilerRtIntBits(operand_bits);
const rt_int_ty = self.context.intType(rt_int_bits);
const extended = e: {
var extended = e: {
if (operand_scalar_ty.isSignedInt()) {
break :e self.builder.buildSExtOrBitCast(operand, rt_int_ty, "");
} else {
@ -4819,7 +4819,16 @@ pub const FuncGen = struct {
compiler_rt_operand_abbrev,
compiler_rt_dest_abbrev,
}) catch unreachable;
const param_types = [1]*const llvm.Type{rt_int_ty};
var param_types = [1]*const llvm.Type{rt_int_ty};
if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) {
// On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
// i128 calling convention to adhere to the ABI that LLVM expects compiler-rt to have.
const v2i64 = self.context.intType(64).vectorType(2);
extended = self.builder.buildBitCast(extended, v2i64, "");
param_types = [1]*const llvm.Type{v2i64};
}
const libc_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty);
const params = [1]*const llvm.Value{extended};
@ -4851,7 +4860,12 @@ pub const FuncGen = struct {
}
const rt_int_bits = compilerRtIntBits(@intCast(u16, dest_scalar_ty.bitSize(target)));
const libc_ret_ty = self.context.intType(rt_int_bits);
const ret_ty = self.context.intType(rt_int_bits);
const libc_ret_ty = if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) b: {
// On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
// i128 calling convention to adhere to the ABI that LLVM expects compiler-rt to have.
break :b self.context.intType(64).vectorType(2);
} else ret_ty;
const operand_bits = operand_scalar_ty.floatBits(target);
const compiler_rt_operand_abbrev = compilerRtFloatAbbrev(operand_bits);
@ -4871,13 +4885,11 @@ pub const FuncGen = struct {
const libc_fn = self.getLibcFunction(fn_name, &param_types, libc_ret_ty);
const params = [1]*const llvm.Value{operand};
const result = self.builder.buildCall(libc_fn, &params, params.len, .C, .Auto, "");
var result = self.builder.buildCall(libc_fn, &params, params.len, .C, .Auto, "");
if (libc_ret_ty == dest_llvm_ty) {
return result;
}
return self.builder.buildTrunc(result, dest_llvm_ty, "");
if (libc_ret_ty != ret_ty) result = self.builder.buildBitCast(result, ret_ty, "");
if (ret_ty != dest_llvm_ty) result = self.builder.buildTrunc(result, dest_llvm_ty, "");
return result;
}
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
@ -6490,8 +6502,15 @@ pub const FuncGen = struct {
const one = int_llvm_ty.constInt(1, .False);
const shift_amt = int_llvm_ty.constInt(float_bits - 1, .False);
const sign_mask = one.constShl(shift_amt);
const bitcasted_operand = self.builder.buildBitCast(params[0], int_llvm_ty, "");
const result = self.builder.buildXor(bitcasted_operand, sign_mask, "");
const result = if (ty.zigTypeTag() == .Vector) blk: {
const splat_sign_mask = self.builder.buildVectorSplat(ty.vectorLen(), sign_mask, "");
const cast_ty = int_llvm_ty.vectorType(ty.vectorLen());
const bitcasted_operand = self.builder.buildBitCast(params[0], cast_ty, "");
break :blk self.builder.buildXor(bitcasted_operand, splat_sign_mask, "");
} else blk: {
const bitcasted_operand = self.builder.buildBitCast(params[0], int_llvm_ty, "");
break :blk self.builder.buildXor(bitcasted_operand, sign_mask, "");
};
return self.builder.buildBitCast(result, llvm_ty, "");
},
.add, .sub, .div, .mul => FloatOpStrat{
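The last hunk above also fixes the soft-float sign-flip (negation) lowering for vectors by splatting the sign mask before the xor. The underlying trick, shown here for the scalar f32 case as a standalone sketch (not code from this PR): xor-ing the bit pattern with only the sign bit set negates an IEEE float.

const std = @import("std");

fn negateViaSignBit(x: f32) f32 {
    const sign_mask: u32 = 1 << 31; // only the sign bit set
    return @bitCast(f32, @bitCast(u32, x) ^ sign_mask);
}

test "xor with the sign mask negates a float" {
    try std.testing.expectEqual(@as(f32, -1.25), negateViaSignBit(1.25));
    try std.testing.expectEqual(@as(f32, 4.0), negateViaSignBit(-4.0));
}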

View File

@ -3371,14 +3371,12 @@ static LLVMValueRef add_icmp(CodeGen *g, LLVMValueRef val, Icmp kind) {
}
static LLVMValueRef gen_soft_int_to_float_op(CodeGen *g, LLVMValueRef value_ref, ZigType *operand_type, ZigType *result_type) {
uint32_t vector_len = operand_type->id == ZigTypeIdVector ? operand_type->data.vector.len : 0;
// Handle integers of non-pot bitsize by widening them.
const size_t bitsize = operand_type->data.integral.bit_count;
const bool is_signed = operand_type->data.integral.is_signed;
if (bitsize < 32 || !is_power_of_2(bitsize)) {
const size_t wider_bitsize = bitsize < 32 ? 32 : round_to_next_power_of_2(bitsize);
ZigType *const wider_type = get_int_type(g, is_signed, wider_bitsize);
ZigType *wider_type = get_int_type(g, is_signed, wider_bitsize);
value_ref = gen_widen_or_shorten(g, false, operand_type, wider_type, value_ref);
operand_type = wider_type;
}
@ -3395,35 +3393,22 @@ static LLVMValueRef gen_soft_int_to_float_op(CodeGen *g, LLVMValueRef value_ref,
}
int param_count = 1;
LLVMValueRef func_ref = get_soft_float_fn(g, fn_name, param_count, operand_type->llvm_type, result_type->llvm_type);
LLVMValueRef result;
if (vector_len == 0) {
LLVMValueRef params[1] = {value_ref};
result = LLVMBuildCall(g->builder, func_ref, params, param_count, "");
LLVMValueRef func_ref;
if ((operand_type->data.integral.bit_count == 128) && (g->zig_target->os == OsWindows) && (g->zig_target->arch == ZigLLVM_x86_64)) {
// On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard i128 calling
// convention to adhere to the ABI that LLVM expects compiler-rt to have.
LLVMTypeRef v2i64 = LLVMVectorType(LLVMInt64Type(), 2);
value_ref = LLVMBuildBitCast(g->builder, value_ref, v2i64, "");
func_ref = get_soft_float_fn(g, fn_name, param_count, v2i64, result_type->llvm_type);
} else {
ZigType *alloca_ty = operand_type;
result = build_alloca(g, alloca_ty, "", 0);
LLVMTypeRef usize_ref = g->builtin_types.entry_usize->llvm_type;
for (uint32_t i = 0; i < vector_len; i++) {
LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false);
LLVMValueRef params[1] = {
LLVMBuildExtractElement(g->builder, value_ref, index_value, ""),
};
LLVMValueRef call_result = LLVMBuildCall(g->builder, func_ref, params, param_count, "");
LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, result, ""),
call_result, index_value, "");
}
result = LLVMBuildLoad(g->builder, result, "");
func_ref = get_soft_float_fn(g, fn_name, param_count, operand_type->llvm_type, result_type->llvm_type);
}
return result;
LLVMValueRef params[1] = {value_ref};
return LLVMBuildCall(g->builder, func_ref, params, param_count, "");
}
static LLVMValueRef gen_soft_float_to_int_op(CodeGen *g, LLVMValueRef value_ref, ZigType *operand_type, ZigType *result_type) {
uint32_t vector_len = operand_type->id == ZigTypeIdVector ? operand_type->data.vector.len : 0;
// Handle integers of non-pot bitsize by truncating a sufficiently wide pot integer
const size_t bitsize = result_type->data.integral.bit_count;
const bool is_signed = result_type->data.integral.is_signed;
@ -3445,46 +3430,41 @@ static LLVMValueRef gen_soft_float_to_int_op(CodeGen *g, LLVMValueRef value_ref,
}
int param_count = 1;
LLVMValueRef func_ref = get_soft_float_fn(g, fn_name, param_count, operand_type->llvm_type, wider_type->llvm_type);
LLVMValueRef result;
if (vector_len == 0) {
LLVMValueRef params[1] = {value_ref};
result = LLVMBuildCall(g->builder, func_ref, params, param_count, "");
LLVMValueRef func_ref;
if ((wider_type->data.integral.bit_count == 128) && (g->zig_target->os == OsWindows) && (g->zig_target->arch == ZigLLVM_x86_64)) {
// On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard i128 calling
// convention to adhere to the ABI that LLVM expects compiler-rt to have.
LLVMTypeRef v2i64 = LLVMVectorType(LLVMInt64Type(), 2);
func_ref = get_soft_float_fn(g, fn_name, param_count, operand_type->llvm_type, v2i64);
} else {
ZigType *alloca_ty = operand_type;
result = build_alloca(g, alloca_ty, "", 0);
func_ref = get_soft_float_fn(g, fn_name, param_count, operand_type->llvm_type, wider_type->llvm_type);
}
LLVMTypeRef usize_ref = g->builtin_types.entry_usize->llvm_type;
for (uint32_t i = 0; i < vector_len; i++) {
LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false);
LLVMValueRef params[1] = {
LLVMBuildExtractElement(g->builder, value_ref, index_value, ""),
};
LLVMValueRef call_result = LLVMBuildCall(g->builder, func_ref, params, param_count, "");
LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, result, ""),
call_result, index_value, "");
}
LLVMValueRef params[1] = {value_ref};
LLVMValueRef result = LLVMBuildCall(g->builder, func_ref, params, param_count, "");
result = LLVMBuildLoad(g->builder, result, "");
if ((wider_type->data.integral.bit_count == 128) && (g->zig_target->os == OsWindows) && (g->zig_target->arch == ZigLLVM_x86_64)) {
result = LLVMBuildBitCast(g->builder, result, wider_type->llvm_type, "");
}
// Handle integers of non-pot bitsize by shortening them on the output
if (result_type != wider_type) {
return gen_widen_or_shorten(g, false, wider_type, result_type, result);
result = gen_widen_or_shorten(g, false, wider_type, result_type, result);
}
return result;
}
static LLVMValueRef gen_soft_float_bin_op(CodeGen *g, LLVMValueRef op1_value, LLVMValueRef op2_value, ZigType *operand_type, IrBinOp op_id) {
uint32_t vector_len = operand_type->id == ZigTypeIdVector ? operand_type->data.vector.len : 0;
LLVMTypeRef return_type = operand_type->llvm_type;
int param_count = 2;
const char *compiler_rt_type_abbrev = get_compiler_rt_type_abbrev(operand_type);
const char *math_float_prefix = libc_float_prefix(g, operand_type);
const char *math_float_suffix = libc_float_suffix(g, operand_type);
ZigType *operand_scalar_type = (operand_type->id == ZigTypeIdVector) ? operand_type->data.vector.elem_type : operand_type;
LLVMTypeRef return_scalar_type = operand_scalar_type->llvm_type;
const char *compiler_rt_type_abbrev = get_compiler_rt_type_abbrev(operand_scalar_type);
const char *math_float_prefix = libc_float_prefix(g, operand_scalar_type);
const char *math_float_suffix = libc_float_suffix(g, operand_scalar_type);
char fn_name[64];
Icmp res_icmp = NONE;
@ -3511,32 +3491,32 @@ static LLVMValueRef gen_soft_float_bin_op(CodeGen *g, LLVMValueRef op1_value, LL
case IrBinOpShlSat:
zig_unreachable();
case IrBinOpCmpEq:
return_type = g->builtin_types.entry_i32->llvm_type;
return_scalar_type = g->builtin_types.entry_i32->llvm_type;
snprintf(fn_name, sizeof(fn_name), "__eq%sf2", compiler_rt_type_abbrev);
res_icmp = EQ_ZERO;
break;
case IrBinOpCmpNotEq:
return_type = g->builtin_types.entry_i32->llvm_type;
return_scalar_type = g->builtin_types.entry_i32->llvm_type;
snprintf(fn_name, sizeof(fn_name), "__ne%sf2", compiler_rt_type_abbrev);
res_icmp = NE_ZERO;
break;
case IrBinOpCmpLessOrEq:
return_type = g->builtin_types.entry_i32->llvm_type;
return_scalar_type = g->builtin_types.entry_i32->llvm_type;
snprintf(fn_name, sizeof(fn_name), "__le%sf2", compiler_rt_type_abbrev);
res_icmp = LE_ZERO;
break;
case IrBinOpCmpLessThan:
return_type = g->builtin_types.entry_i32->llvm_type;
return_scalar_type = g->builtin_types.entry_i32->llvm_type;
snprintf(fn_name, sizeof(fn_name), "__le%sf2", compiler_rt_type_abbrev);
res_icmp = EQ_NEG;
break;
case IrBinOpCmpGreaterOrEq:
return_type = g->builtin_types.entry_i32->llvm_type;
return_scalar_type = g->builtin_types.entry_i32->llvm_type;
snprintf(fn_name, sizeof(fn_name), "__ge%sf2", compiler_rt_type_abbrev);
res_icmp = GE_ZERO;
break;
case IrBinOpCmpGreaterThan:
return_type = g->builtin_types.entry_i32->llvm_type;
return_scalar_type = g->builtin_types.entry_i32->llvm_type;
snprintf(fn_name, sizeof(fn_name), "__ge%sf2", compiler_rt_type_abbrev);
res_icmp = EQ_ONE;
break;
@ -3569,7 +3549,7 @@ static LLVMValueRef gen_soft_float_bin_op(CodeGen *g, LLVMValueRef op1_value, LL
zig_unreachable();
}
LLVMValueRef func_ref = get_soft_float_fn(g, fn_name, param_count, operand_type->llvm_type, return_type);
LLVMValueRef func_ref = get_soft_float_fn(g, fn_name, param_count, operand_scalar_type->llvm_type, return_scalar_type);
LLVMValueRef result;
if (vector_len == 0) {

View File

@ -101,18 +101,20 @@ test "vector float operators" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
var v: @Vector(4, f32) = [4]f32{ 10, 20, 30, 40 };
var x: @Vector(4, f32) = [4]f32{ 1, 2, 3, 4 };
try expect(mem.eql(f32, &@as([4]f32, v + x), &[4]f32{ 11, 22, 33, 44 }));
try expect(mem.eql(f32, &@as([4]f32, v - x), &[4]f32{ 9, 18, 27, 36 }));
try expect(mem.eql(f32, &@as([4]f32, v * x), &[4]f32{ 10, 40, 90, 160 }));
try expect(mem.eql(f32, &@as([4]f32, -x), &[4]f32{ -1, -2, -3, -4 }));
}
};
try S.doTheTest();
comptime try S.doTheTest();
inline for ([_]type{ f16, f32, f64, f80, f128 }) |T| {
const S = struct {
fn doTheTest() !void {
var v: @Vector(4, T) = [4]T{ 10, 20, 30, 40 };
var x: @Vector(4, T) = [4]T{ 1, 2, 3, 4 };
try expect(mem.eql(T, &@as([4]T, v + x), &[4]T{ 11, 22, 33, 44 }));
try expect(mem.eql(T, &@as([4]T, v - x), &[4]T{ 9, 18, 27, 36 }));
try expect(mem.eql(T, &@as([4]T, v * x), &[4]T{ 10, 40, 90, 160 }));
try expect(mem.eql(T, &@as([4]T, -x), &[4]T{ -1, -2, -3, -4 }));
}
};
try S.doTheTest();
comptime try S.doTheTest();
}
}
test "vector bit operators" {