From a31a749c42505e53308c0f2426db283ea130e776 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Wed, 19 Jan 2022 12:26:30 +0200 Subject: [PATCH 1/8] stage1: add f80 type --- CMakeLists.txt | 31 +++++ src/AstGen.zig | 2 + src/Sema.zig | 3 + src/Zir.zig | 5 + src/stage1/all_types.hpp | 2 + src/stage1/analyze.cpp | 13 ++ src/stage1/codegen.cpp | 13 ++ src/stage1/ir.cpp | 262 ++++++++++++++++++++++++++++++++--- src/stage1/softfloat.hpp | 4 + src/stage1/softfloat_ext.cpp | 38 +++-- src/stage1/softfloat_ext.hpp | 4 + src/stage1/target.cpp | 11 ++ src/stage1/target.hpp | 1 + src/type.zig | 17 +++ src/value.zig | 5 + 15 files changed, 380 insertions(+), 31 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0cdedece99..e12e040fe4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -142,15 +142,19 @@ include_directories(${CLANG_INCLUDE_DIRS}) # No patches have been applied to SoftFloat-3e set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/f128M_isSignalingNaN.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/extF80M_isSignalingNaN.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToExtF80M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF16UI.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF32UI.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF64UI.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f128MToCommonNaN.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_extF80MToCommonNaN.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f16UIToCommonNaN.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f32UIToCommonNaN.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f64UIToCommonNaN.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_propagateNaNF128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_propagateNaNExtF80M.c" 
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_propagateNaNF16UI.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/softfloat_raiseFlags.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_add.c" @@ -170,6 +174,7 @@ set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_f16.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_f32.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_f64.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_extF80M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_i32.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_i32_r_minMag.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_i64.c" @@ -178,6 +183,20 @@ set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui32_r_minMag.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui64.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui64_r_minMag.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_add.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_div.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_eq.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_le.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_lt.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_mul.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_rem.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_roundToInt.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_sqrt.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_sub.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_to_f16.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_to_f32.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_to_f64.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/extF80M_to_f128M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_add.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_div.c" 
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_eq.c" @@ -188,9 +207,12 @@ set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_roundToInt.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_sqrt.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_sub.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_to_extF80M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_to_f128M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_to_f64.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f32_to_extF80M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f32_to_f128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f64_to_extF80M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f64_to_f128M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f64_to_f16.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/i32_to_f128M.c" @@ -198,6 +220,7 @@ set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addCarryM.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addComplCarryM.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addF128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addExtF80M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addM.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addMagsF16.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addMagsF32.c" @@ -208,12 +231,14 @@ set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_approxRecip_1Ks.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_compare128M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_compare96M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_compareNonnormExtF80M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_countLeadingZeros16.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_countLeadingZeros32.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_countLeadingZeros64.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_countLeadingZeros8.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_eq128.c" 
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_invalidF128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_invalidExtF80M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_isNaNF128M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_le128.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_lt128.c" @@ -224,7 +249,9 @@ set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_mulAddF32.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_mulAddF64.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_negXM.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_normExtF80SigM.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_normRoundPackMToF128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_normRoundPackMToExtF80M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_normRoundPackToF16.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_normRoundPackToF32.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_normRoundPackToF64.c" @@ -235,6 +262,7 @@ set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_remStepMBy32.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_roundMToI64.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_roundMToUI64.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_roundPackMToExtF80M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_roundPackMToF128M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_roundPackToF16.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_roundPackToF32.c" @@ -263,11 +291,14 @@ set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_subMagsF32.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_subMagsF64.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_tryPropagateNaNF128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_tryPropagateNaNExtF80M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_mulAdd.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_mulAdd.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/softfloat_state.c" 
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/ui32_to_f128M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/ui64_to_f128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/ui32_to_extF80M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/ui64_to_extF80M.c" ) add_library(embedded_softfloat STATIC ${EMBEDDED_SOFTFLOAT_SOURCES}) if(MSVC) diff --git a/src/AstGen.zig b/src/AstGen.zig index 8328264306..cb6947b7c1 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -7723,6 +7723,7 @@ const primitives = std.ComptimeStringMap(Zir.Inst.Ref, .{ .{ "f16", .f16_type }, .{ "f32", .f32_type }, .{ "f64", .f64_type }, + .{ "f80", .f80_type }, .{ "false", .bool_false }, .{ "i16", .i16_type }, .{ "i32", .i32_type }, @@ -8732,6 +8733,7 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.f16_type), as_ty | @enumToInt(Zir.Inst.Ref.f32_type), as_ty | @enumToInt(Zir.Inst.Ref.f64_type), + as_ty | @enumToInt(Zir.Inst.Ref.f80_type), as_ty | @enumToInt(Zir.Inst.Ref.f128_type), as_ty | @enumToInt(Zir.Inst.Ref.anyopaque_type), as_ty | @enumToInt(Zir.Inst.Ref.bool_type), diff --git a/src/Sema.zig b/src/Sema.zig index 84907a2044..ac67b3f07f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -16950,6 +16950,7 @@ pub fn typeHasOnePossibleValue( .f16, .f32, .f64, + .f80, .f128, .c_longdouble, .comptime_int, @@ -17227,6 +17228,7 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .f16 => return .f16_type, .f32 => return .f32_type, .f64 => return .f64_type, + .f80 => return .f80_type, .f128 => return .f128_type, .anyopaque => return .anyopaque_type, .bool => return .bool_type, @@ -17572,6 +17574,7 @@ fn typeRequiresComptime(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) C .f16, .f32, .f64, + .f80, .f128, .anyopaque, .bool, diff --git a/src/Zir.zig b/src/Zir.zig index 86819d10f2..1ff103a876 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -1639,6 +1639,7 @@ pub const Inst = struct { f16_type, f32_type, f64_type, + f80_type, f128_type, anyopaque_type, bool_type, @@ -1809,6 +1810,10 @@ pub const Inst 
= struct { .ty = Type.initTag(.type), .val = Value.initTag(.f64_type), }, + .f80_type = .{ + .ty = Type.initTag(.type), + .val = Value.initTag(.f80_type), + }, .f128_type = .{ .ty = Type.initTag(.type), .val = Value.initTag(.f128_type), diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp index b5a7f07975..38bc3d0c87 100644 --- a/src/stage1/all_types.hpp +++ b/src/stage1/all_types.hpp @@ -516,6 +516,7 @@ struct ZigValue { float16_t x_f16; float x_f32; double x_f64; + extFloat80_t x_f80; float128_t x_f128; bool x_bool; ConstBoundFnValue x_bound_fn; @@ -2089,6 +2090,7 @@ struct CodeGen { ZigType *entry_f16; ZigType *entry_f32; ZigType *entry_f64; + ZigType *entry_f80; ZigType *entry_f128; ZigType *entry_void; ZigType *entry_unreachable; diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp index ff68198fc3..09fd41e24d 100644 --- a/src/stage1/analyze.cpp +++ b/src/stage1/analyze.cpp @@ -5647,6 +5647,9 @@ static uint32_t hash_combine_const_val(uint32_t hash_val, ZigValue *const_val) { case 16: return hash_combine(hash_val, &const_val->data.x_f16); case 32: return hash_combine(hash_val, &const_val->data.x_f32); case 64: return hash_combine(hash_val, &const_val->data.x_f64); + case 80: + hash_val = hash_combine(hash_val, &const_val->data.x_f80.signExp); + return hash_combine(hash_val, &const_val->data.x_f80.signif); case 128: return hash_combine(hash_val, &const_val->data.x_f128); default: zig_unreachable(); } @@ -6325,6 +6328,7 @@ void init_const_float(ZigValue *const_val, ZigType *type, double value) { case 64: const_val->data.x_f64 = value; break; + case 80: case 128: // if we need this, we should add a function that accepts a float128_t param zig_unreachable(); @@ -7218,6 +7222,8 @@ bool const_values_equal(CodeGen *g, ZigValue *a, ZigValue *b) { return a->data.x_f32 == b->data.x_f32; case 64: return a->data.x_f64 == b->data.x_f64; + case 80: + return extF80M_eq(&a->data.x_f80, &b->data.x_f80); case 128: return f128M_eq(&a->data.x_f128, 
&b->data.x_f128); default: @@ -7470,6 +7476,13 @@ void render_const_value(CodeGen *g, Buf *buf, ZigValue *const_val) { case 64: buf_appendf(buf, "%f", const_val->data.x_f64); return; + case 80: { + float64_t f64_value = extF80M_to_f64(&const_val->data.x_f80); + double double_value; + memcpy(&double_value, &f64_value, sizeof(double)); + buf_appendf(buf, "%f", double_value); + return; + } case 128: { const size_t extra_len = 100; diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp index f9f37c2eb4..efb33e8bdc 100644 --- a/src/stage1/codegen.cpp +++ b/src/stage1/codegen.cpp @@ -7692,6 +7692,12 @@ static LLVMValueRef gen_const_val(CodeGen *g, ZigValue *const_val, const char *n return LLVMConstReal(get_llvm_type(g, type_entry), const_val->data.x_f32); case 64: return LLVMConstReal(get_llvm_type(g, type_entry), const_val->data.x_f64); + case 80: { + uint64_t buf[2]; + memcpy(&buf, &const_val->data.x_f80, 16); + LLVMValueRef as_int = LLVMConstIntOfArbitraryPrecision(LLVMInt128Type(), 2, buf); + return LLVMConstBitCast(as_int, get_llvm_type(g, type_entry)); + } case 128: { uint64_t buf[2]; @@ -8911,6 +8917,13 @@ static void define_builtin_types(CodeGen *g) { add_fp_entry(g, "f64", 64, LLVMDoubleType(), &g->builtin_types.entry_f64); add_fp_entry(g, "f128", 128, LLVMFP128Type(), &g->builtin_types.entry_f128); + if (target_has_f80(g->zig_target)) { + add_fp_entry(g, "f80", 80, LLVMX86FP80Type(), &g->builtin_types.entry_f80); + } else { + // use f128 for correct size and alignment + add_fp_entry(g, "f80", 128, LLVMFP128Type(), &g->builtin_types.entry_f80); + } + switch (g->zig_target->arch) { case ZigLLVM_x86: case ZigLLVM_x86_64: diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp index cc68ce0d3c..1b9e9638e2 100644 --- a/src/stage1/ir.cpp +++ b/src/stage1/ir.cpp @@ -2688,6 +2688,12 @@ static bool float_has_fraction(ZigValue *const_val) { return floorf(const_val->data.x_f32) != const_val->data.x_f32; case 64: return floor(const_val->data.x_f64) != 
const_val->data.x_f64; + case 80: + { + extFloat80_t floored; + extF80M_roundToInt(&const_val->data.x_f80, softfloat_round_minMag, false, &floored); + return !extF80M_eq(&floored, &const_val->data.x_f80); + } case 128: { float128_t floored; @@ -2716,6 +2722,15 @@ static void float_append_buf(Buf *buf, ZigValue *const_val) { case 64: buf_appendf(buf, "%f", const_val->data.x_f64); break; + case 80: + { + float64_t f64_value = extF80M_to_f64(&const_val->data.x_f80); + double double_value; + memcpy(&double_value, &f64_value, sizeof(double)); + + buf_appendf(buf, "%f", double_value); + break; + } case 128: { // TODO actual implementation @@ -2772,6 +2787,15 @@ static void float_init_bigint(BigInt *bigint, ZigValue *const_val) { bigint->is_negative = true; } break; + case 80: + { + float128_t f128_value; + extF80M_to_f128M(&const_val->data.x_f80, &f128_value); + BigFloat tmp_float; + bigfloat_init_128(&tmp_float, f128_value); + bigint_init_bigfloat(bigint, &tmp_float); + } + break; case 128: { BigFloat tmp_float; @@ -2801,8 +2825,11 @@ static void float_init_bigfloat(ZigValue *dest_val, BigFloat *bigfloat) { case 64: dest_val->data.x_f64 = bigfloat_to_f64(bigfloat); break; - case 80: - zig_panic("TODO: float_init_bigfloat c_longdouble"); + case 80: { + float128_t f128_value = bigfloat_to_f128(bigfloat); + f128M_to_extF80M(&f128_value, &dest_val->data.x_f80); + break; + } case 128: dest_val->data.x_f128 = bigfloat_to_f128(bigfloat); break; @@ -2828,6 +2855,9 @@ static void float_init_f16(ZigValue *dest_val, float16_t x) { case 64: dest_val->data.x_f64 = zig_f16_to_double(x); break; + case 80: + f16_to_extF80M(x, &dest_val->data.x_f80); + break; case 128: f16_to_f128M(x, &dest_val->data.x_f128); break; @@ -2853,6 +2883,12 @@ static void float_init_f32(ZigValue *dest_val, float x) { case 64: dest_val->data.x_f64 = x; break; + case 80: { + float32_t x_f32; + memcpy(&x_f32, &x, sizeof(float)); + f32_to_extF80M(x_f32, &dest_val->data.x_f80); + break; + } case 128: {
float32_t x_f32; @@ -2882,6 +2918,12 @@ static void float_init_f64(ZigValue *dest_val, double x) { case 64: dest_val->data.x_f64 = x; break; + case 80: { + float64_t x_f64; + memcpy(&x_f64, &x, sizeof(double)); + f64_to_extF80M(x_f64, &dest_val->data.x_f80); + break; + } case 128: { float64_t x_f64; @@ -2917,6 +2959,9 @@ static void float_init_f128(ZigValue *dest_val, float128_t x) { memcpy(&dest_val->data.x_f64, &f64_val, sizeof(double)); break; } + case 80: + f128M_to_extF80M(&x, &dest_val->data.x_f80); + break; case 128: { memcpy(&dest_val->data.x_f128, &x, sizeof(float128_t)); @@ -2944,6 +2989,12 @@ static void float_init_float(ZigValue *dest_val, ZigValue *src_val) { case 64: float_init_f64(dest_val, src_val->data.x_f64); break; + case 80: { + float128_t f128_value; + extF80M_to_f128M(&src_val->data.x_f80, &f128_value); + float_init_f128(dest_val, f128_value); + break; + } case 128: float_init_f128(dest_val, src_val->data.x_f128); break; @@ -2966,6 +3017,8 @@ static bool float_is_nan(ZigValue *op) { return op->data.x_f32 != op->data.x_f32; case 64: return op->data.x_f64 != op->data.x_f64; + case 80: + return zig_extF80_isNaN(&op->data.x_f80); case 128: return zig_f128_isNaN(&op->data.x_f128); default: @@ -3006,6 +3059,14 @@ static Cmp float_cmp(ZigValue *op1, ZigValue *op2) { } else { return CmpEQ; } + case 80: + if (extF80M_lt(&op1->data.x_f80, &op2->data.x_f80)) { + return CmpLT; + } else if (extF80M_eq(&op1->data.x_f80, &op2->data.x_f80)) { + return CmpEQ; + } else { + return CmpGT; + } case 128: if (f128M_lt(&op1->data.x_f128, &op2->data.x_f128)) { return CmpLT; @@ -3061,7 +3122,18 @@ static Cmp float_cmp_zero(ZigValue *op) { } else { return CmpEQ; } - case 128: + case 80: { + extFloat80_t zero_float; + ui32_to_extF80M(0, &zero_float); + if (extF80M_lt(&op->data.x_f80, &zero_float)) { + return CmpLT; + } else if (extF80M_eq(&op->data.x_f80, &zero_float)) { + return CmpEQ; + } else { + return CmpGT; + } + } + case 128: { float128_t zero_float; 
ui32_to_f128M(0, &zero_float); if (f128M_lt(&op->data.x_f128, &zero_float)) { @@ -3071,6 +3143,7 @@ static Cmp float_cmp_zero(ZigValue *op) { } else { return CmpGT; } + } default: zig_unreachable(); } @@ -3095,6 +3168,9 @@ static void float_add(ZigValue *out_val, ZigValue *op1, ZigValue *op2) { case 64: out_val->data.x_f64 = op1->data.x_f64 + op2->data.x_f64; return; + case 80: + extF80M_add(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80); + return; case 128: f128M_add(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); return; @@ -3122,6 +3198,9 @@ static void float_sub(ZigValue *out_val, ZigValue *op1, ZigValue *op2) { case 64: out_val->data.x_f64 = op1->data.x_f64 - op2->data.x_f64; return; + case 80: + extF80M_sub(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80); + return; case 128: f128M_sub(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); return; @@ -3149,6 +3228,9 @@ static void float_mul(ZigValue *out_val, ZigValue *op1, ZigValue *op2) { case 64: out_val->data.x_f64 = op1->data.x_f64 * op2->data.x_f64; return; + case 80: + extF80M_mul(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80); + return; case 128: f128M_mul(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); return; @@ -3176,6 +3258,9 @@ static void float_div(ZigValue *out_val, ZigValue *op1, ZigValue *op2) { case 64: out_val->data.x_f64 = op1->data.x_f64 / op2->data.x_f64; return; + case 80: + extF80M_div(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80); + return; case 128: f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); return; @@ -3204,6 +3289,10 @@ static void float_div_trunc(ZigValue *out_val, ZigValue *op1, ZigValue *op2) { case 64: out_val->data.x_f64 = trunc(op1->data.x_f64 / op2->data.x_f64); return; + case 80: + extF80M_div(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80); + extF80M_roundToInt(&out_val->data.x_f80, softfloat_round_minMag, false, &out_val->data.x_f80); + return; case 128: 
f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); f128M_roundToInt(&out_val->data.x_f128, softfloat_round_minMag, false, &out_val->data.x_f128); @@ -3233,6 +3322,10 @@ static void float_div_floor(ZigValue *out_val, ZigValue *op1, ZigValue *op2) { case 64: out_val->data.x_f64 = floor(op1->data.x_f64 / op2->data.x_f64); return; + case 80: + extF80M_div(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80); + extF80M_roundToInt(&out_val->data.x_f80, softfloat_round_min, false, &out_val->data.x_f80); + return; case 128: f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); f128M_roundToInt(&out_val->data.x_f128, softfloat_round_min, false, &out_val->data.x_f128); @@ -3261,6 +3354,9 @@ static void float_rem(ZigValue *out_val, ZigValue *op1, ZigValue *op2) { case 64: out_val->data.x_f64 = fmod(op1->data.x_f64, op2->data.x_f64); return; + case 80: + extF80M_rem(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80); + return; case 128: f128M_rem(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); return; @@ -3290,6 +3386,14 @@ static void zig_f128M_mod(const float128_t* a, const float128_t* b, float128_t* f128M_sub(a, c, c); } +// c = a - b * trunc(a / b) +static void zig_extF80M_mod(const extFloat80_t* a, const extFloat80_t* b, extFloat80_t* c) { + extF80M_div(a, b, c); + extF80M_roundToInt(c, softfloat_round_min, true, c); + extF80M_mul(b, c, c); + extF80M_sub(a, c, c); +} + static void float_mod(ZigValue *out_val, ZigValue *op1, ZigValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; @@ -3306,6 +3410,9 @@ static void float_mod(ZigValue *out_val, ZigValue *op1, ZigValue *op2) { case 64: out_val->data.x_f64 = fmod(fmod(op1->data.x_f64, op2->data.x_f64) + op2->data.x_f64, op2->data.x_f64); return; + case 80: + zig_extF80M_mod(&op1->data.x_f80, &op2->data.x_f80, &out_val->data.x_f80); + return; case 128: zig_f128M_mod(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128); return; @@ -3351,6 
+3458,15 @@ static void float_max(ZigValue *out_val, ZigValue *op1, ZigValue *op2) { out_val->data.x_f64 = op1->data.x_f64 > op2->data.x_f64 ? op1->data.x_f64 : op2->data.x_f64; } return; + case 80: + if (zig_extF80_isNaN(&op1->data.x_f80)) { + out_val->data.x_f80 = op2->data.x_f80; + } else if (zig_extF80_isNaN(&op2->data.x_f80)) { + out_val->data.x_f80 = op1->data.x_f80; + } else { + out_val->data.x_f80 = extF80M_lt(&op1->data.x_f80, &op2->data.x_f80) ? op2->data.x_f80 : op1->data.x_f80; + } + return; case 128: if (zig_f128_isNaN(&op1->data.x_f128)) { out_val->data.x_f128 = op2->data.x_f128; @@ -3402,6 +3518,15 @@ static void float_min(ZigValue *out_val, ZigValue *op1, ZigValue *op2) { out_val->data.x_f64 = op1->data.x_f32 < op2->data.x_f64 ? op1->data.x_f64 : op2->data.x_f64; } return; + case 80: + if (zig_extF80_isNaN(&op1->data.x_f80)) { + out_val->data.x_f80 = op2->data.x_f80; + } else if (zig_extF80_isNaN(&op2->data.x_f80)) { + out_val->data.x_f80 = op1->data.x_f80; + } else { + out_val->data.x_f80 = extF80M_lt(&op1->data.x_f80, &op2->data.x_f80) ? 
op1->data.x_f80 : op2->data.x_f80; + } + return; case 128: if (zig_f128_isNaN(&op1->data.x_f128)) { out_val->data.x_f128 = op2->data.x_f128; @@ -3434,6 +3559,9 @@ static void float_negate(ZigValue *out_val, ZigValue *op) { case 64: out_val->data.x_f64 = -op->data.x_f64; return; + case 80: + extF80M_neg(&op->data.x_f80, &out_val->data.x_f80); + return; case 128: f128M_neg(&op->data.x_f128, &out_val->data.x_f128); return; @@ -3462,6 +3590,9 @@ void float_write_ieee597(ZigValue *op, uint8_t *buf, bool target_is_big_endian) case 64: memcpy(buf, &op->data.x_f64, 8); break; + case 80: + memcpy(buf, &op->data.x_f80, 16); + break; case 128: memcpy(buf, &op->data.x_f128, 16); break; @@ -3511,6 +3642,9 @@ void float_read_ieee597(ZigValue *val, uint8_t *buf, bool target_is_big_endian) case 64: memcpy(&val->data.x_f64, ptr, 8); return; + case 80: + memcpy(&val->data.x_f80, ptr, 16); + return; case 128: memcpy(&val->data.x_f128, ptr, 16); return; @@ -3538,8 +3672,12 @@ static void value_to_bigfloat(BigFloat *out, ZigValue *val) { case 64: bigfloat_init_64(out, val->data.x_f64); return; - case 80: - zig_panic("TODO: value_to_bigfloat c_longdouble"); + case 80: { + float128_t f128_value; + extF80M_to_f128M(&val->data.x_f80, &f128_value); + bigfloat_init_128(out, f128_value); + return; + } case 128: bigfloat_init_128(out, val->data.x_f128); return; @@ -3628,8 +3766,14 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, Stage1AirInst *instruc bigfloat_init_64(&orig_bf, tmp); break; } - case 80: - zig_panic("TODO: ir_num_lit_fits_in_other_type c_longdouble"); + case 80: { + float128_t tmp = bigfloat_to_f128(&tmp_bf); + extFloat80_t tmp80; + f128M_to_extF80M(&tmp, &tmp80); + extF80M_to_f128M(&tmp80, &tmp); + bigfloat_init_128(&orig_bf, tmp); + break; + } case 128: { float128_t tmp = bigfloat_to_f128(&tmp_bf); bigfloat_init_128(&orig_bf, tmp); @@ -3673,8 +3817,15 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, Stage1AirInst *instruc } break; } - case 80: - 
zig_panic("TODO: ir_num_lit_fits_in_other_type c_longdouble"); + case 80: { + float16_t tmp = extF80M_to_f16(&const_val->data.x_f80); + extFloat80_t orig; + f16_to_extF80M(tmp, &orig); + if (extF80M_eq(&orig, &const_val->data.x_f80)) { + return true; + } + break; + } case 128: { float16_t tmp = f128M_to_f16(&const_val->data.x_f128); float128_t orig; @@ -3698,8 +3849,15 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, Stage1AirInst *instruc } break; } - case 80: - zig_panic("TODO: ir_num_lit_fits_in_other_type c_longdouble"); + case 80: { + float32_t tmp = extF80M_to_f32(&const_val->data.x_f80); + extFloat80_t orig; + f32_to_extF80M(tmp, &orig); + if (extF80M_eq(&orig, &const_val->data.x_f80)) { + return true; + } + break; + } case 128: { float32_t tmp = f128M_to_f32(&const_val->data.x_f128); float128_t orig; @@ -3715,8 +3873,15 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, Stage1AirInst *instruc break; case 64: switch (const_val->type->data.floating.bit_count) { - case 80: - zig_panic("TODO: ir_num_lit_fits_in_other_type c_longdouble"); + case 80: { + float64_t tmp = extF80M_to_f64(&const_val->data.x_f80); + extFloat80_t orig; + f64_to_extF80M(tmp, &orig); + if (extF80M_eq(&orig, &const_val->data.x_f80)) { + return true; + } + break; + } case 128: { float64_t tmp = f128M_to_f64(&const_val->data.x_f128); float128_t orig; @@ -3730,9 +3895,17 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, Stage1AirInst *instruc zig_unreachable(); } break; - case 80: + case 80: { assert(const_val->type->data.floating.bit_count == 128); - zig_panic("TODO: ir_num_lit_fits_in_other_type c_longdouble"); + extFloat80_t tmp; + f128M_to_extF80M(&const_val->data.x_f128, &tmp); + float128_t orig; + extF80M_to_f128M(&tmp, &orig); + if (f128M_eq(&orig, &const_val->data.x_f128)) { + return true; + } + break; + } case 128: return true; default: @@ -5143,8 +5316,11 @@ static bool eval_const_expr_implicit_cast(IrAnalyze *ira, Scope *scope, AstNode case 64: 
const_val->data.x_f64 = bigfloat_to_f64(&other_val->data.x_bigfloat); break; - case 80: - zig_panic("TODO: eval_const_expr_implicit_cast c_longdouble"); + case 80: { + float128_t tmp = bigfloat_to_f128(&other_val->data.x_bigfloat); + f128M_to_extF80M(&tmp, &const_val->data.x_f80); + break; + } case 128: const_val->data.x_f128 = bigfloat_to_f128(&other_val->data.x_bigfloat); break; @@ -5172,8 +5348,11 @@ static bool eval_const_expr_implicit_cast(IrAnalyze *ira, Scope *scope, AstNode case 64: const_val->data.x_f64 = bigfloat_to_f64(&bigfloat); break; - case 80: - zig_panic("TODO: eval_const_expr_implicit_cast c_longdouble"); + case 80: { + float128_t tmp = bigfloat_to_f128(&bigfloat); + f128M_to_extF80M(&tmp, &const_val->data.x_f80); + break; + } case 128: const_val->data.x_f128 = bigfloat_to_f128(&bigfloat); break; @@ -18960,6 +19139,7 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_ case 16: return ira->codegen->builtin_types.entry_f16; case 32: return ira->codegen->builtin_types.entry_f32; case 64: return ira->codegen->builtin_types.entry_f64; + case 80: return ira->codegen->builtin_types.entry_f80; case 128: return ira->codegen->builtin_types.entry_f128; } ir_add_error_node(ira, source_node, buf_sprintf("%d-bit float unsupported", bits)); @@ -21943,6 +22123,8 @@ static void ir_eval_mul_add(IrAnalyze *ira, ZigType *float_type, case 64: out_val->data.x_f64 = fma(op1->data.x_f64, op2->data.x_f64, op3->data.x_f64); break; + case 80: + zig_panic("compiler bug: TODO: implement 'mulAdd' for type 'f80'. See https://github.com/ziglang/zig/issues/4026"); case 128: f128M_mulAdd(&op1->data.x_f128, &op2->data.x_f128, &op3->data.x_f128, &out_val->data.x_f128); break; @@ -24156,10 +24338,44 @@ static ErrorMsg *ir_eval_float_op(IrAnalyze *ira, Scope *scope, AstNode *source_ } break; } - case 80: - return ir_add_error_node(ira, source_node, - buf_sprintf("compiler bug: TODO: implement '%s' for type '%s'. 
See https://github.com/ziglang/zig/issues/4026", - float_op_to_name(fop), buf_ptr(&float_type->name))); + case 80: { + extFloat80_t *out = &out_val->data.x_f80; + extFloat80_t *in = &op->data.x_f80; + switch (fop) { + case BuiltinFnIdSqrt: + extF80M_sqrt(in, out); + break; + case BuiltinFnIdFabs: + extF80M_abs(in, out); + break; + case BuiltinFnIdFloor: + extF80M_roundToInt(in, softfloat_round_min, false, out); + break; + case BuiltinFnIdCeil: + extF80M_roundToInt(in, softfloat_round_max, false, out); + break; + case BuiltinFnIdTrunc: + extF80M_trunc(in, out); + break; + case BuiltinFnIdRound: + extF80M_roundToInt(in, softfloat_round_near_maxMag, false, out); + break; + case BuiltinFnIdNearbyInt: + case BuiltinFnIdSin: + case BuiltinFnIdCos: + case BuiltinFnIdExp: + case BuiltinFnIdExp2: + case BuiltinFnIdLog: + case BuiltinFnIdLog10: + case BuiltinFnIdLog2: + return ir_add_error_node(ira, source_node, + buf_sprintf("compiler bug: TODO: implement '%s' for type '%s'. See https://github.com/ziglang/zig/issues/4026", + float_op_to_name(fop), buf_ptr(&float_type->name))); + default: + zig_unreachable(); + } + break; + } case 128: { float128_t *out, *in; if (float_type->id == ZigTypeIdComptimeFloat) { diff --git a/src/stage1/softfloat.hpp b/src/stage1/softfloat.hpp index 0d43292c4d..ec0315d684 100644 --- a/src/stage1/softfloat.hpp +++ b/src/stage1/softfloat.hpp @@ -56,4 +56,8 @@ static inline bool zig_f128_isNaN(float128_t *aPtr) { || ((absA64 == UINT64_C(0x7FFF000000000000)) && lo); } +static inline bool zig_extF80_isNaN(extFloat80_t *aPtr) { + return (aPtr->signExp & 0x7FFF) == 0x7FFF && (aPtr->signif & UINT64_C(0x7FFFFFFFFFFFFFFF)) != 0; +} + #endif diff --git a/src/stage1/softfloat_ext.cpp b/src/stage1/softfloat_ext.cpp index d0b8d1a5b3..bb4c134d9e 100644 --- a/src/stage1/softfloat_ext.cpp +++ b/src/stage1/softfloat_ext.cpp @@ -28,13 +28,6 @@ void f128M_trunc(const float128_t *aPtr, float128_t *zPtr) { } } -float16_t f16_neg(const float16_t a) { - union { uint16_t ui; float16_t f; } uA; - // Toggle the sign bit. 
- uA.ui = a.v ^ (UINT16_C(1) << 15); - return uA.f; -} - void f128M_neg(const float128_t *aPtr, float128_t *zPtr) { // Toggle the sign bit. #if ZIG_BYTE_ORDER == ZIG_LITTLE_ENDIAN @@ -46,4 +39,33 @@ void f128M_neg(const float128_t *aPtr, float128_t *zPtr) { #else #error Unsupported endian #endif -} \ No newline at end of file +} + +void extF80M_abs(const extFloat80_t *aPtr, extFloat80_t *zPtr) { + // Clear the sign bit. + zPtr->signExp = aPtr->signExp & UINT16_C(0x7FFF); + zPtr->signif = aPtr->signif; +} + +void extF80M_trunc(const extFloat80_t *aPtr, extFloat80_t *zPtr) { + extFloat80_t zero_float; + ui32_to_extF80M(0, &zero_float); + if (extF80M_lt(aPtr, &zero_float)) { + extF80M_roundToInt(aPtr, softfloat_round_max, false, zPtr); + } else { + extF80M_roundToInt(aPtr, softfloat_round_min, false, zPtr); + } +} + +void extF80M_neg(const extFloat80_t *aPtr, extFloat80_t *zPtr) { + // Toggle the sign bit. + zPtr->signExp = aPtr->signExp ^ UINT16_C(0x8000); + zPtr->signif = aPtr->signif; +} + +float16_t f16_neg(const float16_t a) { + union { uint16_t ui; float16_t f; } uA; + // Toggle the sign bit. 
+ uA.ui = a.v ^ (UINT16_C(1) << 15); + return uA.f; +} diff --git a/src/stage1/softfloat_ext.hpp b/src/stage1/softfloat_ext.hpp index 42922a5226..4e6fd753c8 100644 --- a/src/stage1/softfloat_ext.hpp +++ b/src/stage1/softfloat_ext.hpp @@ -7,6 +7,10 @@ void f128M_abs(const float128_t *aPtr, float128_t *zPtr); void f128M_trunc(const float128_t *aPtr, float128_t *zPtr); void f128M_neg(const float128_t *aPtr, float128_t *zPtr); +void extF80M_abs(const extFloat80_t *aPtr, extFloat80_t *zPtr); +void extF80M_trunc(const extFloat80_t *aPtr, extFloat80_t *zPtr); +void extF80M_neg(const extFloat80_t *aPtr, extFloat80_t *zPtr); + float16_t f16_neg(const float16_t a); #endif \ No newline at end of file diff --git a/src/stage1/target.cpp b/src/stage1/target.cpp index feb2c7f143..a505b4bd21 100644 --- a/src/stage1/target.cpp +++ b/src/stage1/target.cpp @@ -1019,6 +1019,17 @@ bool target_long_double_is_f128(const ZigTarget *target) { } } +bool target_has_f80(const ZigTarget *target) { + switch (target->arch) { + case ZigLLVM_x86: + case ZigLLVM_x86_64: + return true; + + default: + return false; + } +} + bool target_is_riscv(const ZigTarget *target) { return target->arch == ZigLLVM_riscv32 || target->arch == ZigLLVM_riscv64; } diff --git a/src/stage1/target.hpp b/src/stage1/target.hpp index 6851d88618..2e26033549 100644 --- a/src/stage1/target.hpp +++ b/src/stage1/target.hpp @@ -81,6 +81,7 @@ bool target_is_sparc(const ZigTarget *target); bool target_is_android(const ZigTarget *target); bool target_has_debug_info(const ZigTarget *target); bool target_long_double_is_f128(const ZigTarget *target); +bool target_has_f80(const ZigTarget *target); uint32_t target_arch_pointer_bit_width(ZigLLVM_ArchType arch); uint32_t target_arch_largest_atomic_bits(ZigLLVM_ArchType arch); diff --git a/src/type.zig b/src/type.zig index 0020ccd7cc..23a741eed0 100644 --- a/src/type.zig +++ b/src/type.zig @@ -58,6 +58,7 @@ pub const Type = extern union { .f16, .f32, .f64, + .f80, .f128, .c_longdouble, => 
return .Float, @@ -833,6 +834,7 @@ pub const Type = extern union { .f16, .f32, .f64, + .f80, .f128, .bool, .void, @@ -1053,6 +1055,7 @@ pub const Type = extern union { .f16, .f32, .f64, + .f80, .f128, .bool, .void, @@ -1371,6 +1374,7 @@ pub const Type = extern union { .f16, .f32, .f64, + .f80, .f128, .bool, .void, @@ -1473,6 +1477,7 @@ pub const Type = extern union { .f16 => return Value.initTag(.f16_type), .f32 => return Value.initTag(.f32_type), .f64 => return Value.initTag(.f64_type), + .f80 => return Value.initTag(.f80_type), .f128 => return Value.initTag(.f128_type), .bool => return Value.initTag(.bool_type), .void => return Value.initTag(.void_type), @@ -1543,6 +1548,7 @@ pub const Type = extern union { .f16, .f32, .f64, + .f80, .f128, .bool, .anyerror, @@ -1858,6 +1864,7 @@ pub const Type = extern union { .f16 => return 2, .f32 => return 4, .f64 => return 8, + .f80 => return 16, .f128 => return 16, .c_longdouble => return 16, @@ -2138,6 +2145,7 @@ pub const Type = extern union { .f16 => return 2, .f32 => return 4, .f64 => return 8, + .f80 => return 16, .f128 => return 16, .c_longdouble => return 16, @@ -2277,6 +2285,7 @@ pub const Type = extern union { .i16, .u16, .f16 => 16, .i32, .u32, .f32 => 32, .i64, .u64, .f64 => 64, + .f80 => 80, .u128, .i128, .f128 => 128, .isize, @@ -3170,6 +3179,7 @@ pub const Type = extern union { .f16, .f32, .f64, + .f80, .f128, .c_longdouble, => true, @@ -3184,6 +3194,7 @@ pub const Type = extern union { .f16, .f32, .f64, + .f80, .f128, .c_longdouble, .comptime_float, @@ -3200,6 +3211,7 @@ pub const Type = extern union { .f16 => 16, .f32 => 32, .f64 => 64, + .f80 => 80, .f128, .comptime_float => 128, .c_longdouble => CType.longdouble.sizeInBits(target), @@ -3340,6 +3352,7 @@ pub const Type = extern union { .f16, .f32, .f64, + .f80, .f128, .c_longdouble, .comptime_int, @@ -3381,6 +3394,7 @@ pub const Type = extern union { .f16, .f32, .f64, + .f80, .f128, .c_longdouble, .comptime_int, @@ -3579,6 +3593,7 @@ pub const Type = extern 
union { .f16, .f32, .f64, + .f80, .f128, .anyopaque, .bool, @@ -4334,6 +4349,7 @@ pub const Type = extern union { f16, f32, f64, + f80, f128, anyopaque, bool, @@ -4453,6 +4469,7 @@ pub const Type = extern union { .f16, .f32, .f64, + .f80, .f128, .anyopaque, .bool, diff --git a/src/value.zig b/src/value.zig index 9d9895a6e0..faf4f38e80 100644 --- a/src/value.zig +++ b/src/value.zig @@ -47,6 +47,7 @@ pub const Value = extern union { f16_type, f32_type, f64_type, + f80_type, f128_type, anyopaque_type, bool_type, @@ -205,6 +206,7 @@ pub const Value = extern union { .f16_type, .f32_type, .f64_type, + .f80_type, .f128_type, .anyopaque_type, .bool_type, @@ -398,6 +400,7 @@ pub const Value = extern union { .f16_type, .f32_type, .f64_type, + .f80_type, .f128_type, .anyopaque_type, .bool_type, @@ -630,6 +633,7 @@ pub const Value = extern union { .f16_type => return out_stream.writeAll("f16"), .f32_type => return out_stream.writeAll("f32"), .f64_type => return out_stream.writeAll("f64"), + .f80_type => return out_stream.writeAll("f80"), .f128_type => return out_stream.writeAll("f128"), .anyopaque_type => return out_stream.writeAll("anyopaque"), .bool_type => return out_stream.writeAll("bool"), @@ -824,6 +828,7 @@ pub const Value = extern union { .f16_type => Type.initTag(.f16), .f32_type => Type.initTag(.f32), .f64_type => Type.initTag(.f64), + .f80_type => Type.initTag(.f80), .f128_type => Type.initTag(.f128), .anyopaque_type => Type.initTag(.anyopaque), .bool_type => Type.initTag(.bool), From 67d04a988a06f52f4abca848f3579a8037070afe Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Wed, 19 Jan 2022 18:47:51 +0200 Subject: [PATCH 2/8] std: add f80 bits --- doc/langref.html.in | 6 ++++++ lib/std/math.zig | 18 ++++++++++++++++++ lib/std/math/epsilon.zig | 1 + lib/std/math/inf.zig | 1 + lib/std/math/nan.zig | 3 +++ 5 files changed, 29 insertions(+) diff --git a/doc/langref.html.in b/doc/langref.html.in index 8b7e9b87e6..9863ee67da 100644 --- a/doc/langref.html.in +++ 
b/doc/langref.html.in @@ -737,6 +737,11 @@ pub fn main() void { double 64-bit floating point (52-bit mantissa) IEEE-754-2008 binary64 + + {#syntax#}f80{#endsyntax#} + double + 80-bit floating point (64-bit mantissa) IEEE-754-2008 80-bit extended precision + + {#syntax#}f128{#endsyntax#} + _Float128 @@ -1500,6 +1505,7 @@ fn divide(a: i32, b: i32) i32 {
  • {#syntax#}f16{#endsyntax#} - IEEE-754-2008 binary16
  • {#syntax#}f32{#endsyntax#} - IEEE-754-2008 binary32
  • {#syntax#}f64{#endsyntax#} - IEEE-754-2008 binary64
  • +
  • {#syntax#}f80{#endsyntax#} - IEEE-754-2008 80-bit extended precision
  • {#syntax#}f128{#endsyntax#} - IEEE-754-2008 binary128
  • {#syntax#}c_longdouble{#endsyntax#} - matches long double for the target C ABI
  • diff --git a/lib/std/math.zig b/lib/std/math.zig index cb911b1263..59532d7ab2 100644 --- a/lib/std/math.zig +++ b/lib/std/math.zig @@ -43,7 +43,21 @@ pub const f128_max = @bitCast(f128, @as(u128, 0x7FFEFFFFFFFFFFFFFFFFFFFFFFFFFFFF pub const f128_epsilon = @bitCast(f128, @as(u128, 0x3F8F0000000000000000000000000000)); pub const f128_toint = 1.0 / f128_epsilon; +const F80Repr = if (@import("builtin").cpu.arch.endian() == .Little) extern struct { + fraction: u64, + exp: u16, +} else extern struct { + exp: u16, + fraction: u64, +}; + // float.h details +pub const f80_true_min = @ptrCast(*const f80, &F80Repr{ .fraction = 1, .exp = 0 }).*; +pub const f80_min = @ptrCast(*const f80, &F80Repr{ .fraction = 0x8000000000000000, .exp = 1 }).*; +pub const f80_max = @ptrCast(*const f80, &F80Repr{ .fraction = 0xFFFFFFFFFFFFFFFF, .exp = 0x7FFE }).*; +pub const f80_epsilon = @ptrCast(*const f80, &F80Repr{ .fraction = 0x8000000000000000, .exp = 0x3FC0 }).*; +pub const f80_toint = 1.0 / f80_epsilon; + pub const f64_true_min = 4.94065645841246544177e-324; pub const f64_min = 2.2250738585072014e-308; pub const f64_max = 1.79769313486231570815e+308; @@ -91,6 +105,10 @@ pub const qnan_f64 = @bitCast(f64, qnan_u64); pub const inf_u64 = @as(u64, 0x7FF << 52); pub const inf_f64 = @bitCast(f64, inf_u64); +pub const inf_f80 = @ptrCast(*const f80, &F80Repr{ .fraction = 0x8000000000000000, .exp = 0x7fff }).*; +pub const nan_f80 = @ptrCast(*const f80, &F80Repr{ .fraction = 0xA000000000000000, .exp = 0x7fff }).*; +pub const qnan_f80 = @ptrCast(*const f80, &F80Repr{ .fraction = 0xC000000000000000, .exp = 0x7fff }).*; + pub const nan_u128 = @as(u128, 0x7fff0000000000000000000000000001); pub const nan_f128 = @bitCast(f128, nan_u128); diff --git a/lib/std/math/epsilon.zig b/lib/std/math/epsilon.zig index de0297ee63..7f78be1aab 100644 --- a/lib/std/math/epsilon.zig +++ b/lib/std/math/epsilon.zig @@ -8,6 +8,7 @@ pub fn epsilon(comptime T: type) T { f16 => math.f16_epsilon, f32 => math.f32_epsilon, 
f64 => math.f64_epsilon, + f80 => math.f80_epsilon, f128 => math.f128_epsilon, else => @compileError("epsilon not implemented for " ++ @typeName(T)), }; diff --git a/lib/std/math/inf.zig b/lib/std/math/inf.zig index 86ff245533..fd7d7c4380 100644 --- a/lib/std/math/inf.zig +++ b/lib/std/math/inf.zig @@ -7,6 +7,7 @@ pub fn inf(comptime T: type) T { f16 => math.inf_f16, f32 => math.inf_f32, f64 => math.inf_f64, + f80 => math.inf_f80, f128 => math.inf_f128, else => @compileError("inf not implemented for " ++ @typeName(T)), }; diff --git a/lib/std/math/nan.zig b/lib/std/math/nan.zig index 5a01a5b3bd..634af1f0d6 100644 --- a/lib/std/math/nan.zig +++ b/lib/std/math/nan.zig @@ -6,6 +6,7 @@ pub fn nan(comptime T: type) T { f16 => math.nan_f16, f32 => math.nan_f32, f64 => math.nan_f64, + f80 => math.nan_f80, f128 => math.nan_f128, else => @compileError("nan not implemented for " ++ @typeName(T)), }; @@ -19,6 +20,8 @@ pub fn snan(comptime T: type) T { f16 => @bitCast(f16, math.nan_u16), f32 => @bitCast(f32, math.nan_u32), f64 => @bitCast(f64, math.nan_u64), + f80 => @bitCast(f80, math.nan_u80), + f128 => @bitCast(f128, math.nan_u128), else => @compileError("snan not implemented for " ++ @typeName(T)), }; } From 8e9fd042b8a56fc4bb8eeae3878b095af96eb1a6 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Wed, 19 Jan 2022 21:16:23 +0200 Subject: [PATCH 3/8] stage1: emit calls to compiler-rt for f80 on unsupported targets --- src/stage1/codegen.cpp | 240 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 236 insertions(+), 4 deletions(-) diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp index efb33e8bdc..f8e12e1d78 100644 --- a/src/stage1/codegen.cpp +++ b/src/stage1/codegen.cpp @@ -1598,6 +1598,81 @@ static LLVMValueRef gen_assert_zero(CodeGen *g, LLVMValueRef expr_val, ZigType * return nullptr; } + +static LLVMValueRef gen_soft_f80_widen_or_shorten(CodeGen *g, ZigType *actual_type, + ZigType *wanted_type, LLVMValueRef expr_val) +{ + ZigType 
*scalar_actual_type = (actual_type->id == ZigTypeIdVector) ? + actual_type->data.vector.elem_type : actual_type; + ZigType *scalar_wanted_type = (wanted_type->id == ZigTypeIdVector) ? + wanted_type->data.vector.elem_type : wanted_type; + uint64_t actual_bits = scalar_actual_type->data.floating.bit_count; + uint64_t wanted_bits = scalar_wanted_type->data.floating.bit_count; + + + LLVMTypeRef param_type; + LLVMTypeRef return_type; + const char *func_name; + + if (actual_bits == wanted_bits) { + return expr_val; + } else if (actual_bits == 80) { + param_type = g->builtin_types.entry_f80->llvm_type; + switch (wanted_bits) { + case 16: + return_type = g->builtin_types.entry_f16->llvm_type; + func_name = "__truncxfhf2"; + break; + case 32: + return_type = g->builtin_types.entry_f32->llvm_type; + func_name = "__truncxfff2"; + break; + case 64: + return_type = g->builtin_types.entry_f64->llvm_type; + func_name = "__truncxfdf2"; + break; + case 128: + return_type = g->builtin_types.entry_f128->llvm_type; + func_name = "__extendxftf2"; + break; + default: + zig_unreachable(); + } + } else if (wanted_bits == 80) { + return_type = g->builtin_types.entry_f80->llvm_type; + switch (actual_bits) { + case 16: + param_type = g->builtin_types.entry_f16->llvm_type; + func_name = "__extendhfxf2"; + break; + case 32: + param_type = g->builtin_types.entry_f32->llvm_type; + func_name = "__extendffxf2"; + break; + case 64: + param_type = g->builtin_types.entry_f64->llvm_type; + func_name = "__extenddfxf2"; + break; + case 128: + param_type = g->builtin_types.entry_f128->llvm_type; + func_name = "__trunctfxf2"; + break; + default: + zig_unreachable(); + } + } else { + zig_unreachable(); + } + + LLVMValueRef func_ref = LLVMGetNamedFunction(g->module, func_name); + if (func_ref == nullptr) { + LLVMTypeRef fn_type = LLVMFunctionType(return_type, ¶m_type, 1, false); + func_ref = LLVMAddFunction(g->module, func_name, fn_type); + } + + return LLVMBuildCall(g->builder, func_ref, &expr_val, 1, ""); 
+} + static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_runtime_safety, ZigType *actual_type, ZigType *wanted_type, LLVMValueRef expr_val) { @@ -1612,6 +1687,13 @@ static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_runtime_safety, Z uint64_t actual_bits; uint64_t wanted_bits; if (scalar_actual_type->id == ZigTypeIdFloat) { + + if ((scalar_actual_type == g->builtin_types.entry_f80 + || scalar_wanted_type == g->builtin_types.entry_f80) + && !target_has_f80(g->zig_target)) + { + return gen_soft_f80_widen_or_shorten(g, actual_type, wanted_type, expr_val); + } actual_bits = scalar_actual_type->data.floating.bit_count; wanted_bits = scalar_wanted_type->data.floating.bit_count; } else if (scalar_actual_type->id == ZigTypeIdInt) { @@ -3142,6 +3224,142 @@ static void gen_shift_rhs_check(CodeGen *g, ZigType *lhs_type, ZigType *rhs_type } } +static LLVMValueRef get_soft_f80_bin_op_func(CodeGen *g, const char *name, int param_count, LLVMTypeRef return_type) { + LLVMValueRef existing_llvm_fn = LLVMGetNamedFunction(g->module, name); + if (existing_llvm_fn != nullptr) return existing_llvm_fn; + + LLVMTypeRef float_type_ref = g->builtin_types.entry_f80->llvm_type; + LLVMTypeRef param_types[2] = { float_type_ref, float_type_ref }; + LLVMTypeRef fn_type = LLVMFunctionType(return_type, param_types, param_count, false); + return LLVMAddFunction(g->module, name, fn_type); +} + +static LLVMValueRef ir_render_soft_f80_bin_op(CodeGen *g, Stage1Air *executable, + Stage1AirInstBinOp *bin_op_instruction) +{ + // TODO support vectors + IrBinOp op_id = bin_op_instruction->op_id; + Stage1AirInst *op1 = bin_op_instruction->op1; + Stage1AirInst *op2 = bin_op_instruction->op2; + + LLVMValueRef op1_value = ir_llvm_value(g, op1); + LLVMValueRef op2_value = ir_llvm_value(g, op2); + + bool div_exact_safety_check = false; + LLVMTypeRef return_type = g->builtin_types.entry_f80->llvm_type; + int param_count = 2; + const char *func_name; + switch (op_id) { + case IrBinOpInvalid: + 
case IrBinOpArrayCat: + case IrBinOpArrayMult: + case IrBinOpRemUnspecified: + case IrBinOpBitShiftLeftLossy: + case IrBinOpBitShiftLeftExact: + case IrBinOpBitShiftRightLossy: + case IrBinOpBitShiftRightExact: + case IrBinOpBoolOr: + case IrBinOpBoolAnd: + case IrBinOpMultWrap: + case IrBinOpAddWrap: + case IrBinOpSubWrap: + case IrBinOpBinOr: + case IrBinOpBinXor: + case IrBinOpBinAnd: + case IrBinOpAddSat: + case IrBinOpSubSat: + case IrBinOpMultSat: + case IrBinOpShlSat: + zig_unreachable(); + case IrBinOpCmpEq: + return_type = g->builtin_types.entry_i32->llvm_type; + func_name = "__eqxf2"; + break; + case IrBinOpCmpNotEq: + return_type = g->builtin_types.entry_i32->llvm_type; + func_name = "__nexf2"; + break; + case IrBinOpCmpLessOrEq: + case IrBinOpCmpLessThan: + return_type = g->builtin_types.entry_i32->llvm_type; + func_name = "__lexf2"; + break; + case IrBinOpCmpGreaterOrEq: + case IrBinOpCmpGreaterThan: + return_type = g->builtin_types.entry_i32->llvm_type; + func_name = "__gexf2"; + break; + case IrBinOpMaximum: + func_name = "__fmaxx"; + break; + case IrBinOpMinimum: + func_name = "__fminx"; + break; + case IrBinOpMult: + func_name = "__mulxf3"; + break; + case IrBinOpAdd: + func_name = "__addxf3"; + break; + case IrBinOpSub: + func_name = "__subxf3"; + break; + case IrBinOpDivUnspecified: + func_name = "__divxf3"; + break; + case IrBinOpDivExact: + func_name = "__divxf3"; + div_exact_safety_check = bin_op_instruction->safety_check_on && + ir_want_runtime_safety(g, &bin_op_instruction->base); + break; + case IrBinOpDivTrunc: + param_count = 1; + func_name = "__truncx"; + break; + case IrBinOpDivFloor: + param_count = 1; + func_name = "__floorx"; + break; + case IrBinOpRemRem: + param_count = 1; + func_name = "__remx"; + break; + case IrBinOpRemMod: + param_count = 1; + func_name = "__modx"; + break; + default: + zig_unreachable(); + } + + LLVMValueRef func_ref = get_soft_f80_bin_op_func(g, func_name, param_count, return_type); + + LLVMValueRef params[2] 
= {op1_value, op2_value}; + LLVMValueRef result = LLVMBuildCall(g->builder, func_ref, params, param_count, ""); + + if (div_exact_safety_check) { + // Safety check: a / b == floor(a / b) + func_ref = get_soft_f80_bin_op_func(g, "__floorx", 1, return_type); + LLVMValueRef floored = LLVMBuildCall(g->builder, func_ref, &result, 1, ""); + + LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivExactOk"); + LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivExactFail"); + + LLVMValueRef params[2] = {result, floored}; + func_ref = get_soft_f80_bin_op_func(g, "__eqxf2", 2, g->builtin_types.entry_i32->llvm_type); + LLVMValueRef ok_bit = LLVMBuildCall(g->builder, func_ref, params, 2, ""); + + LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block); + + LLVMPositionBuilderAtEnd(g->builder, fail_block); + gen_safety_crash(g, PanicMsgIdExactDivisionRemainder); + + LLVMPositionBuilderAtEnd(g->builder, ok_block); + } + + return result; +} + static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable, Stage1AirInstBinOp *bin_op_instruction) { @@ -3151,6 +3369,10 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable, ZigType *operand_type = op1->value->type; ZigType *scalar_type = (operand_type->id == ZigTypeIdVector) ? 
operand_type->data.vector.elem_type : operand_type; + if (scalar_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) { + return ir_render_soft_f80_bin_op(g, executable, bin_op_instruction); + } + bool want_runtime_safety = bin_op_instruction->safety_check_on && ir_want_runtime_safety(g, &bin_op_instruction->base); @@ -3158,7 +3380,6 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable, LLVMValueRef op1_value = ir_llvm_value(g, op1); LLVMValueRef op2_value = ir_llvm_value(g, op2); - switch (op_id) { case IrBinOpInvalid: case IrBinOpArrayCat: @@ -5927,7 +6148,7 @@ static LLVMValueRef ir_render_prefetch(CodeGen *g, Stage1Air *executable, Stage1 static_assert(PrefetchCacheInstruction == 0, ""); static_assert(PrefetchCacheData == 1, ""); assert(instruction->cache == PrefetchCacheData || instruction->cache == PrefetchCacheInstruction); - + // LLVM fails during codegen of instruction cache prefetchs for these architectures. // This is an LLVM bug as the prefetch intrinsic should be a noop if not supported by the target. // To work around this, simply don't emit llvm.prefetch in this case. 
@@ -8920,8 +9141,19 @@ static void define_builtin_types(CodeGen *g) { if (target_has_f80(g->zig_target)) { add_fp_entry(g, "f80", 80, LLVMX86FP80Type(), &g->builtin_types.entry_f80); } else { - // use f128 for correct size and alignment - add_fp_entry(g, "f80", 128, LLVMFP128Type(), &g->builtin_types.entry_f80); + ZigType *entry = new_type_table_entry(ZigTypeIdFloat); + entry->llvm_type = get_int_type(g, false, 128)->llvm_type; + entry->size_in_bits = 8 * LLVMStoreSizeOfType(g->target_data_ref, entry->llvm_type); + entry->abi_size = LLVMABISizeOfType(g->target_data_ref, entry->llvm_type); + entry->abi_align = 16; + buf_init_from_str(&entry->name, "f80"); + entry->data.floating.bit_count = 80; + + entry->llvm_di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name), + entry->size_in_bits, ZigLLVMEncoding_DW_ATE_unsigned()); + + g->builtin_types.entry_f80 = entry; + g->primitive_type_table.put(&entry->name, entry); } switch (g->zig_target->arch) { From 0f3bd2afa320252d4f0ece627917feaade734064 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Wed, 19 Jan 2022 22:39:14 +0200 Subject: [PATCH 4/8] stage1: handle compiler-rt calls on vectors of f80 --- src/stage1/codegen.cpp | 63 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 54 insertions(+), 9 deletions(-) diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp index f8e12e1d78..548abebc6f 100644 --- a/src/stage1/codegen.cpp +++ b/src/stage1/codegen.cpp @@ -3237,10 +3237,10 @@ static LLVMValueRef get_soft_f80_bin_op_func(CodeGen *g, const char *name, int p static LLVMValueRef ir_render_soft_f80_bin_op(CodeGen *g, Stage1Air *executable, Stage1AirInstBinOp *bin_op_instruction) { - // TODO support vectors IrBinOp op_id = bin_op_instruction->op_id; Stage1AirInst *op1 = bin_op_instruction->op1; Stage1AirInst *op2 = bin_op_instruction->op2; + uint32_t vector_len = op1->value->type->id == ZigTypeIdVector ? 
op1->value->type->data.vector.len : 0; LLVMValueRef op1_value = ir_llvm_value(g, op1); LLVMValueRef op2_value = ir_llvm_value(g, op2); @@ -3334,21 +3334,63 @@ static LLVMValueRef ir_render_soft_f80_bin_op(CodeGen *g, Stage1Air *executable, LLVMValueRef func_ref = get_soft_f80_bin_op_func(g, func_name, param_count, return_type); - LLVMValueRef params[2] = {op1_value, op2_value}; - LLVMValueRef result = LLVMBuildCall(g->builder, func_ref, params, param_count, ""); + LLVMValueRef result; + if (vector_len == 0) { + LLVMValueRef params[2] = {op1_value, op2_value}; + result = LLVMBuildCall(g->builder, func_ref, params, param_count, ""); + } else { + result = build_alloca(g, op1->value->type, "", 0); + } + + LLVMTypeRef usize_ref = g->builtin_types.entry_usize->llvm_type; + for (uint32_t i = 0; i < vector_len; i++) { + LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false); + LLVMValueRef params[2] = { + LLVMBuildExtractElement(g->builder, op1_value, index_value, ""), + LLVMBuildExtractElement(g->builder, op2_value, index_value, ""), + }; + LLVMValueRef call_result = LLVMBuildCall(g->builder, func_ref, params, param_count, ""); + LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, result, ""), + call_result, index_value, ""); + } if (div_exact_safety_check) { // Safety check: a / b == floor(a / b) - func_ref = get_soft_f80_bin_op_func(g, "__floorx", 1, return_type); - LLVMValueRef floored = LLVMBuildCall(g->builder, func_ref, &result, 1, ""); + LLVMValueRef floor_func = get_soft_f80_bin_op_func(g, "__floorx", 1, return_type); + LLVMValueRef eq_func = get_soft_f80_bin_op_func(g, "__eqxf2", 2, g->builtin_types.entry_i32->llvm_type); + LLVMValueRef ok_bit; + if (vector_len == 0) { + LLVMValueRef floored = LLVMBuildCall(g->builder, floor_func, &result, 1, ""); + + LLVMValueRef params[2] = {result, floored}; + ok_bit = LLVMBuildCall(g->builder, eq_func, params, 2, ""); + } else { + ZigType *bool_vec_ty = get_vector_type(g, vector_len, 
g->builtin_types.entry_bool); + ok_bit = build_alloca(g, bool_vec_ty, "", 0); + } + + for (uint32_t i = 0; i < vector_len; i++) { + LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false); + LLVMValueRef div_res = LLVMBuildExtractElement(g->builder, + LLVMBuildLoad(g->builder, result, ""), index_value, ""); + + LLVMValueRef params[2] = { + div_res, + LLVMBuildCall(g->builder, floor_func, &div_res, 1, ""), + }; + LLVMValueRef cmp_res = LLVMBuildCall(g->builder, eq_func, params, 2, ""); + cmp_res = LLVMBuildTrunc(g->builder, cmp_res, g->builtin_types.entry_bool->llvm_type, ""); + LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, ok_bit, ""), + cmp_res, index_value, ""); + } + + if (vector_len != 0) { + ok_bit = ZigLLVMBuildAndReduce(g->builder, LLVMBuildLoad(g->builder, ok_bit, "")); + } LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivExactOk"); LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivExactFail"); - LLVMValueRef params[2] = {result, floored}; - func_ref = get_soft_f80_bin_op_func(g, "__eqxf2", 2, g->builtin_types.entry_i32->llvm_type); - LLVMValueRef ok_bit = LLVMBuildCall(g->builder, func_ref, params, 2, ""); - LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block); LLVMPositionBuilderAtEnd(g->builder, fail_block); @@ -3357,6 +3399,9 @@ static LLVMValueRef ir_render_soft_f80_bin_op(CodeGen *g, Stage1Air *executable, LLVMPositionBuilderAtEnd(g->builder, ok_block); } + if (vector_len != 0) { + result = LLVMBuildLoad(g->builder, result, ""); + } return result; } From f8b204bb189125e85a7f14f08c4bab60a462e76e Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Wed, 19 Jan 2022 23:05:35 +0200 Subject: [PATCH 5/8] stage1: call compiler-rt for math builtins on f80 on unsupported targets --- src/stage1/codegen.cpp | 135 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 135 insertions(+) diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp index 548abebc6f..2051d39a7c 100644 --- 
a/src/stage1/codegen.cpp +++ b/src/stage1/codegen.cpp @@ -6888,13 +6888,148 @@ static LLVMValueRef ir_render_atomic_store(CodeGen *g, Stage1Air *executable, return nullptr; } +static LLVMValueRef ir_render_soft_f80_float_op(CodeGen *g, Stage1Air *executable, Stage1AirInstFloatOp *instruction) { + ZigType *op_type = instruction->operand->value->type; + uint32_t vector_len = op_type->id == ZigTypeIdVector ? op_type->data.vector.len : 0; + + const char *func_name; + switch (instruction->fn_id) { + case BuiltinFnIdSqrt: + func_name = "__sqrt"; + break; + case BuiltinFnIdSin: + func_name = "__sinx"; + break; + case BuiltinFnIdCos: + func_name = "__cosx"; + break; + case BuiltinFnIdExp: + func_name = "__expx"; + break; + case BuiltinFnIdExp2: + func_name = "__exp2x"; + break; + case BuiltinFnIdLog: + func_name = "__logx"; + break; + case BuiltinFnIdLog2: + func_name = "__log2x"; + break; + case BuiltinFnIdLog10: + func_name = "__log10x"; + break; + case BuiltinFnIdFabs: + func_name = "__fabsx"; + break; + case BuiltinFnIdFloor: + func_name = "__floorx"; + break; + case BuiltinFnIdCeil: + func_name = "__ceilx"; + break; + case BuiltinFnIdTrunc: + func_name = "__truncx"; + break; + case BuiltinFnIdNearbyInt: + func_name = "__nearbyintx"; + break; + case BuiltinFnIdRound: + func_name = "__roundx"; + break; + default: + zig_unreachable(); + } + + + LLVMValueRef func_ref = LLVMGetNamedFunction(g->module, func_name); + if (func_ref == nullptr) { + LLVMTypeRef f80_ref = g->builtin_types.entry_f80->llvm_type; + LLVMTypeRef fn_type = LLVMFunctionType(f80_ref, &f80_ref, 1, false); + func_ref = LLVMAddFunction(g->module, func_name, fn_type); + } + + LLVMValueRef operand = ir_llvm_value(g, instruction->operand); + LLVMValueRef result; + if (vector_len == 0) { + result = LLVMBuildCall(g->builder, func_ref, &operand, 1, ""); + } else { + result = build_alloca(g, instruction->operand->value->type, "", 0); + } + + LLVMTypeRef usize_ref = g->builtin_types.entry_usize->llvm_type; + for 
(uint32_t i = 0; i < vector_len; i++) { + LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false); + LLVMValueRef param = LLVMBuildExtractElement(g->builder, operand, index_value, ""); + LLVMValueRef call_result = LLVMBuildCall(g->builder, func_ref, ¶m, 1, ""); + LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, result, ""), + call_result, index_value, ""); + } + if (vector_len != 0) { + result = LLVMBuildLoad(g->builder, result, ""); + } + return result; +} + static LLVMValueRef ir_render_float_op(CodeGen *g, Stage1Air *executable, Stage1AirInstFloatOp *instruction) { + ZigType *op_type = instruction->operand->value->type; + op_type = op_type->id == ZigTypeIdVector ? op_type->data.vector.elem_type : op_type; + if (op_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) { + return ir_render_soft_f80_float_op(g, executable, instruction); + } LLVMValueRef operand = ir_llvm_value(g, instruction->operand); LLVMValueRef fn_val = get_float_fn(g, instruction->base.value->type, ZigLLVMFnIdFloatOp, instruction->fn_id); return LLVMBuildCall(g->builder, fn_val, &operand, 1, ""); } +static LLVMValueRef ir_render_soft_f80_mul_add(CodeGen *g, Stage1Air *executable, Stage1AirInstMulAdd *instruction) { + ZigType *op_type = instruction->op1->value->type; + uint32_t vector_len = op_type->id == ZigTypeIdVector ? 
op_type->data.vector.len : 0; + + const char *func_name = "__fmax"; + LLVMValueRef func_ref = LLVMGetNamedFunction(g->module, func_name); + if (func_ref == nullptr) { + LLVMTypeRef f80_ref = g->builtin_types.entry_f80->llvm_type; + LLVMTypeRef params[3] = { f80_ref, f80_ref, f80_ref }; + LLVMTypeRef fn_type = LLVMFunctionType(f80_ref, params, 3, false); + func_ref = LLVMAddFunction(g->module, func_name, fn_type); + } + + LLVMValueRef op1 = ir_llvm_value(g, instruction->op1); + LLVMValueRef op2 = ir_llvm_value(g, instruction->op2); + LLVMValueRef op3 = ir_llvm_value(g, instruction->op3); + LLVMValueRef result; + if (vector_len == 0) { + LLVMValueRef params[3] = { op1, op2, op3 }; + result = LLVMBuildCall(g->builder, func_ref, params, 3, ""); + } else { + result = build_alloca(g, instruction->op1->value->type, "", 0); + } + + LLVMTypeRef usize_ref = g->builtin_types.entry_usize->llvm_type; + for (uint32_t i = 0; i < vector_len; i++) { + LLVMValueRef index_value = LLVMConstInt(usize_ref, i, false); + + LLVMValueRef params[3] = { + LLVMBuildExtractElement(g->builder, op1, index_value, ""), + LLVMBuildExtractElement(g->builder, op2, index_value, ""), + LLVMBuildExtractElement(g->builder, op3, index_value, ""), + }; + LLVMValueRef call_result = LLVMBuildCall(g->builder, func_ref, params, 3, ""); + LLVMBuildInsertElement(g->builder, LLVMBuildLoad(g->builder, result, ""), + call_result, index_value, ""); + } + if (vector_len != 0) { + result = LLVMBuildLoad(g->builder, result, ""); + } + return result; +} + static LLVMValueRef ir_render_mul_add(CodeGen *g, Stage1Air *executable, Stage1AirInstMulAdd *instruction) { + ZigType *op_type = instruction->op1->value->type; + op_type = op_type->id == ZigTypeIdVector ? 
op_type->data.vector.elem_type : op_type; + if (op_type == g->builtin_types.entry_f80 && !target_has_f80(g->zig_target)) { + return ir_render_soft_f80_mul_add(g, executable, instruction); + } LLVMValueRef op1 = ir_llvm_value(g, instruction->op1); LLVMValueRef op2 = ir_llvm_value(g, instruction->op2); LLVMValueRef op3 = ir_llvm_value(g, instruction->op3); From 4411f9c019f8f41baeacd39be01af9f961df4e4e Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Thu, 20 Jan 2022 10:29:24 +0200 Subject: [PATCH 6/8] add behavior tests for f80 --- src/stage1/softfloat.hpp | 2 +- test/behavior/floatop_stage1.zig | 81 ++++++++++++++++++++++++++++++++ test/behavior/math_stage1.zig | 19 ++++++++ test/behavior/muladd.zig | 6 +++ test/behavior/type_stage1.zig | 3 +- test/behavior/widening.zig | 5 ++ 6 files changed, 114 insertions(+), 2 deletions(-) diff --git a/src/stage1/softfloat.hpp b/src/stage1/softfloat.hpp index ec0315d684..a0d270d55f 100644 --- a/src/stage1/softfloat.hpp +++ b/src/stage1/softfloat.hpp @@ -57,7 +57,7 @@ static inline bool zig_f128_isNaN(float128_t *aPtr) { } static inline bool zig_extF80_isNaN(extFloat80_t *aPtr) { - return aPtr->signExp & 0x7FFF && aPtr->signif; + return (aPtr->signExp & 0x7FFF) == 0x7FFF && aPtr->signif & UINT64_C(0x7FFFFFFFFFFFFFFF); } #endif diff --git a/test/behavior/floatop_stage1.zig b/test/behavior/floatop_stage1.zig index b1b7eb2b92..303288a118 100644 --- a/test/behavior/floatop_stage1.zig +++ b/test/behavior/floatop_stage1.zig @@ -4,6 +4,7 @@ const math = std.math; const pi = std.math.pi; const e = std.math.e; const Vector = std.meta.Vector; +const has_f80_rt = @import("builtin").cpu.arch == .x86_64; const epsilon = 0.000001; @@ -27,6 +28,10 @@ fn testSqrt() !void { var a: f64 = 25; try expect(@sqrt(a) == 5); } + if (has_f80_rt) { + var a: f80 = 25; + try expect(@sqrt(a) == 5); + } { const a: comptime_float = 25.0; try expect(@sqrt(a) == 5.0); @@ -86,6 +91,10 @@ fn testSin() !void { var a: f64 = 0; try expect(@sin(a) == 0); } + // { + 
// var a: f80 = 0; + // try expect(@sin(a) == 0); + // } { var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 }; var result = @sin(v); @@ -116,6 +125,10 @@ fn testCos() !void { var a: f64 = 0; try expect(@cos(a) == 1); } + // { + // var a: f80 = 0; + // try expect(@cos(a) == 1); + // } { var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 }; var result = @cos(v); @@ -146,6 +159,10 @@ fn testExp() !void { var a: f64 = 0; try expect(@exp(a) == 1); } + // { + // var a: f80 = 0; + // try expect(@exp(a) == 1); + // } { var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 }; var result = @exp(v); @@ -176,6 +193,10 @@ fn testExp2() !void { var a: f64 = 2; try expect(@exp2(a) == 4); } + // { + // var a: f80 = 2; + // try expect(@exp2(a) == 4); + // } { var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 }; var result = @exp2(v); @@ -208,6 +229,10 @@ fn testLog() !void { var a: f64 = e; try expect(@log(a) == 1 or @log(a) == @bitCast(f64, @as(u64, 0x3ff0000000000000))); } + // { + // var a: f80 = e; + // try expect(@log(a) == 1); + // } { var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 }; var result = @log(v); @@ -238,6 +263,10 @@ fn testLog2() !void { var a: f64 = 4; try expect(@log2(a) == 2); } + // { + // var a: f80 = 4; + // try expect(@log2(a) == 2); + // } { var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 }; var result = @log2(v); @@ -268,6 +297,10 @@ fn testLog10() !void { var a: f64 = 1000; try expect(@log10(a) == 3); } + // { + // var a: f80 = 1000; + // try expect(@log10(a) == 3); + // } { var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 }; var result = @log10(v); @@ -304,6 +337,12 @@ fn testFabs() !void { try expect(@fabs(a) == 2.5); try expect(@fabs(b) == 2.5); } + // { + // var a: f80 = -2.5; + // var b: f80 = 2.5; + // try expect(@fabs(a) == 2.5); + // try expect(@fabs(b) == 2.5); + // } { var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 }; var result = @fabs(v); @@ -334,6 +373,10 @@ fn testFloor() !void { var a: f64 = 3.5; try expect(@floor(a) == 3); } 
+ // { + // var a: f80 = 3.5; + // try expect(@floor(a) == 3); + // } { var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 }; var result = @floor(v); @@ -364,6 +407,10 @@ fn testCeil() !void { var a: f64 = 3.5; try expect(@ceil(a) == 4); } + // { + // var a: f80 = 3.5; + // try expect(@ceil(a) == 4); + // } { var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 }; var result = @ceil(v); @@ -394,6 +441,10 @@ fn testTrunc() !void { var a: f64 = -3.5; try expect(@trunc(a) == -3); } + // { + // var a: f80 = -3.5; + // try expect(@trunc(a) == -3); + // } { var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 }; var result = @trunc(v); @@ -403,3 +454,33 @@ fn testTrunc() !void { try expect(math.approxEqAbs(f32, @trunc(@as(f32, -0.4)), result[3], epsilon)); } } + +test "floating point comparisons" { + if (has_f80_rt) try testFloatComparisons(); + comptime try testFloatComparisons(); +} + +fn testFloatComparisons() !void { + inline for ([_]type{ f16, f32, f64, f80, f128 }) |ty| { + // No decimal part + { + const x: ty = 1.0; + try expect(x == 1); + try expect(x != 0); + try expect(x > 0); + try expect(x < 2); + try expect(x >= 1); + try expect(x <= 1); + } + // Non-zero decimal part + { + const x: ty = 1.5; + try expect(x != 1); + try expect(x != 2); + try expect(x > 1); + try expect(x < 2); + try expect(x >= 1); + try expect(x <= 2); + } + } +} diff --git a/test/behavior/math_stage1.zig b/test/behavior/math_stage1.zig index 72824348dd..298f2ffc85 100644 --- a/test/behavior/math_stage1.zig +++ b/test/behavior/math_stage1.zig @@ -5,6 +5,7 @@ const expectEqualSlices = std.testing.expectEqualSlices; const maxInt = std.math.maxInt; const minInt = std.math.minInt; const mem = std.mem; +const has_f80_rt = @import("builtin").cpu.arch == .x86_64; test "allow signed integer division/remainder when values are comptime known and positive or exact" { try expect(5 / 3 == 1); @@ -194,6 +195,8 @@ fn testSqrt(comptime T: type, x: T) !void { test "@fabs" { try testFabs(f128, 12.0); 
comptime try testFabs(f128, 12.0); + if (has_f80_rt) try testFabs(f80, 12.0); + // comptime try testFabs(f80, 12.0); try testFabs(f64, 12.0); comptime try testFabs(f64, 12.0); try testFabs(f32, 12.0); @@ -217,6 +220,8 @@ test "@floor" { // FIXME: Generates a floorl function call // testFloor(f128, 12.0); comptime try testFloor(f128, 12.0); + // try testFloor(f80, 12.0); + comptime try testFloor(f80, 12.0); try testFloor(f64, 12.0); comptime try testFloor(f64, 12.0); try testFloor(f32, 12.0); @@ -240,6 +245,8 @@ test "@ceil" { // FIXME: Generates a ceill function call //testCeil(f128, 12.0); comptime try testCeil(f128, 12.0); + // try testCeil(f80, 12.0); + comptime try testCeil(f80, 12.0); try testCeil(f64, 12.0); comptime try testCeil(f64, 12.0); try testCeil(f32, 12.0); @@ -263,6 +270,14 @@ test "@trunc" { // FIXME: Generates a truncl function call //testTrunc(f128, 12.0); comptime try testTrunc(f128, 12.0); + // try testTrunc(f80, 12.0); + // comptime try testTrunc(f80, 12.0); + comptime { + const x: f80 = 12.0; + const y = x + 0.8; + const z = @trunc(y); + try expectEqual(x, z); + } try testTrunc(f64, 12.0); comptime try testTrunc(f64, 12.0); try testTrunc(f32, 12.0); @@ -294,6 +309,8 @@ test "@round" { // FIXME: Generates a roundl function call //testRound(f128, 12.0); comptime try testRound(f128, 12.0); + // try testRound(f80, 12.0); + comptime try testRound(f80, 12.0); try testRound(f64, 12.0); comptime try testRound(f64, 12.0); try testRound(f32, 12.0); @@ -333,10 +350,12 @@ test "NaN comparison" { try testNanEqNan(f32); try testNanEqNan(f64); try testNanEqNan(f128); + if (has_f80_rt) try testNanEqNan(f80); comptime try testNanEqNan(f16); comptime try testNanEqNan(f32); comptime try testNanEqNan(f64); comptime try testNanEqNan(f128); + // comptime try testNanEqNan(f80); } fn testNanEqNan(comptime F: type) !void { diff --git a/test/behavior/muladd.zig b/test/behavior/muladd.zig index cff7b5e2ad..46e938e1a9 100644 --- a/test/behavior/muladd.zig +++ 
b/test/behavior/muladd.zig @@ -25,6 +25,12 @@ fn testMulAdd() !void { var c: f64 = 6.25; try expect(@mulAdd(f64, a, b, c) == 20); } + // { + // var a: f16 = 5.5; + // var b: f80 = 2.5; + // var c: f80 = 6.25; + // try expect(@mulAdd(f80, a, b, c) == 20); + // } if (builtin.os.tag == .macos and builtin.cpu.arch == .aarch64) { // https://github.com/ziglang/zig/issues/9900 return error.SkipZigTest; diff --git a/test/behavior/type_stage1.zig b/test/behavior/type_stage1.zig index a87cba08d4..911e6963c9 100644 --- a/test/behavior/type_stage1.zig +++ b/test/behavior/type_stage1.zig @@ -13,8 +13,9 @@ test "Type.Float" { try testing.expect(f16 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 16 } })); try testing.expect(f32 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 32 } })); try testing.expect(f64 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 64 } })); + try testing.expect(f80 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 80 } })); try testing.expect(f128 == @Type(TypeInfo{ .Float = TypeInfo.Float{ .bits = 128 } })); - try testTypes(&[_]type{ f16, f32, f64, f128 }); + try testTypes(&[_]type{ f16, f32, f64, f80, f128 }); } test "Type.Array" { diff --git a/test/behavior/widening.zig b/test/behavior/widening.zig index afca1ae143..035d4fa4f9 100644 --- a/test/behavior/widening.zig +++ b/test/behavior/widening.zig @@ -2,6 +2,7 @@ const std = @import("std"); const expect = std.testing.expect; const mem = std.mem; const builtin = @import("builtin"); +const has_f80_rt = @import("builtin").cpu.arch == .x86_64; test "integer widening" { var a: u8 = 250; @@ -27,6 +28,10 @@ test "float widening" { try expect(a == b); try expect(b == c); try expect(c == d); + if (has_f80_rt) { + var e: f80 = c; + try expect(c == e); + } } test "float widening f16 to f128" { From 3c827be876a39cbe199e5cd7c6e90edef3a090b5 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 27 Jan 2022 21:36:43 -0700 Subject: [PATCH 7/8] fix invalid const bitcast of f80 LLVM bitcast wants 
integers that match the number of bits. So the const bitcast has to use an i80, not an i128. This commit makes the behavior tests fail for me, so it seems I did not correctly construct the type. But it gets rid of the LLVM segfault. I noticed that the strategy of memcpy the buf worked if I simply did an LLVMConstTrunc() on the i128 to make it into an i80 before the LLVMConstBitCast(). But is that correct in the face of different endianness? I'm not sure. --- src/stage1/codegen.cpp | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp index 2051d39a7c..58670cf822 100644 --- a/src/stage1/codegen.cpp +++ b/src/stage1/codegen.cpp @@ -8094,10 +8094,18 @@ static LLVMValueRef gen_const_val(CodeGen *g, ZigValue *const_val, const char *n case 64: return LLVMConstReal(get_llvm_type(g, type_entry), const_val->data.x_f64); case 80: { - uint64_t buf[2]; - memcpy(&buf, &const_val->data.x_f80, 16); - LLVMValueRef as_int = LLVMConstIntOfArbitraryPrecision(LLVMInt128Type(), 2, buf); - return LLVMConstBitCast(as_int, get_llvm_type(g, type_entry)); + LLVMTypeRef llvm_i80 = LLVMIntType(80); + LLVMValueRef x; + if (g->is_big_endian) { + x = LLVMConstInt(llvm_i80, const_val->data.x_f80.signExp, false); + x = LLVMConstShl(x, LLVMConstInt(llvm_i80, 64, false)); + x = LLVMConstOr(x, LLVMConstInt(llvm_i80, const_val->data.x_f80.signif, false)); + } else { + x = LLVMConstInt(llvm_i80, const_val->data.x_f80.signif, false); + x = LLVMConstShl(x, LLVMConstInt(llvm_i80, 16, false)); + x = LLVMConstOr(x, LLVMConstInt(llvm_i80, const_val->data.x_f80.signExp, false)); + } + return LLVMConstBitCast(x, get_llvm_type(g, type_entry)); } case 128: { From a0a71709bc2104c708f045fbb42c6247aff136ac Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 28 Jan 2022 11:40:19 -0700 Subject: [PATCH 8/8] stage1: lower const f80 a different way this way passes the behavior tests --- src/stage1/codegen.cpp | 43 
+++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp index 58670cf822..b97f009d62 100644 --- a/src/stage1/codegen.cpp +++ b/src/stage1/codegen.cpp @@ -8094,33 +8094,32 @@ static LLVMValueRef gen_const_val(CodeGen *g, ZigValue *const_val, const char *n case 64: return LLVMConstReal(get_llvm_type(g, type_entry), const_val->data.x_f64); case 80: { - LLVMTypeRef llvm_i80 = LLVMIntType(80); - LLVMValueRef x; - if (g->is_big_endian) { - x = LLVMConstInt(llvm_i80, const_val->data.x_f80.signExp, false); - x = LLVMConstShl(x, LLVMConstInt(llvm_i80, 64, false)); - x = LLVMConstOr(x, LLVMConstInt(llvm_i80, const_val->data.x_f80.signif, false)); - } else { - x = LLVMConstInt(llvm_i80, const_val->data.x_f80.signif, false); - x = LLVMConstShl(x, LLVMConstInt(llvm_i80, 16, false)); - x = LLVMConstOr(x, LLVMConstInt(llvm_i80, const_val->data.x_f80.signExp, false)); - } - return LLVMConstBitCast(x, get_llvm_type(g, type_entry)); + uint64_t buf[2]; + memcpy(&buf, &const_val->data.x_f80, 16); +#if ZIG_BYTE_ORDER == ZIG_BIG_ENDIAN + uint64_t tmp = buf[0]; + buf[0] = buf[1]; + buf[1] = tmp; +#endif + LLVMValueRef as_i128 = LLVMConstIntOfArbitraryPrecision(LLVMInt128Type(), 2, buf); + LLVMValueRef as_int = LLVMConstTrunc(as_i128, LLVMIntType(80)); + return LLVMConstBitCast(as_int, get_llvm_type(g, type_entry)); } case 128: { uint64_t buf[2]; - // LLVM seems to require that the lower half of the f128 be placed first in the buffer. - #if defined(ZIG_BYTE_ORDER) && ZIG_BYTE_ORDER == ZIG_LITTLE_ENDIAN - buf[0] = const_val->data.x_f128.v[0]; - buf[1] = const_val->data.x_f128.v[1]; - #elif defined(ZIG_BYTE_ORDER) && ZIG_BYTE_ORDER == ZIG_BIG_ENDIAN - buf[0] = const_val->data.x_f128.v[1]; - buf[1] = const_val->data.x_f128.v[0]; - #else - #error Unsupported endian - #endif + // LLVM seems to require that the lower half of the f128 be + // placed first in the buffer. 
+#if ZIG_BYTE_ORDER == ZIG_LITTLE_ENDIAN + buf[0] = const_val->data.x_f128.v[0]; + buf[1] = const_val->data.x_f128.v[1]; +#elif ZIG_BYTE_ORDER == ZIG_BIG_ENDIAN + buf[0] = const_val->data.x_f128.v[1]; + buf[1] = const_val->data.x_f128.v[0]; +#else +#error Unsupported endian +#endif LLVMValueRef as_int = LLVMConstIntOfArbitraryPrecision(LLVMInt128Type(), 2, buf); return LLVMConstBitCast(as_int, get_llvm_type(g, type_entry));