diff --git a/doc/langref.html.in b/doc/langref.html.in index 3be3131843..e16a95295c 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -8076,94 +8076,146 @@ test "vector @splat" { {#header_close#} {#header_open|@sqrt#} -
{#syntax#}@sqrt(comptime T: type, value: T) T{#endsyntax#}
+
{#syntax#}@sqrt(value: var) @TypeOf(value){#endsyntax#}

Performs the square root of a floating point number. Uses a dedicated hardware instruction - when available. Supports {#syntax#}f16{#endsyntax#}, {#syntax#}f32{#endsyntax#}, {#syntax#}f64{#endsyntax#}, and {#syntax#}f128{#endsyntax#}, as well as vectors. + when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

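Not part of this patch: a minimal usage sketch of the single-argument form described above, mirroring the updated behavior tests (test name invented; values chosen so the results are exact):

const std = @import("std");
const expect = std.testing.expect;

test "@sqrt usage sketch" {
    // Scalar floats: the result type is the operand type.
    var a: f32 = 4.0;
    expect(@sqrt(a) == 2.0);

    // f16 is supported as well.
    var h: f16 = 4.0;
    expect(@sqrt(h) == 2.0);

    // Vectors of floats: the operation is applied element-wise.
    var v: @Vector(4, f32) = [_]f32{ 1.0, 4.0, 9.0, 16.0 };
    var r = @sqrt(v);
    expect(r[0] == 1.0 and r[1] == 2.0 and r[2] == 3.0 and r[3] == 4.0);
}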
{#header_close#} {#header_open|@sin#} -
{#syntax#}@sin(comptime T: type, value: T) T{#endsyntax#}
+
{#syntax#}@sin(value: var) @TypeOf(value){#endsyntax#}

Sine trigonometric function on a floating point number. Uses a dedicated hardware instruction - when available. Currently supports {#syntax#}f32{#endsyntax#} and {#syntax#}f64{#endsyntax#}. + when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

{#header_close#} {#header_open|@cos#} -
{#syntax#}@cos(comptime T: type, value: T) T{#endsyntax#}
+
{#syntax#}@cos(value: var) @TypeOf(value){#endsyntax#}

Cosine trigonometric function on a floating point number. Uses a dedicated hardware instruction - when available. Currently supports {#syntax#}f32{#endsyntax#} and {#syntax#}f64{#endsyntax#}. + when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

{#header_close#} {#header_open|@exp#} -
{#syntax#}@exp(comptime T: type, value: T) T{#endsyntax#}
+
{#syntax#}@exp(value: var) @TypeOf(value){#endsyntax#}

Base-e exponential function on a floating point number. Uses a dedicated hardware instruction - when available. Currently supports {#syntax#}f32{#endsyntax#} and {#syntax#}f64{#endsyntax#}. + when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

{#header_close#} {#header_open|@exp2#} -
{#syntax#}@exp2(comptime T: type, value: T) T{#endsyntax#}
+
{#syntax#}@exp2(value: var) @TypeOf(value){#endsyntax#}

Base-2 exponential function on a floating point number. Uses a dedicated hardware instruction - when available. Currently supports {#syntax#}f32{#endsyntax#} and {#syntax#}f64{#endsyntax#}. + when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

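The @sin, @cos, @exp, and @exp2 sections above share the same single-argument calling convention; a combined sketch (not part of this patch, test name invented), using the same identities as the updated behavior tests:

const std = @import("std");
const expect = std.testing.expect;

test "trig and exponential builtins sketch" {
    var zero: f32 = 0.0;
    expect(@sin(zero) == 0.0); // sin(0) = 0
    expect(@cos(zero) == 1.0); // cos(0) = 1
    expect(@exp(zero) == 1.0); // e^0 = 1

    var two: f64 = 2.0;
    expect(@exp2(two) == 4.0); // 2^2 = 4
}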
{#header_close#} - {#header_open|@ln#} -
{#syntax#}@ln(comptime T: type, value: T) T{#endsyntax#}
+ {#header_open|@log#} +
{#syntax#}@log(value: var) @TypeOf(value){#endsyntax#}

Returns the natural logarithm of a floating point number. Uses a dedicated hardware instruction - when available. Currently supports {#syntax#}f32{#endsyntax#} and {#syntax#}f64{#endsyntax#}. + when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

{#header_close#} {#header_open|@log2#} -
{#syntax#}@log2(comptime T: type, value: T) T{#endsyntax#}
+
{#syntax#}@log2(value: var) @TypeOf(value){#endsyntax#}

Returns the logarithm to the base 2 of a floating point number. Uses a dedicated hardware instruction - when available. Currently supports {#syntax#}f32{#endsyntax#} and {#syntax#}f64{#endsyntax#}. + when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

{#header_close#} {#header_open|@log10#} -
{#syntax#}@log10(comptime T: type, value: T) T{#endsyntax#}
+
{#syntax#}@log10(value: var) @TypeOf(value){#endsyntax#}

Returns the logarithm to the base 10 of a floating point number. Uses a dedicated hardware instruction - when available. Currently supports {#syntax#}f32{#endsyntax#} and {#syntax#}f64{#endsyntax#}. + when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

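Since this patch renames @ln to @log (matching the LLVM intrinsic name), here is a brief sketch of the three logarithm builtins together (not part of the patch; test name invented, values picked so the results are exact):

const std = @import("std");
const expect = std.testing.expect;

test "logarithm builtins sketch" {
    // @log replaces the former @ln builtin; it is the natural (base-e) logarithm.
    var one: f64 = 1.0;
    expect(@log(one) == 0.0); // ln(1) = 0

    var four: f32 = 4.0;
    expect(@log2(four) == 2.0); // log2(4) = 2

    var thousand: f64 = 1000.0;
    expect(@log10(thousand) == 3.0); // log10(1000) = 3
}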
{#header_close#} {#header_open|@fabs#} -
{#syntax#}@fabs(comptime T: type, value: T) T{#endsyntax#}
+
{#syntax#}@fabs(value: var) @TypeOf(value){#endsyntax#}

Returns the absolute value of a floating point number. Uses a dedicated hardware instruction - when available. Currently supports {#syntax#}f32{#endsyntax#} and {#syntax#}f64{#endsyntax#}. + when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

{#header_close#} {#header_open|@floor#} -
{#syntax#}@floor(comptime T: type, value: T) T{#endsyntax#}
+
{#syntax#}@floor(value: var) @TypeOf(value){#endsyntax#}

- Returns the largest integral value not greater than the given floating point number. Uses a dedicated hardware instruction - when available. Currently supports {#syntax#}f32{#endsyntax#} and {#syntax#}f64{#endsyntax#}. + Returns the largest integral value not greater than the given floating point number. + Uses a dedicated hardware instruction when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

{#header_close#} {#header_open|@ceil#} -
{#syntax#}@ceil(comptime T: type, value: T) T{#endsyntax#}
+
{#syntax#}@ceil(value: var) @TypeOf(value){#endsyntax#}

- Returns the largest integral value not less than the given floating point number. Uses a dedicated hardware instruction - when available. Currently supports {#syntax#}f32{#endsyntax#} and {#syntax#}f64{#endsyntax#}. + Returns the smallest integral value not less than the given floating point number. + Uses a dedicated hardware instruction when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

{#header_close#} {#header_open|@trunc#} -
{#syntax#}@trunc(comptime T: type, value: T) T{#endsyntax#}
+
{#syntax#}@trunc(value: var) @TypeOf(value){#endsyntax#}

- Rounds the given floating point number to an integer, towards zero. Uses a dedicated hardware instruction - when available. Currently supports {#syntax#}f32{#endsyntax#} and {#syntax#}f64{#endsyntax#}. + Rounds the given floating point number to an integer, towards zero. + Uses a dedicated hardware instruction when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

{#header_close#} {#header_open|@round#} -
{#syntax#}@round(comptime T: type, value: T) T{#endsyntax#}
+
{#syntax#}@round(value: var) @TypeOf(value){#endsyntax#}

Rounds the given floating point number to the nearest integer; halfway cases are rounded away from zero. Uses a dedicated hardware instruction - when available. Currently supports {#syntax#}f32{#endsyntax#} and {#syntax#}f64{#endsyntax#}. + when available. +

+

+ Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that + some float operations are not yet implemented for all float types.

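A combined sketch contrasting @fabs and the four rounding builtins above (not part of this patch, test name invented); a single negative operand makes the differences visible:

const std = @import("std");
const expect = std.testing.expect;

test "rounding builtins sketch" {
    var x: f64 = -3.5;
    expect(@fabs(x) == 3.5);
    expect(@floor(x) == -4.0); // toward negative infinity
    expect(@ceil(x) == -3.0);  // toward positive infinity
    expect(@trunc(x) == -3.0); // toward zero
    expect(@round(x) == -4.0); // nearest, halfway away from zero
}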
{#header_close#} diff --git a/lib/std/math/sqrt.zig b/lib/std/math/sqrt.zig index 17c7003af1..493e2cacf6 100644 --- a/lib/std/math/sqrt.zig +++ b/lib/std/math/sqrt.zig @@ -12,12 +12,12 @@ const maxInt = std.math.maxInt; /// - sqrt(+-0) = +-0 /// - sqrt(x) = nan if x < 0 /// - sqrt(nan) = nan -pub fn sqrt(x: var) (if (@typeId(@TypeOf(x)) == TypeId.Int) @IntType(false, @TypeOf(x).bit_count / 2) else @TypeOf(x)) { +/// TODO Decide if all this logic should be implemented directly in the @sqrt bultin function. +pub fn sqrt(x: var) Sqrt(@TypeOf(x)) { const T = @TypeOf(x); - switch (@typeId(T)) { - TypeId.ComptimeFloat => return @as(T, @sqrt(f64, x)), // TODO upgrade to f128 - TypeId.Float => return @sqrt(T, x), - TypeId.ComptimeInt => comptime { + switch (@typeInfo(T)) { + .Float, .ComptimeFloat => return @sqrt(x), + .ComptimeInt => comptime { if (x > maxInt(u128)) { @compileError("sqrt not implemented for comptime_int greater than 128 bits"); } @@ -26,83 +26,11 @@ pub fn sqrt(x: var) (if (@typeId(@TypeOf(x)) == TypeId.Int) @IntType(false, @Typ } return @as(T, sqrt_int(u128, x)); }, - TypeId.Int => return sqrt_int(T, x), + .Int => return sqrt_int(T, x), else => @compileError("sqrt not implemented for " ++ @typeName(T)), } } -test "math.sqrt" { - expect(sqrt(@as(f16, 0.0)) == @sqrt(f16, 0.0)); - expect(sqrt(@as(f32, 0.0)) == @sqrt(f32, 0.0)); - expect(sqrt(@as(f64, 0.0)) == @sqrt(f64, 0.0)); -} - -test "math.sqrt16" { - const epsilon = 0.000001; - - expect(@sqrt(f16, 0.0) == 0.0); - expect(math.approxEq(f16, @sqrt(f16, 2.0), 1.414214, epsilon)); - expect(math.approxEq(f16, @sqrt(f16, 3.6), 1.897367, epsilon)); - expect(@sqrt(f16, 4.0) == 2.0); - expect(math.approxEq(f16, @sqrt(f16, 7.539840), 2.745877, epsilon)); - expect(math.approxEq(f16, @sqrt(f16, 19.230934), 4.385309, epsilon)); - expect(@sqrt(f16, 64.0) == 8.0); - expect(math.approxEq(f16, @sqrt(f16, 64.1), 8.006248, epsilon)); - expect(math.approxEq(f16, @sqrt(f16, 8942.230469), 94.563370, epsilon)); -} - -test "math.sqrt32" { - const epsilon = 0.000001; - - expect(@sqrt(f32, 0.0) == 0.0); - expect(math.approxEq(f32, @sqrt(f32, 2.0), 1.414214, epsilon)); - expect(math.approxEq(f32, @sqrt(f32, 3.6), 1.897367, epsilon)); - expect(@sqrt(f32, 4.0) == 2.0); - expect(math.approxEq(f32, @sqrt(f32, 7.539840), 2.745877, epsilon)); - expect(math.approxEq(f32, @sqrt(f32, 19.230934), 4.385309, epsilon)); - expect(@sqrt(f32, 64.0) == 8.0); - expect(math.approxEq(f32, @sqrt(f32, 64.1), 8.006248, epsilon)); - expect(math.approxEq(f32, @sqrt(f32, 8942.230469), 94.563370, epsilon)); -} - -test "math.sqrt64" { - const epsilon = 0.000001; - - expect(@sqrt(f64, 0.0) == 0.0); - expect(math.approxEq(f64, @sqrt(f64, 2.0), 1.414214, epsilon)); - expect(math.approxEq(f64, @sqrt(f64, 3.6), 1.897367, epsilon)); - expect(@sqrt(f64, 4.0) == 2.0); - expect(math.approxEq(f64, @sqrt(f64, 7.539840), 2.745877, epsilon)); - expect(math.approxEq(f64, @sqrt(f64, 19.230934), 4.385309, epsilon)); - expect(@sqrt(f64, 64.0) == 8.0); - expect(math.approxEq(f64, @sqrt(f64, 64.1), 8.006248, epsilon)); - expect(math.approxEq(f64, @sqrt(f64, 8942.230469), 94.563367, epsilon)); -} - -test "math.sqrt16.special" { - expect(math.isPositiveInf(@sqrt(f16, math.inf(f16)))); - expect(@sqrt(f16, 0.0) == 0.0); - expect(@sqrt(f16, -0.0) == -0.0); - expect(math.isNan(@sqrt(f16, -1.0))); - expect(math.isNan(@sqrt(f16, math.nan(f16)))); -} - -test "math.sqrt32.special" { - expect(math.isPositiveInf(@sqrt(f32, math.inf(f32)))); - expect(@sqrt(f32, 0.0) == 0.0); - expect(@sqrt(f32, -0.0) == -0.0); - 
expect(math.isNan(@sqrt(f32, -1.0))); - expect(math.isNan(@sqrt(f32, math.nan(f32)))); -} - -test "math.sqrt64.special" { - expect(math.isPositiveInf(@sqrt(f64, math.inf(f64)))); - expect(@sqrt(f64, 0.0) == 0.0); - expect(@sqrt(f64, -0.0) == -0.0); - expect(math.isNan(@sqrt(f64, -1.0))); - expect(math.isNan(@sqrt(f64, math.nan(f64)))); -} - fn sqrt_int(comptime T: type, value: T) @IntType(false, T.bit_count / 2) { var op = value; var res: T = 0; @@ -134,3 +62,12 @@ test "math.sqrt_int" { expect(sqrt_int(u32, 9) == 3); expect(sqrt_int(u32, 10) == 3); } + +/// Returns the return type `sqrt` will return given an operand of type `T`. +pub fn Sqrt(comptime T: type) type { + return switch (@typeInfo(T)) { + .Int => |int| @IntType(false, int.bits / 2), + else => T, + }; +} + diff --git a/lib/std/special/c.zig b/lib/std/special/c.zig index 0895b1e6f9..08c59337c5 100644 --- a/lib/std/special/c.zig +++ b/lib/std/special/c.zig @@ -728,6 +728,29 @@ export fn sqrt(x: f64) f64 { return @bitCast(f64, uz); } +test "sqrt" { + const epsilon = 0.000001; + + std.testing.expect(sqrt(0.0) == 0.0); + std.testing.expect(std.math.approxEq(f64, sqrt(2.0), 1.414214, epsilon)); + std.testing.expect(std.math.approxEq(f64, sqrt(3.6), 1.897367, epsilon)); + std.testing.expect(sqrt(4.0) == 2.0); + std.testing.expect(std.math.approxEq(f64, sqrt(7.539840), 2.745877, epsilon)); + std.testing.expect(std.math.approxEq(f64, sqrt(19.230934), 4.385309, epsilon)); + std.testing.expect(sqrt(64.0) == 8.0); + std.testing.expect(std.math.approxEq(f64, sqrt(64.1), 8.006248, epsilon)); + std.testing.expect(std.math.approxEq(f64, sqrt(8942.230469), 94.563367, epsilon)); +} + +test "sqrt special" { + std.testing.expect(std.math.isPositiveInf(sqrt(std.math.inf(f64)))); + std.testing.expect(sqrt(0.0) == 0.0); + std.testing.expect(sqrt(-0.0) == -0.0); + std.testing.expect(std.math.isNan(sqrt(-1.0))); + std.testing.expect(std.math.isNan(sqrt(std.math.nan(f64)))); +} + + export fn sqrtf(x: f32) f32 { const tiny: f32 = 1.0e-30; const sign: i32 = @bitCast(i32, @as(u32, 0x80000000)); @@ -803,3 +826,26 @@ export fn sqrtf(x: f32) f32 { ix += m << 23; return @bitCast(f32, ix); } + +test "sqrtf" { + const epsilon = 0.000001; + + std.testing.expect(sqrtf(0.0) == 0.0); + std.testing.expect(std.math.approxEq(f32, sqrtf(2.0), 1.414214, epsilon)); + std.testing.expect(std.math.approxEq(f32, sqrtf(3.6), 1.897367, epsilon)); + std.testing.expect(sqrtf(4.0) == 2.0); + std.testing.expect(std.math.approxEq(f32, sqrtf(7.539840), 2.745877, epsilon)); + std.testing.expect(std.math.approxEq(f32, sqrtf(19.230934), 4.385309, epsilon)); + std.testing.expect(sqrtf(64.0) == 8.0); + std.testing.expect(std.math.approxEq(f32, sqrtf(64.1), 8.006248, epsilon)); + std.testing.expect(std.math.approxEq(f32, sqrtf(8942.230469), 94.563370, epsilon)); +} + +test "sqrtf special" { + std.testing.expect(std.math.isPositiveInf(sqrtf(std.math.inf(f32)))); + std.testing.expect(sqrtf(0.0) == 0.0); + std.testing.expect(sqrtf(-0.0) == -0.0); + std.testing.expect(std.math.isNan(sqrtf(-1.0))); + std.testing.expect(std.math.isNan(sqrtf(std.math.nan(f32)))); +} + diff --git a/src/all_types.hpp b/src/all_types.hpp index ea46ab81a6..dd2b918fc6 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1680,7 +1680,7 @@ enum BuiltinFnId { BuiltinFnIdCos, BuiltinFnIdExp, BuiltinFnIdExp2, - BuiltinFnIdLn, + BuiltinFnIdLog, BuiltinFnIdLog2, BuiltinFnIdLog10, BuiltinFnIdFabs, @@ -3840,9 +3840,8 @@ struct IrInstructionAddImplicitReturnType { struct IrInstructionFloatOp { IrInstruction base; - 
BuiltinFnId op; - IrInstruction *type; - IrInstruction *op1; + BuiltinFnId fn_id; + IrInstruction *operand; }; struct IrInstructionCheckRuntimeScope { diff --git a/src/codegen.cpp b/src/codegen.cpp index 734fc3be2b..17ae34a1c4 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -764,7 +764,7 @@ static LLVMValueRef get_float_fn(CodeGen *g, ZigType *type_entry, ZigLLVMFnId fn name = "fma"; num_args = 3; } else if (fn_id == ZigLLVMFnIdFloatOp) { - name = float_op_to_name(op, true); + name = float_op_to_name(op); num_args = 1; } else { zig_unreachable(); @@ -5785,10 +5785,9 @@ static LLVMValueRef ir_render_atomic_store(CodeGen *g, IrExecutable *executable, } static LLVMValueRef ir_render_float_op(CodeGen *g, IrExecutable *executable, IrInstructionFloatOp *instruction) { - LLVMValueRef op = ir_llvm_value(g, instruction->op1); - assert(instruction->base.value->type->id == ZigTypeIdFloat); - LLVMValueRef fn_val = get_float_fn(g, instruction->base.value->type, ZigLLVMFnIdFloatOp, instruction->op); - return LLVMBuildCall(g->builder, fn_val, &op, 1, ""); + LLVMValueRef operand = ir_llvm_value(g, instruction->operand); + LLVMValueRef fn_val = get_float_fn(g, instruction->base.value->type, ZigLLVMFnIdFloatOp, instruction->fn_id); + return LLVMBuildCall(g->builder, fn_val, &operand, 1, ""); } static LLVMValueRef ir_render_mul_add(CodeGen *g, IrExecutable *executable, IrInstructionMulAdd *instruction) { @@ -8201,20 +8200,20 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdDivFloor, "divFloor", 2); create_builtin_fn(g, BuiltinFnIdRem, "rem", 2); create_builtin_fn(g, BuiltinFnIdMod, "mod", 2); - create_builtin_fn(g, BuiltinFnIdSqrt, "sqrt", 2); - create_builtin_fn(g, BuiltinFnIdSin, "sin", 2); - create_builtin_fn(g, BuiltinFnIdCos, "cos", 2); - create_builtin_fn(g, BuiltinFnIdExp, "exp", 2); - create_builtin_fn(g, BuiltinFnIdExp2, "exp2", 2); - create_builtin_fn(g, BuiltinFnIdLn, "ln", 2); - create_builtin_fn(g, BuiltinFnIdLog2, "log2", 2); - create_builtin_fn(g, BuiltinFnIdLog10, "log10", 2); - create_builtin_fn(g, BuiltinFnIdFabs, "fabs", 2); - create_builtin_fn(g, BuiltinFnIdFloor, "floor", 2); - create_builtin_fn(g, BuiltinFnIdCeil, "ceil", 2); - create_builtin_fn(g, BuiltinFnIdTrunc, "trunc", 2); - create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2); - create_builtin_fn(g, BuiltinFnIdRound, "round", 2); + create_builtin_fn(g, BuiltinFnIdSqrt, "sqrt", 1); + create_builtin_fn(g, BuiltinFnIdSin, "sin", 1); + create_builtin_fn(g, BuiltinFnIdCos, "cos", 1); + create_builtin_fn(g, BuiltinFnIdExp, "exp", 1); + create_builtin_fn(g, BuiltinFnIdExp2, "exp2", 1); + create_builtin_fn(g, BuiltinFnIdLog, "log", 1); + create_builtin_fn(g, BuiltinFnIdLog2, "log2", 1); + create_builtin_fn(g, BuiltinFnIdLog10, "log10", 1); + create_builtin_fn(g, BuiltinFnIdFabs, "fabs", 1); + create_builtin_fn(g, BuiltinFnIdFloor, "floor", 1); + create_builtin_fn(g, BuiltinFnIdCeil, "ceil", 1); + create_builtin_fn(g, BuiltinFnIdTrunc, "trunc", 1); + create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 1); + create_builtin_fn(g, BuiltinFnIdRound, "round", 1); create_builtin_fn(g, BuiltinFnIdMulAdd, "mulAdd", 4); create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdAsyncCall, "asyncCall", SIZE_MAX); diff --git a/src/ir.cpp b/src/ir.cpp index 08efdbd6e4..56d4cdea94 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -3125,9 +3125,7 @@ static IrInstruction *ir_build_overflow_op(IrBuilder *irb, Scope *scope, AstNode //TODO Powi, Pow, minnum, maxnum, 
maximum, minimum, copysign, // lround, llround, lrint, llrint // So far this is only non-complicated type functions. -const char *float_op_to_name(BuiltinFnId op, bool llvm_name) { - const bool b = llvm_name; - +const char *float_op_to_name(BuiltinFnId op) { switch (op) { case BuiltinFnIdSqrt: return "sqrt"; @@ -3139,8 +3137,8 @@ const char *float_op_to_name(BuiltinFnId op, bool llvm_name) { return "exp"; case BuiltinFnIdExp2: return "exp2"; - case BuiltinFnIdLn: - return b ? "log" : "ln"; + case BuiltinFnIdLog: + return "log"; case BuiltinFnIdLog10: return "log10"; case BuiltinFnIdLog2: @@ -3154,7 +3152,7 @@ const char *float_op_to_name(BuiltinFnId op, bool llvm_name) { case BuiltinFnIdTrunc: return "trunc"; case BuiltinFnIdNearbyInt: - return b ? "nearbyint" : "nearbyInt"; + return "nearbyint"; case BuiltinFnIdRound: return "round"; default: @@ -3162,14 +3160,14 @@ const char *float_op_to_name(BuiltinFnId op, bool llvm_name) { } } -static IrInstruction *ir_build_float_op(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *type, IrInstruction *op1, BuiltinFnId op) { +static IrInstruction *ir_build_float_op(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *operand, + BuiltinFnId fn_id) +{ IrInstructionFloatOp *instruction = ir_build_instruction(irb, scope, source_node); - instruction->type = type; - instruction->op1 = op1; - instruction->op = op; + instruction->operand = operand; + instruction->fn_id = fn_id; - if (type != nullptr) ir_ref_instruction(type, irb->current_basic_block); - ir_ref_instruction(op1, irb->current_basic_block); + ir_ref_instruction(operand, irb->current_basic_block); return &instruction->base; } @@ -5497,7 +5495,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo case BuiltinFnIdCos: case BuiltinFnIdExp: case BuiltinFnIdExp2: - case BuiltinFnIdLn: + case BuiltinFnIdLog: case BuiltinFnIdLog2: case BuiltinFnIdLog10: case BuiltinFnIdFabs: @@ -5512,13 +5510,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - AstNode *arg1_node = node->data.fn_call_expr.params.at(1); - IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); - if (arg1_value == irb->codegen->invalid_instruction) - return arg1_value; - - IrInstruction *ir_sqrt = ir_build_float_op(irb, scope, node, arg0_value, arg1_value, builtin_fn->id); - return ir_lval_wrap(irb, scope, ir_sqrt, lval, result_loc); + IrInstruction *inst = ir_build_float_op(irb, scope, node, arg0_value, builtin_fn->id); + return ir_lval_wrap(irb, scope, inst, lval, result_loc); } case BuiltinFnIdTruncate: { @@ -27643,7 +27636,7 @@ static IrInstruction *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, I return result; } -static void ir_eval_float_op(IrAnalyze *ira, IrInstruction *source_instr, BuiltinFnId fop, ZigType *float_type, +static ErrorMsg *ir_eval_float_op(IrAnalyze *ira, IrInstruction *source_instr, BuiltinFnId fop, ZigType *float_type, ZigValue *op, ZigValue *out_val) { assert(ira && source_instr && float_type && out_val && op); @@ -27670,24 +27663,49 @@ static void ir_eval_float_op(IrAnalyze *ira, IrInstruction *source_instr, Builti out_val->data.x_f16 = f16_sqrt(op->data.x_f16); break; case BuiltinFnIdSin: + out_val->data.x_f16 = zig_double_to_f16(sin(zig_f16_to_double(op->data.x_f16))); + break; case BuiltinFnIdCos: + out_val->data.x_f16 = zig_double_to_f16(cos(zig_f16_to_double(op->data.x_f16))); + break; case BuiltinFnIdExp: + 
out_val->data.x_f16 = zig_double_to_f16(exp(zig_f16_to_double(op->data.x_f16))); + break; case BuiltinFnIdExp2: - case BuiltinFnIdLn: + out_val->data.x_f16 = zig_double_to_f16(exp2(zig_f16_to_double(op->data.x_f16))); + break; + case BuiltinFnIdLog: + out_val->data.x_f16 = zig_double_to_f16(log(zig_f16_to_double(op->data.x_f16))); + break; case BuiltinFnIdLog10: + out_val->data.x_f16 = zig_double_to_f16(log10(zig_f16_to_double(op->data.x_f16))); + break; case BuiltinFnIdLog2: + out_val->data.x_f16 = zig_double_to_f16(log2(zig_f16_to_double(op->data.x_f16))); + break; case BuiltinFnIdFabs: + out_val->data.x_f16 = zig_double_to_f16(fabs(zig_f16_to_double(op->data.x_f16))); + break; case BuiltinFnIdFloor: + out_val->data.x_f16 = zig_double_to_f16(floor(zig_f16_to_double(op->data.x_f16))); + break; case BuiltinFnIdCeil: + out_val->data.x_f16 = zig_double_to_f16(ceil(zig_f16_to_double(op->data.x_f16))); + break; case BuiltinFnIdTrunc: + out_val->data.x_f16 = zig_double_to_f16(trunc(zig_f16_to_double(op->data.x_f16))); + break; case BuiltinFnIdNearbyInt: + out_val->data.x_f16 = zig_double_to_f16(nearbyint(zig_f16_to_double(op->data.x_f16))); + break; case BuiltinFnIdRound: - zig_panic("unimplemented f16 builtin"); + out_val->data.x_f16 = zig_double_to_f16(round(zig_f16_to_double(op->data.x_f16))); + break; default: zig_unreachable(); }; break; - }; + } case 32: { switch (fop) { case BuiltinFnIdSqrt: @@ -27705,7 +27723,7 @@ static void ir_eval_float_op(IrAnalyze *ira, IrInstruction *source_instr, Builti case BuiltinFnIdExp2: out_val->data.x_f32 = exp2f(op->data.x_f32); break; - case BuiltinFnIdLn: + case BuiltinFnIdLog: out_val->data.x_f32 = logf(op->data.x_f32); break; case BuiltinFnIdLog10: @@ -27736,7 +27754,7 @@ static void ir_eval_float_op(IrAnalyze *ira, IrInstruction *source_instr, Builti zig_unreachable(); }; break; - }; + } case 64: { switch (fop) { case BuiltinFnIdSqrt: @@ -27754,7 +27772,7 @@ static void ir_eval_float_op(IrAnalyze *ira, IrInstruction *source_instr, Builti case BuiltinFnIdExp2: out_val->data.x_f64 = exp2(op->data.x_f64); break; - case BuiltinFnIdLn: + case BuiltinFnIdLog: out_val->data.x_f64 = log(op->data.x_f64); break; case BuiltinFnIdLog10: @@ -27785,7 +27803,11 @@ static void ir_eval_float_op(IrAnalyze *ira, IrInstruction *source_instr, Builti zig_unreachable(); } break; - }; + } + case 80: + return ir_add_error(ira, source_instr, + buf_sprintf("compiler bug: TODO: implement '%s' for type '%s'. See https://github.com/ziglang/zig/issues/4026", + float_op_to_name(fop), buf_ptr(&float_type->name))); case 128: { float128_t *out, *in; if (float_type->id == ZigTypeIdComptimeFloat) { @@ -27804,7 +27826,7 @@ static void ir_eval_float_op(IrAnalyze *ira, IrInstruction *source_instr, Builti case BuiltinFnIdCos: case BuiltinFnIdExp: case BuiltinFnIdExp2: - case BuiltinFnIdLn: + case BuiltinFnIdLog: case BuiltinFnIdLog10: case BuiltinFnIdLog2: case BuiltinFnIdFabs: @@ -27812,94 +27834,86 @@ static void ir_eval_float_op(IrAnalyze *ira, IrInstruction *source_instr, Builti case BuiltinFnIdCeil: case BuiltinFnIdTrunc: case BuiltinFnIdRound: - zig_panic("unimplemented f128 builtin"); + return ir_add_error(ira, source_instr, + buf_sprintf("compiler bug: TODO: implement '%s' for type '%s'. 
See https://github.com/ziglang/zig/issues/4026", + float_op_to_name(fop), buf_ptr(&float_type->name))); default: zig_unreachable(); } break; - }; + } default: zig_unreachable(); } + out_val->special = ConstValSpecialStatic; + return nullptr; } -static IrInstruction *ir_analyze_float_op(IrAnalyze *ira, IrInstruction *source_instr, - ZigType *expr_type, AstNode *expr_type_src_node, IrInstruction *operand, BuiltinFnId op) -{ - // Only allow float types, and vectors of floats. - ZigType *float_type = (expr_type->id == ZigTypeIdVector) ? expr_type->data.vector.elem_type : expr_type; - if (float_type->id != ZigTypeIdFloat && float_type->id != ZigTypeIdComptimeFloat) { - ir_add_error_node(ira, expr_type_src_node, - buf_sprintf("@%s does not support type '%s'", - float_op_to_name(op, false), buf_ptr(&float_type->name))); +static IrInstruction *ir_analyze_instruction_float_op(IrAnalyze *ira, IrInstructionFloatOp *instruction) { + IrInstruction *operand = instruction->operand->child; + ZigType *operand_type = operand->value->type; + if (type_is_invalid(operand_type)) + return ira->codegen->invalid_instruction; + + // This instruction accepts floats and vectors of floats. + ZigType *scalar_type = (operand_type->id == ZigTypeIdVector) ? + operand_type->data.vector.elem_type : operand_type; + + if (scalar_type->id != ZigTypeIdFloat && scalar_type->id != ZigTypeIdComptimeFloat) { + ir_add_error(ira, operand, + buf_sprintf("expected float type, found '%s'", buf_ptr(&scalar_type->name))); return ira->codegen->invalid_instruction; } - IrInstruction *casted_op = ir_implicit_cast(ira, operand, float_type); - if (type_is_invalid(casted_op->value->type)) - return ira->codegen->invalid_instruction; - - if (instr_is_comptime(casted_op)) { - if ((float_type->id == ZigTypeIdComptimeFloat || - float_type->data.floating.bit_count == 16 || - float_type->data.floating.bit_count == 128) && - op != BuiltinFnIdSqrt) - { - ir_add_error(ira, source_instr, - buf_sprintf("compiler bug: TODO make @%s support type '%s'", - float_op_to_name(op, false), buf_ptr(&float_type->name))); + if (instr_is_comptime(operand)) { + ZigValue *operand_val = ir_resolve_const(ira, operand, UndefOk); + if (operand_val == nullptr) return ira->codegen->invalid_instruction; - } + if (operand_val->special == ConstValSpecialUndef) + return ir_const_undef(ira, &instruction->base, operand_type); - ZigValue *op1_const = ir_resolve_const(ira, casted_op, UndefBad); - if (!op1_const) - return ira->codegen->invalid_instruction; - - IrInstruction *result = ir_const(ira, source_instr, expr_type); + IrInstruction *result = ir_const(ira, &instruction->base, operand_type); ZigValue *out_val = result->value; - if (expr_type->id == ZigTypeIdVector) { - expand_undef_array(ira->codegen, op1_const); + if (operand_type->id == ZigTypeIdVector) { + expand_undef_array(ira->codegen, operand_val); out_val->special = ConstValSpecialUndef; expand_undef_array(ira->codegen, out_val); - size_t len = expr_type->data.vector.len; + size_t len = operand_type->data.vector.len; for (size_t i = 0; i < len; i += 1) { - ZigValue *float_operand_op1 = &op1_const->data.x_array.data.s_none.elements[i]; + ZigValue *elem_operand = &operand_val->data.x_array.data.s_none.elements[i]; ZigValue *float_out_val = &out_val->data.x_array.data.s_none.elements[i]; - assert(float_operand_op1->type == float_type); - assert(float_out_val->type == float_type); - ir_eval_float_op(ira, source_instr, op, float_type, op1_const, float_out_val); - float_out_val->type = float_type; + ir_assert(elem_operand->type 
== scalar_type, &instruction->base); + ir_assert(float_out_val->type == scalar_type, &instruction->base); + ErrorMsg *msg = ir_eval_float_op(ira, &instruction->base, instruction->fn_id, scalar_type, + elem_operand, float_out_val); + if (msg != nullptr) { + add_error_note(ira->codegen, msg, instruction->base.source_node, + buf_sprintf("when computing vector element at index %" ZIG_PRI_usize, i)); + return ira->codegen->invalid_instruction; + } + float_out_val->type = scalar_type; } - out_val->type = expr_type; + out_val->type = operand_type; out_val->special = ConstValSpecialStatic; } else { - ir_eval_float_op(ira, source_instr, op, float_type, op1_const, out_val); + if (ir_eval_float_op(ira, &instruction->base, instruction->fn_id, scalar_type, + operand_val, out_val) != nullptr) + { + return ira->codegen->invalid_instruction; + } } return result; } - ir_assert(float_type->id == ZigTypeIdFloat, source_instr); + ir_assert(scalar_type->id == ZigTypeIdFloat, &instruction->base); - IrInstruction *result = ir_build_float_op(&ira->new_irb, source_instr->scope, - source_instr->source_node, nullptr, casted_op, op); - result->value->type = expr_type; + IrInstruction *result = ir_build_float_op(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, operand, instruction->fn_id); + result->value->type = operand_type; return result; } -static IrInstruction *ir_analyze_instruction_float_op(IrAnalyze *ira, IrInstructionFloatOp *instruction) { - ZigType *expr_type = ir_resolve_type(ira, instruction->type->child); - if (type_is_invalid(expr_type)) - return ira->codegen->invalid_instruction; - - IrInstruction *operand = instruction->op1->child; - if (type_is_invalid(operand->value->type)) - return ira->codegen->invalid_instruction; - - return ir_analyze_float_op(ira, &instruction->base, expr_type, instruction->type->source_node, - operand, instruction->op); -} - static IrInstruction *ir_analyze_instruction_bswap(IrAnalyze *ira, IrInstructionBswap *instruction) { Error err; diff --git a/src/ir.hpp b/src/ir.hpp index a20dc2d232..003bf4897d 100644 --- a/src/ir.hpp +++ b/src/ir.hpp @@ -33,7 +33,7 @@ bool ir_has_side_effects(IrInstruction *instruction); struct IrAnalyze; ZigValue *const_ptr_pointee(IrAnalyze *ira, CodeGen *codegen, ZigValue *const_val, AstNode *source_node); -const char *float_op_to_name(BuiltinFnId op, bool llvm_name); +const char *float_op_to_name(BuiltinFnId op); // for debugging purposes void dbg_ir_break(const char *src_file, uint32_t line); diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 06dbe0f2b5..6c7f216219 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -2005,15 +2005,8 @@ static void ir_print_add_implicit_return_type(IrPrint *irp, IrInstructionAddImpl } static void ir_print_float_op(IrPrint *irp, IrInstructionFloatOp *instruction) { - - fprintf(irp->f, "@%s(", float_op_to_name(instruction->op, false)); - if (instruction->type != nullptr) { - ir_print_other_instruction(irp, instruction->type); - } else { - fprintf(irp->f, "null"); - } - fprintf(irp->f, ","); - ir_print_other_instruction(irp, instruction->op1); + fprintf(irp->f, "@%s(", float_op_to_name(instruction->fn_id)); + ir_print_other_instruction(irp, instruction->operand); fprintf(irp->f, ")"); } diff --git a/test/stage1/behavior/floatop.zig b/test/stage1/behavior/floatop.zig index eb386d75f9..5fe1162502 100644 --- a/test/stage1/behavior/floatop.zig +++ b/test/stage1/behavior/floatop.zig @@ -1,6 +1,10 @@ -const expect = @import("std").testing.expect; -const pi = @import("std").math.pi; -const e = 
@import("std").math.e; +const std = @import("std"); +const expect = std.testing.expect; +const math = std.math; +const pi = std.math.pi; +const e = std.math.e; + +const epsilon = 0.000001; test "@sqrt" { comptime testSqrt(); @@ -10,25 +14,55 @@ test "@sqrt" { fn testSqrt() void { { var a: f16 = 4; - expect(@sqrt(f16, a) == 2); + expect(@sqrt(a) == 2); } { var a: f32 = 9; - expect(@sqrt(f32, a) == 3); + expect(@sqrt(a) == 3); + var b: f32 = 1.1; + expect(math.approxEq(f32, @sqrt(b), 1.0488088481701516, epsilon)); } { var a: f64 = 25; - expect(@sqrt(f64, a) == 5); + expect(@sqrt(a) == 5); } { const a: comptime_float = 25.0; - expect(@sqrt(comptime_float, a) == 5.0); + expect(@sqrt(a) == 5.0); } - // Waiting on a c.zig implementation + // TODO https://github.com/ziglang/zig/issues/4026 //{ // var a: f128 = 49; - // expect(@sqrt(f128, a) == 7); + // expect(@sqrt(a) == 7); //} + { + var v: @Vector(4, f32) = [_]f32{1.1, 2.2, 3.3, 4.4}; + var result = @sqrt(v); + expect(math.approxEq(f32, @sqrt(@as(f32, 1.1)), result[0], epsilon)); + expect(math.approxEq(f32, @sqrt(@as(f32, 2.2)), result[1], epsilon)); + expect(math.approxEq(f32, @sqrt(@as(f32, 3.3)), result[2], epsilon)); + expect(math.approxEq(f32, @sqrt(@as(f32, 4.4)), result[3], epsilon)); + } +} + +test "more @sqrt f16 tests" { + // TODO these are not all passing at comptime + expect(@sqrt(@as(f16, 0.0)) == 0.0); + expect(math.approxEq(f16, @sqrt(@as(f16, 2.0)), 1.414214, epsilon)); + expect(math.approxEq(f16, @sqrt(@as(f16, 3.6)), 1.897367, epsilon)); + expect(@sqrt(@as(f16, 4.0)) == 2.0); + expect(math.approxEq(f16, @sqrt(@as(f16, 7.539840)), 2.745877, epsilon)); + expect(math.approxEq(f16, @sqrt(@as(f16, 19.230934)), 4.385309, epsilon)); + expect(@sqrt(@as(f16, 64.0)) == 8.0); + expect(math.approxEq(f16, @sqrt(@as(f16, 64.1)), 8.006248, epsilon)); + expect(math.approxEq(f16, @sqrt(@as(f16, 8942.230469)), 94.563370, epsilon)); + + // special cases + expect(math.isPositiveInf(@sqrt(@as(f16, math.inf(f16))))); + expect(@sqrt(@as(f16, 0.0)) == 0.0); + expect(@sqrt(@as(f16, -0.0)) == -0.0); + expect(math.isNan(@sqrt(@as(f16, -1.0)))); + expect(math.isNan(@sqrt(@as(f16, math.nan(f16))))); } test "@sin" { @@ -37,26 +71,28 @@ test "@sin" { } fn testSin() void { - // TODO - this is actually useful and should be implemented - // (all the trig functions for f16) - // but will probably wait till self-hosted - //{ - // var a: f16 = pi; - // expect(@sin(f16, a/2) == 1); - //} + // TODO test f128, and c_longdouble + // https://github.com/ziglang/zig/issues/4026 + { + var a: f16 = 0; + expect(@sin(a) == 0); + } { var a: f32 = 0; - expect(@sin(f32, a) == 0); + expect(@sin(a) == 0); } { var a: f64 = 0; - expect(@sin(f64, a) == 0); + expect(@sin(a) == 0); + } + { + var v: @Vector(4, f32) = [_]f32{1.1, 2.2, 3.3, 4.4}; + var result = @sin(v); + expect(math.approxEq(f32, @sin(@as(f32, 1.1)), result[0], epsilon)); + expect(math.approxEq(f32, @sin(@as(f32, 2.2)), result[1], epsilon)); + expect(math.approxEq(f32, @sin(@as(f32, 3.3)), result[2], epsilon)); + expect(math.approxEq(f32, @sin(@as(f32, 4.4)), result[3], epsilon)); } - // TODO - //{ - // var a: f16 = pi; - // expect(@sqrt(f128, a/2) == 1); - //} } test "@cos" { @@ -65,13 +101,27 @@ test "@cos" { } fn testCos() void { + // TODO test f128, and c_longdouble + // https://github.com/ziglang/zig/issues/4026 + { + var a: f16 = 0; + expect(@cos(a) == 1); + } { var a: f32 = 0; - expect(@cos(f32, a) == 1); + expect(@cos(a) == 1); } { var a: f64 = 0; - expect(@cos(f64, a) == 1); + expect(@cos(a) == 1); + } + { + 
var v: @Vector(4, f32) = [_]f32{1.1, 2.2, 3.3, 4.4}; + var result = @cos(v); + expect(math.approxEq(f32, @cos(@as(f32, 1.1)), result[0], epsilon)); + expect(math.approxEq(f32, @cos(@as(f32, 2.2)), result[1], epsilon)); + expect(math.approxEq(f32, @cos(@as(f32, 3.3)), result[2], epsilon)); + expect(math.approxEq(f32, @cos(@as(f32, 4.4)), result[3], epsilon)); } } @@ -81,13 +131,27 @@ test "@exp" { } fn testExp() void { + // TODO test f128, and c_longdouble + // https://github.com/ziglang/zig/issues/4026 + { + var a: f16 = 0; + expect(@exp(a) == 1); + } { var a: f32 = 0; - expect(@exp(f32, a) == 1); + expect(@exp(a) == 1); } { var a: f64 = 0; - expect(@exp(f64, a) == 1); + expect(@exp(a) == 1); + } + { + var v: @Vector(4, f32) = [_]f32{1.1, 2.2, 0.3, 0.4}; + var result = @exp(v); + expect(math.approxEq(f32, @exp(@as(f32, 1.1)), result[0], epsilon)); + expect(math.approxEq(f32, @exp(@as(f32, 2.2)), result[1], epsilon)); + expect(math.approxEq(f32, @exp(@as(f32, 0.3)), result[2], epsilon)); + expect(math.approxEq(f32, @exp(@as(f32, 0.4)), result[3], epsilon)); } } @@ -97,31 +161,59 @@ test "@exp2" { } fn testExp2() void { + // TODO test f128, and c_longdouble + // https://github.com/ziglang/zig/issues/4026 + { + var a: f16 = 2; + expect(@exp2(a) == 4); + } { var a: f32 = 2; - expect(@exp2(f32, a) == 4); + expect(@exp2(a) == 4); } { var a: f64 = 2; - expect(@exp2(f64, a) == 4); + expect(@exp2(a) == 4); + } + { + var v: @Vector(4, f32) = [_]f32{1.1, 2.2, 0.3, 0.4}; + var result = @exp2(v); + expect(math.approxEq(f32, @exp2(@as(f32, 1.1)), result[0], epsilon)); + expect(math.approxEq(f32, @exp2(@as(f32, 2.2)), result[1], epsilon)); + expect(math.approxEq(f32, @exp2(@as(f32, 0.3)), result[2], epsilon)); + expect(math.approxEq(f32, @exp2(@as(f32, 0.4)), result[3], epsilon)); } } -test "@ln" { +test "@log" { // Old musl (and glibc?), and our current math.ln implementation do not return 1 // so also accept those values. 
- comptime testLn(); - testLn(); + comptime testLog(); + testLog(); } -fn testLn() void { +fn testLog() void { + // TODO test f128, and c_longdouble + // https://github.com/ziglang/zig/issues/4026 + { + var a: f16 = e; + expect(math.approxEq(f16, @log(a), 1, epsilon)); + } { var a: f32 = e; - expect(@ln(f32, a) == 1 or @ln(f32, a) == @bitCast(f32, @as(u32, 0x3f7fffff))); + expect(@log(a) == 1 or @log(a) == @bitCast(f32, @as(u32, 0x3f7fffff))); } { var a: f64 = e; - expect(@ln(f64, a) == 1 or @ln(f64, a) == @bitCast(f64, @as(u64, 0x3ff0000000000000))); + expect(@log(a) == 1 or @log(a) == @bitCast(f64, @as(u64, 0x3ff0000000000000))); + } + { + var v: @Vector(4, f32) = [_]f32{1.1, 2.2, 0.3, 0.4}; + var result = @log(v); + expect(math.approxEq(f32, @log(@as(f32, 1.1)), result[0], epsilon)); + expect(math.approxEq(f32, @log(@as(f32, 2.2)), result[1], epsilon)); + expect(math.approxEq(f32, @log(@as(f32, 0.3)), result[2], epsilon)); + expect(math.approxEq(f32, @log(@as(f32, 0.4)), result[3], epsilon)); } } @@ -131,13 +223,27 @@ test "@log2" { } fn testLog2() void { + // TODO test f128, and c_longdouble + // https://github.com/ziglang/zig/issues/4026 + { + var a: f16 = 4; + expect(@log2(a) == 2); + } { var a: f32 = 4; - expect(@log2(f32, a) == 2); + expect(@log2(a) == 2); } { var a: f64 = 4; - expect(@log2(f64, a) == 2); + expect(@log2(a) == 2); + } + { + var v: @Vector(4, f32) = [_]f32{1.1, 2.2, 0.3, 0.4}; + var result = @log2(v); + expect(math.approxEq(f32, @log2(@as(f32, 1.1)), result[0], epsilon)); + expect(math.approxEq(f32, @log2(@as(f32, 2.2)), result[1], epsilon)); + expect(math.approxEq(f32, @log2(@as(f32, 0.3)), result[2], epsilon)); + expect(math.approxEq(f32, @log2(@as(f32, 0.4)), result[3], epsilon)); } } @@ -147,13 +253,27 @@ test "@log10" { } fn testLog10() void { + // TODO test f128, and c_longdouble + // https://github.com/ziglang/zig/issues/4026 + { + var a: f16 = 100; + expect(@log10(a) == 2); + } { var a: f32 = 100; - expect(@log10(f32, a) == 2); + expect(@log10(a) == 2); } { var a: f64 = 1000; - expect(@log10(f64, a) == 3); + expect(@log10(a) == 3); + } + { + var v: @Vector(4, f32) = [_]f32{1.1, 2.2, 0.3, 0.4}; + var result = @log10(v); + expect(math.approxEq(f32, @log10(@as(f32, 1.1)), result[0], epsilon)); + expect(math.approxEq(f32, @log10(@as(f32, 2.2)), result[1], epsilon)); + expect(math.approxEq(f32, @log10(@as(f32, 0.3)), result[2], epsilon)); + expect(math.approxEq(f32, @log10(@as(f32, 0.4)), result[3], epsilon)); } } @@ -163,17 +283,33 @@ test "@fabs" { } fn testFabs() void { + // TODO test f128, and c_longdouble + // https://github.com/ziglang/zig/issues/4026 + { + var a: f16 = -2.5; + var b: f16 = 2.5; + expect(@fabs(a) == 2.5); + expect(@fabs(b) == 2.5); + } { var a: f32 = -2.5; var b: f32 = 2.5; - expect(@fabs(f32, a) == 2.5); - expect(@fabs(f32, b) == 2.5); + expect(@fabs(a) == 2.5); + expect(@fabs(b) == 2.5); } { var a: f64 = -2.5; var b: f64 = 2.5; - expect(@fabs(f64, a) == 2.5); - expect(@fabs(f64, b) == 2.5); + expect(@fabs(a) == 2.5); + expect(@fabs(b) == 2.5); + } + { + var v: @Vector(4, f32) = [_]f32{1.1, -2.2, 0.3, -0.4}; + var result = @fabs(v); + expect(math.approxEq(f32, @fabs(@as(f32, 1.1)), result[0], epsilon)); + expect(math.approxEq(f32, @fabs(@as(f32, -2.2)), result[1], epsilon)); + expect(math.approxEq(f32, @fabs(@as(f32, 0.3)), result[2], epsilon)); + expect(math.approxEq(f32, @fabs(@as(f32, -0.4)), result[3], epsilon)); } } @@ -183,13 +319,27 @@ test "@floor" { } fn testFloor() void { + // TODO test f128, and c_longdouble + // 
https://github.com/ziglang/zig/issues/4026 + { + var a: f16 = 2.1; + expect(@floor(a) == 2); + } { var a: f32 = 2.1; - expect(@floor(f32, a) == 2); + expect(@floor(a) == 2); } { var a: f64 = 3.5; - expect(@floor(f64, a) == 3); + expect(@floor(a) == 3); + } + { + var v: @Vector(4, f32) = [_]f32{1.1, -2.2, 0.3, -0.4}; + var result = @floor(v); + expect(math.approxEq(f32, @floor(@as(f32, 1.1)), result[0], epsilon)); + expect(math.approxEq(f32, @floor(@as(f32, -2.2)), result[1], epsilon)); + expect(math.approxEq(f32, @floor(@as(f32, 0.3)), result[2], epsilon)); + expect(math.approxEq(f32, @floor(@as(f32, -0.4)), result[3], epsilon)); } } @@ -199,13 +349,27 @@ test "@ceil" { } fn testCeil() void { + // TODO test f128, and c_longdouble + // https://github.com/ziglang/zig/issues/4026 + { + var a: f16 = 2.1; + expect(@ceil(a) == 3); + } { var a: f32 = 2.1; - expect(@ceil(f32, a) == 3); + expect(@ceil(a) == 3); } { var a: f64 = 3.5; - expect(@ceil(f64, a) == 4); + expect(@ceil(a) == 4); + } + { + var v: @Vector(4, f32) = [_]f32{1.1, -2.2, 0.3, -0.4}; + var result = @ceil(v); + expect(math.approxEq(f32, @ceil(@as(f32, 1.1)), result[0], epsilon)); + expect(math.approxEq(f32, @ceil(@as(f32, -2.2)), result[1], epsilon)); + expect(math.approxEq(f32, @ceil(@as(f32, 0.3)), result[2], epsilon)); + expect(math.approxEq(f32, @ceil(@as(f32, -0.4)), result[3], epsilon)); } } @@ -215,29 +379,45 @@ test "@trunc" { } fn testTrunc() void { + // TODO test f128, and c_longdouble + // https://github.com/ziglang/zig/issues/4026 + { + var a: f16 = 2.1; + expect(@trunc(a) == 2); + } { var a: f32 = 2.1; - expect(@trunc(f32, a) == 2); + expect(@trunc(a) == 2); } { var a: f64 = -3.5; - expect(@trunc(f64, a) == -3); + expect(@trunc(a) == -3); + } + { + var v: @Vector(4, f32) = [_]f32{1.1, -2.2, 0.3, -0.4}; + var result = @trunc(v); + expect(math.approxEq(f32, @trunc(@as(f32, 1.1)), result[0], epsilon)); + expect(math.approxEq(f32, @trunc(@as(f32, -2.2)), result[1], epsilon)); + expect(math.approxEq(f32, @trunc(@as(f32, 0.3)), result[2], epsilon)); + expect(math.approxEq(f32, @trunc(@as(f32, -0.4)), result[3], epsilon)); } } -// This is waiting on library support for the Windows build (not sure why the other's don't need it) -//test "@nearbyInt" { +// TODO This is waiting on library support for the Windows build (not sure why the other's don't need it) +//test "@nearbyint" { // comptime testNearbyInt(); // testNearbyInt(); //} //fn testNearbyInt() void { +// // TODO test f16, f128, and c_longdouble +// // https://github.com/ziglang/zig/issues/4026 // { // var a: f32 = 2.1; -// expect(@nearbyInt(f32, a) == 2); +// expect(@nearbyint(a) == 2); // } // { // var a: f64 = -3.75; -// expect(@nearbyInt(f64, a) == -4); +// expect(@nearbyint(a) == -4); // } //} diff --git a/test/stage1/behavior/math.zig b/test/stage1/behavior/math.zig index 90260d4e02..e00b1a83fa 100644 --- a/test/stage1/behavior/math.zig +++ b/test/stage1/behavior/math.zig @@ -587,12 +587,12 @@ test "@sqrt" { const x = 14.0; const y = x * x; - const z = @sqrt(@TypeOf(y), y); + const z = @sqrt(y); comptime expect(z == x); } fn testSqrt(comptime T: type, x: T) void { - expect(@sqrt(T, x * x) == x); + expect(@sqrt(x * x) == x); } test "comptime_int param and return" {
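A closing usage note on the lib/std/math/sqrt.zig change earlier in this patch: the new Sqrt(T) helper makes std.math.sqrt return an unsigned integer of half the bit width for integer operands, while float operands keep their own type. A hedged sketch (not part of the patch, test name invented):

const std = @import("std");
const expect = std.testing.expect;

test "std.math.sqrt return types sketch" {
    // Integer operand: the result is an unsigned int with half the bits (u32 -> u16).
    const i = std.math.sqrt(@as(u32, 25));
    expect(@TypeOf(i) == u16);
    expect(i == 5);

    // Float operand: the result type is the operand type and forwards to @sqrt.
    const f = std.math.sqrt(@as(f64, 4.0));
    expect(@TypeOf(f) == f64);
    expect(f == 2.0);
}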