diff --git a/.gitignore b/.gitignore
index 20b208975a..5616da8e58 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
zig-cache/
build/
build-*/
+docgen_tmp/
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e502901bd2..9701de9e42 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -22,7 +22,7 @@ set(ZIG_VERSION "${ZIG_VERSION_MAJOR}.${ZIG_VERSION_MINOR}.${ZIG_VERSION_PATCH}"
find_program(GIT_EXE NAMES git)
if(GIT_EXE)
execute_process(
- COMMAND ${GIT_EXE} name-rev HEAD --tags --name-only --no-undefined --always
+ COMMAND ${GIT_EXE} -C ${CMAKE_SOURCE_DIR} name-rev HEAD --tags --name-only --no-undefined --always
OUTPUT_VARIABLE ZIG_GIT_REV
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(ZIG_GIT_REV MATCHES "\\^0$")
@@ -261,12 +261,15 @@ endif()
set(EMBEDDED_SOFTFLOAT_SOURCES
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/f128M_isSignalingNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF128M.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF16UI.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF32UI.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF64UI.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f128MToCommonNaN.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f16UIToCommonNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f32UIToCommonNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f64UIToCommonNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_propagateNaNF128M.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_propagateNaNF16UI.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/softfloat_raiseFlags.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_add.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_div.c"
@@ -293,8 +296,20 @@ set(EMBEDDED_SOFTFLOAT_SOURCES
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui32_r_minMag.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui64.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui64_r_minMag.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_add.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_div.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_eq.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_lt.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_mul.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_rem.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_roundToInt.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_sqrt.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_sub.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_to_f128M.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_to_f64.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f32_to_f128M.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f64_to_f128M.c"
+ "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f64_to_f16.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_add256M.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addCarryM.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addComplCarryM.c"
@@ -416,7 +431,8 @@ set(ZIG_CPP_SOURCES
set(ZIG_STD_FILES
"array_list.zig"
"atomic/index.zig"
- "atomic/queue.zig"
+ "atomic/queue_mpmc.zig"
+ "atomic/queue_mpsc.zig"
"atomic/stack.zig"
"base64.zig"
"buf_map.zig"
@@ -558,6 +574,7 @@ set(ZIG_STD_FILES
"special/compiler_rt/aullrem.zig"
"special/compiler_rt/comparetf2.zig"
"special/compiler_rt/divti3.zig"
+ "special/compiler_rt/extendXfYf2.zig"
"special/compiler_rt/fixuint.zig"
"special/compiler_rt/fixunsdfdi.zig"
"special/compiler_rt/fixunsdfsi.zig"
@@ -568,8 +585,17 @@ set(ZIG_STD_FILES
"special/compiler_rt/fixunstfdi.zig"
"special/compiler_rt/fixunstfsi.zig"
"special/compiler_rt/fixunstfti.zig"
+ "special/compiler_rt/floatunditf.zig"
+ "special/compiler_rt/floatunsitf.zig"
+ "special/compiler_rt/floatuntidf.zig"
+ "special/compiler_rt/floatuntisf.zig"
+ "special/compiler_rt/floatuntitf.zig"
+ "special/compiler_rt/floattidf.zig"
+ "special/compiler_rt/floattisf.zig"
+ "special/compiler_rt/floattitf.zig"
"special/compiler_rt/muloti4.zig"
"special/compiler_rt/index.zig"
+ "special/compiler_rt/truncXfYf2.zig"
"special/compiler_rt/udivmod.zig"
"special/compiler_rt/udivmoddi4.zig"
"special/compiler_rt/udivmodti4.zig"
diff --git a/doc/langref.html.in b/doc/langref.html.in
index f1ae2bafaa..1da4205b89 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -367,6 +367,11 @@ pub fn main() void {
f32 |
float |
@@ -654,6 +659,7 @@ fn divide(a: i32, b: i32) i32 {
{#header_open|Floats#}
Zig has the following floating point types:
+ f16 - IEEE-754-2008 binary16
f32 - IEEE-754-2008 binary32
f64 - IEEE-754-2008 binary64
f128 - IEEE-754-2008 binary128
@@ -1456,8 +1462,7 @@ test "pointer array access" {
// Taking an address of an individual element gives a
// pointer to a single item. This kind of pointer
// does not support pointer arithmetic.
-
- var array = []u8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ var array = []u8{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
const ptr = &array[2];
assert(@typeOf(ptr) == *u8);
@@ -1469,7 +1474,7 @@ test "pointer array access" {
test "pointer slicing" {
// In Zig, we prefer using slices over null-terminated pointers.
// You can turn an array into a slice using slice syntax:
- var array = []u8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ var array = []u8{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
const slice = array[2..4];
assert(slice.len == 2);
@@ -1541,13 +1546,13 @@ test "pointer casting" {
// To convert one pointer type to another, use @ptrCast. This is an unsafe
// operation that Zig cannot protect you against. Use @ptrCast only when other
// conversions are not possible.
- const bytes align(@alignOf(u32)) = []u8{0x12, 0x12, 0x12, 0x12};
+ const bytes align(@alignOf(u32)) = []u8{ 0x12, 0x12, 0x12, 0x12 };
const u32_ptr = @ptrCast(*const u32, &bytes[0]);
assert(u32_ptr.* == 0x12121212);
// Even this example is contrived - there are better ways to do the above than
// pointer casting. For example, using a slice narrowing cast:
- const u32_value = ([]const u32)(bytes[0..])[0];
+ const u32_value = @bytesToSlice(u32, bytes[0..])[0];
assert(u32_value == 0x12121212);
// And even another way, the most straightforward way to do it:
@@ -1630,13 +1635,13 @@ test "function alignment" {
const assert = @import("std").debug.assert;
test "pointer alignment safety" {
- var array align(4) = []u32{0x11111111, 0x11111111};
- const bytes = ([]u8)(array[0..]);
+ var array align(4) = []u32{ 0x11111111, 0x11111111 };
+ const bytes = @sliceToBytes(array[0..]);
assert(foo(bytes) == 0x11111111);
}
fn foo(bytes: []u8) u32 {
const slice4 = bytes[1..5];
- const int_slice = ([]u32)(@alignCast(4, slice4));
+ const int_slice = @bytesToSlice(u32, @alignCast(4, slice4));
return int_slice[0];
}
{#code_end#}
@@ -1728,8 +1733,8 @@ test "slice pointer" {
test "slice widening" {
// Zig supports slice widening and slice narrowing. Cast a slice of u8
// to a slice of anything else, and Zig will perform the length conversion.
- const array align(@alignOf(u32)) = []u8{0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13};
- const slice = ([]const u32)(array[0..]);
+ const array align(@alignOf(u32)) = []u8{ 0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13 };
+ const slice = @bytesToSlice(u32, array[0..]);
assert(slice.len == 2);
assert(slice[0] == 0x12121212);
assert(slice[1] == 0x13131313);
@@ -1901,9 +1906,9 @@ const Value = enum(u2) {
// Now you can cast between u2 and Value.
// The ordinal value starts from 0, counting up for each member.
test "enum ordinal value" {
- assert(u2(Value.Zero) == 0);
- assert(u2(Value.One) == 1);
- assert(u2(Value.Two) == 2);
+ assert(@enumToInt(Value.Zero) == 0);
+ assert(@enumToInt(Value.One) == 1);
+ assert(@enumToInt(Value.Two) == 2);
}
// You can override the ordinal value for an enum.
@@ -1913,9 +1918,9 @@ const Value2 = enum(u32) {
Million = 1000000,
};
test "set enum ordinal value" {
- assert(u32(Value2.Hundred) == 100);
- assert(u32(Value2.Thousand) == 1000);
- assert(u32(Value2.Million) == 1000000);
+ assert(@enumToInt(Value2.Hundred) == 100);
+ assert(@enumToInt(Value2.Thousand) == 1000);
+ assert(@enumToInt(Value2.Million) == 1000000);
}
// Enums can have methods, the same as structs and unions.
@@ -2356,11 +2361,18 @@ fn eventuallyErrorSequence() error!u32 {
break :blk numbers_left;
};
}
+ {#code_end#}
+
+ {#header_open|inline while#}
+
+      While loops can be inlined. This causes the loop to be unrolled, which
+      allows the code to do some things which only work at compile time,
+      such as using types as first-class values.
+
+ {#code_begin|test#}
+const assert = @import("std").debug.assert;
test "inline while loop" {
- // While loops can be inlined. This causes the loop to be unrolled, which
- // allows the code to do some things which only work at compile time,
- // such as use types as first class values.
comptime var i = 0;
var sum: usize = 0;
inline while (i < 3) : (i += 1) {
@@ -2379,6 +2391,16 @@ fn typeNameLength(comptime T: type) usize {
return @typeName(T).len;
}
{#code_end#}
+
+ It is recommended to use inline loops only for one of these reasons:
+
+
+      - You need the loop to execute at {#link|comptime#} for the semantics to work.
+      - You have a benchmark to prove that forcibly unrolling the loop in this way is measurably faster.
+
+
+ {#header_close#}
{#see_also|if|Optionals|Errors|comptime|unreachable#}
{#header_close#}
{#header_open|for#}
@@ -2446,15 +2468,20 @@ test "for else" {
break :blk sum;
};
}
-
+ {#code_end#}
+ {#header_open|inline for#}
+
+      For loops can be inlined. This causes the loop to be unrolled, which
+      allows the code to do some things which only work at compile time,
+      such as using types as first-class values.
+ The capture value and iterator value of inlined for loops are
+ compile-time known.
+
+ {#code_begin|test#}
+const assert = @import("std").debug.assert;
test "inline for loop" {
const nums = []i32{2, 4, 6};
- // For loops can be inlined. This causes the loop to be unrolled, which
- // allows the code to do some things which only work at compile time,
- // such as use types as first class values.
- // The capture value and iterator value of inlined for loops are
- // compile-time known.
var sum: usize = 0;
inline for (nums) |i| {
const T = switch (i) {
@@ -2472,6 +2499,16 @@ fn typeNameLength(comptime T: type) usize {
return @typeName(T).len;
}
{#code_end#}
+
+ It is recommended to use inline loops only for one of these reasons:
+
+
+      - You need the loop to execute at {#link|comptime#} for the semantics to work.
+      - You have a benchmark to prove that forcibly unrolling the loop in this way is measurably faster.
+
+
+ {#header_close#}
{#see_also|while|comptime|Arrays|Slices#}
{#header_close#}
{#header_open|if#}
@@ -3542,14 +3579,191 @@ const optional_value: ?i32 = null;
{#header_close#}
{#header_close#}
{#header_open|Casting#}
- TODO: explain implicit vs explicit casting
- TODO: resolve peer types builtin
- TODO: truncate builtin
- TODO: bitcast builtin
- TODO: int to ptr builtin
- TODO: ptr to int builtin
- TODO: ptrcast builtin
- TODO: explain number literals vs concrete types
+
+ A type cast converts a value of one type to another.
+ Zig has {#link|Implicit Casts#} for conversions that are known to be completely safe and unambiguous,
+      and {#link|Explicit Casts#} for conversions that one would not want to happen by accident.
+ There is also a third kind of type conversion called {#link|Peer Type Resolution#} for
+ the case when a result type must be decided given multiple operand types.
+
+ {#header_open|Implicit Casts#}
+
+      An implicit cast occurs when one type is expected, but a different type is provided:
+
+ {#code_begin|test#}
+test "implicit cast - variable declaration" {
+ var a: u8 = 1;
+ var b: u16 = a;
+}
+
+test "implicit cast - function call" {
+ var a: u8 = 1;
+ foo(a);
+}
+
+fn foo(b: u16) void {}
+
+test "implicit cast - invoke a type as a function" {
+ var a: u8 = 1;
+ var b = u16(a);
+}
+ {#code_end#}
+
+ Implicit casts are only allowed when it is completely unambiguous how to get from one type to another,
+ and the transformation is guaranteed to be safe.
+
+ {#header_open|Implicit Cast: Stricter Qualification#}
+
+ Values which have the same representation at runtime can be cast to increase the strictness
+ of the qualifiers, no matter how nested the qualifiers are:
+
+
+      - const - non-const to const is allowed
+      - volatile - non-volatile to volatile is allowed
+      - align - bigger to smaller alignment is allowed
+      - {#link|error sets|Error Set Type#} to supersets is allowed
+
+
+ These casts are no-ops at runtime since the value representation does not change.
+
+ {#code_begin|test#}
+test "implicit cast - const qualification" {
+ var a: i32 = 1;
+ var b: *i32 = &a;
+ foo(b);
+}
+
+fn foo(a: *const i32) void {}
+ {#code_end#}
+
+ In addition, pointers implicitly cast to const optional pointers:
+
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+const mem = std.mem;
+
+test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
+ const window_name = [1][*]const u8{c"window name"};
+ const x: [*]const ?[*]const u8 = &window_name;
+ assert(mem.eql(u8, std.cstr.toSliceConst(x[0].?), "window name"));
+}
+ {#code_end#}
+ {#header_close#}
+ {#header_open|Implicit Cast: Integer and Float Widening#}
+
+ {#link|Integers#} implicitly cast to integer types which can represent every value of the old type, and likewise
+ {#link|Floats#} implicitly cast to float types which can represent every value of the old type.
+
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+const mem = std.mem;
+
+test "integer widening" {
+ var a: u8 = 250;
+ var b: u16 = a;
+ var c: u32 = b;
+ var d: u64 = c;
+ var e: u64 = d;
+ var f: u128 = e;
+ assert(f == a);
+}
+
+test "implicit unsigned integer to signed integer" {
+ var a: u8 = 250;
+ var b: i16 = a;
+ assert(b == 250);
+}
+
+test "float widening" {
+ var a: f16 = 12.34;
+ var b: f32 = a;
+ var c: f64 = b;
+ var d: f128 = c;
+ assert(d == a);
+}
+ {#code_end#}
+ {#header_close#}
+ {#header_open|Implicit Cast: Arrays#}
+ TODO: [N]T to []const T
+ TODO: *const [N]T to []const T
+ TODO: [N]T to *const []const T
+ TODO: [N]T to ?[]const T
+ TODO: *[N]T to []T
+ TODO: *[N]T to [*]T
+ TODO: *T to *[1]T
+ TODO: [N]T to E![]const T
+ {#header_close#}
+ {#header_open|Implicit Cast: Optionals#}
+ TODO: T to ?T
+ TODO: T to E!?T
+ TODO: null to ?T
+ {#header_close#}
+ {#header_open|Implicit Cast: T to E!T#}
+ TODO
+ {#header_close#}
+ {#header_open|Implicit Cast: E to E!T#}
+ TODO
+ {#header_close#}
+ {#header_open|Implicit Cast: comptime_int to *const integer#}
+ TODO
+ {#header_close#}
+ {#header_open|Implicit Cast: comptime_float to *const float#}
+ TODO
+ {#header_close#}
+ {#header_open|Implicit Cast: compile-time known numbers#}
+ TODO
+ {#header_close#}
+ {#header_open|Implicit Cast: union to enum#}
+ TODO
+ {#header_close#}
+ {#header_open|Implicit Cast: enum to union#}
+ TODO
+ {#header_close#}
+ {#header_open|Implicit Cast: T to *T when @sizeOf(T) == 0#}
+ TODO
+ {#header_close#}
+ {#header_open|Implicit Cast: undefined#}
+ TODO
+ {#header_close#}
+ {#header_open|Implicit Cast: T to *const T#}
+ TODO
+ {#header_close#}
+ {#header_close#}
+
+ {#header_open|Explicit Casts#}
+
+ Explicit casts are performed via {#link|Builtin Functions#}.
+ Some explicit casts are safe; some are not.
+ Some explicit casts perform language-level assertions; some do not.
+ Some explicit casts are no-ops at runtime; some are not.
+
+
+ - {#link|@bitCast#} - change type but maintain bit representation
+ - {#link|@alignCast#} - make a pointer have more alignment
+ - {#link|@boolToInt#} - convert true to 1 and false to 0
+ - {#link|@bytesToSlice#} - convert a slice of bytes to a slice of another type
+ - {#link|@enumToInt#} - obtain the integer tag value of an enum or tagged union
+ - {#link|@errSetCast#} - convert to a smaller error set
+ - {#link|@errorToInt#} - obtain the integer value of an error code
+ - {#link|@floatCast#} - convert a larger float to a smaller float
+ - {#link|@floatToInt#} - obtain the integer part of a float value
+ - {#link|@intCast#} - convert between integer types
+ - {#link|@intToEnum#} - obtain an enum value based on its integer tag value
+ - {#link|@intToError#} - obtain an error code based on its integer value
+ - {#link|@intToFloat#} - convert an integer to a float value
+ - {#link|@intToPtr#} - convert an address to a pointer
+ - {#link|@ptrCast#} - convert between pointer types
+ - {#link|@ptrToInt#} - obtain the address of a pointer
+ - {#link|@sliceToBytes#} - convert a slice of anything to a slice of bytes
+ - {#link|@truncate#} - convert between integer types, chopping off bits
+
+ {#header_close#}
+
+ {#header_open|Peer Type Resolution#}
+ TODO
+ {#header_close#}
{#header_close#}
{#header_open|void#}
@@ -4223,13 +4437,8 @@ pub fn main() void {
task in userland. It does so without introducing another language on top of Zig, such as
a macro language or a preprocessor language. It's Zig all the way down.
- TODO: suggestion to not use inline unless necessary
{#header_close#}
- {#header_close#}
- {#header_open|inline#}
- TODO: inline while
- TODO: inline for
- TODO: suggestion to not use inline unless necessary
+ {#see_also|inline while|inline for#}
{#header_close#}
{#header_open|Assembly#}
TODO: example of inline assembly
@@ -4651,6 +4860,18 @@ comptime {
{#header_close#}
+ {#header_open|@bytesToSlice#}
+
+      @bytesToSlice(comptime Element: type, bytes: []u8) []Element
+
+ Converts a slice of bytes or array of bytes into a slice of Element.
+ The resulting slice has the same {#link|pointer|Pointers#} properties as the parameter.
+
+
+      Attempting to convert a byte sequence whose length does not evenly divide by the element size
+      results in safety-protected {#link|Undefined Behavior#}.
+
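+      A minimal sketch of typical usage, mirroring the slice widening example above:
+
+      {#code_begin|test#}
+const assert = @import("std").debug.assert;
+
+test "bytes to slice" {
+    const bytes align(@alignOf(u32)) = []u8{ 0x12, 0x12, 0x12, 0x12 };
+    const slice = @bytesToSlice(u32, bytes[0..]);
+    assert(slice.len == 1);
+    assert(slice[0] == 0x12121212);
+}
+      {#code_end#}
+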
+ {#header_close#}
+
{#header_open|@cDefine#}
@cDefine(comptime name: []u8, value)
@@ -4919,12 +5140,23 @@ test "main" {
{#see_also|@import#}
{#header_close#}
- {#header_open|@export#}
- @export(comptime name: []const u8, target: var, linkage: builtin.GlobalLinkage) []const u8
+
+ {#header_open|@enumToInt#}
+ @enumToInt(enum_value: var) var
- Creates a symbol in the output object file.
+ Converts an enumeration value into its integer tag type.
+
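+      A minimal sketch, using a hypothetical Direction enum:
+
+      {#code_begin|test#}
+const assert = @import("std").debug.assert;
+
+const Direction = enum {
+    North,
+    South,
+};
+
+test "enumToInt" {
+    // Ordinal values start at 0 by default.
+    assert(@enumToInt(Direction.South) == 1);
+}
+      {#code_end#}
+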
+ {#see_also|@intToEnum#}
+ {#header_close#}
+
+ {#header_open|@errSetCast#}
+      @errSetCast(comptime DestType: type, value: var) DestType
+
+ Converts an error value from one error set to another error set. Attempting to convert an error
+ which is not in the destination error set results in safety-protected {#link|Undefined Behavior#}.
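+
+      A minimal sketch, using hypothetical error sets Set1 and Set2:
+
+      {#code_begin|test#}
+const assert = @import("std").debug.assert;
+
+const Set1 = error{
+    A,
+    B,
+};
+const Set2 = error{A};
+
+test "errSetCast" {
+    const err: Set1 = Set1.A;
+    // Safe here because error.A is a member of the destination set Set2.
+    assert(@errSetCast(Set2, err) == Set2.A);
+}
+      {#code_end#}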
{#header_close#}
+
{#header_open|@errorName#}
@errorName(err: error) []u8
@@ -4941,6 +5173,7 @@ test "main" {
error name table will be generated.
{#header_close#}
+
{#header_open|@errorReturnTrace#}
@errorReturnTrace() ?*builtin.StackTrace
@@ -4949,6 +5182,33 @@ test "main" {
stack trace object. Otherwise returns `null`.
{#header_close#}
+
+ {#header_open|@errorToInt#}
+ @errorToInt(err: var) @IntType(false, @sizeOf(error) * 8)
+
+ Supports the following types:
+
+
+      - error unions
+      - E!void
+
+
+ Converts an error to the integer representation of an error.
+
+
+ It is generally recommended to avoid this
+ cast, as the integer representation of an error is not stable across source code changes.
+
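+      A minimal sketch showing the round trip with {#link|@intToError#}:
+
+      {#code_begin|test#}
+const assert = @import("std").debug.assert;
+
+test "error code round trip" {
+    const err = error.AnError;
+    const number = @errorToInt(err);
+    assert(@intToError(number) == err);
+}
+      {#code_end#}
+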
+ {#see_also|@intToError#}
+ {#header_close#}
+
+ {#header_open|@export#}
+ @export(comptime name: []const u8, target: var, linkage: builtin.GlobalLinkage) []const u8
+
+ Creates a symbol in the output object file.
+
+ {#header_close#}
+
{#header_open|@fence#}
@fence(order: AtomicOrder)
@@ -4985,8 +5245,12 @@ test "main" {
@floatToInt(comptime DestType: type, float: var) DestType
Converts the integer part of a floating point number to the destination type.
- To convert the other way, use {#link|@intToFloat#}. This cast is always safe.
+
+ If the integer part of the floating point number cannot fit in the destination type,
+      it invokes safety-protected {#link|Undefined Behavior#}.
+
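+      A minimal sketch:
+
+      {#code_begin|test#}
+const assert = @import("std").debug.assert;
+
+test "floatToInt" {
+    var f: f32 = 3.99;
+    // The fractional part is discarded; the value is not rounded.
+    assert(@floatToInt(i32, f) == 3);
+}
+      {#code_end#}
+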
+ {#see_also|@intToFloat#}
{#header_close#}
{#header_open|@frameAddress#}
@@ -5049,10 +5313,38 @@ fn add(a: i32, b: i32) i32 { return a + b; }
Converts an integer to another integer while keeping the same numerical value.
Attempting to convert a number which is out of range of the destination type results in
- {#link|Undefined Behavior#}.
+ safety-protected {#link|Undefined Behavior#}.
{#header_close#}
+ {#header_open|@intToEnum#}
+ @intToEnum(comptime DestType: type, int_value: @TagType(DestType)) DestType
+
+ Converts an integer into an {#link|enum#} value.
+
+
+ Attempting to convert an integer which represents no value in the chosen enum type invokes
+      safety-protected {#link|Undefined Behavior#}.
+
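+      A minimal sketch, using a hypothetical Number enum:
+
+      {#code_begin|test#}
+const assert = @import("std").debug.assert;
+
+const Number = enum(u2) {
+    Zero,
+    One,
+    Two,
+};
+
+test "intToEnum" {
+    assert(@intToEnum(Number, 1) == Number.One);
+}
+      {#code_end#}
+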
+ {#see_also|@enumToInt#}
+ {#header_close#}
+
+ {#header_open|@intToError#}
+ @intToError(value: @IntType(false, @sizeOf(error) * 8)) error
+
+ Converts from the integer representation of an error into the global error set type.
+
+
+ It is generally recommended to avoid this
+ cast, as the integer representation of an error is not stable across source code changes.
+
+
+ Attempting to convert an integer that does not correspond to any error results in
+ safety-protected {#link|Undefined Behavior#}.
+
+ {#see_also|@errorToInt#}
+ {#header_close#}
+
{#header_open|@intToFloat#}
@intToFloat(comptime DestType: type, int: var) DestType
@@ -5413,12 +5705,6 @@ pub const FloatMode = enum {
{#see_also|Compile Variables#}
{#header_close#}
- {#header_open|@setGlobalSection#}
- @setGlobalSection(global_variable_name, comptime section_name: []const u8) bool
-
- Puts the global variable in the specified section.
-
- {#header_close#}
{#header_open|@shlExact#}
@shlExact(value: T, shift_amt: Log2T) T
@@ -5456,8 +5742,9 @@ pub const FloatMode = enum {
{#see_also|@shlExact|@shlWithOverflow#}
{#header_close#}
+
{#header_open|@sizeOf#}
- @sizeOf(comptime T: type) (number literal)
+ @sizeOf(comptime T: type) comptime_int
This function returns the number of bytes it takes to store T in memory.
@@ -5465,6 +5752,15 @@ pub const FloatMode = enum {
The result is a target-specific compile time constant.
{#header_close#}
+
+ {#header_open|@sliceToBytes#}
+ @sliceToBytes(value: var) []u8
+
+ Converts a slice or array to a slice of u8. The resulting slice has the same
+ {#link|pointer|Pointers#} properties as the parameter.
+
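+      A minimal sketch, going in the inverse direction of {#link|@bytesToSlice#}:
+
+      {#code_begin|test#}
+const assert = @import("std").debug.assert;
+
+test "slice to bytes" {
+    var array = []u32{ 0x12121212, 0x13131313 };
+    const bytes = @sliceToBytes(array[0..]);
+    assert(bytes.len == 8);
+    // All four bytes of the first word are 0x12, regardless of endianness.
+    assert(bytes[0] == 0x12);
+}
+      {#code_end#}
+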
+ {#header_close#}
+
{#header_open|@sqrt#}
@sqrt(comptime T: type, value: T) T
@@ -5600,10 +5896,17 @@ pub const TypeInfo = union(TypeId) {
};
pub const Pointer = struct {
+ size: Size,
is_const: bool,
is_volatile: bool,
alignment: u32,
child: type,
+
+ pub const Size = enum {
+ One,
+ Many,
+ Slice,
+ };
};
pub const Array = struct {
@@ -5667,7 +5970,7 @@ pub const TypeInfo = union(TypeId) {
pub const Union = struct {
layout: ContainerLayout,
- tag_type: type,
+ tag_type: ?type,
fields: []UnionField,
defs: []Definition,
};
@@ -5684,20 +5987,20 @@ pub const TypeInfo = union(TypeId) {
pub const FnArg = struct {
is_generic: bool,
is_noalias: bool,
- arg_type: type,
+ arg_type: ?type,
};
pub const Fn = struct {
calling_convention: CallingConvention,
is_generic: bool,
is_var_args: bool,
- return_type: type,
- async_allocator_type: type,
+ return_type: ?type,
+ async_allocator_type: ?type,
args: []FnArg,
};
pub const Promise = struct {
- child: type,
+ child: ?type,
};
pub const Definition = struct {
@@ -5764,7 +6067,7 @@ pub const TypeInfo = union(TypeId) {
{#code_begin|syntax#}
const Builder = @import("std").build.Builder;
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
const exe = b.addExecutable("example", "example.zig");
exe.setBuildMode(b.standardReleaseOptions());
b.default_step.dependOn(&exe.step);
@@ -5817,10 +6120,10 @@ pub fn build(b: &Builder) void {
{#header_open|Undefined Behavior#}
Zig has many instances of undefined behavior. If undefined behavior is
- detected at compile-time, Zig emits an error. Most undefined behavior that
- cannot be detected at compile-time can be detected at runtime. In these cases,
- Zig has safety checks. Safety checks can be disabled on a per-block basis
- with {#link|setRuntimeSafety#}. The {#link|ReleaseFast#}
+ detected at compile-time, Zig emits a compile error and refuses to continue.
+ Most undefined behavior that cannot be detected at compile-time can be detected
+ at runtime. In these cases, Zig has safety checks. Safety checks can be disabled
+ on a per-block basis with {#link|setRuntimeSafety#}. The {#link|ReleaseFast#}
build mode disables all safety checks in order to facilitate optimizations.
@@ -5841,7 +6144,14 @@ fn assert(ok: bool) void {
if (!ok) unreachable; // assertion failure
}
{#code_end#}
-
-      At runtime crashes with the message reached unreachable code and a stack trace.
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+ std.debug.assert(false);
+}
+ {#code_end#}
{#header_close#}
{#header_open|Index out of Bounds#}
At compile-time:
@@ -5851,7 +6161,16 @@ comptime {
const garbage = array[5];
}
{#code_end#}
- At runtime crashes with the message index out of bounds and a stack trace.
+ At runtime:
+ {#code_begin|exe_err#}
+pub fn main() void {
+ var x = foo("hello");
+}
+
+fn foo(x: []const u8) u8 {
+ return x[5];
+}
+ {#code_end#}
{#header_close#}
{#header_open|Cast Negative Number to Unsigned Integer#}
At compile-time:
@@ -5861,10 +6180,18 @@ comptime {
const unsigned = @intCast(u32, value);
}
{#code_end#}
- At runtime crashes with the message attempt to cast negative value to unsigned integer and a stack trace.
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+ var value: i32 = -1;
+ var unsigned = @intCast(u32, value);
+ std.debug.warn("value: {}\n", unsigned);
+}
+ {#code_end#}
- If you are trying to obtain the maximum value of an unsigned integer, use @maxValue(T),
- where T is the integer type, such as u32.
+ To obtain the maximum value of an unsigned integer, use {#link|@maxValue#}.
{#header_close#}
{#header_open|Cast Truncates Data#}
@@ -5875,11 +6202,18 @@ comptime {
const byte = @intCast(u8, spartan_count);
}
{#code_end#}
- At runtime crashes with the message integer cast truncated bits and a stack trace.
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+ var spartan_count: u16 = 300;
+ const byte = @intCast(u8, spartan_count);
+ std.debug.warn("value: {}\n", byte);
+}
+ {#code_end#}
- If you are trying to truncate bits, use @truncate(T, value),
- where T is the integer type, such as u32, and value
- is the value you want to truncate.
+ To truncate bits, use {#link|@truncate#}.
{#header_close#}
{#header_open|Integer Overflow#}
@@ -5891,9 +6225,9 @@ comptime {
- (negation)
* (multiplication)
/ (division)
- @divTrunc (division)
- @divFloor (division)
- @divExact (division)
+ {#link|@divTrunc#} (division)
+ {#link|@divFloor#} (division)
+ {#link|@divExact#} (division)
Example with addition at compile-time:
{#code_begin|test_err|operation caused overflow#}
@@ -5902,7 +6236,16 @@ comptime {
byte += 1;
}
{#code_end#}
- At runtime crashes with the message integer overflow and a stack trace.
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+ var byte: u8 = 255;
+ byte += 1;
+ std.debug.warn("value: {}\n", byte);
+}
+ {#code_end#}
{#header_close#}
{#header_open|Standard Library Math Functions#}
These functions provided by the standard library return possible errors.
@@ -5937,13 +6280,13 @@ pub fn main() !void {
occurred, as well as returning the overflowed bits:
- @addWithOverflow
- @subWithOverflow
- @mulWithOverflow
- @shlWithOverflow
+ - {#link|@addWithOverflow#}
+ - {#link|@subWithOverflow#}
+ - {#link|@mulWithOverflow#}
+ - {#link|@shlWithOverflow#}
- Example of @addWithOverflow:
+ Example of {#link|@addWithOverflow#}:
{#code_begin|exe#}
const warn = @import("std").debug.warn;
@@ -5989,7 +6332,16 @@ comptime {
const x = @shlExact(u8(0b01010101), 2);
}
{#code_end#}
- At runtime crashes with the message left shift overflowed bits and a stack trace.
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+ var x: u8 = 0b01010101;
+ var y = @shlExact(x, 2);
+ std.debug.warn("value: {}\n", y);
+}
+ {#code_end#}
{#header_close#}
{#header_open|Exact Right Shift Overflow#}
At compile-time:
@@ -5998,7 +6350,16 @@ comptime {
const x = @shrExact(u8(0b10101010), 2);
}
{#code_end#}
- At runtime crashes with the message right shift overflowed bits and a stack trace.
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+ var x: u8 = 0b10101010;
+ var y = @shrExact(x, 2);
+ std.debug.warn("value: {}\n", y);
+}
+ {#code_end#}
{#header_close#}
{#header_open|Division by Zero#}
At compile-time:
@@ -6009,8 +6370,17 @@ comptime {
const c = a / b;
}
{#code_end#}
- At runtime crashes with the message division by zero and a stack trace.
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+pub fn main() void {
+ var a: u32 = 1;
+ var b: u32 = 0;
+ var c = a / b;
+ std.debug.warn("value: {}\n", c);
+}
+ {#code_end#}
{#header_close#}
{#header_open|Remainder Division by Zero#}
At compile-time:
@@ -6021,14 +6391,57 @@ comptime {
const c = a % b;
}
{#code_end#}
- At runtime crashes with the message remainder division by zero and a stack trace.
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+pub fn main() void {
+ var a: u32 = 10;
+ var b: u32 = 0;
+ var c = a % b;
+ std.debug.warn("value: {}\n", c);
+}
+ {#code_end#}
{#header_close#}
{#header_open|Exact Division Remainder#}
- TODO
+ At compile-time:
+ {#code_begin|test_err|exact division had a remainder#}
+comptime {
+ const a: u32 = 10;
+ const b: u32 = 3;
+ const c = @divExact(a, b);
+}
+ {#code_end#}
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+ var a: u32 = 10;
+ var b: u32 = 3;
+ var c = @divExact(a, b);
+ std.debug.warn("value: {}\n", c);
+}
+ {#code_end#}
{#header_close#}
{#header_open|Slice Widen Remainder#}
- TODO
+ At compile-time:
+ {#code_begin|test_err|unable to convert#}
+comptime {
+ var bytes = [5]u8{ 1, 2, 3, 4, 5 };
+ var slice = @bytesToSlice(u32, bytes);
+}
+ {#code_end#}
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+ var bytes = [5]u8{ 1, 2, 3, 4, 5 };
+ var slice = @bytesToSlice(u32, bytes[0..]);
+ std.debug.warn("value: {}\n", slice[0]);
+}
+ {#code_end#}
{#header_close#}
{#header_open|Attempt to Unwrap Null#}
At compile-time:
@@ -6038,7 +6451,16 @@ comptime {
const number = optional_number.?;
}
{#code_end#}
- At runtime crashes with the message attempt to unwrap null and a stack trace.
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+ var optional_number: ?i32 = null;
+ var number = optional_number.?;
+ std.debug.warn("value: {}\n", number);
+}
+ {#code_end#}
One way to avoid this crash is to test for null instead of assuming non-null, with
the if expression:
{#code_begin|exe|test#}
@@ -6053,6 +6475,7 @@ pub fn main() void {
}
}
{#code_end#}
+ {#see_also|Optionals#}
{#header_close#}
{#header_open|Attempt to Unwrap Error#}
At compile-time:
@@ -6065,7 +6488,19 @@ fn getNumberOrFail() !i32 {
return error.UnableToReturnNumber;
}
{#code_end#}
- At runtime crashes with the message attempt to unwrap error: ErrorCode and a stack trace.
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+ const number = getNumberOrFail() catch unreachable;
+ std.debug.warn("value: {}\n", number);
+}
+
+fn getNumberOrFail() !i32 {
+ return error.UnableToReturnNumber;
+}
+ {#code_end#}
One way to avoid this crash is to test for an error instead of assuming a successful result, with
the if expression:
{#code_begin|exe#}
@@ -6085,30 +6520,76 @@ fn getNumberOrFail() !i32 {
return error.UnableToReturnNumber;
}
{#code_end#}
+ {#see_also|Errors#}
{#header_close#}
{#header_open|Invalid Error Code#}
At compile-time:
{#code_begin|test_err|integer value 11 represents no error#}
comptime {
const err = error.AnError;
- const number = u32(err) + 10;
- const invalid_err = error(number);
+ const number = @errorToInt(err) + 10;
+ const invalid_err = @intToError(number);
+}
+ {#code_end#}
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+
+pub fn main() void {
+ var err = error.AnError;
+ var number = @errorToInt(err) + 500;
+ var invalid_err = @intToError(number);
+ std.debug.warn("value: {}\n", number);
}
{#code_end#}
- At runtime crashes with the message invalid error code and a stack trace.
{#header_close#}
{#header_open|Invalid Enum Cast#}
- TODO
+ At compile-time:
+ {#code_begin|test_err|has no tag matching integer value 3#}
+const Foo = enum {
+ A,
+ B,
+ C,
+};
+comptime {
+ const a: u2 = 3;
+ const b = @intToEnum(Foo, a);
+}
+ {#code_end#}
+ At runtime:
+ {#code_begin|exe_err#}
+const std = @import("std");
+const Foo = enum {
+ A,
+ B,
+ C,
+};
+
+pub fn main() void {
+ var a: u2 = 3;
+ var b = @intToEnum(Foo, a);
+ std.debug.warn("value: {}\n", @tagName(b));
+}
+ {#code_end#}
{#header_close#}
+
+ {#header_open|Invalid Error Set Cast#}
+ TODO
+ {#header_close#}
+
{#header_open|Incorrect Pointer Alignment#}
TODO
{#header_close#}
{#header_open|Wrong Union Field Access#}
TODO
-
{#header_close#}
+
+ {#header_open|Out of Bounds Float To Integer Cast#}
+ TODO
+ {#header_close#}
+
{#header_close#}
{#header_open|Memory#}
TODO: explain no default allocator in zig
@@ -6793,7 +7274,7 @@ hljs.registerLanguage("zig", function(t) {
a = t.IR + "\\s*\\(",
c = {
keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong resume cancel await async orelse",
- built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast intCast floatCast intToFloat floatToInt boolToInt bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall",
+ built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast intCast floatCast intToFloat floatToInt boolToInt bytesToSlice sliceToBytes errSetCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall errorToInt intToError enumToInt intToEnum",
literal: "true false null undefined"
},
n = [e, t.CLCM, t.CBCM, s, r];
diff --git a/src-self-hosted/arg.zig b/src-self-hosted/arg.zig
index dc89483213..2ab44e5fdf 100644
--- a/src-self-hosted/arg.zig
+++ b/src-self-hosted/arg.zig
@@ -168,7 +168,7 @@ pub const Args = struct {
}
// e.g. --names value1 value2 value3
- pub fn many(self: *Args, name: []const u8) ?[]const []const u8 {
+ pub fn many(self: *Args, name: []const u8) []const []const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
FlagArg.Many => |inner| {
@@ -177,7 +177,7 @@ pub const Args = struct {
else => @panic("attempted to retrieve flag with wrong type"),
}
} else {
- return null;
+ return []const []const u8{};
}
}
};
diff --git a/src-self-hosted/errmsg.zig b/src-self-hosted/errmsg.zig
index 32d2450aac..b6fd78d8f6 100644
--- a/src-self-hosted/errmsg.zig
+++ b/src-self-hosted/errmsg.zig
@@ -35,7 +35,7 @@ pub fn createFromParseError(
var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
try parse_error.render(&tree.tokens, out_stream);
- const msg = try allocator.construct(Msg{
+ const msg = try allocator.create(Msg{
.tree = tree,
.path = path,
.text = text_buf.toOwnedSlice(),
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index f7f38130b5..d17fc94c82 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -1,6 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
+const event = std.event;
const os = std.os;
const io = std.io;
const mem = std.mem;
@@ -26,15 +27,11 @@ const usage =
\\
\\Commands:
\\
- \\ build Build project from build.zig
\\ build-exe [source] Create executable from source or object files
\\ build-lib [source] Create library from source or object files
\\ build-obj [source] Create object from source or assembly
\\ fmt [source] Parse file and render in canonical zig format
- \\ run [source] Create executable and run immediately
\\ targets List available compilation targets
- \\ test [source] Create and run a test build
- \\ translate-c [source] Convert c code to zig code
\\ version Print version number and exit
\\ zen Print zen of zig and exit
\\
@@ -47,7 +44,10 @@ const Command = struct {
};
pub fn main() !void {
- var allocator = std.heap.c_allocator;
+ // This allocator needs to be thread-safe because we use it for the event.Loop
+ // which multiplexes coroutines onto kernel threads.
+ // libc allocator is guaranteed to have this property.
+ const allocator = std.heap.c_allocator;
var stdout_file = try std.io.getStdOut();
var stdout_out_stream = std.io.FileOutStream.init(&stdout_file);
@@ -58,18 +58,16 @@ pub fn main() !void {
stderr = &stderr_out_stream.stream;
const args = try os.argsAlloc(allocator);
- defer os.argsFree(allocator, args);
+ // TODO I'm getting unreachable code here, which shouldn't happen
+ //defer os.argsFree(allocator, args);
if (args.len <= 1) {
+ try stderr.write("expected command argument\n\n");
try stderr.write(usage);
os.exit(1);
}
const commands = []Command{
- Command{
- .name = "build",
- .exec = cmdBuild,
- },
Command{
.name = "build-exe",
.exec = cmdBuildExe,
@@ -86,22 +84,10 @@ pub fn main() !void {
.name = "fmt",
.exec = cmdFmt,
},
- Command{
- .name = "run",
- .exec = cmdRun,
- },
Command{
.name = "targets",
.exec = cmdTargets,
},
- Command{
- .name = "test",
- .exec = cmdTest,
- },
- Command{
- .name = "translate-c",
- .exec = cmdTranslateC,
- },
Command{
.name = "version",
.exec = cmdVersion,
@@ -124,177 +110,15 @@ pub fn main() !void {
for (commands) |command| {
if (mem.eql(u8, command.name, args[1])) {
- try command.exec(allocator, args[2..]);
- return;
+ return command.exec(allocator, args[2..]);
}
}
try stderr.print("unknown command: {}\n\n", args[1]);
try stderr.write(usage);
+ os.exit(1);
}
-// cmd:build ///////////////////////////////////////////////////////////////////////////////////////
-
-const usage_build =
- \\usage: zig build
- \\
- \\General Options:
- \\ --help Print this help and exit
- \\ --init Generate a build.zig template
- \\ --build-file [file] Override path to build.zig
- \\ --cache-dir [path] Override path to cache directory
- \\ --verbose Print commands before executing them
- \\ --prefix [path] Override default install prefix
- \\
- \\Project-Specific Options:
- \\
- \\ Project-specific options become available when the build file is found.
- \\
- \\Advanced Options:
- \\ --build-file [file] Override path to build.zig
- \\ --cache-dir [path] Override path to cache directory
- \\ --verbose-tokenize Enable compiler debug output for tokenization
- \\ --verbose-ast Enable compiler debug output for parsing into an AST
- \\ --verbose-link Enable compiler debug output for linking
- \\ --verbose-ir Enable compiler debug output for Zig IR
- \\ --verbose-llvm-ir Enable compiler debug output for LLVM IR
- \\ --verbose-cimport Enable compiler debug output for C imports
- \\
- \\
-;
-
-const args_build_spec = []Flag{
- Flag.Bool("--help"),
- Flag.Bool("--init"),
- Flag.Arg1("--build-file"),
- Flag.Arg1("--cache-dir"),
- Flag.Bool("--verbose"),
- Flag.Arg1("--prefix"),
-
- Flag.Arg1("--build-file"),
- Flag.Arg1("--cache-dir"),
- Flag.Bool("--verbose-tokenize"),
- Flag.Bool("--verbose-ast"),
- Flag.Bool("--verbose-link"),
- Flag.Bool("--verbose-ir"),
- Flag.Bool("--verbose-llvm-ir"),
- Flag.Bool("--verbose-cimport"),
-};
-
-const missing_build_file =
- \\No 'build.zig' file found.
- \\
- \\Initialize a 'build.zig' template file with `zig build --init`,
- \\or build an executable directly with `zig build-exe $FILENAME.zig`.
- \\
- \\See: `zig build --help` or `zig help` for more options.
- \\
-;
-
-fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void {
- var flags = try Args.parse(allocator, args_build_spec, args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_build);
- os.exit(0);
- }
-
- const zig_lib_dir = try introspect.resolveZigLibDir(allocator);
- defer allocator.free(zig_lib_dir);
-
- const zig_std_dir = try os.path.join(allocator, zig_lib_dir, "std");
- defer allocator.free(zig_std_dir);
-
- const special_dir = try os.path.join(allocator, zig_std_dir, "special");
- defer allocator.free(special_dir);
-
- const build_runner_path = try os.path.join(allocator, special_dir, "build_runner.zig");
- defer allocator.free(build_runner_path);
-
- const build_file = flags.single("build-file") orelse "build.zig";
- const build_file_abs = try os.path.resolve(allocator, ".", build_file);
- defer allocator.free(build_file_abs);
-
- const build_file_exists = os.File.access(allocator, build_file_abs, os.default_file_mode) catch false;
-
- if (flags.present("init")) {
- if (build_file_exists) {
- try stderr.print("build.zig already exists\n");
- os.exit(1);
- }
-
- // need a new scope for proper defer scope finalization on exit
- {
- const build_template_path = try os.path.join(allocator, special_dir, "build_file_template.zig");
- defer allocator.free(build_template_path);
-
- try os.copyFile(allocator, build_template_path, build_file_abs);
- try stderr.print("wrote build.zig template\n");
- }
-
- os.exit(0);
- }
-
- if (!build_file_exists) {
- try stderr.write(missing_build_file);
- os.exit(1);
- }
-
- // TODO: Invoke build.zig entrypoint directly?
- var zig_exe_path = try os.selfExePath(allocator);
- defer allocator.free(zig_exe_path);
-
- var build_args = ArrayList([]const u8).init(allocator);
- defer build_args.deinit();
-
- const build_file_basename = os.path.basename(build_file_abs);
- const build_file_dirname = os.path.dirname(build_file_abs) orelse ".";
-
- var full_cache_dir: []u8 = undefined;
- if (flags.single("cache-dir")) |cache_dir| {
- full_cache_dir = try os.path.resolve(allocator, ".", cache_dir, full_cache_dir);
- } else {
- full_cache_dir = try os.path.join(allocator, build_file_dirname, "zig-cache");
- }
- defer allocator.free(full_cache_dir);
-
- const path_to_build_exe = try os.path.join(allocator, full_cache_dir, "build");
- defer allocator.free(path_to_build_exe);
-
- try build_args.append(path_to_build_exe);
- try build_args.append(zig_exe_path);
- try build_args.append(build_file_dirname);
- try build_args.append(full_cache_dir);
-
- var proc = try os.ChildProcess.init(build_args.toSliceConst(), allocator);
- defer proc.deinit();
-
- var term = try proc.spawnAndWait();
- switch (term) {
- os.ChildProcess.Term.Exited => |status| {
- if (status != 0) {
- try stderr.print("{} exited with status {}\n", build_args.at(0), status);
- os.exit(1);
- }
- },
- os.ChildProcess.Term.Signal => |signal| {
- try stderr.print("{} killed by signal {}\n", build_args.at(0), signal);
- os.exit(1);
- },
- os.ChildProcess.Term.Stopped => |signal| {
- try stderr.print("{} stopped by signal {}\n", build_args.at(0), signal);
- os.exit(1);
- },
- os.ChildProcess.Term.Unknown => |status| {
- try stderr.print("{} encountered unknown failure {}\n", build_args.at(0), status);
- os.exit(1);
- },
- }
-}
-
-// cmd:build-exe ///////////////////////////////////////////////////////////////////////////////////
-
const usage_build_generic =
\\usage: zig build-exe [file]
\\ zig build-lib [file]
@@ -315,8 +139,11 @@ const usage_build_generic =
\\ --output-h [file] Override generated header file path
\\ --pkg-begin [name] [path] Make package available to import and push current pkg
\\ --pkg-end Pop current pkg
- \\ --release-fast Build with optimizations on and safety off
- \\ --release-safe Build with optimizations on and safety on
+ \\ --mode [mode] Set the build mode
+ \\ debug (default) optimizations off, safety on
+ \\ release-fast optimizations on, safety off
+ \\ release-safe optimizations on, safety on
+ \\ release-small optimize for small binary, safety off
\\ --static Output will be statically linked
\\ --strip Exclude debug symbols
\\ --target-arch [name] Specify target architecture
@@ -367,6 +194,12 @@ const args_build_generic = []Flag{
"off",
"on",
}),
+ Flag.Option("--mode", []const []const u8{
+ "debug",
+ "release-fast",
+ "release-safe",
+ "release-small",
+ }),
Flag.ArgMergeN("--assembly", 1),
Flag.Arg1("--cache-dir"),
@@ -383,8 +216,6 @@ const args_build_generic = []Flag{
// NOTE: Parsed manually after initial check
Flag.ArgN("--pkg-begin", 2),
Flag.Bool("--pkg-end"),
- Flag.Bool("--release-fast"),
- Flag.Bool("--release-safe"),
Flag.Bool("--static"),
Flag.Bool("--strip"),
Flag.Arg1("--target-arch"),
@@ -431,16 +262,25 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
defer flags.deinit();
if (flags.present("help")) {
- try stderr.write(usage_build_generic);
+ try stdout.write(usage_build_generic);
os.exit(0);
}
- var build_mode = builtin.Mode.Debug;
- if (flags.present("release-fast")) {
- build_mode = builtin.Mode.ReleaseFast;
- } else if (flags.present("release-safe")) {
- build_mode = builtin.Mode.ReleaseSafe;
- }
+ const build_mode = blk: {
+ if (flags.single("mode")) |mode_flag| {
+ if (mem.eql(u8, mode_flag, "debug")) {
+ break :blk builtin.Mode.Debug;
+ } else if (mem.eql(u8, mode_flag, "release-fast")) {
+ break :blk builtin.Mode.ReleaseFast;
+ } else if (mem.eql(u8, mode_flag, "release-safe")) {
+ break :blk builtin.Mode.ReleaseSafe;
+ } else if (mem.eql(u8, mode_flag, "release-small")) {
+ break :blk builtin.Mode.ReleaseSmall;
+ } else unreachable;
+ } else {
+ break :blk builtin.Mode.Debug;
+ }
+ };
const color = blk: {
if (flags.single("color")) |color_flag| {
@@ -456,20 +296,21 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
}
};
- var emit_type = Module.Emit.Binary;
- if (flags.single("emit")) |emit_flag| {
- if (mem.eql(u8, emit_flag, "asm")) {
- emit_type = Module.Emit.Assembly;
- } else if (mem.eql(u8, emit_flag, "bin")) {
- emit_type = Module.Emit.Binary;
- } else if (mem.eql(u8, emit_flag, "llvm-ir")) {
- emit_type = Module.Emit.LlvmIr;
+ const emit_type = blk: {
+ if (flags.single("emit")) |emit_flag| {
+ if (mem.eql(u8, emit_flag, "asm")) {
+ break :blk Module.Emit.Assembly;
+ } else if (mem.eql(u8, emit_flag, "bin")) {
+ break :blk Module.Emit.Binary;
+ } else if (mem.eql(u8, emit_flag, "llvm-ir")) {
+ break :blk Module.Emit.LlvmIr;
+ } else unreachable;
} else {
- unreachable;
+ break :blk Module.Emit.Binary;
}
- }
+ };
- var cur_pkg = try Module.CliPkg.init(allocator, "", "", null); // TODO: Need a path, name?
+ var cur_pkg = try CliPkg.init(allocator, "", "", null);
defer cur_pkg.deinit();
var i: usize = 0;
@@ -482,15 +323,16 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
i += 1;
const new_pkg_path = args[i];
- var new_cur_pkg = try Module.CliPkg.init(allocator, new_pkg_name, new_pkg_path, cur_pkg);
+ var new_cur_pkg = try CliPkg.init(allocator, new_pkg_name, new_pkg_path, cur_pkg);
try cur_pkg.children.append(new_cur_pkg);
cur_pkg = new_cur_pkg;
} else if (mem.eql(u8, "--pkg-end", arg_name)) {
- if (cur_pkg.parent == null) {
+ if (cur_pkg.parent) |parent| {
+ cur_pkg = parent;
+ } else {
try stderr.print("encountered --pkg-end with no matching --pkg-begin\n");
os.exit(1);
}
- cur_pkg = cur_pkg.parent.?;
}
}
@@ -499,43 +341,42 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
os.exit(1);
}
- var in_file: ?[]const u8 = undefined;
- switch (flags.positionals.len) {
- 0 => {
- try stderr.write("--name [name] not provided and unable to infer\n");
- os.exit(1);
- },
- 1 => {
- in_file = flags.positionals.at(0);
- },
+ const provided_name = flags.single("name");
+ const root_source_file = switch (flags.positionals.len) {
+ 0 => null,
+ 1 => flags.positionals.at(0),
else => {
- try stderr.write("only one zig input file is accepted during build\n");
+ try stderr.print("unexpected extra parameter: {}\n", flags.positionals.at(1));
os.exit(1);
},
- }
-
- const basename = os.path.basename(in_file.?);
- var it = mem.split(basename, ".");
- const root_name = it.next() orelse {
- try stderr.write("file name cannot be empty\n");
- os.exit(1);
};
- const asm_a = flags.many("assembly");
- const obj_a = flags.many("object");
- if (in_file == null and (obj_a == null or obj_a.?.len == 0) and (asm_a == null or asm_a.?.len == 0)) {
+ const root_name = if (provided_name) |n| n else blk: {
+ if (root_source_file) |file| {
+ const basename = os.path.basename(file);
+ var it = mem.split(basename, ".");
+ break :blk it.next() orelse basename;
+ } else {
+ try stderr.write("--name [name] not provided and unable to infer\n");
+ os.exit(1);
+ }
+ };
+
+ const assembly_files = flags.many("assembly");
+ const link_objects = flags.many("object");
+ if (root_source_file == null and link_objects.len == 0 and assembly_files.len == 0) {
try stderr.write("Expected source file argument or at least one --object or --assembly argument\n");
os.exit(1);
}
- if (out_type == Module.Kind.Obj and (obj_a != null and obj_a.?.len != 0)) {
+ if (out_type == Module.Kind.Obj and link_objects.len != 0) {
try stderr.write("When building an object file, --object arguments are invalid\n");
os.exit(1);
}
- const zig_root_source_file = in_file;
-
- const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") orelse "zig-cache"[0..]) catch {
+ const rel_cache_dir = flags.single("cache-dir") orelse "zig-cache"[0..];
+ const full_cache_dir = os.path.resolve(allocator, ".", rel_cache_dir) catch {
+ try stderr.print("invalid cache dir: {}\n", rel_cache_dir);
os.exit(1);
};
defer allocator.free(full_cache_dir);
@@ -543,10 +384,12 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch os.exit(1);
defer allocator.free(zig_lib_dir);
+ var loop = try event.Loop.init(allocator);
+
var module = try Module.create(
- allocator,
+ &loop,
root_name,
- zig_root_source_file,
+ root_source_file,
Target.Native,
out_type,
build_mode,
@@ -561,24 +404,21 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
module.is_test = false;
- if (flags.single("linker-script")) |linker_script| {
- module.linker_script = linker_script;
- }
-
+ module.linker_script = flags.single("linker-script");
module.each_lib_rpath = flags.present("each-lib-rpath");
var clang_argv_buf = ArrayList([]const u8).init(allocator);
defer clang_argv_buf.deinit();
- if (flags.many("mllvm")) |mllvm_flags| {
- for (mllvm_flags) |mllvm| {
- try clang_argv_buf.append("-mllvm");
- try clang_argv_buf.append(mllvm);
- }
- module.llvm_argv = mllvm_flags;
- module.clang_argv = clang_argv_buf.toSliceConst();
+ const mllvm_flags = flags.many("mllvm");
+ for (mllvm_flags) |mllvm| {
+ try clang_argv_buf.append("-mllvm");
+ try clang_argv_buf.append(mllvm);
}
+ module.llvm_argv = mllvm_flags;
+ module.clang_argv = clang_argv_buf.toSliceConst();
+
module.strip = flags.present("strip");
module.is_static = flags.present("static");
@@ -610,18 +450,9 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
module.verbose_cimport = flags.present("verbose-cimport");
module.err_color = color;
-
- if (flags.many("library-path")) |lib_dirs| {
- module.lib_dirs = lib_dirs;
- }
-
- if (flags.many("framework")) |frameworks| {
- module.darwin_frameworks = frameworks;
- }
-
- if (flags.many("rpath")) |rpath_list| {
- module.rpath_list = rpath_list;
- }
+ module.lib_dirs = flags.many("library-path");
+ module.darwin_frameworks = flags.many("framework");
+ module.rpath_list = flags.many("rpath");
if (flags.single("output-h")) |output_h| {
module.out_h_path = output_h;
@@ -644,41 +475,51 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
}
module.emit_file_type = emit_type;
- if (flags.many("object")) |objects| {
- module.link_objects = objects;
- }
- if (flags.many("assembly")) |assembly_files| {
- module.assembly_files = assembly_files;
- }
+ module.link_objects = link_objects;
+ module.assembly_files = assembly_files;
+ module.link_out_file = flags.single("out-file");
try module.build();
- try module.link(flags.single("out-file") orelse null);
+ const process_build_events_handle = try async processBuildEvents(module, true);
+ defer cancel process_build_events_handle;
+ loop.run();
+}
- if (flags.present("print-timing-info")) {
- // codegen_print_timing_info(g, stderr);
+async fn processBuildEvents(module: *Module, watch: bool) void {
+ while (watch) {
+ // TODO directly awaiting async should guarantee memory allocation elision
+ const build_event = await (async module.events.get() catch unreachable);
+
+ switch (build_event) {
+ Module.Event.Ok => {
+ std.debug.warn("Build succeeded\n");
+ // for now we stop after 1
+ module.loop.stop();
+ return;
+ },
+ Module.Event.Error => |err| {
+ std.debug.warn("build failed: {}\n", @errorName(err));
+ @panic("TODO error return trace");
+ },
+ Module.Event.Fail => |errs| {
+ @panic("TODO print compile error messages");
+ },
+ }
}
-
- try stderr.print("building {}: {}\n", @tagName(out_type), in_file);
}
fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
- try buildOutputType(allocator, args, Module.Kind.Exe);
+ return buildOutputType(allocator, args, Module.Kind.Exe);
}
-// cmd:build-lib ///////////////////////////////////////////////////////////////////////////////////
-
fn cmdBuildLib(allocator: *Allocator, args: []const []const u8) !void {
- try buildOutputType(allocator, args, Module.Kind.Lib);
+ return buildOutputType(allocator, args, Module.Kind.Lib);
}
-// cmd:build-obj ///////////////////////////////////////////////////////////////////////////////////
-
fn cmdBuildObj(allocator: *Allocator, args: []const []const u8) !void {
- try buildOutputType(allocator, args, Module.Kind.Obj);
+ return buildOutputType(allocator, args, Module.Kind.Obj);
}
-// cmd:fmt /////////////////////////////////////////////////////////////////////////////////////////
-
const usage_fmt =
\\usage: zig fmt [file]...
\\
@@ -707,7 +548,7 @@ const Fmt = struct {
// file_path must outlive Fmt
fn addToQueue(self: *Fmt, file_path: []const u8) !void {
- const new_node = try self.seen.allocator.construct(std.LinkedList([]const u8).Node{
+ const new_node = try self.seen.allocator.create(std.LinkedList([]const u8).Node{
.prev = undefined,
.next = undefined,
.data = file_path,
@@ -735,7 +576,7 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
defer flags.deinit();
if (flags.present("help")) {
- try stderr.write(usage_fmt);
+ try stdout.write(usage_fmt);
os.exit(0);
}
@@ -863,162 +704,16 @@ fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {
}
}
-// cmd:version /////////////////////////////////////////////////////////////////////////////////////
-
fn cmdVersion(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print("{}\n", std.cstr.toSliceConst(c.ZIG_VERSION_STRING));
}
-// cmd:test ////////////////////////////////////////////////////////////////////////////////////////
-
-const usage_test =
- \\usage: zig test [file]...
- \\
- \\Options:
- \\ --help Print this help and exit
- \\
- \\
-;
-
const args_test_spec = []Flag{Flag.Bool("--help")};
-fn cmdTest(allocator: *Allocator, args: []const []const u8) !void {
- var flags = try Args.parse(allocator, args_build_spec, args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_test);
- os.exit(0);
- }
-
- if (flags.positionals.len != 1) {
- try stderr.write("expected exactly one zig source file\n");
- os.exit(1);
- }
-
- // compile the test program into the cache and run
-
- // NOTE: May be overlap with buildOutput, take the shared part out.
- try stderr.print("testing file {}\n", flags.positionals.at(0));
-}
-
-// cmd:run /////////////////////////////////////////////////////////////////////////////////////////
-
-// Run should be simple and not expose the full set of arguments provided by build-exe. If specific
-// build requirements are need, the user should `build-exe` then `run` manually.
-const usage_run =
- \\usage: zig run [file] --
- \\
- \\Options:
- \\ --help Print this help and exit
- \\
- \\
-;
-
-const args_run_spec = []Flag{Flag.Bool("--help")};
-
-fn cmdRun(allocator: *Allocator, args: []const []const u8) !void {
- var compile_args = args;
- var runtime_args: []const []const u8 = []const []const u8{};
-
- for (args) |argv, i| {
- if (mem.eql(u8, argv, "--")) {
- compile_args = args[0..i];
- runtime_args = args[i + 1 ..];
- break;
- }
- }
- var flags = try Args.parse(allocator, args_run_spec, compile_args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_run);
- os.exit(0);
- }
-
- if (flags.positionals.len != 1) {
- try stderr.write("expected exactly one zig source file\n");
- os.exit(1);
- }
-
- try stderr.print("runtime args:\n");
- for (runtime_args) |cargs| {
- try stderr.print("{}\n", cargs);
- }
-}
-
-// cmd:translate-c /////////////////////////////////////////////////////////////////////////////////
-
-const usage_translate_c =
- \\usage: zig translate-c [file]
- \\
- \\Options:
- \\ --help Print this help and exit
- \\ --enable-timing-info Print timing diagnostics
- \\ --output [path] Output file to write generated zig file (default: stdout)
- \\
- \\
-;
-
-const args_translate_c_spec = []Flag{
- Flag.Bool("--help"),
- Flag.Bool("--enable-timing-info"),
- Flag.Arg1("--libc-include-dir"),
- Flag.Arg1("--output"),
-};
-
-fn cmdTranslateC(allocator: *Allocator, args: []const []const u8) !void {
- var flags = try Args.parse(allocator, args_translate_c_spec, args);
- defer flags.deinit();
-
- if (flags.present("help")) {
- try stderr.write(usage_translate_c);
- os.exit(0);
- }
-
- if (flags.positionals.len != 1) {
- try stderr.write("expected exactly one c source file\n");
- os.exit(1);
- }
-
- // set up codegen
-
- const zig_root_source_file = null;
-
- // NOTE: translate-c shouldn't require setting up the full codegen instance as it does in
- // the C++ compiler.
-
- // codegen_create(g);
- // codegen_set_out_name(g, null);
- // codegen_translate_c(g, flags.positional.at(0))
-
- var output_stream = stdout;
- if (flags.single("output")) |output_file| {
- var file = try os.File.openWrite(allocator, output_file);
- defer file.close();
-
- var file_stream = io.FileOutStream.init(&file);
- // TODO: Not being set correctly, still stdout
- output_stream = &file_stream.stream;
- }
-
- // ast_render(g, output_stream, g->root_import->root, 4);
- try output_stream.write("pub const example = 10;\n");
-
- if (flags.present("enable-timing-info")) {
- // codegen_print_timing_info(g, stdout);
- try stderr.write("printing timing info for translate-c\n");
- }
-}
-
-// cmd:help ////////////////////////////////////////////////////////////////////////////////////////
-
fn cmdHelp(allocator: *Allocator, args: []const []const u8) !void {
- try stderr.write(usage);
+ try stdout.write(usage);
}
-// cmd:zen /////////////////////////////////////////////////////////////////////////////////////////
-
const info_zen =
\\
\\ * Communicate intent precisely.
@@ -1040,8 +735,6 @@ fn cmdZen(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write(info_zen);
}
-// cmd:internal ////////////////////////////////////////////////////////////////////////////////////
-
const usage_internal =
\\usage: zig internal [subcommand]
\\
@@ -1095,3 +788,27 @@ fn cmdInternalBuildInfo(allocator: *Allocator, args: []const []const u8) !void {
std.cstr.toSliceConst(c.ZIG_DIA_GUIDS_LIB),
);
}
+
+const CliPkg = struct {
+ name: []const u8,
+ path: []const u8,
+ children: ArrayList(*CliPkg),
+ parent: ?*CliPkg,
+
+ pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
+ var pkg = try allocator.create(CliPkg{
+ .name = name,
+ .path = path,
+ .children = ArrayList(*CliPkg).init(allocator),
+ .parent = parent,
+ });
+ return pkg;
+ }
+
+ pub fn deinit(self: *CliPkg) void {
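+        // Recursively deinitialize the subtree; note that the CliPkg
+        // allocations themselves are not destroyed here.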
+ for (self.children.toSliceConst()) |child| {
+ child.deinit();
+ }
+ self.children.deinit();
+ }
+};
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index 575105f25f..cf27c826c8 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -11,9 +11,11 @@ const warn = std.debug.warn;
const Token = std.zig.Token;
const ArrayList = std.ArrayList;
const errmsg = @import("errmsg.zig");
+const ast = std.zig.ast;
+const event = std.event;
pub const Module = struct {
- allocator: *mem.Allocator,
+ loop: *event.Loop,
name: Buffer,
root_src_path: ?[]const u8,
module: llvm.ModuleRef,
@@ -76,6 +78,52 @@ pub const Module = struct {
kind: Kind,
+ link_out_file: ?[]const u8,
+ events: *event.Channel(Event),
+
+ // TODO handle some of these earlier and report them in a way other than error codes
+ pub const BuildError = error{
+ OutOfMemory,
+ EndOfStream,
+ BadFd,
+ Io,
+ IsDir,
+ Unexpected,
+ SystemResources,
+ SharingViolation,
+ PathAlreadyExists,
+ FileNotFound,
+ AccessDenied,
+ PipeBusy,
+ FileTooBig,
+ SymLinkLoop,
+ ProcessFdQuotaExceeded,
+ NameTooLong,
+ SystemFdQuotaExceeded,
+ NoDevice,
+ PathNotFound,
+ NoSpaceLeft,
+ NotDir,
+ FileSystem,
+ OperationAborted,
+ IoPending,
+ BrokenPipe,
+ WouldBlock,
+ FileClosed,
+ DestinationAddressRequired,
+ DiskQuota,
+ InputOutput,
+ NoStdHandles,
+ Overflow,
+ NotSupported,
+ };
+
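+    // The outcome of a single build, published on the `events` channel.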
+ pub const Event = union(enum) {
+ Ok,
+ Fail: []errmsg.Msg,
+ Error: BuildError,
+ };
+
pub const DarwinVersionMin = union(enum) {
None,
MacOS: []const u8,
@@ -103,31 +151,17 @@ pub const Module = struct {
LlvmIr,
};
- pub const CliPkg = struct {
+ pub fn create(
+ loop: *event.Loop,
name: []const u8,
- path: []const u8,
- children: ArrayList(*CliPkg),
- parent: ?*CliPkg,
-
- pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
- var pkg = try allocator.create(CliPkg);
- pkg.name = name;
- pkg.path = path;
- pkg.children = ArrayList(*CliPkg).init(allocator);
- pkg.parent = parent;
- return pkg;
- }
-
- pub fn deinit(self: *CliPkg) void {
- for (self.children.toSliceConst()) |child| {
- child.deinit();
- }
- self.children.deinit();
- }
- };
-
- pub fn create(allocator: *mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: *const Target, kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !*Module {
- var name_buffer = try Buffer.init(allocator, name);
+ root_src_path: ?[]const u8,
+ target: *const Target,
+ kind: Kind,
+ build_mode: builtin.Mode,
+ zig_lib_dir: []const u8,
+ cache_dir: []const u8,
+ ) !*Module {
+ var name_buffer = try Buffer.init(loop.allocator, name);
errdefer name_buffer.deinit();
const context = c.LLVMContextCreate() orelse return error.OutOfMemory;
@@ -139,11 +173,12 @@ pub const Module = struct {
const builder = c.LLVMCreateBuilderInContext(context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeBuilder(builder);
- const module_ptr = try allocator.create(Module);
- errdefer allocator.destroy(module_ptr);
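+        // Channel used to publish build outcomes back to the consumer
+        // (e.g. processBuildEvents in the CLI).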
+ const events = try event.Channel(Event).create(loop, 0);
+ errdefer events.destroy();
- module_ptr.* = Module{
- .allocator = allocator,
+ return loop.allocator.create(Module{
+ .loop = loop,
+ .events = events,
.name = name_buffer,
.root_src_path = root_src_path,
.module = module,
@@ -188,7 +223,7 @@ pub const Module = struct {
.link_objects = [][]const u8{},
.windows_subsystem_windows = false,
.windows_subsystem_console = false,
- .link_libs_list = ArrayList(*LinkLib).init(allocator),
+ .link_libs_list = ArrayList(*LinkLib).init(loop.allocator),
.libc_link_lib = null,
.err_color = errmsg.Color.Auto,
.darwin_frameworks = [][]const u8{},
@@ -196,8 +231,8 @@ pub const Module = struct {
.test_filters = [][]const u8{},
.test_name_prefix = null,
.emit_file_type = Emit.Binary,
- };
- return module_ptr;
+ .link_out_file = null,
+ });
}
fn dump(self: *Module) void {
@@ -205,58 +240,70 @@ pub const Module = struct {
}
pub fn destroy(self: *Module) void {
+ self.events.destroy();
c.LLVMDisposeBuilder(self.builder);
c.LLVMDisposeModule(self.module);
c.LLVMContextDispose(self.context);
self.name.deinit();
- self.allocator.destroy(self);
+ self.a().destroy(self);
}
pub fn build(self: *Module) !void {
if (self.llvm_argv.len != 0) {
- var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.allocator, [][]const []const u8{
+ var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.a(), [][]const []const u8{
[][]const u8{"zig (LLVM option parsing)"},
self.llvm_argv,
});
defer c_compatible_args.deinit();
+ // TODO this sets global state
c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
}
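+
+        // Kick off the build; the returned promise is intentionally
+        // discarded because results are reported through self.events.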
+ _ = try async self.buildAsync();
+ }
+
+ async fn buildAsync(self: *Module) void {
+ while (true) {
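+            // One build per iteration; publish the outcome as an Event.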
+ // TODO directly awaiting async should guarantee memory allocation elision
+ // TODO also async before suspending should guarantee memory allocation elision
+ (await (async self.addRootSrc() catch unreachable)) catch |err| {
+ await (async self.events.put(Event{ .Error = err }) catch unreachable);
+ return;
+ };
+ await (async self.events.put(Event.Ok) catch unreachable);
+ }
+ }
+
+ async fn addRootSrc(self: *Module) !void {
const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path");
- const root_src_real_path = os.path.real(self.allocator, root_src_path) catch |err| {
+ const root_src_real_path = os.path.real(self.a(), root_src_path) catch |err| {
try printError("unable to get real path '{}': {}", root_src_path, err);
return err;
};
- errdefer self.allocator.free(root_src_real_path);
+ errdefer self.a().free(root_src_real_path);
- const source_code = io.readFileAlloc(self.allocator, root_src_real_path) catch |err| {
+ const source_code = io.readFileAlloc(self.a(), root_src_real_path) catch |err| {
try printError("unable to open '{}': {}", root_src_real_path, err);
return err;
};
- errdefer self.allocator.free(source_code);
+ errdefer self.a().free(source_code);
- warn("====input:====\n");
-
- warn("{}", source_code);
-
- warn("====parse:====\n");
-
- var tree = try std.zig.parse(self.allocator, source_code);
+ var tree = try std.zig.parse(self.a(), source_code);
defer tree.deinit();
- var stderr_file = try std.io.getStdErr();
- var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
- const out_stream = &stderr_file_out_stream.stream;
-
- warn("====fmt:====\n");
- _ = try std.zig.render(self.allocator, out_stream, &tree);
-
- warn("====ir:====\n");
- warn("TODO\n\n");
-
- warn("====llvm ir:====\n");
- self.dump();
+ //var it = tree.root_node.decls.iterator();
+ //while (it.next()) |decl_ptr| {
+ // const decl = decl_ptr.*;
+ // switch (decl.id) {
+ // ast.Node.Comptime => @panic("TODO"),
+ // ast.Node.VarDecl => @panic("TODO"),
+ // ast.Node.UseDecl => @panic("TODO"),
+ // ast.Node.FnDef => @panic("TODO"),
+ // ast.Node.TestDecl => @panic("TODO"),
+ // else => unreachable,
+ // }
+ //}
}
pub fn link(self: *Module, out_file: ?[]const u8) !void {
@@ -279,19 +326,22 @@ pub const Module = struct {
}
}
- const link_lib = try self.allocator.create(LinkLib);
- link_lib.* = LinkLib{
+ const link_lib = try self.a().create(LinkLib{
.name = name,
.path = null,
.provided_explicitly = provided_explicitly,
- .symbols = ArrayList([]u8).init(self.allocator),
- };
+ .symbols = ArrayList([]u8).init(self.a()),
+ });
try self.link_libs_list.append(link_lib);
if (is_libc) {
self.libc_link_lib = link_lib;
}
return link_lib;
}
+
+ fn a(self: Module) *mem.Allocator {
+ return self.loop.allocator;
+ }
};
fn printError(comptime format: []const u8, args: ...) !void {
diff --git a/src/all_types.hpp b/src/all_types.hpp
index f82d5d6e8b..3ad4a5d84a 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -234,6 +234,16 @@ enum RuntimeHintPtr {
RuntimeHintPtrNonStack,
};
+enum RuntimeHintSliceId {
+ RuntimeHintSliceIdUnknown,
+ RuntimeHintSliceIdLen,
+};
+
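+// What is known at compile time about a runtime slice value: currently only
+// whether its length is known, and if so the length itself.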
+struct RuntimeHintSlice {
+ enum RuntimeHintSliceId id;
+ uint64_t len;
+};
+
struct ConstGlobalRefs {
LLVMValueRef llvm_value;
LLVMValueRef llvm_global;
@@ -248,6 +258,7 @@ struct ConstExprValue {
// populated if special == ConstValSpecialStatic
BigInt x_bigint;
BigFloat x_bigfloat;
+ float16_t x_f16;
float x_f32;
double x_f64;
float128_t x_f128;
@@ -270,6 +281,7 @@ struct ConstExprValue {
RuntimeHintErrorUnion rh_error_union;
RuntimeHintOptional rh_maybe;
RuntimeHintPtr rh_ptr;
+ RuntimeHintSlice rh_slice;
} data;
};
@@ -1222,7 +1234,7 @@ struct TypeTableEntry {
// use these fields to make sure we don't duplicate type table entries for the same type
TypeTableEntry *pointer_parent[2]; // [0 - mut, 1 - const]
- TypeTableEntry *maybe_parent;
+ TypeTableEntry *optional_parent;
TypeTableEntry *promise_parent;
TypeTableEntry *promise_frame_parent;
// If we generate a constant name value for this type, we memoize it here.
@@ -1359,9 +1371,16 @@ enum BuiltinFnId {
BuiltinFnIdTruncate,
BuiltinFnIdIntCast,
BuiltinFnIdFloatCast,
+ BuiltinFnIdErrSetCast,
+ BuiltinFnIdToBytes,
+ BuiltinFnIdFromBytes,
BuiltinFnIdIntToFloat,
BuiltinFnIdFloatToInt,
BuiltinFnIdBoolToInt,
+ BuiltinFnIdErrToInt,
+ BuiltinFnIdIntToErr,
+ BuiltinFnIdEnumToInt,
+ BuiltinFnIdIntToEnum,
BuiltinFnIdIntType,
BuiltinFnIdSetCold,
BuiltinFnIdSetRuntimeSafety,
@@ -1416,6 +1435,7 @@ enum PanicMsgId {
PanicMsgIdIncorrectAlignment,
PanicMsgIdBadUnionField,
PanicMsgIdBadEnumValue,
+ PanicMsgIdFloatToInt,
PanicMsgIdCount,
};
@@ -1579,6 +1599,7 @@ struct CodeGen {
TypeTableEntry *entry_i128;
TypeTableEntry *entry_isize;
TypeTableEntry *entry_usize;
+ TypeTableEntry *entry_f16;
TypeTableEntry *entry_f32;
TypeTableEntry *entry_f64;
TypeTableEntry *entry_f128;
@@ -2074,6 +2095,7 @@ enum IrInstructionId {
IrInstructionIdIntToPtr,
IrInstructionIdPtrToInt,
IrInstructionIdIntToEnum,
+ IrInstructionIdEnumToInt,
IrInstructionIdIntToErr,
IrInstructionIdErrToInt,
IrInstructionIdCheckSwitchProngs,
@@ -2119,6 +2141,9 @@ enum IrInstructionId {
IrInstructionIdMergeErrRetTraces,
IrInstructionIdMarkErrRetTracePtr,
IrInstructionIdSqrt,
+ IrInstructionIdErrSetCast,
+ IrInstructionIdToBytes,
+ IrInstructionIdFromBytes,
};
struct IrInstruction {
@@ -2654,6 +2679,26 @@ struct IrInstructionFloatCast {
IrInstruction *target;
};
+struct IrInstructionErrSetCast {
+ IrInstruction base;
+
+ IrInstruction *dest_type;
+ IrInstruction *target;
+};
+
+struct IrInstructionToBytes {
+ IrInstruction base;
+
+ IrInstruction *target;
+};
+
+struct IrInstructionFromBytes {
+ IrInstruction base;
+
+ IrInstruction *dest_child_type;
+ IrInstruction *target;
+};
+
struct IrInstructionIntToFloat {
IrInstruction base;
@@ -2864,6 +2909,13 @@ struct IrInstructionIntToPtr {
struct IrInstructionIntToEnum {
IrInstruction base;
+ IrInstruction *dest_type;
+ IrInstruction *target;
+};
+
+struct IrInstructionEnumToInt {
+ IrInstruction base;
+
IrInstruction *target;
};
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 10cdb0af6f..6f94deb9fd 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -482,7 +482,7 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type)
return return_type->promise_frame_parent;
}
- TypeTableEntry *awaiter_handle_type = get_maybe_type(g, g->builtin_types.entry_promise);
+ TypeTableEntry *awaiter_handle_type = get_optional_type(g, g->builtin_types.entry_promise);
TypeTableEntry *result_ptr_type = get_pointer_to_type(g, return_type, false);
ZigList<const char *> field_names = {};
@@ -513,9 +513,9 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type)
return entry;
}
-TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) {
- if (child_type->maybe_parent) {
- TypeTableEntry *entry = child_type->maybe_parent;
+TypeTableEntry *get_optional_type(CodeGen *g, TypeTableEntry *child_type) {
+ if (child_type->optional_parent) {
+ TypeTableEntry *entry = child_type->optional_parent;
return entry;
} else {
ensure_complete_type(g, child_type);
@@ -592,7 +592,7 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) {
entry->data.maybe.child_type = child_type;
- child_type->maybe_parent = entry;
+ child_type->optional_parent = entry;
return entry;
}
}
@@ -1470,6 +1470,17 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
calling_convention_name(fn_type_id.cc)));
return g->builtin_types.entry_invalid;
}
+ if (param_node->data.param_decl.type != nullptr) {
+ TypeTableEntry *type_entry = analyze_type_expr(g, child_scope, param_node->data.param_decl.type);
+ if (type_is_invalid(type_entry)) {
+ return g->builtin_types.entry_invalid;
+ }
+ FnTypeParamInfo *param_info = &fn_type_id.param_info[fn_type_id.next_param_index];
+ param_info->type = type_entry;
+ param_info->is_noalias = param_node->data.param_decl.is_noalias;
+ fn_type_id.next_param_index += 1;
+ }
+
return get_generic_fn_type(g, &fn_type_id);
} else if (param_is_var_args) {
if (fn_type_id.cc == CallingConventionC) {
@@ -2307,8 +2318,9 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) {
return;
if (enum_type->data.enumeration.zero_bits_loop_flag) {
- enum_type->data.enumeration.zero_bits_known = true;
- enum_type->data.enumeration.zero_bits_loop_flag = false;
+ add_node_error(g, enum_type->data.enumeration.decl_node,
+ buf_sprintf("'%s' depends on itself", buf_ptr(&enum_type->name)));
+ enum_type->data.enumeration.is_invalid = true;
return;
}
@@ -2984,7 +2996,7 @@ static void typecheck_panic_fn(CodeGen *g, FnTableEntry *panic_fn) {
return wrong_panic_prototype(g, proto_node, fn_type);
}
- TypeTableEntry *optional_ptr_to_stack_trace_type = get_maybe_type(g, get_ptr_to_stack_trace_type(g));
+ TypeTableEntry *optional_ptr_to_stack_trace_type = get_optional_type(g, get_ptr_to_stack_trace_type(g));
if (fn_type_id->param_info[1].type != optional_ptr_to_stack_trace_type) {
return wrong_panic_prototype(g, proto_node, fn_type);
}
@@ -3716,6 +3728,7 @@ TypeUnionField *find_union_field_by_tag(TypeTableEntry *type_entry, const BigInt
}
TypeEnumField *find_enum_field_by_tag(TypeTableEntry *enum_type, const BigInt *tag) {
+ assert(enum_type->data.enumeration.zero_bits_known);
for (uint32_t i = 0; i < enum_type->data.enumeration.src_field_count; i += 1) {
TypeEnumField *field = &enum_type->data.enumeration.fields[i];
if (bigint_cmp(&field->value, tag) == CmpEQ) {
@@ -4656,6 +4669,13 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
}
case TypeTableEntryIdFloat:
switch (const_val->type->data.floating.bit_count) {
+ case 16:
+ {
+ uint16_t result;
+ static_assert(sizeof(result) == sizeof(const_val->data.x_f16), "");
+ memcpy(&result, &const_val->data.x_f16, sizeof(result));
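+                // 65537 == 2^16 + 1, so the multiply replicates the 16-bit
+                // pattern into both halves: 0xABCD * 65537 == 0xABCDABCD.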
+ return result * 65537u;
+ }
case 32:
{
uint32_t result;
@@ -5116,6 +5136,9 @@ void init_const_float(ConstExprValue *const_val, TypeTableEntry *type, double va
bigfloat_init_64(&const_val->data.x_bigfloat, value);
} else if (type->id == TypeTableEntryIdFloat) {
switch (type->data.floating.bit_count) {
+ case 16:
+ const_val->data.x_f16 = zig_double_to_f16(value);
+ break;
case 32:
const_val->data.x_f32 = value;
break;
@@ -5429,6 +5452,8 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
case TypeTableEntryIdFloat:
assert(a->type->data.floating.bit_count == b->type->data.floating.bit_count);
switch (a->type->data.floating.bit_count) {
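+            // f16 values go through the SoftFloat library, since the host
+            // may have no native half-precision type.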
+ case 16:
+ return f16_eq(a->data.x_f16, b->data.x_f16);
case 32:
return a->data.x_f32 == b->data.x_f32;
case 64:
@@ -5446,8 +5471,22 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
case TypeTableEntryIdPointer:
case TypeTableEntryIdFn:
return const_values_equal_ptr(a, b);
- case TypeTableEntryIdArray:
- zig_panic("TODO");
+ case TypeTableEntryIdArray: {
+ assert(a->type->data.array.len == b->type->data.array.len);
+ assert(a->data.x_array.special != ConstArraySpecialUndef);
+ assert(b->data.x_array.special != ConstArraySpecialUndef);
+
+ size_t len = a->type->data.array.len;
+ ConstExprValue *a_elems = a->data.x_array.s_none.elements;
+ ConstExprValue *b_elems = b->data.x_array.s_none.elements;
+
+ for (size_t i = 0; i < len; ++i) {
+ if (!const_values_equal(&a_elems[i], &b_elems[i]))
+ return false;
+ }
+
+ return true;
+ }
case TypeTableEntryIdStruct:
for (size_t i = 0; i < a->type->data.structure.src_field_count; i += 1) {
ConstExprValue *field_a = &a->data.x_struct.fields[i];
@@ -5558,7 +5597,7 @@ void render_const_val_ptr(CodeGen *g, Buf *buf, ConstExprValue *const_val, TypeT
return;
}
case ConstPtrSpecialHardCodedAddr:
- buf_appendf(buf, "(*%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->data.pointer.child_type->name),
+ buf_appendf(buf, "(%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->name),
const_val->data.x_ptr.data.hard_coded_addr.addr);
return;
case ConstPtrSpecialDiscard:
@@ -5602,6 +5641,9 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
return;
case TypeTableEntryIdFloat:
switch (type_entry->data.floating.bit_count) {
+ case 16:
+ buf_appendf(buf, "%f", zig_f16_to_double(const_val->data.x_f16));
+ return;
case 32:
buf_appendf(buf, "%f", const_val->data.x_f32);
return;
diff --git a/src/analyze.hpp b/src/analyze.hpp
index 88e06b2390..c2730197e2 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -24,7 +24,7 @@ TypeTableEntry *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits);
TypeTableEntry **get_c_int_type_ptr(CodeGen *g, CIntType c_int_type);
TypeTableEntry *get_c_int_type(CodeGen *g, CIntType c_int_type);
TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id);
-TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type);
+TypeTableEntry *get_optional_type(CodeGen *g, TypeTableEntry *child_type);
TypeTableEntry *get_array_type(CodeGen *g, TypeTableEntry *child_type, uint64_t array_size);
TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type);
TypeTableEntry *get_partial_container_type(CodeGen *g, Scope *scope, ContainerKind kind,
diff --git a/src/bigfloat.cpp b/src/bigfloat.cpp
index dcb6db61db..cc442fa3b7 100644
--- a/src/bigfloat.cpp
+++ b/src/bigfloat.cpp
@@ -18,6 +18,10 @@ void bigfloat_init_128(BigFloat *dest, float128_t x) {
dest->value = x;
}
+void bigfloat_init_16(BigFloat *dest, float16_t x) {
+ f16_to_f128M(x, &dest->value);
+}
+
void bigfloat_init_32(BigFloat *dest, float x) {
float32_t f32_val;
memcpy(&f32_val, &x, sizeof(float));
@@ -146,6 +150,10 @@ Cmp bigfloat_cmp(const BigFloat *op1, const BigFloat *op2) {
}
}
+float16_t bigfloat_to_f16(const BigFloat *bigfloat) {
+ return f128M_to_f16(&bigfloat->value);
+}
+
float bigfloat_to_f32(const BigFloat *bigfloat) {
float32_t f32_value = f128M_to_f32(&bigfloat->value);
float result;
diff --git a/src/bigfloat.hpp b/src/bigfloat.hpp
index e212c30c87..c6ae567945 100644
--- a/src/bigfloat.hpp
+++ b/src/bigfloat.hpp
@@ -22,6 +22,7 @@ struct BigFloat {
struct Buf;
+void bigfloat_init_16(BigFloat *dest, float16_t x);
void bigfloat_init_32(BigFloat *dest, float x);
void bigfloat_init_64(BigFloat *dest, double x);
void bigfloat_init_128(BigFloat *dest, float128_t x);
@@ -29,6 +30,7 @@ void bigfloat_init_bigfloat(BigFloat *dest, const BigFloat *x);
void bigfloat_init_bigint(BigFloat *dest, const BigInt *op);
int bigfloat_init_buf_base10(BigFloat *dest, const uint8_t *buf_ptr, size_t buf_len);
+float16_t bigfloat_to_f16(const BigFloat *bigfloat);
float bigfloat_to_f32(const BigFloat *bigfloat);
double bigfloat_to_f64(const BigFloat *bigfloat);
float128_t bigfloat_to_f128(const BigFloat *bigfloat);
diff --git a/src/bigint.cpp b/src/bigint.cpp
index 367ae79b6c..bb227a7c3d 100644
--- a/src/bigint.cpp
+++ b/src/bigint.cpp
@@ -1683,10 +1683,15 @@ void bigint_incr(BigInt *x) {
bigint_init_unsigned(x, 1);
return;
}
-
- if (x->digit_count == 1 && x->data.digit != UINT64_MAX) {
- x->data.digit += 1;
- return;
+
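+    // BigInt is stored as sign and magnitude, so incrementing a negative
+    // value shrinks the magnitude toward zero, e.g. -3 + 1 == -2.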
+ if (x->digit_count == 1) {
+ if (x->is_negative && x->data.digit != 0) {
+ x->data.digit -= 1;
+ return;
+ } else if (!x->is_negative && x->data.digit != UINT64_MAX) {
+ x->data.digit += 1;
+ return;
+ }
}
BigInt copy;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 3a26f0729b..bfc20cb1d2 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -17,6 +17,7 @@
#include "os.hpp"
#include "translate_c.hpp"
#include "target.hpp"
+#include "util.hpp"
#include "zig_llvm.h"
#include <stdio.h>
@@ -865,6 +866,8 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("access of inactive union field");
case PanicMsgIdBadEnumValue:
return buf_create_from_str("invalid enum value");
+ case PanicMsgIdFloatToInt:
+ return buf_create_from_str("integer part of floating point value out of bounds");
}
zig_unreachable();
}
@@ -1644,7 +1647,7 @@ static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_runtime_safety, T
return trunc_val;
}
LLVMValueRef orig_val;
- if (actual_type->data.integral.is_signed) {
+ if (wanted_type->data.integral.is_signed) {
orig_val = LLVMBuildSExt(g->builder, trunc_val, actual_type->type_ref, "");
} else {
orig_val = LLVMBuildZExt(g->builder, trunc_val, actual_type->type_ref, "");
@@ -2207,12 +2210,12 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
} else if (type_entry->id == TypeTableEntryIdInt) {
LLVMIntPredicate pred = cmp_op_to_int_predicate(op_id, type_entry->data.integral.is_signed);
return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, "");
- } else if (type_entry->id == TypeTableEntryIdEnum) {
- LLVMIntPredicate pred = cmp_op_to_int_predicate(op_id, false);
- return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, "");
- } else if (type_entry->id == TypeTableEntryIdErrorSet ||
+ } else if (type_entry->id == TypeTableEntryIdEnum ||
+ type_entry->id == TypeTableEntryIdErrorSet ||
type_entry->id == TypeTableEntryIdPointer ||
- type_entry->id == TypeTableEntryIdBool)
+ type_entry->id == TypeTableEntryIdBool ||
+ type_entry->id == TypeTableEntryIdPromise ||
+ type_entry->id == TypeTableEntryIdFn)
{
LLVMIntPredicate pred = cmp_op_to_int_predicate(op_id, false);
return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, "");
@@ -2509,15 +2512,41 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
} else {
return LLVMBuildUIToFP(g->builder, expr_val, wanted_type->type_ref, "");
}
- case CastOpFloatToInt:
+ case CastOpFloatToInt: {
assert(wanted_type->id == TypeTableEntryIdInt);
ZigLLVMSetFastMath(g->builder, ir_want_fast_math(g, &cast_instruction->base));
+
+ bool want_safety = ir_want_runtime_safety(g, &cast_instruction->base);
+
+ LLVMValueRef result;
if (wanted_type->data.integral.is_signed) {
- return LLVMBuildFPToSI(g->builder, expr_val, wanted_type->type_ref, "");
+ result = LLVMBuildFPToSI(g->builder, expr_val, wanted_type->type_ref, "");
} else {
- return LLVMBuildFPToUI(g->builder, expr_val, wanted_type->type_ref, "");
+ result = LLVMBuildFPToUI(g->builder, expr_val, wanted_type->type_ref, "");
}
+ if (want_safety) {
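+                // Round-trip check: convert the integer result back to float.
+                // For an in-range input, expr_val - back_to_float is just the
+                // discarded fraction, so it must lie strictly inside
+                // (-1.0, 1.0); anything else traps.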
+ LLVMValueRef back_to_float;
+ if (wanted_type->data.integral.is_signed) {
+ back_to_float = LLVMBuildSIToFP(g->builder, result, LLVMTypeOf(expr_val), "");
+ } else {
+ back_to_float = LLVMBuildUIToFP(g->builder, result, LLVMTypeOf(expr_val), "");
+ }
+ LLVMValueRef difference = LLVMBuildFSub(g->builder, expr_val, back_to_float, "");
+ LLVMValueRef one_pos = LLVMConstReal(LLVMTypeOf(expr_val), 1.0f);
+ LLVMValueRef one_neg = LLVMConstReal(LLVMTypeOf(expr_val), -1.0f);
+ LLVMValueRef ok_bit_pos = LLVMBuildFCmp(g->builder, LLVMRealOLT, difference, one_pos, "");
+ LLVMValueRef ok_bit_neg = LLVMBuildFCmp(g->builder, LLVMRealOGT, difference, one_neg, "");
+ LLVMValueRef ok_bit = LLVMBuildAnd(g->builder, ok_bit_pos, ok_bit_neg, "");
+ LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "FloatCheckOk");
+ LLVMBasicBlockRef bad_block = LLVMAppendBasicBlock(g->cur_fn_val, "FloatCheckFail");
+ LLVMBuildCondBr(g->builder, ok_bit, ok_block, bad_block);
+ LLVMPositionBuilderAtEnd(g->builder, bad_block);
+ gen_safety_crash(g, PanicMsgIdFloatToInt);
+ LLVMPositionBuilderAtEnd(g->builder, ok_block);
+ }
+ return result;
+ }
case CastOpBoolToInt:
assert(wanted_type->id == TypeTableEntryIdInt);
assert(actual_type->id == TypeTableEntryIdBool);
@@ -2607,8 +2636,25 @@ static LLVMValueRef ir_render_int_to_enum(CodeGen *g, IrExecutable *executable,
TypeTableEntry *tag_int_type = wanted_type->data.enumeration.tag_int_type;
LLVMValueRef target_val = ir_llvm_value(g, instruction->target);
- return gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base),
+ LLVMValueRef tag_int_value = gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base),
instruction->target->value.type, tag_int_type, target_val);
+
+ if (ir_want_runtime_safety(g, &instruction->base)) {
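+        // Validate the integer against every declared tag: an LLVM switch
+        // jumps to OkValue for known tag values and to BadValue otherwise.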
+ LLVMBasicBlockRef bad_value_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadValue");
+ LLVMBasicBlockRef ok_value_block = LLVMAppendBasicBlock(g->cur_fn_val, "OkValue");
+ size_t field_count = wanted_type->data.enumeration.src_field_count;
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, tag_int_value, bad_value_block, field_count);
+ for (size_t field_i = 0; field_i < field_count; field_i += 1) {
+ LLVMValueRef this_tag_int_value = bigint_to_llvm_const(tag_int_type->type_ref,
+ &wanted_type->data.enumeration.fields[field_i].value);
+ LLVMAddCase(switch_instr, this_tag_int_value, ok_value_block);
+ }
+ LLVMPositionBuilderAtEnd(g->builder, bad_value_block);
+ gen_safety_crash(g, PanicMsgIdBadEnumValue);
+
+ LLVMPositionBuilderAtEnd(g->builder, ok_value_block);
+ }
+ return tag_int_value;
}
static LLVMValueRef ir_render_int_to_err(CodeGen *g, IrExecutable *executable, IrInstructionIntToErr *instruction) {
@@ -4638,6 +4684,10 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdIntToFloat:
case IrInstructionIdFloatToInt:
case IrInstructionIdBoolToInt:
+ case IrInstructionIdErrSetCast:
+ case IrInstructionIdFromBytes:
+ case IrInstructionIdToBytes:
+ case IrInstructionIdEnumToInt:
zig_unreachable();
case IrInstructionIdReturn:
@@ -5090,6 +5140,8 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
const_val->data.x_err_set->value, false);
case TypeTableEntryIdFloat:
switch (type_entry->data.floating.bit_count) {
+ case 16:
+ return LLVMConstReal(type_entry->type_ref, zig_f16_to_double(const_val->data.x_f16));
case 32:
return LLVMConstReal(type_entry->type_ref, const_val->data.x_f32);
case 64:
@@ -6056,58 +6108,30 @@ static void define_builtin_types(CodeGen *g) {
g->builtin_types.entry_usize = entry;
}
}
- {
+
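+    // One helper replaces the four nearly identical blocks that set up
+    // f32/f64/f128/c_longdouble, so the new f16 entry can share the code.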
+ auto add_fp_entry = [] (CodeGen *g,
+ const char *name,
+ uint32_t bit_count,
+ LLVMTypeRef type_ref,
+ TypeTableEntry **field) {
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat);
- entry->type_ref = LLVMFloatType();
- buf_init_from_str(&entry->name, "f32");
- entry->data.floating.bit_count = 32;
+ entry->type_ref = type_ref;
+ buf_init_from_str(&entry->name, name);
+ entry->data.floating.bit_count = bit_count;
uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref);
entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
debug_size_in_bits,
ZigLLVMEncoding_DW_ATE_float());
- g->builtin_types.entry_f32 = entry;
+ *field = entry;
g->primitive_type_table.put(&entry->name, entry);
- }
- {
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat);
- entry->type_ref = LLVMDoubleType();
- buf_init_from_str(&entry->name, "f64");
- entry->data.floating.bit_count = 64;
+ };
+ add_fp_entry(g, "f16", 16, LLVMHalfType(), &g->builtin_types.entry_f16);
+ add_fp_entry(g, "f32", 32, LLVMFloatType(), &g->builtin_types.entry_f32);
+ add_fp_entry(g, "f64", 64, LLVMDoubleType(), &g->builtin_types.entry_f64);
+ add_fp_entry(g, "f128", 128, LLVMFP128Type(), &g->builtin_types.entry_f128);
+ add_fp_entry(g, "c_longdouble", 80, LLVMX86FP80Type(), &g->builtin_types.entry_c_longdouble);
- uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref);
- entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
- debug_size_in_bits,
- ZigLLVMEncoding_DW_ATE_float());
- g->builtin_types.entry_f64 = entry;
- g->primitive_type_table.put(&entry->name, entry);
- }
- {
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat);
- entry->type_ref = LLVMFP128Type();
- buf_init_from_str(&entry->name, "f128");
- entry->data.floating.bit_count = 128;
-
- uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref);
- entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
- debug_size_in_bits,
- ZigLLVMEncoding_DW_ATE_float());
- g->builtin_types.entry_f128 = entry;
- g->primitive_type_table.put(&entry->name, entry);
- }
- {
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat);
- entry->type_ref = LLVMX86FP80Type();
- buf_init_from_str(&entry->name, "c_longdouble");
- entry->data.floating.bit_count = 80;
-
- uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref);
- entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
- debug_size_in_bits,
- ZigLLVMEncoding_DW_ATE_float());
- g->builtin_types.entry_c_longdouble = entry;
- g->primitive_type_table.put(&entry->name, entry);
- }
{
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdVoid);
entry->type_ref = LLVMVoidType();
@@ -6231,6 +6255,10 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdIntToFloat, "intToFloat", 2);
create_builtin_fn(g, BuiltinFnIdFloatToInt, "floatToInt", 2);
create_builtin_fn(g, BuiltinFnIdBoolToInt, "boolToInt", 1);
+ create_builtin_fn(g, BuiltinFnIdErrToInt, "errorToInt", 1);
+ create_builtin_fn(g, BuiltinFnIdIntToErr, "intToError", 1);
+ create_builtin_fn(g, BuiltinFnIdEnumToInt, "enumToInt", 1);
+ create_builtin_fn(g, BuiltinFnIdIntToEnum, "intToEnum", 2);
create_builtin_fn(g, BuiltinFnIdCompileErr, "compileError", 1);
create_builtin_fn(g, BuiltinFnIdCompileLog, "compileLog", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdIntType, "IntType", 2); // TODO rename to Int
@@ -6267,6 +6295,9 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdErrorReturnTrace, "errorReturnTrace", 0);
create_builtin_fn(g, BuiltinFnIdAtomicRmw, "atomicRmw", 5);
create_builtin_fn(g, BuiltinFnIdAtomicLoad, "atomicLoad", 3);
+ create_builtin_fn(g, BuiltinFnIdErrSetCast, "errSetCast", 2);
+ create_builtin_fn(g, BuiltinFnIdToBytes, "sliceToBytes", 1);
+ create_builtin_fn(g, BuiltinFnIdFromBytes, "bytesToSlice", 2);
}
static const char *bool_to_str(bool b) {
@@ -6538,7 +6569,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
"\n"
" pub const Union = struct {\n"
" layout: ContainerLayout,\n"
- " tag_type: type,\n"
+ " tag_type: ?type,\n"
" fields: []UnionField,\n"
" defs: []Definition,\n"
" };\n"
@@ -6555,20 +6586,20 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" pub const FnArg = struct {\n"
" is_generic: bool,\n"
" is_noalias: bool,\n"
- " arg_type: type,\n"
+ " arg_type: ?type,\n"
" };\n"
"\n"
" pub const Fn = struct {\n"
" calling_convention: CallingConvention,\n"
" is_generic: bool,\n"
" is_var_args: bool,\n"
- " return_type: type,\n"
- " async_allocator_type: type,\n"
+ " return_type: ?type,\n"
+ " async_allocator_type: ?type,\n"
" args: []FnArg,\n"
" };\n"
"\n"
" pub const Promise = struct {\n"
- " child: type,\n"
+ " child: ?type,\n"
" };\n"
"\n"
" pub const Definition = struct {\n"
diff --git a/src/ir.cpp b/src/ir.cpp
index c75a3ae7c1..5df5c1d676 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -11,9 +11,10 @@
#include "ir.hpp"
#include "ir_print.hpp"
#include "os.hpp"
-#include "translate_c.hpp"
#include "range_set.hpp"
#include "softfloat.hpp"
+#include "translate_c.hpp"
+#include "util.hpp"
struct IrExecContext {
ConstExprValue *mem_slot_list;
@@ -65,12 +66,7 @@ enum ConstCastResultId {
ConstCastResultIdNullWrapPtr,
};
-struct ConstCastErrSetMismatch {
-    ZigList<ErrorTableEntry *> missing_errors;
-};
-
struct ConstCastOnly;
-
struct ConstCastArg {
size_t arg_index;
ConstCastOnly *child;
@@ -80,15 +76,22 @@ struct ConstCastArgNoAlias {
size_t arg_index;
};
+struct ConstCastOptionalMismatch;
+struct ConstCastPointerMismatch;
+struct ConstCastSliceMismatch;
+struct ConstCastErrUnionErrSetMismatch;
+struct ConstCastErrUnionPayloadMismatch;
+struct ConstCastErrSetMismatch;
+
struct ConstCastOnly {
ConstCastResultId id;
union {
- ConstCastErrSetMismatch error_set;
- ConstCastOnly *pointer_child;
- ConstCastOnly *slice_child;
- ConstCastOnly *optional_child;
- ConstCastOnly *error_union_payload;
- ConstCastOnly *error_union_error_set;
+ ConstCastErrSetMismatch *error_set_mismatch;
+ ConstCastPointerMismatch *pointer_mismatch;
+ ConstCastSliceMismatch *slice_mismatch;
+ ConstCastOptionalMismatch *optional;
+ ConstCastErrUnionPayloadMismatch *error_union_payload;
+ ConstCastErrUnionErrSetMismatch *error_union_error_set;
ConstCastOnly *return_type;
ConstCastOnly *async_allocator_type;
ConstCastOnly *null_wrap_ptr_child;
@@ -97,6 +100,39 @@ struct ConstCastOnly {
} data;
};
+struct ConstCastOptionalMismatch {
+ ConstCastOnly child;
+ TypeTableEntry *wanted_child;
+ TypeTableEntry *actual_child;
+};
+
+struct ConstCastPointerMismatch {
+ ConstCastOnly child;
+ TypeTableEntry *wanted_child;
+ TypeTableEntry *actual_child;
+};
+
+struct ConstCastSliceMismatch {
+ ConstCastOnly child;
+ TypeTableEntry *wanted_child;
+ TypeTableEntry *actual_child;
+};
+
+struct ConstCastErrUnionErrSetMismatch {
+ ConstCastOnly child;
+ TypeTableEntry *wanted_err_set;
+ TypeTableEntry *actual_err_set;
+};
+
+struct ConstCastErrUnionPayloadMismatch {
+ ConstCastOnly child;
+ TypeTableEntry *wanted_payload;
+ TypeTableEntry *actual_payload;
+};
+
+struct ConstCastErrSetMismatch {
+    ZigList<ErrorTableEntry *> missing_errors;
+};
static IrInstruction *ir_gen_node(IrBuilder *irb, AstNode *node, Scope *scope);
static IrInstruction *ir_gen_node_extra(IrBuilder *irb, AstNode *node, Scope *scope, LVal lval);
@@ -468,6 +504,18 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionFloatCast *) {
return IrInstructionIdFloatCast;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionErrSetCast *) {
+ return IrInstructionIdErrSetCast;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionToBytes *) {
+ return IrInstructionIdToBytes;
+}
+
+static constexpr IrInstructionId ir_instruction_id(IrInstructionFromBytes *) {
+ return IrInstructionIdFromBytes;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionIntToFloat *) {
return IrInstructionIdIntToFloat;
}
@@ -588,6 +636,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionIntToEnum *) {
return IrInstructionIdIntToEnum;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionEnumToInt *) {
+ return IrInstructionIdEnumToInt;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionIntToErr *) {
return IrInstructionIdIntToErr;
}
@@ -1941,6 +1993,37 @@ static IrInstruction *ir_build_float_cast(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
+static IrInstruction *ir_build_err_set_cast(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_type, IrInstruction *target) {
+    IrInstructionErrSetCast *instruction = ir_build_instruction<IrInstructionErrSetCast>(irb, scope, source_node);
+ instruction->dest_type = dest_type;
+ instruction->target = target;
+
+ ir_ref_instruction(dest_type, irb->current_basic_block);
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_to_bytes(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *target) {
+    IrInstructionToBytes *instruction = ir_build_instruction<IrInstructionToBytes>(irb, scope, source_node);
+ instruction->target = target;
+
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+static IrInstruction *ir_build_from_bytes(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_child_type, IrInstruction *target) {
+    IrInstructionFromBytes *instruction = ir_build_instruction<IrInstructionFromBytes>(irb, scope, source_node);
+ instruction->dest_child_type = dest_child_type;
+ instruction->target = target;
+
+ ir_ref_instruction(dest_child_type, irb->current_basic_block);
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
static IrInstruction *ir_build_int_to_float(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_type, IrInstruction *target) {
IrInstructionIntToFloat *instruction = ir_build_instruction(irb, scope, source_node);
instruction->dest_type = dest_type;
@@ -2335,10 +2418,26 @@ static IrInstruction *ir_build_ptr_to_int(IrBuilder *irb, Scope *scope, AstNode
}
static IrInstruction *ir_build_int_to_enum(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *target)
+ IrInstruction *dest_type, IrInstruction *target)
{
    IrInstructionIntToEnum *instruction = ir_build_instruction<IrInstructionIntToEnum>(
irb, scope, source_node);
+ instruction->dest_type = dest_type;
+ instruction->target = target;
+
+ if (dest_type) ir_ref_instruction(dest_type, irb->current_basic_block);
+ ir_ref_instruction(target, irb->current_basic_block);
+
+ return &instruction->base;
+}
+
+
+
+static IrInstruction *ir_build_enum_to_int(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *target)
+{
+    IrInstructionEnumToInt *instruction = ir_build_instruction<IrInstructionEnumToInt>(
+ irb, scope, source_node);
instruction->target = target;
ir_ref_instruction(target, irb->current_basic_block);
@@ -2946,7 +3045,7 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode
ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value);
IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node,
- get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
+ get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
// TODO replace replacement_value with @intToPtr(?promise, 0x1) when it doesn't crash zig
IrInstruction *replacement_value = irb->exec->coro_handle;
IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, scope, node,
@@ -4054,6 +4153,46 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
IrInstruction *result = ir_build_float_cast(irb, scope, node, arg0_value, arg1_value);
return ir_lval_wrap(irb, scope, result, lval);
}
+ case BuiltinFnIdErrSetCast:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+ if (arg1_value == irb->codegen->invalid_instruction)
+ return arg1_value;
+
+ IrInstruction *result = ir_build_err_set_cast(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdFromBytes:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+ if (arg1_value == irb->codegen->invalid_instruction)
+ return arg1_value;
+
+ IrInstruction *result = ir_build_from_bytes(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdToBytes:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *result = ir_build_to_bytes(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
case BuiltinFnIdIntToFloat:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -4084,6 +4223,26 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
IrInstruction *result = ir_build_float_to_int(irb, scope, node, arg0_value, arg1_value);
return ir_lval_wrap(irb, scope, result, lval);
}
+ case BuiltinFnIdErrToInt:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *result = ir_build_err_to_int(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdIntToErr:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *result = ir_build_int_to_err(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
case BuiltinFnIdBoolToInt:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -4605,6 +4764,31 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
// this value does not mean anything since we passed non-null values for other arg
AtomicOrderMonotonic);
}
+ case BuiltinFnIdIntToEnum:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+ IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+ if (arg1_value == irb->codegen->invalid_instruction)
+ return arg1_value;
+
+ IrInstruction *result = ir_build_int_to_enum(irb, scope, node, arg0_value, arg1_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
+ case BuiltinFnIdEnumToInt:
+ {
+ AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+ IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+ if (arg0_value == irb->codegen->invalid_instruction)
+ return arg0_value;
+
+ IrInstruction *result = ir_build_enum_to_int(irb, scope, node, arg0_value);
+ return ir_lval_wrap(irb, scope, result, lval);
+ }
}
zig_unreachable();
}
@@ -6471,7 +6655,7 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast
ir_build_store_ptr(irb, parent_scope, node, result_ptr_field_ptr, my_result_var_ptr);
IrInstruction *save_token = ir_build_coro_save(irb, parent_scope, node, irb->exec->coro_handle);
IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node,
- get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
+ get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, parent_scope, node,
promise_type_val, awaiter_field_ptr, nullptr, irb->exec->coro_handle, nullptr,
AtomicRmwOp_xchg, AtomicOrderSeqCst);
@@ -6805,7 +6989,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
VariableTableEntry *await_handle_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
IrInstruction *null_value = ir_build_const_null(irb, coro_scope, node);
IrInstruction *await_handle_type_val = ir_build_const_type(irb, coro_scope, node,
- get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
+ get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
ir_build_var_decl(irb, coro_scope, node, await_handle_var, await_handle_type_val, nullptr, null_value);
irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node, await_handle_var);
@@ -6928,6 +7112,12 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
IrInstruction *dest_err_ret_trace_ptr = ir_build_load_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr);
ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr, dest_err_ret_trace_ptr);
}
+ // Before we destroy the coroutine frame, we need to load the target promise into
+ // a register or local variable which does not get spilled into the frame,
+ // otherwise llvm tries to access memory inside the destroyed frame.
+ IrInstruction *unwrapped_await_handle_ptr = ir_build_unwrap_maybe(irb, scope, node,
+ irb->exec->await_handle_var_ptr, false);
+ IrInstruction *await_handle_in_block = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr);
ir_build_br(irb, scope, node, check_free_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_final_cleanup_block);
@@ -6942,6 +7132,14 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
incoming_values[1] = const_bool_true;
IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values);
+    IrBasicBlock **merge_incoming_blocks = allocate<IrBasicBlock *>(2);
+    IrInstruction **merge_incoming_values = allocate<IrInstruction *>(2);
+ merge_incoming_blocks[0] = irb->exec->coro_final_cleanup_block;
+ merge_incoming_values[0] = ir_build_const_undefined(irb, scope, node);
+ merge_incoming_blocks[1] = irb->exec->coro_normal_final;
+ merge_incoming_values[1] = await_handle_in_block;
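+    // Merge the two predecessors: the await handle loaded in coro_normal_final,
+    // and undefined from the final-cleanup path, where the handle is
+    // presumably never resumed.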
+ IrInstruction *awaiter_handle = ir_build_phi(irb, scope, node, 2, merge_incoming_blocks, merge_incoming_values);
+
Buf *free_field_name = buf_create_from_str(ASYNC_FREE_FIELD_NAME);
IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node,
ImplicitAllocatorIdLocalVar);
@@ -6968,9 +7166,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false);
ir_set_cursor_at_end_and_append_block(irb, resume_block);
- IrInstruction *unwrapped_await_handle_ptr = ir_build_unwrap_maybe(irb, scope, node,
- irb->exec->await_handle_var_ptr, false);
- IrInstruction *awaiter_handle = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr);
ir_build_coro_resume(irb, scope, node, awaiter_handle);
ir_build_br(irb, scope, node, irb->exec->coro_suspend_block, const_bool_false);
}
@@ -7055,6 +7250,11 @@ static bool float_has_fraction(ConstExprValue *const_val) {
return bigfloat_has_fraction(&const_val->data.x_bigfloat);
} else if (const_val->type->id == TypeTableEntryIdFloat) {
switch (const_val->type->data.floating.bit_count) {
+ case 16:
+ {
+                float16_t truncated = f16_roundToInt(const_val->data.x_f16, softfloat_round_minMag, false);
+                return !f16_eq(truncated, const_val->data.x_f16);
+ }
case 32:
return floorf(const_val->data.x_f32) != const_val->data.x_f32;
case 64:
@@ -7078,6 +7278,9 @@ static void float_append_buf(Buf *buf, ConstExprValue *const_val) {
bigfloat_append_buf(buf, &const_val->data.x_bigfloat);
} else if (const_val->type->id == TypeTableEntryIdFloat) {
switch (const_val->type->data.floating.bit_count) {
+ case 16:
+ buf_appendf(buf, "%f", zig_f16_to_double(const_val->data.x_f16));
+ break;
case 32:
buf_appendf(buf, "%f", const_val->data.x_f32);
break;
@@ -7113,6 +7316,17 @@ static void float_init_bigint(BigInt *bigint, ConstExprValue *const_val) {
bigint_init_bigfloat(bigint, &const_val->data.x_bigfloat);
} else if (const_val->type->id == TypeTableEntryIdFloat) {
switch (const_val->type->data.floating.bit_count) {
+ case 16:
+ {
+ double x = zig_f16_to_double(const_val->data.x_f16);
+ if (x >= 0) {
+ bigint_init_unsigned(bigint, (uint64_t)x);
+ } else {
+ bigint_init_unsigned(bigint, (uint64_t)-x);
+ bigint->is_negative = true;
+ }
+ break;
+ }
case 32:
if (const_val->data.x_f32 >= 0) {
bigint_init_unsigned(bigint, (uint64_t)(const_val->data.x_f32));
@@ -7149,6 +7363,9 @@ static void float_init_bigfloat(ConstExprValue *dest_val, BigFloat *bigfloat) {
bigfloat_init_bigfloat(&dest_val->data.x_bigfloat, bigfloat);
} else if (dest_val->type->id == TypeTableEntryIdFloat) {
switch (dest_val->type->data.floating.bit_count) {
+ case 16:
+ dest_val->data.x_f16 = bigfloat_to_f16(bigfloat);
+ break;
case 32:
dest_val->data.x_f32 = bigfloat_to_f32(bigfloat);
break;
@@ -7166,11 +7383,39 @@ static void float_init_bigfloat(ConstExprValue *dest_val, BigFloat *bigfloat) {
}
}
+static void float_init_f16(ConstExprValue *dest_val, float16_t x) {
+ if (dest_val->type->id == TypeTableEntryIdComptimeFloat) {
+ bigfloat_init_16(&dest_val->data.x_bigfloat, x);
+ } else if (dest_val->type->id == TypeTableEntryIdFloat) {
+ switch (dest_val->type->data.floating.bit_count) {
+ case 16:
+ dest_val->data.x_f16 = x;
+ break;
+ case 32:
+ dest_val->data.x_f32 = zig_f16_to_double(x);
+ break;
+ case 64:
+ dest_val->data.x_f64 = zig_f16_to_double(x);
+ break;
+ case 128:
+ f16_to_f128M(x, &dest_val->data.x_f128);
+ break;
+ default:
+ zig_unreachable();
+ }
+ } else {
+ zig_unreachable();
+ }
+}
+
static void float_init_f32(ConstExprValue *dest_val, float x) {
if (dest_val->type->id == TypeTableEntryIdComptimeFloat) {
bigfloat_init_32(&dest_val->data.x_bigfloat, x);
} else if (dest_val->type->id == TypeTableEntryIdFloat) {
switch (dest_val->type->data.floating.bit_count) {
+ case 16:
+ dest_val->data.x_f16 = zig_double_to_f16(x);
+ break;
case 32:
dest_val->data.x_f32 = x;
break;
@@ -7197,6 +7442,9 @@ static void float_init_f64(ConstExprValue *dest_val, double x) {
bigfloat_init_64(&dest_val->data.x_bigfloat, x);
} else if (dest_val->type->id == TypeTableEntryIdFloat) {
switch (dest_val->type->data.floating.bit_count) {
+ case 16:
+ dest_val->data.x_f16 = zig_double_to_f16(x);
+ break;
case 32:
dest_val->data.x_f32 = x;
break;
@@ -7223,6 +7471,9 @@ static void float_init_f128(ConstExprValue *dest_val, float128_t x) {
bigfloat_init_128(&dest_val->data.x_bigfloat, x);
} else if (dest_val->type->id == TypeTableEntryIdFloat) {
switch (dest_val->type->data.floating.bit_count) {
+ case 16:
+ dest_val->data.x_f16 = f128M_to_f16(&x);
+ break;
case 32:
{
float32_t f32_val = f128M_to_f32(&x);
@@ -7253,6 +7504,9 @@ static void float_init_float(ConstExprValue *dest_val, ConstExprValue *src_val)
float_init_bigfloat(dest_val, &src_val->data.x_bigfloat);
} else if (src_val->type->id == TypeTableEntryIdFloat) {
switch (src_val->type->data.floating.bit_count) {
+ case 16:
+ float_init_f16(dest_val, src_val->data.x_f16);
+ break;
case 32:
float_init_f32(dest_val, src_val->data.x_f32);
break;
@@ -7276,6 +7530,14 @@ static Cmp float_cmp(ConstExprValue *op1, ConstExprValue *op2) {
return bigfloat_cmp(&op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ if (f16_lt(op1->data.x_f16, op2->data.x_f16)) {
+ return CmpLT;
+ } else if (f16_lt(op2->data.x_f16, op1->data.x_f16)) {
+ return CmpGT;
+ } else {
+ return CmpEQ;
+ }
case 32:
if (op1->data.x_f32 > op2->data.x_f32) {
return CmpGT;
@@ -7313,6 +7575,17 @@ static Cmp float_cmp_zero(ConstExprValue *op) {
return bigfloat_cmp_zero(&op->data.x_bigfloat);
} else if (op->type->id == TypeTableEntryIdFloat) {
switch (op->type->data.floating.bit_count) {
+ case 16:
+ {
+ const float16_t zero = zig_double_to_f16(0);
+ if (f16_lt(op->data.x_f16, zero)) {
+ return CmpLT;
+ } else if (f16_lt(zero, op->data.x_f16)) {
+ return CmpGT;
+ } else {
+ return CmpEQ;
+ }
+ }
case 32:
if (op->data.x_f32 < 0.0) {
return CmpLT;
@@ -7354,6 +7627,9 @@ static void float_add(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
bigfloat_add(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_add(op1->data.x_f16, op2->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = op1->data.x_f32 + op2->data.x_f32;
return;
@@ -7378,6 +7654,9 @@ static void float_sub(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
bigfloat_sub(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_sub(op1->data.x_f16, op2->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = op1->data.x_f32 - op2->data.x_f32;
return;
@@ -7402,6 +7681,9 @@ static void float_mul(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
bigfloat_mul(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_mul(op1->data.x_f16, op2->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = op1->data.x_f32 * op2->data.x_f32;
return;
@@ -7426,6 +7708,9 @@ static void float_div(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
bigfloat_div(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = op1->data.x_f32 / op2->data.x_f32;
return;
@@ -7450,21 +7735,15 @@ static void float_div_trunc(ConstExprValue *out_val, ConstExprValue *op1, ConstE
bigfloat_div_trunc(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16);
+ out_val->data.x_f16 = f16_roundToInt(out_val->data.x_f16, softfloat_round_minMag, false);
+ return;
case 32:
- out_val->data.x_f32 = op1->data.x_f32 / op2->data.x_f32;
- if (out_val->data.x_f32 >= 0.0) {
- out_val->data.x_f32 = floorf(out_val->data.x_f32);
- } else {
- out_val->data.x_f32 = ceilf(out_val->data.x_f32);
- }
+ out_val->data.x_f32 = truncf(op1->data.x_f32 / op2->data.x_f32);
return;
case 64:
- out_val->data.x_f64 = op1->data.x_f64 / op2->data.x_f64;
- if (out_val->data.x_f64 >= 0.0) {
- out_val->data.x_f64 = floor(out_val->data.x_f64);
- } else {
- out_val->data.x_f64 = ceil(out_val->data.x_f64);
- }
+ out_val->data.x_f64 = trunc(op1->data.x_f64 / op2->data.x_f64);
return;
case 128:
f128M_div(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
@@ -7485,6 +7764,10 @@ static void float_div_floor(ConstExprValue *out_val, ConstExprValue *op1, ConstE
bigfloat_div_floor(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16);
+ out_val->data.x_f16 = f16_roundToInt(out_val->data.x_f16, softfloat_round_min, false);
+ return;
case 32:
out_val->data.x_f32 = floorf(op1->data.x_f32 / op2->data.x_f32);
return;
@@ -7510,6 +7793,9 @@ static void float_rem(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
bigfloat_rem(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_rem(op1->data.x_f16, op2->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = fmodf(op1->data.x_f32, op2->data.x_f32);
return;
@@ -7527,6 +7813,24 @@ static void float_rem(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
}
}
+// c = a - b * floor(a / b)
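+// e.g. zig_f16_mod(-1, 3): floor(-1/3) == -1, so the result is
+// -1 - 3 * -1 == 2, where f16_rem (round-to-nearest remainder)
+// would return -1.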
+static float16_t zig_f16_mod(float16_t a, float16_t b) {
+ float16_t c;
+ c = f16_div(a, b);
+ c = f16_roundToInt(c, softfloat_round_min, true);
+ c = f16_mul(b, c);
+ c = f16_sub(a, c);
+ return c;
+}
+
+// c = a - b * floor(a / b)
+static void zig_f128M_mod(const float128_t* a, const float128_t* b, float128_t* c) {
+ f128M_div(a, b, c);
+ f128M_roundToInt(c, softfloat_round_min, true, c);
+ f128M_mul(b, c, c);
+ f128M_sub(a, c, c);
+}
+
static void float_mod(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) {
assert(op1->type == op2->type);
out_val->type = op1->type;
@@ -7534,6 +7838,9 @@ static void float_mod(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
bigfloat_mod(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat);
} else if (op1->type->id == TypeTableEntryIdFloat) {
switch (op1->type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = zig_f16_mod(op1->data.x_f16, op2->data.x_f16);
+ return;
case 32:
out_val->data.x_f32 = fmodf(fmodf(op1->data.x_f32, op2->data.x_f32) + op2->data.x_f32, op2->data.x_f32);
return;
@@ -7541,9 +7848,7 @@ static void float_mod(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal
out_val->data.x_f64 = fmod(fmod(op1->data.x_f64, op2->data.x_f64) + op2->data.x_f64, op2->data.x_f64);
return;
case 128:
- f128M_rem(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
- f128M_add(&out_val->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
- f128M_rem(&out_val->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
+ zig_f128M_mod(&op1->data.x_f128, &op2->data.x_f128, &out_val->data.x_f128);
return;
default:
zig_unreachable();
@@ -7559,6 +7864,12 @@ static void float_negate(ConstExprValue *out_val, ConstExprValue *op) {
bigfloat_negate(&out_val->data.x_bigfloat, &op->data.x_bigfloat);
} else if (op->type->id == TypeTableEntryIdFloat) {
switch (op->type->data.floating.bit_count) {
+ case 16:
+ {
+ const float16_t zero = zig_double_to_f16(0);
+ out_val->data.x_f16 = f16_sub(zero, op->data.x_f16);
+ return;
+ }
case 32:
out_val->data.x_f32 = -op->data.x_f32;
return;
@@ -7581,6 +7892,9 @@ static void float_negate(ConstExprValue *out_val, ConstExprValue *op) {
void float_write_ieee597(ConstExprValue *op, uint8_t *buf, bool is_big_endian) {
if (op->type->id == TypeTableEntryIdFloat) {
switch (op->type->data.floating.bit_count) {
+ case 16:
+ memcpy(buf, &op->data.x_f16, 2); // TODO wrong when compiler is big endian
+ return;
case 32:
memcpy(buf, &op->data.x_f32, 4); // TODO wrong when compiler is big endian
return;
@@ -7601,6 +7915,9 @@ void float_write_ieee597(ConstExprValue *op, uint8_t *buf, bool is_big_endian) {
void float_read_ieee597(ConstExprValue *val, uint8_t *buf, bool is_big_endian) {
if (val->type->id == TypeTableEntryIdFloat) {
switch (val->type->data.floating.bit_count) {
+ case 16:
+ memcpy(&val->data.x_f16, buf, 2); // TODO wrong when compiler is big endian
+ return;
case 32:
memcpy(&val->data.x_f32, buf, 4); // TODO wrong when compiler is big endian
return;
@@ -7796,9 +8113,10 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
// * and [*] can do a const-cast-only to ?* and ?[*], respectively
// but not if there is a mutable parent pointer
+ // and not if the pointer is zero bits, since ?*T then has a
+ // non-pointer (bool-like) representation
if (!wanted_is_mutable && wanted_type->id == TypeTableEntryIdOptional &&
wanted_type->data.maybe.child_type->id == TypeTableEntryIdPointer &&
- actual_type->id == TypeTableEntryIdPointer)
+ actual_type->id == TypeTableEntryIdPointer && type_has_bits(actual_type))
{
ConstCastOnly child = types_match_const_cast_only(ira,
wanted_type->data.maybe.child_type, actual_type, source_node, wanted_is_mutable);
@@ -7824,8 +8142,10 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
actual_type->data.pointer.child_type, source_node, !wanted_type->data.pointer.is_const);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdPointerChild;
- result.data.pointer_child = allocate_nonzero<ConstCastOnly>(1);
- *result.data.pointer_child = child;
+ result.data.pointer_mismatch = allocate_nonzero<ConstCastPointerMismatch>(1);
+ result.data.pointer_mismatch->child = child;
+ result.data.pointer_mismatch->wanted_child = wanted_type->data.pointer.child_type;
+ result.data.pointer_mismatch->actual_child = actual_type->data.pointer.child_type;
}
return result;
}
@@ -7844,8 +8164,10 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
actual_ptr_type->data.pointer.child_type, source_node, !wanted_ptr_type->data.pointer.is_const);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdSliceChild;
- result.data.slice_child = allocate_nonzero<ConstCastOnly>(1);
- *result.data.slice_child = child;
+ result.data.slice_mismatch = allocate_nonzero<ConstCastSliceMismatch>(1);
+ result.data.slice_mismatch->child = child;
+ result.data.slice_mismatch->actual_child = actual_ptr_type->data.pointer.child_type;
+ result.data.slice_mismatch->wanted_child = wanted_ptr_type->data.pointer.child_type;
}
return result;
}
@@ -7857,8 +8179,10 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
actual_type->data.maybe.child_type, source_node, wanted_is_mutable);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdOptionalChild;
- result.data.optional_child = allocate_nonzero<ConstCastOnly>(1);
- *result.data.optional_child = child;
+ result.data.optional = allocate_nonzero<ConstCastOptionalMismatch>(1);
+ result.data.optional->child = child;
+ result.data.optional->wanted_child = wanted_type->data.maybe.child_type;
+ result.data.optional->actual_child = actual_type->data.maybe.child_type;
}
return result;
}
@@ -7869,16 +8193,20 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
actual_type->data.error_union.payload_type, source_node, wanted_is_mutable);
if (payload_child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdErrorUnionPayload;
- result.data.error_union_payload = allocate_nonzero<ConstCastOnly>(1);
- *result.data.error_union_payload = payload_child;
+ result.data.error_union_payload = allocate_nonzero<ConstCastErrUnionPayloadMismatch>(1);
+ result.data.error_union_payload->child = payload_child;
+ result.data.error_union_payload->wanted_payload = wanted_type->data.error_union.payload_type;
+ result.data.error_union_payload->actual_payload = actual_type->data.error_union.payload_type;
return result;
}
ConstCastOnly error_set_child = types_match_const_cast_only(ira, wanted_type->data.error_union.err_set_type,
actual_type->data.error_union.err_set_type, source_node, wanted_is_mutable);
if (error_set_child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdErrorUnionErrorSet;
- result.data.error_union_error_set = allocate_nonzero<ConstCastOnly>(1);
- *result.data.error_union_error_set = error_set_child;
+ result.data.error_union_error_set = allocate_nonzero<ConstCastErrUnionErrSetMismatch>(1);
+ result.data.error_union_error_set->child = error_set_child;
+ result.data.error_union_error_set->wanted_err_set = wanted_type->data.error_union.err_set_type;
+ result.data.error_union_error_set->actual_err_set = actual_type->data.error_union.err_set_type;
return result;
}
return result;
@@ -7920,8 +8248,9 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
if (error_entry == nullptr) {
if (result.id == ConstCastResultIdOk) {
result.id = ConstCastResultIdErrSet;
+ result.data.error_set_mismatch = allocate<ConstCastErrSetMismatch>(1);
}
- result.data.error_set.missing_errors.append(contained_error_entry);
+ result.data.error_set_mismatch->missing_errors.append(contained_error_entry);
}
}
free(errors);
@@ -8016,325 +8345,6 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
return result;
}
-enum ImplicitCastMatchResult {
- ImplicitCastMatchResultNo,
- ImplicitCastMatchResultYes,
- ImplicitCastMatchResultReportedError,
-};
-
-static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, TypeTableEntry *wanted_type,
- TypeTableEntry *actual_type, IrInstruction *value)
-{
- AstNode *source_node = value->source_node;
- ConstCastOnly const_cast_result = types_match_const_cast_only(ira, wanted_type, actual_type,
- source_node, false);
- if (const_cast_result.id == ConstCastResultIdOk) {
- return ImplicitCastMatchResultYes;
- }
-
- // if we got here with error sets, make an error showing the incompatibilities
- ZigList<ErrorTableEntry *> *missing_errors = nullptr;
- if (const_cast_result.id == ConstCastResultIdErrSet) {
- missing_errors = &const_cast_result.data.error_set.missing_errors;
- }
- if (const_cast_result.id == ConstCastResultIdErrorUnionErrorSet) {
- if (const_cast_result.data.error_union_error_set->id == ConstCastResultIdErrSet) {
- missing_errors = &const_cast_result.data.error_union_error_set->data.error_set.missing_errors;
- } else if (const_cast_result.data.error_union_error_set->id == ConstCastResultIdErrSetGlobal) {
- ErrorMsg *msg = ir_add_error(ira, value,
- buf_sprintf("expected '%s', found '%s'", buf_ptr(&wanted_type->name), buf_ptr(&actual_type->name)));
- add_error_note(ira->codegen, msg, value->source_node,
- buf_sprintf("unable to cast global error set into smaller set"));
- return ImplicitCastMatchResultReportedError;
- }
- } else if (const_cast_result.id == ConstCastResultIdErrSetGlobal) {
- ErrorMsg *msg = ir_add_error(ira, value,
- buf_sprintf("expected '%s', found '%s'", buf_ptr(&wanted_type->name), buf_ptr(&actual_type->name)));
- add_error_note(ira->codegen, msg, value->source_node,
- buf_sprintf("unable to cast global error set into smaller set"));
- return ImplicitCastMatchResultReportedError;
- }
- if (missing_errors != nullptr) {
- ErrorMsg *msg = ir_add_error(ira, value,
- buf_sprintf("expected '%s', found '%s'", buf_ptr(&wanted_type->name), buf_ptr(&actual_type->name)));
- for (size_t i = 0; i < missing_errors->length; i += 1) {
- ErrorTableEntry *error_entry = missing_errors->at(i);
- add_error_note(ira->codegen, msg, error_entry->decl_node,
- buf_sprintf("'error.%s' not a member of destination error set", buf_ptr(&error_entry->name)));
- }
-
- return ImplicitCastMatchResultReportedError;
- }
-
- // implicit conversion from ?T to ?U
- if (wanted_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) {
- ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, wanted_type->data.maybe.child_type,
- actual_type->data.maybe.child_type, value);
- if (res != ImplicitCastMatchResultNo)
- return res;
- }
-
- // implicit conversion from non maybe type to maybe type
- if (wanted_type->id == TypeTableEntryIdOptional) {
- ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, wanted_type->data.maybe.child_type,
- actual_type, value);
- if (res != ImplicitCastMatchResultNo)
- return res;
- }
-
- // implicit conversion from null literal to maybe type
- if (wanted_type->id == TypeTableEntryIdOptional &&
- actual_type->id == TypeTableEntryIdNull)
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit T to U!T
- if (wanted_type->id == TypeTableEntryIdErrorUnion &&
- ir_types_match_with_implicit_cast(ira, wanted_type->data.error_union.payload_type, actual_type, value))
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit conversion from error set to error union type
- if (wanted_type->id == TypeTableEntryIdErrorUnion &&
- actual_type->id == TypeTableEntryIdErrorSet)
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit conversion from T to U!?T
- if (wanted_type->id == TypeTableEntryIdErrorUnion &&
- wanted_type->data.error_union.payload_type->id == TypeTableEntryIdOptional &&
- ir_types_match_with_implicit_cast(ira,
- wanted_type->data.error_union.payload_type->data.maybe.child_type,
- actual_type, value))
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit widening conversion
- if (wanted_type->id == TypeTableEntryIdInt &&
- actual_type->id == TypeTableEntryIdInt &&
- wanted_type->data.integral.is_signed == actual_type->data.integral.is_signed &&
- wanted_type->data.integral.bit_count >= actual_type->data.integral.bit_count)
- {
- return ImplicitCastMatchResultYes;
- }
-
- // small enough unsigned ints can get casted to large enough signed ints
- if (wanted_type->id == TypeTableEntryIdInt && wanted_type->data.integral.is_signed &&
- actual_type->id == TypeTableEntryIdInt && !actual_type->data.integral.is_signed &&
- wanted_type->data.integral.bit_count > actual_type->data.integral.bit_count)
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit float widening conversion
- if (wanted_type->id == TypeTableEntryIdFloat &&
- actual_type->id == TypeTableEntryIdFloat &&
- wanted_type->data.floating.bit_count >= actual_type->data.floating.bit_count)
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit [N]T to []const T
- if (is_slice(wanted_type) && actual_type->id == TypeTableEntryIdArray) {
- TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
- assert(ptr_type->id == TypeTableEntryIdPointer);
-
- if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
- source_node, false).id == ConstCastResultIdOk)
- {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit &const [N]T to []const T
- if (is_slice(wanted_type) &&
- actual_type->id == TypeTableEntryIdPointer &&
- actual_type->data.pointer.ptr_len == PtrLenSingle &&
- actual_type->data.pointer.is_const &&
- actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
- {
- TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
- assert(ptr_type->id == TypeTableEntryIdPointer);
-
- TypeTableEntry *array_type = actual_type->data.pointer.child_type;
-
- if ((ptr_type->data.pointer.is_const || array_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type,
- source_node, false).id == ConstCastResultIdOk)
- {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit [N]T to &const []const T
- if (wanted_type->id == TypeTableEntryIdPointer &&
- wanted_type->data.pointer.is_const &&
- wanted_type->data.pointer.ptr_len == PtrLenSingle &&
- is_slice(wanted_type->data.pointer.child_type) &&
- actual_type->id == TypeTableEntryIdArray)
- {
- TypeTableEntry *ptr_type =
- wanted_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry;
- assert(ptr_type->id == TypeTableEntryIdPointer);
- if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type,
- actual_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
- {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit *[N]T to [*]T
- if (wanted_type->id == TypeTableEntryIdPointer &&
- wanted_type->data.pointer.ptr_len == PtrLenUnknown &&
- actual_type->id == TypeTableEntryIdPointer &&
- actual_type->data.pointer.ptr_len == PtrLenSingle &&
- actual_type->data.pointer.child_type->id == TypeTableEntryIdArray &&
- types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
- actual_type->data.pointer.child_type->data.array.child_type, source_node,
- !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
- {
- return ImplicitCastMatchResultYes;
- }
-
- // implicit *[N]T to []T
- if (is_slice(wanted_type) &&
- actual_type->id == TypeTableEntryIdPointer &&
- actual_type->data.pointer.ptr_len == PtrLenSingle &&
- actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
- {
- TypeTableEntry *slice_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
- assert(slice_ptr_type->id == TypeTableEntryIdPointer);
- if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
- actual_type->data.pointer.child_type->data.array.child_type, source_node,
- !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk)
- {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit [N]T to ?[]const T
- if (wanted_type->id == TypeTableEntryIdOptional &&
- is_slice(wanted_type->data.maybe.child_type) &&
- actual_type->id == TypeTableEntryIdArray)
- {
- TypeTableEntry *ptr_type =
- wanted_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry;
- assert(ptr_type->id == TypeTableEntryIdPointer);
- if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type,
- actual_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
- {
- return ImplicitCastMatchResultYes;
- }
- }
-
-
- // implicit number literal to typed number
- // implicit number literal to &const integer
- if (actual_type->id == TypeTableEntryIdComptimeFloat ||
- actual_type->id == TypeTableEntryIdComptimeInt)
- {
- if (wanted_type->id == TypeTableEntryIdPointer &&
- wanted_type->data.pointer.ptr_len == PtrLenSingle &&
- wanted_type->data.pointer.is_const)
- {
- if (ir_num_lit_fits_in_other_type(ira, value, wanted_type->data.pointer.child_type, false)) {
- return ImplicitCastMatchResultYes;
- } else {
- return ImplicitCastMatchResultReportedError;
- }
- } else if (ir_num_lit_fits_in_other_type(ira, value, wanted_type, false)) {
- return ImplicitCastMatchResultYes;
- } else {
- return ImplicitCastMatchResultReportedError;
- }
- }
-
- // implicit typed number to integer or float literal.
- // works when the number is known
- if (value->value.special == ConstValSpecialStatic) {
- if (actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdComptimeInt) {
- return ImplicitCastMatchResultYes;
- } else if (actual_type->id == TypeTableEntryIdFloat && wanted_type->id == TypeTableEntryIdComptimeFloat) {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit union to its enum tag type
- if (wanted_type->id == TypeTableEntryIdEnum && actual_type->id == TypeTableEntryIdUnion &&
- (actual_type->data.unionation.decl_node->data.container_decl.auto_enum ||
- actual_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
- {
- type_ensure_zero_bits_known(ira->codegen, actual_type);
- if (actual_type->data.unionation.tag_type == wanted_type) {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit enum to union which has the enum as the tag type
- if (wanted_type->id == TypeTableEntryIdUnion && actual_type->id == TypeTableEntryIdEnum &&
- (wanted_type->data.unionation.decl_node->data.container_decl.auto_enum ||
- wanted_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
- {
- type_ensure_zero_bits_known(ira->codegen, wanted_type);
- if (wanted_type->data.unionation.tag_type == actual_type) {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit enum to &const union which has the enum as the tag type
- if (actual_type->id == TypeTableEntryIdEnum &&
- wanted_type->id == TypeTableEntryIdPointer &&
- wanted_type->data.pointer.ptr_len == PtrLenSingle)
- {
- TypeTableEntry *union_type = wanted_type->data.pointer.child_type;
- if (union_type->data.unionation.decl_node->data.container_decl.auto_enum ||
- union_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr)
- {
- type_ensure_zero_bits_known(ira->codegen, union_type);
- if (union_type->data.unionation.tag_type == actual_type) {
- return ImplicitCastMatchResultYes;
- }
- }
- }
-
- // implicit T to *T where T is zero bits
- if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
- types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
- actual_type, source_node, false).id == ConstCastResultIdOk)
- {
- type_ensure_zero_bits_known(ira->codegen, actual_type);
- if (!type_has_bits(actual_type)) {
- return ImplicitCastMatchResultYes;
- }
- }
-
- // implicit undefined literal to anything
- if (actual_type->id == TypeTableEntryIdUndefined) {
- return ImplicitCastMatchResultYes;
- }
-
- // implicitly take a const pointer to something
- if (!type_requires_comptime(actual_type)) {
- TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true);
- if (wanted_type->id == TypeTableEntryIdPointer &&
- wanted_type->data.pointer.ptr_len == PtrLenSingle &&
- types_match_const_cast_only(ira, wanted_type, const_ptr_actual,
- source_node, false).id == ConstCastResultIdOk)
- {
- return ImplicitCastMatchResultYes;
- }
- }
-
- return ImplicitCastMatchResultNo;
-}
-
static void update_errors_helper(CodeGen *g, ErrorTableEntry ***errors, size_t *errors_count) {
size_t old_errors_count = *errors_count;
*errors_count = g->errors_by_index.length;
@@ -8881,7 +8891,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
} else if (prev_inst->value.type->id == TypeTableEntryIdOptional) {
return prev_inst->value.type;
} else {
- return get_maybe_type(ira->codegen, prev_inst->value.type);
+ return get_optional_type(ira->codegen, prev_inst->value.type);
}
} else {
return prev_inst->value.type;
@@ -8909,7 +8919,8 @@ static void copy_const_val(ConstExprValue *dest, ConstExprValue *src, bool same_
}
}
-static void eval_const_expr_implicit_cast(CastOp cast_op,
+static bool eval_const_expr_implicit_cast(IrAnalyze *ira, IrInstruction *source_instr,
+ CastOp cast_op,
ConstExprValue *other_val, TypeTableEntry *other_type,
ConstExprValue *const_val, TypeTableEntry *new_type)
{
@@ -8934,6 +8945,9 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
if (other_val->type->id == TypeTableEntryIdComptimeFloat) {
assert(new_type->id == TypeTableEntryIdFloat);
switch (new_type->data.floating.bit_count) {
+ case 16:
+ const_val->data.x_f16 = bigfloat_to_f16(&other_val->data.x_bigfloat);
+ break;
case 32:
const_val->data.x_f32 = bigfloat_to_f32(&other_val->data.x_bigfloat);
break;
@@ -8964,6 +8978,9 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
BigFloat bigfloat;
bigfloat_init_bigint(&bigfloat, &other_val->data.x_bigint);
switch (new_type->data.floating.bit_count) {
+ case 16:
+ const_val->data.x_f16 = bigfloat_to_f16(&bigfloat);
+ break;
case 32:
const_val->data.x_f32 = bigfloat_to_f32(&bigfloat);
break;
@@ -8981,6 +8998,20 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
}
case CastOpFloatToInt:
float_init_bigint(&const_val->data.x_bigint, other_val);
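+ // the truncated result must fit in the destination integer type;
+ // e.g. casting 300.0 to i8 at comptime is now reported as an error
+ // instead of going undiagnosed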
+ if (new_type->id == TypeTableEntryIdInt) {
+ if (!bigint_fits_in_bits(&const_val->data.x_bigint, new_type->data.integral.bit_count,
+ new_type->data.integral.is_signed))
+ {
+ Buf *int_buf = buf_alloc();
+ bigint_append_buf(int_buf, &const_val->data.x_bigint, 10);
+
+ ir_add_error(ira, source_instr,
+ buf_sprintf("integer value '%s' cannot be stored in type '%s'",
+ buf_ptr(int_buf), buf_ptr(&new_type->name)));
+ return false;
+ }
+ }
+
const_val->special = ConstValSpecialStatic;
break;
case CastOpBoolToInt:
@@ -8988,6 +9019,7 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
const_val->special = ConstValSpecialStatic;
break;
}
+ return true;
}
static IrInstruction *ir_resolve_cast(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value,
TypeTableEntry *wanted_type, CastOp cast_op, bool need_alloca)
@@ -8997,8 +9029,11 @@ static IrInstruction *ir_resolve_cast(IrAnalyze *ira, IrInstruction *source_inst
{
IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
source_instr->source_node, wanted_type);
- eval_const_expr_implicit_cast(cast_op, &value->value, value->value.type,
- &result->value, wanted_type);
+ if (!eval_const_expr_implicit_cast(ira, source_instr, cast_op, &value->value, value->value.type,
+ &result->value, wanted_type))
+ {
+ return ira->codegen->invalid_instruction;
+ }
return result;
} else {
IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node, wanted_type, value, cast_op);
@@ -9073,11 +9108,6 @@ static bool is_container(TypeTableEntry *type) {
type->id == TypeTableEntryIdUnion;
}
-static bool is_u8(TypeTableEntry *type) {
- return type->id == TypeTableEntryIdInt &&
- !type->data.integral.is_signed && type->data.integral.bit_count == 8;
-}
-
static IrBasicBlock *ir_get_new_bb(IrAnalyze *ira, IrBasicBlock *old_bb, IrInstruction *ref_old_instruction) {
assert(old_bb);
@@ -9631,6 +9661,8 @@ static IrInstruction *ir_analyze_array_to_slice(IrAnalyze *ira, IrInstruction *s
IrInstruction *result = ir_build_slice(&ira->new_irb, source_instr->scope,
source_instr->source_node, array_ptr, start, end, false);
result->value.type = wanted_type;
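+ // the slice's length is comptime-known here; record it as a runtime
+ // hint for later analysis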
+ result->value.data.rh_slice.id = RuntimeHintSliceIdLen;
+ result->value.data.rh_slice.len = array_type->data.array.len;
ir_add_alloca(ira, result, result->value.type);
return result;
@@ -9851,7 +9883,7 @@ static IrInstruction *ir_analyze_int_to_enum(IrAnalyze *ira, IrInstruction *sour
}
IrInstruction *result = ir_build_int_to_enum(&ira->new_irb, source_instr->scope,
- source_instr->source_node, target);
+ source_instr->source_node, nullptr, target);
result->value.type = wanted_type;
return result;
}
@@ -10063,6 +10095,83 @@ static IrInstruction *ir_analyze_ptr_to_array(IrAnalyze *ira, IrInstruction *sou
return result;
}
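+
+// Walks a failed ConstCastOnly result, attaching one error note per level
+// of nesting. For example, a failed cast from '*f32' to '*i32' produces a
+// note "pointer type child 'f32' cannot cast into pointer type child
+// 'i32'" under the main "expected type" error.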
+static void report_recursive_error(IrAnalyze *ira, AstNode *source_node, ConstCastOnly *cast_result,
+ ErrorMsg *parent_msg)
+{
+ switch (cast_result->id) {
+ case ConstCastResultIdOk:
+ zig_unreachable();
+ case ConstCastResultIdOptionalChild: {
+ ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
+ buf_sprintf("optional type child '%s' cannot cast into optional type child '%s'",
+ buf_ptr(&cast_result->data.optional->actual_child->name),
+ buf_ptr(&cast_result->data.optional->wanted_child->name)));
+ report_recursive_error(ira, source_node, &cast_result->data.optional->child, msg);
+ break;
+ }
+ case ConstCastResultIdErrorUnionErrorSet: {
+ ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
+ buf_sprintf("error set '%s' cannot cast into error set '%s'",
+ buf_ptr(&cast_result->data.error_union_error_set->actual_err_set->name),
+ buf_ptr(&cast_result->data.error_union_error_set->wanted_err_set->name)));
+ report_recursive_error(ira, source_node, &cast_result->data.error_union_error_set->child, msg);
+ break;
+ }
+ case ConstCastResultIdErrSet: {
+ ZigList<ErrorTableEntry *> *missing_errors = &cast_result->data.error_set_mismatch->missing_errors;
+ for (size_t i = 0; i < missing_errors->length; i += 1) {
+ ErrorTableEntry *error_entry = missing_errors->at(i);
+ add_error_note(ira->codegen, parent_msg, error_entry->decl_node,
+ buf_sprintf("'error.%s' not a member of destination error set", buf_ptr(&error_entry->name)));
+ }
+ break;
+ }
+ case ConstCastResultIdErrSetGlobal: {
+ add_error_note(ira->codegen, parent_msg, source_node,
+ buf_sprintf("cannot cast global error set into smaller set"));
+ break;
+ }
+ case ConstCastResultIdPointerChild: {
+ ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
+ buf_sprintf("pointer type child '%s' cannot cast into pointer type child '%s'",
+ buf_ptr(&cast_result->data.pointer_mismatch->actual_child->name),
+ buf_ptr(&cast_result->data.pointer_mismatch->wanted_child->name)));
+ report_recursive_error(ira, source_node, &cast_result->data.pointer_mismatch->child, msg);
+ break;
+ }
+ case ConstCastResultIdSliceChild: {
+ ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
+ buf_sprintf("slice type child '%s' cannot cast into slice type child '%s'",
+ buf_ptr(&cast_result->data.slice_mismatch->actual_child->name),
+ buf_ptr(&cast_result->data.slice_mismatch->wanted_child->name)));
+ report_recursive_error(ira, source_node, &cast_result->data.slice_mismatch->child, msg);
+ break;
+ }
+ case ConstCastResultIdErrorUnionPayload: {
+ ErrorMsg *msg = add_error_note(ira->codegen, parent_msg, source_node,
+ buf_sprintf("error union payload '%s' cannot cast into error union payload '%s'",
+ buf_ptr(&cast_result->data.error_union_payload->actual_payload->name),
+ buf_ptr(&cast_result->data.error_union_payload->wanted_payload->name)));
+ report_recursive_error(ira, source_node, &cast_result->data.error_union_payload->child, msg);
+ break;
+ }
+ case ConstCastResultIdFnAlign: // TODO
+ case ConstCastResultIdFnCC: // TODO
+ case ConstCastResultIdFnVarArgs: // TODO
+ case ConstCastResultIdFnIsGeneric: // TODO
+ case ConstCastResultIdFnReturnType: // TODO
+ case ConstCastResultIdFnArgCount: // TODO
+ case ConstCastResultIdFnGenericArgCount: // TODO
+ case ConstCastResultIdFnArg: // TODO
+ case ConstCastResultIdFnArgNoAlias: // TODO
+ case ConstCastResultIdType: // TODO
+ case ConstCastResultIdUnresolvedInferredErrSet: // TODO
+ case ConstCastResultIdAsyncAllocatorType: // TODO
+ case ConstCastResultIdNullWrapPtr: // TODO
+ break;
+ }
+}
+
static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_instr,
TypeTableEntry *wanted_type, IrInstruction *value)
{
@@ -10073,12 +10182,14 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
return ira->codegen->invalid_instruction;
}
- // explicit match or non-const to const
- if (types_match_const_cast_only(ira, wanted_type, actual_type, source_node, false).id == ConstCastResultIdOk) {
+ // perfect match or non-const to const
+ ConstCastOnly const_cast_result = types_match_const_cast_only(ira, wanted_type, actual_type,
+ source_node, false);
+ if (const_cast_result.id == ConstCastResultIdOk) {
return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop, false);
}
- // explicit widening conversion
+ // widening conversion
if (wanted_type->id == TypeTableEntryIdInt &&
actual_type->id == TypeTableEntryIdInt &&
wanted_type->data.integral.is_signed == actual_type->data.integral.is_signed &&
@@ -10095,7 +10206,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
return ir_analyze_widen_or_shorten(ira, source_instr, value, wanted_type);
}
- // explicit float widening conversion
+ // float widening conversion
if (wanted_type->id == TypeTableEntryIdFloat &&
actual_type->id == TypeTableEntryIdFloat &&
wanted_type->data.floating.bit_count >= actual_type->data.floating.bit_count)
@@ -10104,14 +10215,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
- // explicit error set cast
- if (wanted_type->id == TypeTableEntryIdErrorSet &&
- actual_type->id == TypeTableEntryIdErrorSet)
- {
- return ir_analyze_err_set_cast(ira, source_instr, value, wanted_type);
- }
-
- // explicit cast from [N]T to []const T
+ // cast from [N]T to []const T
if (is_slice(wanted_type) && actual_type->id == TypeTableEntryIdArray) {
TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
@@ -10123,7 +10227,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from &const [N]T to []const T
+ // cast from *const [N]T to []const T
if (is_slice(wanted_type) &&
actual_type->id == TypeTableEntryIdPointer &&
actual_type->data.pointer.is_const &&
@@ -10142,7 +10246,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from [N]T to &const []const N
+ // cast from [N]T to *const []const T
if (wanted_type->id == TypeTableEntryIdPointer &&
wanted_type->data.pointer.is_const &&
is_slice(wanted_type->data.pointer.child_type) &&
@@ -10167,7 +10271,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from [N]T to ?[]const N
+ // cast from [N]T to ?[]const T
if (wanted_type->id == TypeTableEntryIdOptional &&
is_slice(wanted_type->data.maybe.child_type) &&
actual_type->id == TypeTableEntryIdArray)
@@ -10191,53 +10295,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from []T to []u8 or []u8 to []T
- if (is_slice(wanted_type) && is_slice(actual_type)) {
- TypeTableEntry *wanted_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
- TypeTableEntry *actual_ptr_type = actual_type->data.structure.fields[slice_ptr_index].type_entry;
- if ((is_u8(wanted_ptr_type->data.pointer.child_type) || is_u8(actual_ptr_type->data.pointer.child_type)) &&
- (wanted_ptr_type->data.pointer.is_const || !actual_ptr_type->data.pointer.is_const))
- {
- uint32_t src_align_bytes = get_ptr_align(actual_ptr_type);
- uint32_t dest_align_bytes = get_ptr_align(wanted_ptr_type);
-
- if (dest_align_bytes > src_align_bytes) {
- ErrorMsg *msg = ir_add_error(ira, source_instr,
- buf_sprintf("cast increases pointer alignment"));
- add_error_note(ira->codegen, msg, source_instr->source_node,
- buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&actual_type->name), src_align_bytes));
- add_error_note(ira->codegen, msg, source_instr->source_node,
- buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&wanted_type->name), dest_align_bytes));
- return ira->codegen->invalid_instruction;
- }
-
- if (!ir_emit_global_runtime_side_effect(ira, source_instr))
- return ira->codegen->invalid_instruction;
- return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpResizeSlice, true);
- }
- }
-
- // explicit cast from [N]u8 to []const T
- if (is_slice(wanted_type) &&
- wanted_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const &&
- actual_type->id == TypeTableEntryIdArray &&
- is_u8(actual_type->data.array.child_type))
- {
- if (!ir_emit_global_runtime_side_effect(ira, source_instr))
- return ira->codegen->invalid_instruction;
- uint64_t child_type_size = type_size(ira->codegen,
- wanted_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type);
- if (actual_type->data.array.len % child_type_size == 0) {
- return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpBytesToSlice, true);
- } else {
- ir_add_error_node(ira, source_instr->source_node,
- buf_sprintf("unable to convert %s to %s: size mismatch",
- buf_ptr(&actual_type->name), buf_ptr(&wanted_type->name)));
- return ira->codegen->invalid_instruction;
- }
- }
-
- // explicit *[N]T to [*]T
+ // *[N]T to [*]T
if (wanted_type->id == TypeTableEntryIdPointer &&
wanted_type->data.pointer.ptr_len == PtrLenUnknown &&
actual_type->id == TypeTableEntryIdPointer &&
@@ -10251,7 +10309,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
return ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value, wanted_type);
}
- // explicit *[N]T to []T
+ // *[N]T to []T
if (is_slice(wanted_type) &&
actual_type->id == TypeTableEntryIdPointer &&
actual_type->data.pointer.ptr_len == PtrLenSingle &&
@@ -10268,7 +10326,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
- // explicit cast from T to ?T
+ // cast from T to ?T
// note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism
if (wanted_type->id == TypeTableEntryIdOptional) {
TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type;
@@ -10300,14 +10358,14 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from null literal to maybe type
+ // cast from null literal to maybe type
if (wanted_type->id == TypeTableEntryIdOptional &&
actual_type->id == TypeTableEntryIdNull)
{
return ir_analyze_null_to_maybe(ira, source_instr, value, wanted_type);
}
- // explicit cast from child type of error type to error type
+ // cast from child type of error type to error type
if (wanted_type->id == TypeTableEntryIdErrorUnion) {
if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type,
source_node, false).id == ConstCastResultIdOk)
@@ -10324,7 +10382,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from [N]T to E![]const T
+ // cast from [N]T to E![]const T
if (wanted_type->id == TypeTableEntryIdErrorUnion &&
is_slice(wanted_type->data.error_union.payload_type) &&
actual_type->id == TypeTableEntryIdArray)
@@ -10348,14 +10406,14 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from error set to error union type
+ // cast from error set to error union type
if (wanted_type->id == TypeTableEntryIdErrorUnion &&
actual_type->id == TypeTableEntryIdErrorSet)
{
return ir_analyze_err_wrap_code(ira, source_instr, value, wanted_type);
}
- // explicit cast from T to E!?T
+ // cast from T to E!?T
if (wanted_type->id == TypeTableEntryIdErrorUnion &&
wanted_type->data.error_union.payload_type->id == TypeTableEntryIdOptional &&
actual_type->id != TypeTableEntryIdOptional)
@@ -10378,8 +10436,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from number literal to another type
- // explicit cast from number literal to *const integer
+ // cast from number literal to another type
+ // cast from number literal to *const integer
if (actual_type->id == TypeTableEntryIdComptimeFloat ||
actual_type->id == TypeTableEntryIdComptimeInt)
{
@@ -10429,7 +10487,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from typed number to integer or float literal.
+ // cast from typed number to integer or float literal.
// works when the number is known at compile time
if (instr_is_comptime(value) &&
((actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdComptimeInt) ||
@@ -10438,32 +10496,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
return ir_analyze_number_to_literal(ira, source_instr, value, wanted_type);
}
- // explicit cast from T!void to integer type which can fit it
- bool actual_type_is_void_err = actual_type->id == TypeTableEntryIdErrorUnion &&
- !type_has_bits(actual_type->data.error_union.payload_type);
- bool actual_type_is_err_set = actual_type->id == TypeTableEntryIdErrorSet;
- if ((actual_type_is_void_err || actual_type_is_err_set) && wanted_type->id == TypeTableEntryIdInt) {
- return ir_analyze_err_to_int(ira, source_instr, value, wanted_type);
- }
-
- // explicit cast from integer to error set
- if (wanted_type->id == TypeTableEntryIdErrorSet && actual_type->id == TypeTableEntryIdInt &&
- !actual_type->data.integral.is_signed)
- {
- return ir_analyze_int_to_err(ira, source_instr, value, wanted_type);
- }
-
- // explicit cast from integer to enum type with no payload
- if (actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdEnum) {
- return ir_analyze_int_to_enum(ira, source_instr, value, wanted_type);
- }
-
- // explicit cast from enum type with no payload to integer
- if (wanted_type->id == TypeTableEntryIdInt && actual_type->id == TypeTableEntryIdEnum) {
- return ir_analyze_enum_to_int(ira, source_instr, value, wanted_type);
- }
-
- // explicit cast from union to the enum type of the union
+ // cast from union to the enum type of the union
if (actual_type->id == TypeTableEntryIdUnion && wanted_type->id == TypeTableEntryIdEnum) {
type_ensure_zero_bits_known(ira->codegen, actual_type);
if (type_is_invalid(actual_type))
@@ -10474,7 +10507,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit enum to union which has the enum as the tag type
+ // enum to union which has the enum as the tag type
if (wanted_type->id == TypeTableEntryIdUnion && actual_type->id == TypeTableEntryIdEnum &&
(wanted_type->data.unionation.decl_node->data.container_decl.auto_enum ||
wanted_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
@@ -10485,7 +10518,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit enum to &const union which has the enum as the tag type
+ // enum to *const union which has the enum as the tag type
if (actual_type->id == TypeTableEntryIdEnum && wanted_type->id == TypeTableEntryIdPointer) {
TypeTableEntry *union_type = wanted_type->data.pointer.child_type;
if (union_type->data.unionation.decl_node->data.container_decl.auto_enum ||
@@ -10506,7 +10539,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from *T to *[1]T
+ // cast from *T to *[1]T
if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
actual_type->id == TypeTableEntryIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle)
{
@@ -10530,7 +10563,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from T to *T where T is zero bits
+ // cast from T to *T where T is zero bits
if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
actual_type, source_node, !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
@@ -10545,12 +10578,12 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
- // explicit cast from undefined to anything
+ // cast from undefined to anything
if (actual_type->id == TypeTableEntryIdUndefined) {
return ir_analyze_undefined_to_anything(ira, source_instr, value, wanted_type);
}
- // explicit cast from something to const pointer of it
+ // cast from something to const pointer of it
if (!type_requires_comptime(actual_type)) {
TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true);
if (types_match_const_cast_only(ira, wanted_type, const_ptr_actual, source_node, false).id == ConstCastResultIdOk) {
@@ -10558,10 +10591,11 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- ir_add_error_node(ira, source_instr->source_node,
- buf_sprintf("invalid cast from type '%s' to '%s'",
- buf_ptr(&actual_type->name),
- buf_ptr(&wanted_type->name)));
+ ErrorMsg *parent_msg = ir_add_error_node(ira, source_instr->source_node,
+ buf_sprintf("expected type '%s', found '%s'",
+ buf_ptr(&wanted_type->name),
+ buf_ptr(&actual_type->name)));
+ report_recursive_error(ira, source_instr->source_node, &const_cast_result, parent_msg);
return ira->codegen->invalid_instruction;
}
@@ -10578,22 +10612,7 @@ static IrInstruction *ir_implicit_cast(IrAnalyze *ira, IrInstruction *value, Typ
if (value->value.type->id == TypeTableEntryIdUnreachable)
return value;
- ImplicitCastMatchResult result = ir_types_match_with_implicit_cast(ira, expected_type, value->value.type, value);
- switch (result) {
- case ImplicitCastMatchResultNo:
- ir_add_error(ira, value,
- buf_sprintf("expected type '%s', found '%s'",
- buf_ptr(&expected_type->name),
- buf_ptr(&value->value.type->name)));
- return ira->codegen->invalid_instruction;
-
- case ImplicitCastMatchResultYes:
- return ir_analyze_cast(ira, value, expected_type, value);
- case ImplicitCastMatchResultReportedError:
- return ira->codegen->invalid_instruction;
- }
-
- zig_unreachable();
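+ // implicit casting now funnels directly into ir_analyze_cast, which
+ // reports the "expected type '...', found '...'" error along with
+ // recursive const-cast notes when no cast applies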
+ return ir_analyze_cast(ira, value, expected_type, value);
}
static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruction, IrInstruction *ptr) {
@@ -10889,10 +10908,15 @@ static TypeTableEntry *ir_analyze_bin_op_bool(IrAnalyze *ira, IrInstructionBinOp
if (casted_op2 == ira->codegen->invalid_instruction)
return ira->codegen->builtin_types.entry_invalid;
- ConstExprValue *op1_val = &casted_op1->value;
- ConstExprValue *op2_val = &casted_op2->value;
- if (op1_val->special != ConstValSpecialRuntime && op2_val->special != ConstValSpecialRuntime) {
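+ // resolve operands via ir_resolve_const with UndefBad so a comptime
+ // `undefined` operand is reported as an error rather than read as if
+ // it held a defined value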
+ if (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2)) {
ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base);
+ ConstExprValue *op1_val = ir_resolve_const(ira, casted_op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
assert(casted_op1->value.type->id == TypeTableEntryIdBool);
assert(casted_op2->value.type->id == TypeTableEntryIdBool);
@@ -11042,9 +11066,14 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
}
}
- ConstExprValue *op1_val = &op1->value;
- ConstExprValue *op2_val = &op2->value;
- if (value_is_comptime(op1_val) && value_is_comptime(op2_val)) {
+ if (instr_is_comptime(op1) && instr_is_comptime(op2)) {
+ ConstExprValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+ ConstExprValue *op2_val = ir_resolve_const(ira, op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
bool answer;
bool are_equal = op1_val->data.x_err_set->value == op2_val->data.x_err_set->value;
if (op_id == IrBinOpCmpEq) {
@@ -11133,10 +11162,15 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
if (casted_op2 == ira->codegen->invalid_instruction)
return ira->codegen->builtin_types.entry_invalid;
- ConstExprValue *op1_val = &casted_op1->value;
- ConstExprValue *op2_val = &casted_op2->value;
bool one_possible_value = !type_requires_comptime(resolved_type) && !type_has_bits(resolved_type);
- if (one_possible_value || (value_is_comptime(op1_val) && value_is_comptime(op2_val))) {
+ if (one_possible_value || (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2))) {
+ ConstExprValue *op1_val = one_possible_value ? &casted_op1->value : ir_resolve_const(ira, casted_op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+ ConstExprValue *op2_val = one_possible_value ? &casted_op2->value : ir_resolve_const(ira, casted_op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
bool answer;
if (resolved_type->id == TypeTableEntryIdComptimeFloat || resolved_type->id == TypeTableEntryIdFloat) {
Cmp cmp_result = float_cmp(op1_val, op2_val);
@@ -11164,11 +11198,17 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
if (resolved_type->id == TypeTableEntryIdInt && !resolved_type->data.integral.is_signed) {
ConstExprValue *known_left_val;
IrBinOp flipped_op_id;
- if (value_is_comptime(op1_val)) {
- known_left_val = op1_val;
+ if (instr_is_comptime(casted_op1)) {
+ known_left_val = ir_resolve_const(ira, casted_op1, UndefBad);
+ if (known_left_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
flipped_op_id = op_id;
- } else if (value_is_comptime(op2_val)) {
- known_left_val = op2_val;
+ } else if (instr_is_comptime(casted_op2)) {
+ known_left_val = ir_resolve_const(ira, casted_op2, UndefBad);
+ if (known_left_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
if (op_id == IrBinOpCmpLessThan) {
flipped_op_id = IrBinOpCmpGreaterThan;
} else if (op_id == IrBinOpCmpGreaterThan) {
@@ -11413,6 +11453,26 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp *
} else {
TypeTableEntry *shift_amt_type = get_smallest_unsigned_int_type(ira->codegen,
op1->value.type->data.integral.bit_count - 1);
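+ // a comptime RHS is range-checked against the shift-amount type up
+ // front; e.g. for a u8 LHS the amount type is u3, so a comptime RHS
+ // of 9 is rejected here with a note naming the value and type rather
+ // than failing in the generic implicit cast below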
+ if (bin_op_instruction->op_id == IrBinOpBitShiftLeftLossy &&
+ op2->value.type->id == TypeTableEntryIdComptimeInt) {
+ if (!bigint_fits_in_bits(&op2->value.data.x_bigint,
+ shift_amt_type->data.integral.bit_count,
+ op2->value.data.x_bigint.is_negative)) {
+ Buf *val_buf = buf_alloc();
+ bigint_append_buf(val_buf, &op2->value.data.x_bigint, 10);
+ ErrorMsg *msg = ir_add_error(ira, &bin_op_instruction->base,
+ buf_sprintf("RHS of shift is too large for LHS type"));
+ add_error_note(ira->codegen, msg, op2->source_node,
+ buf_sprintf("value %s cannot fit into type '%s'",
+ buf_ptr(val_buf), buf_ptr(&shift_amt_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+ }
casted_op2 = ir_implicit_cast(ira, op2, shift_amt_type);
if (casted_op2 == ira->codegen->invalid_instruction)
@@ -11420,8 +11480,14 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp *
}
if (instr_is_comptime(op1) && instr_is_comptime(casted_op2)) {
- ConstExprValue *op1_val = &op1->value;
- ConstExprValue *op2_val = &casted_op2->value;
+ ConstExprValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *result_instruction = ir_get_const(ira, &bin_op_instruction->base);
ir_link_new_instruction(result_instruction, &bin_op_instruction->base);
ConstExprValue *out_val = &result_instruction->value;
@@ -11500,7 +11566,15 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
if (is_signed_div) {
bool ok = false;
if (instr_is_comptime(op1) && instr_is_comptime(op2)) {
- if (bigint_cmp_zero(&op2->value.data.x_bigint) == CmpEQ) {
+ ConstExprValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *op2_val = ir_resolve_const(ira, op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (bigint_cmp_zero(&op2_val->data.x_bigint) == CmpEQ) {
// the division by zero error will be caught later, but we don't have a
// division function ambiguity problem.
op_id = IrBinOpDivTrunc;
@@ -11508,8 +11582,8 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
} else {
BigInt trunc_result;
BigInt floor_result;
- bigint_div_trunc(&trunc_result, &op1->value.data.x_bigint, &op2->value.data.x_bigint);
- bigint_div_floor(&floor_result, &op1->value.data.x_bigint, &op2->value.data.x_bigint);
+ bigint_div_trunc(&trunc_result, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
+ bigint_div_floor(&floor_result, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
if (bigint_cmp(&trunc_result, &floor_result) == CmpEQ) {
ok = true;
op_id = IrBinOpDivTrunc;
@@ -11530,7 +11604,15 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
if (is_signed_div && (is_int || is_float)) {
bool ok = false;
if (instr_is_comptime(op1) && instr_is_comptime(op2)) {
+ ConstExprValue *op1_val = ir_resolve_const(ira, op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
if (is_int) {
+ ConstExprValue *op2_val = ir_resolve_const(ira, op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
if (bigint_cmp_zero(&op2->value.data.x_bigint) == CmpEQ) {
// the division by zero error will be caught later, but we don't
// have a remainder function ambiguity problem
@@ -11538,14 +11620,19 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
} else {
BigInt rem_result;
BigInt mod_result;
- bigint_rem(&rem_result, &op1->value.data.x_bigint, &op2->value.data.x_bigint);
- bigint_mod(&mod_result, &op1->value.data.x_bigint, &op2->value.data.x_bigint);
+ bigint_rem(&rem_result, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
+ bigint_mod(&mod_result, &op1_val->data.x_bigint, &op2_val->data.x_bigint);
ok = bigint_cmp(&rem_result, &mod_result) == CmpEQ;
}
} else {
IrInstruction *casted_op2 = ir_implicit_cast(ira, op2, resolved_type);
if (casted_op2 == ira->codegen->invalid_instruction)
return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
if (float_cmp_zero(&casted_op2->value) == CmpEQ) {
// the division by zero error will be caught later, but we don't
// have a remainder function ambiguity problem
@@ -11553,8 +11640,8 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
} else {
ConstExprValue rem_result;
ConstExprValue mod_result;
- float_rem(&rem_result, &op1->value, &casted_op2->value);
- float_mod(&mod_result, &op1->value, &casted_op2->value);
+ float_rem(&rem_result, op1_val, op2_val);
+ float_mod(&mod_result, op1_val, op2_val);
ok = float_cmp(&rem_result, &mod_result) == CmpEQ;
}
}
@@ -11612,8 +11699,13 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
return ira->codegen->builtin_types.entry_invalid;
if (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2)) {
- ConstExprValue *op1_val = &casted_op1->value;
- ConstExprValue *op2_val = &casted_op2->value;
+ ConstExprValue *op1_val = ir_resolve_const(ira, casted_op1, UndefBad);
+ if (op1_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+ ConstExprValue *op2_val = ir_resolve_const(ira, casted_op2, UndefBad);
+ if (op2_val == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *result_instruction = ir_get_const(ira, &bin_op_instruction->base);
ir_link_new_instruction(result_instruction, &bin_op_instruction->base);
ConstExprValue *out_val = &result_instruction->value;
@@ -11788,9 +11880,16 @@ static TypeTableEntry *ir_analyze_array_cat(IrAnalyze *ira, IrInstructionBinOp *
out_val->data.x_ptr.data.base_array.array_val = out_array_val;
out_val->data.x_ptr.data.base_array.elem_index = 0;
}
- out_array_val->data.x_array.s_none.elements = create_const_vals(new_len);
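+ // when both operands are entirely undefined, the concatenation is
+ // undefined too; skip allocating and expanding element values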
+ if (op1_array_val->data.x_array.special == ConstArraySpecialUndef &&
+ op2_array_val->data.x_array.special == ConstArraySpecialUndef) {
+ out_array_val->data.x_array.special = ConstArraySpecialUndef;
+ return result_type;
+ }
+
+ out_array_val->data.x_array.s_none.elements = create_const_vals(new_len);
expand_undef_array(ira->codegen, op1_array_val);
+ expand_undef_array(ira->codegen, op2_array_val);
size_t next_index = 0;
for (size_t i = op1_array_index; i < op1_array_end; i += 1, next_index += 1) {
@@ -11842,11 +11941,15 @@ static TypeTableEntry *ir_analyze_array_mult(IrAnalyze *ira, IrInstructionBinOp
}
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
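+ // repeating an all-undefined array yields an all-undefined array;
+ // no per-element values need to be materialized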
+ if (array_val->data.x_array.special == ConstArraySpecialUndef) {
+ out_val->data.x_array.special = ConstArraySpecialUndef;
+
+ TypeTableEntry *child_type = array_type->data.array.child_type;
+ return get_array_type(ira->codegen, child_type, new_array_len);
+ }
out_val->data.x_array.s_none.elements = create_const_vals(new_array_len);
- expand_undef_array(ira->codegen, array_val);
-
uint64_t i = 0;
for (uint64_t x = 0; x < mult_amt; x += 1) {
for (uint64_t y = 0; y < old_array_len; y += 1) {
@@ -12243,7 +12346,7 @@ static TypeTableEntry *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
{
if (instruction->optional == IrInstructionErrorReturnTrace::Null) {
TypeTableEntry *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen);
- TypeTableEntry *optional_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type);
+ TypeTableEntry *optional_type = get_optional_type(ira->codegen, ptr_to_stack_trace_type);
if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) {
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
assert(get_codegen_ptr_type(optional_type) != nullptr);
@@ -13172,7 +13275,11 @@ static TypeTableEntry *ir_analyze_dereference(IrAnalyze *ira, IrInstructionUnOp
// one of the ptr instructions
if (instr_is_comptime(value)) {
- ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &value->value);
+ ConstExprValue *comptime_value = ir_resolve_const(ira, value, UndefBad);
+ if (comptime_value == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *pointee = const_ptr_pointee(ira->codegen, comptime_value);
if (pointee->type == child_type) {
ConstExprValue *out_val = ir_build_const_from(ira, &un_op_instruction->base);
copy_const_val(out_val, pointee, value->value.data.x_ptr.mut == ConstPtrMutComptimeConst);
@@ -13221,7 +13328,7 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op
case TypeTableEntryIdPromise:
{
ConstExprValue *out_val = ir_build_const_from(ira, &un_op_instruction->base);
- out_val->data.x_type = get_maybe_type(ira->codegen, type_entry);
+ out_val->data.x_type = get_optional_type(ira->codegen, type_entry);
return ira->codegen->builtin_types.entry_type;
}
case TypeTableEntryIdUnreachable:
@@ -13289,7 +13396,7 @@ static TypeTableEntry *ir_analyze_bin_not(IrAnalyze *ira, IrInstructionUnOp *ins
if (expr_type->id == TypeTableEntryIdInt) {
if (instr_is_comptime(value)) {
ConstExprValue *target_const_val = ir_resolve_const(ira, value, UndefBad);
- if (!target_const_val)
+ if (target_const_val == nullptr)
return ira->codegen->builtin_types.entry_invalid;
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
@@ -16045,6 +16152,9 @@ static TypeTableEntry *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrIn
assert(target->value.type->id == TypeTableEntryIdEnum);
if (instr_is_comptime(target)) {
+ type_ensure_zero_bits_known(ira->codegen, target->value.type);
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
TypeEnumField *field = find_enum_field_by_tag(target->value.type, &target->value.data.x_bigint);
ConstExprValue *array_val = create_const_str_lit(ira->codegen, field->name);
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
@@ -16442,7 +16552,7 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
true, false, PtrLenUnknown,
get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8),
0, 0);
- fn_def_fields[6].type = get_maybe_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr));
+ fn_def_fields[6].type = get_optional_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr));
if (fn_node->is_extern && buf_len(fn_node->lib_name) > 0) {
fn_def_fields[6].data.x_optional = create_const_vals(1);
ConstExprValue *lib_name = create_const_str_lit(ira->codegen, fn_node->lib_name);
@@ -16722,16 +16832,20 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
ConstExprValue *fields = create_const_vals(1);
result->data.x_struct.fields = fields;
- // @TODO ?type instead of using @typeOf(undefined) when we have no type.
- // child: type
+ // child: ?type
ensure_field_index(result->type, "child", 0);
fields[0].special = ConstValSpecialStatic;
- fields[0].type = ira->codegen->builtin_types.entry_type;
+ fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
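+    // TypeInfo now models "no type" as a null ?type instead of the
+    // old @typeOf(undefined) sentinel.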
if (type_entry->data.promise.result_type == nullptr)
- fields[0].data.x_type = ira->codegen->builtin_types.entry_undef;
- else
- fields[0].data.x_type = type_entry->data.promise.result_type;
+ fields[0].data.x_optional = nullptr;
+ else {
+ ConstExprValue *child_type = create_const_vals(1);
+ child_type->special = ConstValSpecialStatic;
+ child_type->type = ira->codegen->builtin_types.entry_type;
+ child_type->data.x_type = type_entry->data.promise.result_type;
+ fields[0].data.x_optional = child_type;
+ }
break;
}
@@ -16872,19 +16986,23 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
fields[0].special = ConstValSpecialStatic;
fields[0].type = ir_type_info_get_type(ira, "ContainerLayout");
bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.unionation.layout);
- // tag_type: type
+ // tag_type: ?type
ensure_field_index(result->type, "tag_type", 1);
fields[1].special = ConstValSpecialStatic;
- fields[1].type = ira->codegen->builtin_types.entry_type;
- // @TODO ?type instead of using @typeOf(undefined) when we have no type.
+ fields[1].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
+
AstNode *union_decl_node = type_entry->data.unionation.decl_node;
if (union_decl_node->data.container_decl.auto_enum ||
union_decl_node->data.container_decl.init_arg_expr != nullptr)
{
- fields[1].data.x_type = type_entry->data.unionation.tag_type;
+ ConstExprValue *tag_type = create_const_vals(1);
+ tag_type->special = ConstValSpecialStatic;
+ tag_type->type = ira->codegen->builtin_types.entry_type;
+ tag_type->data.x_type = type_entry->data.unionation.tag_type;
+ fields[1].data.x_optional = tag_type;
}
else
- fields[1].data.x_type = ira->codegen->builtin_types.entry_undef;
+ fields[1].data.x_optional = nullptr;
// fields: []TypeInfo.UnionField
ensure_field_index(result->type, "fields", 2);
@@ -16911,9 +17029,9 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
ConstExprValue *inner_fields = create_const_vals(3);
inner_fields[1].special = ConstValSpecialStatic;
- inner_fields[1].type = get_maybe_type(ira->codegen, type_info_enum_field_type);
+ inner_fields[1].type = get_optional_type(ira->codegen, type_info_enum_field_type);
- if (fields[1].data.x_type == ira->codegen->builtin_types.entry_undef) {
+ if (fields[1].data.x_optional == nullptr) {
inner_fields[1].data.x_optional = nullptr;
} else {
inner_fields[1].data.x_optional = create_const_vals(1);
@@ -16982,7 +17100,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
ConstExprValue *inner_fields = create_const_vals(3);
inner_fields[1].special = ConstValSpecialStatic;
- inner_fields[1].type = get_maybe_type(ira->codegen, ira->codegen->builtin_types.entry_usize);
+ inner_fields[1].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_usize);
if (!type_has_bits(struct_field->type_entry)) {
inner_fields[1].data.x_optional = nullptr;
@@ -17022,8 +17140,6 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
ConstExprValue *fields = create_const_vals(6);
result->data.x_struct.fields = fields;
- // @TODO Fix type = undefined with ?type
-
// calling_convention: TypeInfo.CallingConvention
ensure_field_index(result->type, "calling_convention", 0);
fields[0].special = ConstValSpecialStatic;
@@ -17041,22 +17157,32 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
fields[2].special = ConstValSpecialStatic;
fields[2].type = ira->codegen->builtin_types.entry_bool;
fields[2].data.x_bool = type_entry->data.fn.fn_type_id.is_var_args;
- // return_type: type
+ // return_type: ?type
ensure_field_index(result->type, "return_type", 3);
fields[3].special = ConstValSpecialStatic;
- fields[3].type = ira->codegen->builtin_types.entry_type;
+ fields[3].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
if (type_entry->data.fn.fn_type_id.return_type == nullptr)
- fields[3].data.x_type = ira->codegen->builtin_types.entry_undef;
- else
- fields[3].data.x_type = type_entry->data.fn.fn_type_id.return_type;
+ fields[3].data.x_optional = nullptr;
+ else {
+ ConstExprValue *return_type = create_const_vals(1);
+ return_type->special = ConstValSpecialStatic;
+ return_type->type = ira->codegen->builtin_types.entry_type;
+ return_type->data.x_type = type_entry->data.fn.fn_type_id.return_type;
+ fields[3].data.x_optional = return_type;
+ }
-    // async_allocator_type: ?type
+    // async_allocator_type: ?type
ensure_field_index(result->type, "async_allocator_type", 4);
fields[4].special = ConstValSpecialStatic;
- fields[4].type = ira->codegen->builtin_types.entry_type;
+ fields[4].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
if (type_entry->data.fn.fn_type_id.async_allocator_type == nullptr)
- fields[4].data.x_type = ira->codegen->builtin_types.entry_undef;
- else
- fields[4].data.x_type = type_entry->data.fn.fn_type_id.async_allocator_type;
+ fields[4].data.x_optional = nullptr;
+ else {
+ ConstExprValue *async_alloc_type = create_const_vals(1);
+ async_alloc_type->special = ConstValSpecialStatic;
+ async_alloc_type->type = ira->codegen->builtin_types.entry_type;
+ async_alloc_type->data.x_type = type_entry->data.fn.fn_type_id.async_allocator_type;
+ fields[4].data.x_optional = async_alloc_type;
+ }
// args: []TypeInfo.FnArg
TypeTableEntry *type_info_fn_arg_type = ir_type_info_get_type(ira, "FnArg");
size_t fn_arg_count = type_entry->data.fn.fn_type_id.param_count -
@@ -17090,12 +17216,17 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
inner_fields[1].type = ira->codegen->builtin_types.entry_bool;
inner_fields[1].data.x_bool = fn_param_info->is_noalias;
inner_fields[2].special = ConstValSpecialStatic;
- inner_fields[2].type = ira->codegen->builtin_types.entry_type;
+ inner_fields[2].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type);
if (arg_is_generic)
- inner_fields[2].data.x_type = ira->codegen->builtin_types.entry_undef;
- else
- inner_fields[2].data.x_type = fn_param_info->type;
+ inner_fields[2].data.x_optional = nullptr;
+ else {
+ ConstExprValue *arg_type = create_const_vals(1);
+ arg_type->special = ConstValSpecialStatic;
+ arg_type->type = ira->codegen->builtin_types.entry_type;
+ arg_type->data.x_type = fn_param_info->type;
+ inner_fields[2].data.x_optional = arg_type;
+ }
fn_arg_val->data.x_struct.fields = inner_fields;
fn_arg_val->data.x_struct.parent.id = ConstParentIdArray;
@@ -17437,7 +17568,7 @@ static TypeTableEntry *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstruct
IrInstruction *result = ir_build_cmpxchg(&ira->new_irb, instruction->base.scope, instruction->base.source_node,
nullptr, casted_ptr, casted_cmp_value, casted_new_value, nullptr, nullptr, instruction->is_weak,
operand_type, success_order, failure_order);
- result->value.type = get_maybe_type(ira->codegen, operand_type);
+ result->value.type = get_optional_type(ira->codegen, operand_type);
ir_link_new_instruction(result, &instruction->base);
ir_add_alloca(ira, result, result->value.type);
return result->value.type;
@@ -17593,6 +17724,137 @@ static TypeTableEntry *ir_analyze_instruction_float_cast(IrAnalyze *ira, IrInstr
return dest_type;
}
+static TypeTableEntry *ir_analyze_instruction_err_set_cast(IrAnalyze *ira, IrInstructionErrSetCast *instruction) {
+ TypeTableEntry *dest_type = ir_resolve_type(ira, instruction->dest_type->other);
+ if (type_is_invalid(dest_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (dest_type->id != TypeTableEntryIdErrorSet) {
+ ir_add_error(ira, instruction->dest_type,
+ buf_sprintf("expected error set type, found '%s'", buf_ptr(&dest_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (target->value.type->id != TypeTableEntryIdErrorSet) {
+ ir_add_error(ira, instruction->target,
+ buf_sprintf("expected error set type, found '%s'", buf_ptr(&target->value.type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *result = ir_analyze_err_set_cast(ira, &instruction->base, target, dest_type);
+ if (type_is_invalid(result->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+ ir_link_new_instruction(result, &instruction->base);
+ return dest_type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_from_bytes(IrAnalyze *ira, IrInstructionFromBytes *instruction) {
+ TypeTableEntry *dest_child_type = ir_resolve_type(ira, instruction->dest_child_type->other);
+ if (type_is_invalid(dest_child_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ bool src_ptr_const;
+ bool src_ptr_volatile;
+ uint32_t src_ptr_align;
+ if (target->value.type->id == TypeTableEntryIdPointer) {
+ src_ptr_const = target->value.type->data.pointer.is_const;
+ src_ptr_volatile = target->value.type->data.pointer.is_volatile;
+ src_ptr_align = target->value.type->data.pointer.alignment;
+ } else if (is_slice(target->value.type)) {
+ TypeTableEntry *src_ptr_type = target->value.type->data.structure.fields[slice_ptr_index].type_entry;
+ src_ptr_const = src_ptr_type->data.pointer.is_const;
+ src_ptr_volatile = src_ptr_type->data.pointer.is_volatile;
+ src_ptr_align = src_ptr_type->data.pointer.alignment;
+ } else {
+ src_ptr_const = true;
+ src_ptr_volatile = false;
+ src_ptr_align = get_abi_alignment(ira->codegen, target->value.type);
+ }
+
+ TypeTableEntry *dest_ptr_type = get_pointer_to_type_extra(ira->codegen, dest_child_type,
+ src_ptr_const, src_ptr_volatile, PtrLenUnknown,
+ src_ptr_align, 0, 0);
+ TypeTableEntry *dest_slice_type = get_slice_type(ira->codegen, dest_ptr_type);
+
+ TypeTableEntry *u8_ptr = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
+ src_ptr_const, src_ptr_volatile, PtrLenUnknown,
+ src_ptr_align, 0, 0);
+ TypeTableEntry *u8_slice = get_slice_type(ira->codegen, u8_ptr);
+
+ IrInstruction *casted_value = ir_implicit_cast(ira, target, u8_slice);
+ if (type_is_invalid(casted_value->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
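+    // When the byte slice's length is known, either from a comptime value or
+    // from a runtime length hint, reject lengths that do not divide evenly
+    // into the destination element size; the cast would otherwise drop bytes.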
+ bool have_known_len = false;
+ uint64_t known_len;
+
+ if (instr_is_comptime(casted_value)) {
+ ConstExprValue *val = ir_resolve_const(ira, casted_value, UndefBad);
+ if (!val)
+ return ira->codegen->builtin_types.entry_invalid;
+
+ ConstExprValue *len_val = &val->data.x_struct.fields[slice_len_index];
+ if (value_is_comptime(len_val)) {
+ known_len = bigint_as_unsigned(&len_val->data.x_bigint);
+ have_known_len = true;
+ }
+ }
+
+ if (casted_value->value.data.rh_slice.id == RuntimeHintSliceIdLen) {
+ known_len = casted_value->value.data.rh_slice.len;
+ have_known_len = true;
+ }
+
+ if (have_known_len) {
+ uint64_t child_type_size = type_size(ira->codegen, dest_child_type);
+ uint64_t remainder = known_len % child_type_size;
+ if (remainder != 0) {
+ ErrorMsg *msg = ir_add_error(ira, &instruction->base,
+ buf_sprintf("unable to convert [%" ZIG_PRI_u64 "]u8 to %s: size mismatch",
+ known_len, buf_ptr(&dest_slice_type->name)));
+ add_error_note(ira->codegen, msg, instruction->dest_child_type->source_node,
+ buf_sprintf("%s has size %" ZIG_PRI_u64 "; remaining bytes: %" ZIG_PRI_u64,
+ buf_ptr(&dest_child_type->name), child_type_size, remainder));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+ }
+
+ IrInstruction *result = ir_resolve_cast(ira, &instruction->base, casted_value, dest_slice_type, CastOpResizeSlice, true);
+ ir_link_new_instruction(result, &instruction->base);
+ return dest_slice_type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_to_bytes(IrAnalyze *ira, IrInstructionToBytes *instruction) {
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (!is_slice(target->value.type)) {
+ ir_add_error(ira, instruction->target,
+ buf_sprintf("expected slice, found '%s'", buf_ptr(&target->value.type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ TypeTableEntry *src_ptr_type = target->value.type->data.structure.fields[slice_ptr_index].type_entry;
+
+ TypeTableEntry *dest_ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8,
+ src_ptr_type->data.pointer.is_const, src_ptr_type->data.pointer.is_volatile, PtrLenUnknown,
+ src_ptr_type->data.pointer.alignment, 0, 0);
+ TypeTableEntry *dest_slice_type = get_slice_type(ira->codegen, dest_ptr_type);
+
+ IrInstruction *result = ir_resolve_cast(ira, &instruction->base, target, dest_slice_type, CastOpResizeSlice, true);
+ ir_link_new_instruction(result, &instruction->base);
+ return dest_slice_type;
+}
+
static TypeTableEntry *ir_analyze_instruction_int_to_float(IrAnalyze *ira, IrInstructionIntToFloat *instruction) {
TypeTableEntry *dest_type = ir_resolve_type(ira, instruction->dest_type->other);
if (type_is_invalid(dest_type))
@@ -17627,6 +17889,39 @@ static TypeTableEntry *ir_analyze_instruction_float_to_int(IrAnalyze *ira, IrIns
return dest_type;
}
+static TypeTableEntry *ir_analyze_instruction_err_to_int(IrAnalyze *ira, IrInstructionErrToInt *instruction) {
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *casted_target;
+ if (target->value.type->id == TypeTableEntryIdErrorSet) {
+ casted_target = target;
+ } else {
+ casted_target = ir_implicit_cast(ira, target, ira->codegen->builtin_types.entry_global_error_set);
+ if (type_is_invalid(casted_target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ IrInstruction *result = ir_analyze_err_to_int(ira, &instruction->base, casted_target, ira->codegen->err_tag_type);
+ ir_link_new_instruction(result, &instruction->base);
+ return result->value.type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_int_to_err(IrAnalyze *ira, IrInstructionIntToErr *instruction) {
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *casted_target = ir_implicit_cast(ira, target, ira->codegen->err_tag_type);
+ if (type_is_invalid(casted_target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *result = ir_analyze_int_to_err(ira, &instruction->base, casted_target, ira->codegen->builtin_types.entry_global_error_set);
+ ir_link_new_instruction(result, &instruction->base);
+ return result->value.type;
+}
+
static TypeTableEntry *ir_analyze_instruction_bool_to_int(IrAnalyze *ira, IrInstructionBoolToInt *instruction) {
IrInstruction *target = instruction->target->other;
if (type_is_invalid(target->value.type))
@@ -17681,9 +17976,13 @@ static TypeTableEntry *ir_analyze_instruction_bool_not(IrAnalyze *ira, IrInstruc
if (type_is_invalid(casted_value->value.type))
return ira->codegen->builtin_types.entry_invalid;
- if (casted_value->value.special != ConstValSpecialRuntime) {
+ if (instr_is_comptime(casted_value)) {
+ ConstExprValue *value = ir_resolve_const(ira, casted_value, UndefBad);
+ if (value == nullptr)
+ return ira->codegen->builtin_types.entry_invalid;
+
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- out_val->data.x_bool = !casted_value->value.data.x_bool;
+ out_val->data.x_bool = !value->data.x_bool;
return bool_type;
}
@@ -18944,7 +19243,7 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3
old_align_bytes = ptr_type->data.pointer.alignment;
TypeTableEntry *better_ptr_type = adjust_ptr_align(ira->codegen, ptr_type, align_bytes);
- result_type = get_maybe_type(ira->codegen, better_ptr_type);
+ result_type = get_optional_type(ira->codegen, better_ptr_type);
} else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdFn)
{
@@ -18952,7 +19251,7 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3
old_align_bytes = fn_type_id.alignment;
fn_type_id.alignment = align_bytes;
TypeTableEntry *fn_type = get_fn_type(ira->codegen, &fn_type_id);
- result_type = get_maybe_type(ira->codegen, fn_type);
+ result_type = get_optional_type(ira->codegen, fn_type);
} else if (is_slice(target_type)) {
TypeTableEntry *slice_ptr_type = target_type->data.structure.fields[slice_ptr_index].type_entry;
old_align_bytes = slice_ptr_type->data.pointer.alignment;
@@ -19713,7 +20012,7 @@ static TypeTableEntry *ir_analyze_instruction_coro_free(IrAnalyze *ira, IrInstru
instruction->base.source_node, coro_id, coro_handle);
ir_link_new_instruction(result, &instruction->base);
TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false);
- result->value.type = get_maybe_type(ira->codegen, ptr_type);
+ result->value.type = get_optional_type(ira->codegen, ptr_type);
return result->value.type;
}
@@ -19781,7 +20080,7 @@ static TypeTableEntry *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira,
instruction->base.source_node, alloc_fn, coro_size);
ir_link_new_instruction(result, &instruction->base);
TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false);
- result->value.type = get_maybe_type(ira->codegen, u8_ptr_type);
+ result->value.type = get_optional_type(ira->codegen, u8_ptr_type);
return result->value.type;
}
@@ -20034,6 +20333,9 @@ static TypeTableEntry *ir_analyze_instruction_sqrt(IrAnalyze *ira, IrInstruction
bigfloat_sqrt(&out_val->data.x_bigfloat, &val->data.x_bigfloat);
} else if (float_type->id == TypeTableEntryIdFloat) {
switch (float_type->data.floating.bit_count) {
+ case 16:
+ out_val->data.x_f16 = f16_sqrt(val->data.x_f16);
+ break;
case 32:
out_val->data.x_f32 = sqrtf(val->data.x_f32);
break;
@@ -20054,7 +20356,9 @@ static TypeTableEntry *ir_analyze_instruction_sqrt(IrAnalyze *ira, IrInstruction
}
assert(float_type->id == TypeTableEntryIdFloat);
- if (float_type->data.floating.bit_count != 32 && float_type->data.floating.bit_count != 64) {
+ if (float_type->data.floating.bit_count != 16 &&
+ float_type->data.floating.bit_count != 32 &&
+ float_type->data.floating.bit_count != 64) {
ir_add_error(ira, instruction->type, buf_sprintf("compiler TODO: add implementation of sqrt for '%s'", buf_ptr(&float_type->name)));
return ira->codegen->builtin_types.entry_invalid;
}
@@ -20066,13 +20370,63 @@ static TypeTableEntry *ir_analyze_instruction_sqrt(IrAnalyze *ira, IrInstruction
return result->value.type;
}
+static TypeTableEntry *ir_analyze_instruction_enum_to_int(IrAnalyze *ira, IrInstructionEnumToInt *instruction) {
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (target->value.type->id != TypeTableEntryIdEnum) {
+ ir_add_error(ira, instruction->target,
+ buf_sprintf("expected enum, found type '%s'", buf_ptr(&target->value.type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
+ type_ensure_zero_bits_known(ira->codegen, target->value.type);
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ TypeTableEntry *tag_type = target->value.type->data.enumeration.tag_int_type;
+
+ IrInstruction *result = ir_analyze_enum_to_int(ira, &instruction->base, target, tag_type);
+ ir_link_new_instruction(result, &instruction->base);
+ return result->value.type;
+}
+
+static TypeTableEntry *ir_analyze_instruction_int_to_enum(IrAnalyze *ira, IrInstructionIntToEnum *instruction) {
+ IrInstruction *dest_type_value = instruction->dest_type->other;
+ TypeTableEntry *dest_type = ir_resolve_type(ira, dest_type_value);
+ if (type_is_invalid(dest_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ if (dest_type->id != TypeTableEntryIdEnum) {
+ ir_add_error(ira, instruction->dest_type,
+ buf_sprintf("expected enum, found type '%s'", buf_ptr(&dest_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
+
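+    // Resolving zero-bit-ness forces the enum's integer tag type to be
+    // computed before it is used as the implicit cast target below.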
+ type_ensure_zero_bits_known(ira->codegen, dest_type);
+ if (type_is_invalid(dest_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ TypeTableEntry *tag_type = dest_type->data.enumeration.tag_int_type;
+
+ IrInstruction *target = instruction->target->other;
+ if (type_is_invalid(target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *casted_target = ir_implicit_cast(ira, target, tag_type);
+ if (type_is_invalid(casted_target->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
+ IrInstruction *result = ir_analyze_int_to_enum(ira, &instruction->base, casted_target, dest_type);
+ ir_link_new_instruction(result, &instruction->base);
+ return result->value.type;
+}
+
static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) {
switch (instruction->id) {
case IrInstructionIdInvalid:
case IrInstructionIdWidenOrShorten:
- case IrInstructionIdIntToEnum:
- case IrInstructionIdIntToErr:
- case IrInstructionIdErrToInt:
case IrInstructionIdStructInit:
case IrInstructionIdUnionInit:
case IrInstructionIdStructFieldPtr:
@@ -20193,6 +20547,12 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_int_cast(ira, (IrInstructionIntCast *)instruction);
case IrInstructionIdFloatCast:
return ir_analyze_instruction_float_cast(ira, (IrInstructionFloatCast *)instruction);
+ case IrInstructionIdErrSetCast:
+ return ir_analyze_instruction_err_set_cast(ira, (IrInstructionErrSetCast *)instruction);
+ case IrInstructionIdFromBytes:
+ return ir_analyze_instruction_from_bytes(ira, (IrInstructionFromBytes *)instruction);
+ case IrInstructionIdToBytes:
+ return ir_analyze_instruction_to_bytes(ira, (IrInstructionToBytes *)instruction);
case IrInstructionIdIntToFloat:
return ir_analyze_instruction_int_to_float(ira, (IrInstructionIntToFloat *)instruction);
case IrInstructionIdFloatToInt:
@@ -20327,6 +20687,14 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_mark_err_ret_trace_ptr(ira, (IrInstructionMarkErrRetTracePtr *)instruction);
case IrInstructionIdSqrt:
return ir_analyze_instruction_sqrt(ira, (IrInstructionSqrt *)instruction);
+ case IrInstructionIdIntToErr:
+ return ir_analyze_instruction_int_to_err(ira, (IrInstructionIntToErr *)instruction);
+ case IrInstructionIdErrToInt:
+ return ir_analyze_instruction_err_to_int(ira, (IrInstructionErrToInt *)instruction);
+ case IrInstructionIdIntToEnum:
+ return ir_analyze_instruction_int_to_enum(ira, (IrInstructionIntToEnum *)instruction);
+ case IrInstructionIdEnumToInt:
+ return ir_analyze_instruction_enum_to_int(ira, (IrInstructionEnumToInt *)instruction);
}
zig_unreachable();
}
@@ -20544,9 +20912,13 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdAtomicLoad:
case IrInstructionIdIntCast:
case IrInstructionIdFloatCast:
+ case IrInstructionIdErrSetCast:
case IrInstructionIdIntToFloat:
case IrInstructionIdFloatToInt:
case IrInstructionIdBoolToInt:
+ case IrInstructionIdFromBytes:
+ case IrInstructionIdToBytes:
+ case IrInstructionIdEnumToInt:
return false;
case IrInstructionIdAsm:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index cb91720180..5e5a71382c 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -664,6 +664,28 @@ static void ir_print_float_cast(IrPrint *irp, IrInstructionFloatCast *instructio
fprintf(irp->f, ")");
}
+static void ir_print_err_set_cast(IrPrint *irp, IrInstructionErrSetCast *instruction) {
+ fprintf(irp->f, "@errSetCast(");
+ ir_print_other_instruction(irp, instruction->dest_type);
+ fprintf(irp->f, ", ");
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_from_bytes(IrPrint *irp, IrInstructionFromBytes *instruction) {
+ fprintf(irp->f, "@bytesToSlice(");
+ ir_print_other_instruction(irp, instruction->dest_child_type);
+ fprintf(irp->f, ", ");
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_to_bytes(IrPrint *irp, IrInstructionToBytes *instruction) {
+ fprintf(irp->f, "@sliceToBytes(");
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
static void ir_print_int_to_float(IrPrint *irp, IrInstructionIntToFloat *instruction) {
fprintf(irp->f, "@intToFloat(");
ir_print_other_instruction(irp, instruction->dest_type);
@@ -906,6 +928,17 @@ static void ir_print_int_to_ptr(IrPrint *irp, IrInstructionIntToPtr *instruction
static void ir_print_int_to_enum(IrPrint *irp, IrInstructionIntToEnum *instruction) {
fprintf(irp->f, "@intToEnum(");
+    if (instruction->dest_type == nullptr) {
+        fprintf(irp->f, "(null)");
+    } else {
+        ir_print_other_instruction(irp, instruction->dest_type);
+    }
+    fprintf(irp->f, ", ");
+ ir_print_other_instruction(irp, instruction->target);
+ fprintf(irp->f, ")");
+}
+
+static void ir_print_enum_to_int(IrPrint *irp, IrInstructionEnumToInt *instruction) {
+ fprintf(irp->f, "@enumToInt(");
ir_print_other_instruction(irp, instruction->target);
fprintf(irp->f, ")");
}
@@ -1461,6 +1494,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdFloatCast:
ir_print_float_cast(irp, (IrInstructionFloatCast *)instruction);
break;
+ case IrInstructionIdErrSetCast:
+ ir_print_err_set_cast(irp, (IrInstructionErrSetCast *)instruction);
+ break;
+ case IrInstructionIdFromBytes:
+ ir_print_from_bytes(irp, (IrInstructionFromBytes *)instruction);
+ break;
+ case IrInstructionIdToBytes:
+ ir_print_to_bytes(irp, (IrInstructionToBytes *)instruction);
+ break;
case IrInstructionIdIntToFloat:
ir_print_int_to_float(irp, (IrInstructionIntToFloat *)instruction);
break;
@@ -1686,6 +1728,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdAtomicLoad:
ir_print_atomic_load(irp, (IrInstructionAtomicLoad *)instruction);
break;
+ case IrInstructionIdEnumToInt:
+ ir_print_enum_to_int(irp, (IrInstructionEnumToInt *)instruction);
+ break;
}
fprintf(irp->f, "\n");
}
diff --git a/src/link.cpp b/src/link.cpp
index a4631b1daf..2d9a79585f 100644
--- a/src/link.cpp
+++ b/src/link.cpp
@@ -325,10 +325,13 @@ static void construct_linker_job_elf(LinkJob *lj) {
lj->args.append((const char *)buf_ptr(g->link_objects.at(i)));
}
- if (g->libc_link_lib == nullptr && (g->out_type == OutTypeExe || g->out_type == OutTypeLib)) {
- Buf *builtin_o_path = build_o(g, "builtin");
- lj->args.append(buf_ptr(builtin_o_path));
+ if (g->out_type == OutTypeExe || g->out_type == OutTypeLib) {
+ if (g->libc_link_lib == nullptr) {
+ Buf *builtin_o_path = build_o(g, "builtin");
+ lj->args.append(buf_ptr(builtin_o_path));
+ }
+ // sometimes libgcc is missing stuff, so we still build compiler_rt and rely on weak linkage
Buf *compiler_rt_o_path = build_compiler_rt(g);
lj->args.append(buf_ptr(compiler_rt_o_path));
}
@@ -554,7 +557,7 @@ static void construct_linker_job_coff(LinkJob *lj) {
lj->args.append(buf_ptr(builtin_o_path));
}
- // msvc compiler_rt is missing some stuff, so we still build it and rely on LinkOnce
+ // msvc compiler_rt is missing some stuff, so we still build it and rely on weak linkage
Buf *compiler_rt_o_path = build_compiler_rt(g);
lj->args.append(buf_ptr(compiler_rt_o_path));
}
diff --git a/src/main.cpp b/src/main.cpp
index 0fe12bb0cb..a409778a78 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -924,6 +924,8 @@ int main(int argc, char **argv) {
codegen_print_timing_report(g, stdout);
return EXIT_SUCCESS;
} else if (cmd == CmdTest) {
+ codegen_set_emit_file_type(g, emit_file_type);
+
ZigTarget native;
get_native_target(&native);
diff --git a/src/os.cpp b/src/os.cpp
index b7d2fd1de0..d52295950d 100644
--- a/src/os.cpp
+++ b/src/os.cpp
@@ -225,6 +225,11 @@ void os_path_extname(Buf *full_path, Buf *out_basename, Buf *out_extname) {
}
void os_path_join(Buf *dirname, Buf *basename, Buf *out_full_path) {
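+    // An empty dirname would make the trailing-separator check below read one
+    // byte before the start of the buffer.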
+ if (buf_len(dirname) == 0) {
+ buf_init_from_buf(out_full_path, basename);
+ return;
+ }
+
buf_init_from_buf(out_full_path, dirname);
uint8_t c = *(buf_ptr(out_full_path) + buf_len(out_full_path) - 1);
if (!os_is_sep(c))
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index 2950b4eb49..f7f41af8a6 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -357,12 +357,19 @@ static void end_float_token(Tokenize *t) {
// Mask the sign bit to 0 since always non-negative lex
const uint64_t exp_mask = 0xffffull << exp_shift;
- if (shift >= 64) {
+    // shifting a u64 by 64 bits is undefined behavior, so the boundary cases
+    // (shift == 128 and shift == 0), where a shift amount of 64 would occur
+    // below, are handled explicitly
+ if (shift == 128) {
+ f_bits.repr[0] = 0;
+ f_bits.repr[1] = sig_bits[0];
+ } else if (shift == 0) {
+ f_bits.repr[0] = sig_bits[0];
+ f_bits.repr[1] = sig_bits[1];
+ } else if (shift >= 64) {
f_bits.repr[0] = 0;
f_bits.repr[1] = sig_bits[0] << (shift - 64);
} else {
f_bits.repr[0] = sig_bits[0] << shift;
- f_bits.repr[1] = ((sig_bits[1] << shift) | (sig_bits[0] >> (64 - shift)));
+ f_bits.repr[1] = (sig_bits[1] << shift) | (sig_bits[0] >> (64 - shift));
}
f_bits.repr[1] &= ~exp_mask;
diff --git a/src/util.hpp b/src/util.hpp
index 52baab7ace..b0402137bd 100644
--- a/src/util.hpp
+++ b/src/util.hpp
@@ -31,6 +31,8 @@
#endif
+#include "softfloat.hpp"
+
#define BREAKPOINT __asm("int $0x03")
ATTRIBUTE_COLD
@@ -165,4 +167,21 @@ static inline uint8_t log2_u64(uint64_t x) {
return (63 - clzll(x));
}
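+// SoftFloat-3e wraps the raw IEEE-754 bits of float64_t in a struct, so the
+// conversions below round-trip through memcpy instead of casting; the
+// static_asserts guard the size assumption.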
+static inline float16_t zig_double_to_f16(double x) {
+ float64_t y;
+ static_assert(sizeof(x) == sizeof(y), "");
+ memcpy(&y, &x, sizeof(x));
+ return f64_to_f16(y);
+}
+
+
+// Return value is safe to coerce to float even when |x| is NaN or Infinity.
+static inline double zig_f16_to_double(float16_t x) {
+ float64_t y = f16_to_f64(x);
+ double z;
+ static_assert(sizeof(y) == sizeof(z), "");
+ memcpy(&z, &y, sizeof(y));
+ return z;
+}
+
#endif
diff --git a/std/atomic/index.zig b/std/atomic/index.zig
index 9d556a6415..c0ea5be183 100644
--- a/std/atomic/index.zig
+++ b/std/atomic/index.zig
@@ -1,7 +1,9 @@
pub const Stack = @import("stack.zig").Stack;
-pub const Queue = @import("queue.zig").Queue;
+pub const QueueMpsc = @import("queue_mpsc.zig").QueueMpsc;
+pub const QueueMpmc = @import("queue_mpmc.zig").QueueMpmc;
test "std.atomic" {
- _ = @import("stack.zig").Stack;
- _ = @import("queue.zig").Queue;
+ _ = @import("stack.zig");
+ _ = @import("queue_mpsc.zig");
+ _ = @import("queue_mpmc.zig");
}
diff --git a/std/atomic/queue_mpmc.zig b/std/atomic/queue_mpmc.zig
new file mode 100644
index 0000000000..7ffc9f9ccb
--- /dev/null
+++ b/std/atomic/queue_mpmc.zig
@@ -0,0 +1,214 @@
+const builtin = @import("builtin");
+const AtomicOrder = builtin.AtomicOrder;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+
+/// Many producer, many consumer, non-allocating, thread-safe, lock-free
+/// This implementation has a crippling limitation: it hangs onto node
+/// memory for 1 extra get() and 1 extra put() operation. When get() returns a node,
+/// that node must not be freed until both the next get() and the next put() complete.
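+/// A hypothetical sketch of the hazard (freeNode is a made-up helper):
+///     const node = queue.get().?;  // dequeues node A
+///     freeNode(node);              // too early: the very next get() still
+///     _ = queue.get();             // reads A.next, and a put() may write it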
+pub fn QueueMpmc(comptime T: type) type {
+ return struct {
+ head: *Node,
+ tail: *Node,
+ root: Node,
+
+ pub const Self = this;
+
+ pub const Node = struct {
+ next: ?*Node,
+ data: T,
+ };
+
+ /// TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287
+ pub fn init(self: *Self) void {
+ self.root.next = null;
+ self.head = &self.root;
+ self.tail = &self.root;
+ }
+
+ pub fn put(self: *Self, node: *Node) void {
+ node.next = null;
+
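+        // Swing tail to the new node first, then link the previous tail to it.
+        // Between the two exchanges the chain is momentarily broken, which is
+        // why a node returned by get() can still be written by the next put().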
+ const tail = @atomicRmw(*Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
+ _ = @atomicRmw(?*Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
+ }
+
+ /// node must not be freed until both the next get() and the next put() complete
+ pub fn get(self: *Self) ?*Node {
+ var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
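+        // CAS loop: try to advance head past its successor. On contention,
+        // @cmpxchgWeak returns the freshly observed head and the loop retries;
+        // on success it returns null and the dequeued node is returned.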
+ while (true) {
+ const node = head.next orelse return null;
+ head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return node;
+ }
+ }
+
+    /// This is a debug function that is not thread-safe.
+ pub fn dump(self: *Self) void {
+ std.debug.warn("head: ");
+ dumpRecursive(self.head, 0);
+ std.debug.warn("tail: ");
+ dumpRecursive(self.tail, 0);
+ }
+
+ fn dumpRecursive(optional_node: ?*Node, indent: usize) void {
+ var stderr_file = std.io.getStdErr() catch return;
+ const stderr = &std.io.FileOutStream.init(&stderr_file).stream;
+ stderr.writeByteNTimes(' ', indent) catch return;
+ if (optional_node) |node| {
+ std.debug.warn("0x{x}={}\n", @ptrToInt(node), node.data);
+ dumpRecursive(node.next, indent + 1);
+ } else {
+ std.debug.warn("(null)\n");
+ }
+ }
+ };
+}
+
+const std = @import("std");
+const assert = std.debug.assert;
+
+const Context = struct {
+ allocator: *std.mem.Allocator,
+ queue: *QueueMpmc(i32),
+ put_sum: isize,
+ get_sum: isize,
+ get_count: usize,
+ puts_done: u8, // TODO make this a bool
+};
+
+// TODO add lazily evaluated build options and then put puts_per_thread behind
+// some option such as "AggressiveMultithreadedFuzzTest". In the AppVeyor CI,
+// which runs on a single core, we still want this test to pass, but there is
+// so much thrashing that we need a smaller value. We would also use a less
+// aggressive setting when running in valgrind.
+const puts_per_thread = 500;
+const put_thread_count = 3;
+
+test "std.atomic.queue_mpmc" {
+ var direct_allocator = std.heap.DirectAllocator.init();
+ defer direct_allocator.deinit();
+
+ var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024);
+ defer direct_allocator.allocator.free(plenty_of_memory);
+
+ var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
+ var a = &fixed_buffer_allocator.allocator;
+
+ var queue: QueueMpmc(i32) = undefined;
+ queue.init();
+ var context = Context{
+ .allocator = a,
+ .queue = &queue,
+ .put_sum = 0,
+ .get_sum = 0,
+ .puts_done = 0,
+ .get_count = 0,
+ };
+
+ var putters: [put_thread_count]*std.os.Thread = undefined;
+ for (putters) |*t| {
+ t.* = try std.os.spawnThread(&context, startPuts);
+ }
+ var getters: [put_thread_count]*std.os.Thread = undefined;
+ for (getters) |*t| {
+ t.* = try std.os.spawnThread(&context, startGets);
+ }
+
+ for (putters) |t|
+ t.wait();
+ _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ for (getters) |t|
+ t.wait();
+
+ if (context.put_sum != context.get_sum) {
+ std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum);
+ }
+
+ if (context.get_count != puts_per_thread * put_thread_count) {
+ std.debug.panic(
+ "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
+ context.get_count,
+ u32(puts_per_thread),
+ u32(put_thread_count),
+ );
+ }
+}
+
+fn startPuts(ctx: *Context) u8 {
+ var put_count: usize = puts_per_thread;
+ var r = std.rand.DefaultPrng.init(0xdeadbeef);
+ while (put_count != 0) : (put_count -= 1) {
+ std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
+ const x = @bitCast(i32, r.random.scalar(u32));
+ const node = ctx.allocator.create(QueueMpmc(i32).Node{
+ .next = undefined,
+ .data = x,
+ }) catch unreachable;
+ ctx.queue.put(node);
+ _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+ }
+ return 0;
+}
+
+fn startGets(ctx: *Context) u8 {
+ while (true) {
+ const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+
+ while (ctx.queue.get()) |node| {
+ std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
+ _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
+ }
+
+ if (last) return 0;
+ }
+}
+
+test "std.atomic.queue_mpmc single-threaded" {
+ var queue: QueueMpmc(i32) = undefined;
+ queue.init();
+
+ var node_0 = QueueMpmc(i32).Node{
+ .data = 0,
+ .next = undefined,
+ };
+ queue.put(&node_0);
+
+ var node_1 = QueueMpmc(i32).Node{
+ .data = 1,
+ .next = undefined,
+ };
+ queue.put(&node_1);
+
+ assert(queue.get().?.data == 0);
+
+ var node_2 = QueueMpmc(i32).Node{
+ .data = 2,
+ .next = undefined,
+ };
+ queue.put(&node_2);
+
+ var node_3 = QueueMpmc(i32).Node{
+ .data = 3,
+ .next = undefined,
+ };
+ queue.put(&node_3);
+
+ assert(queue.get().?.data == 1);
+
+ assert(queue.get().?.data == 2);
+
+ var node_4 = QueueMpmc(i32).Node{
+ .data = 4,
+ .next = undefined,
+ };
+ queue.put(&node_4);
+
+ assert(queue.get().?.data == 3);
+ // if we were to set node_3.next to null here, it would cause this test
+ // to fail. this demonstrates the limitation of hanging on to extra memory.
+
+ assert(queue.get().?.data == 4);
+
+ assert(queue.get() == null);
+}
diff --git a/std/atomic/queue.zig b/std/atomic/queue_mpsc.zig
similarity index 68%
rename from std/atomic/queue.zig
rename to std/atomic/queue_mpsc.zig
index 3dc64dbea2..8030565d7a 100644
--- a/std/atomic/queue.zig
+++ b/std/atomic/queue_mpsc.zig
@@ -1,49 +1,54 @@
+const std = @import("../index.zig");
+const assert = std.debug.assert;
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
-/// Many reader, many writer, non-allocating, thread-safe, lock-free
-pub fn Queue(comptime T: type) type {
+/// Many producer, single consumer, non-allocating, thread-safe, lock-free
+pub fn QueueMpsc(comptime T: type) type {
return struct {
- head: *Node,
- tail: *Node,
- root: Node,
+ inboxes: [2]std.atomic.Stack(T),
+ outbox: std.atomic.Stack(T),
+ inbox_index: usize,
pub const Self = this;
- pub const Node = struct {
- next: ?*Node,
- data: T,
- };
+ pub const Node = std.atomic.Stack(T).Node;
- // TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287
- pub fn init(self: *Self) void {
- self.root.next = null;
- self.head = &self.root;
- self.tail = &self.root;
+ pub fn init() Self {
+ return Self{
+ .inboxes = []std.atomic.Stack(T){
+ std.atomic.Stack(T).init(),
+ std.atomic.Stack(T).init(),
+ },
+ .outbox = std.atomic.Stack(T).init(),
+ .inbox_index = 0,
+ };
}
pub fn put(self: *Self, node: *Node) void {
- node.next = null;
-
- const tail = @atomicRmw(*Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
- _ = @atomicRmw(?*Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
+ const inbox_index = @atomicLoad(usize, &self.inbox_index, AtomicOrder.SeqCst);
+ const inbox = &self.inboxes[inbox_index];
+ inbox.push(node);
}
pub fn get(self: *Self) ?*Node {
- var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
- while (true) {
- const node = head.next orelse return null;
- head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return node;
+ if (self.outbox.pop()) |node| {
+ return node;
}
+ const prev_inbox_index = @atomicRmw(usize, &self.inbox_index, AtomicRmwOp.Xor, 0x1, AtomicOrder.SeqCst);
+ const prev_inbox = &self.inboxes[prev_inbox_index];
+ while (prev_inbox.pop()) |node| {
+ self.outbox.push(node);
+ }
+ return self.outbox.pop();
}
};
}
-const std = @import("std");
const Context = struct {
allocator: *std.mem.Allocator,
- queue: *Queue(i32),
+ queue: *QueueMpsc(i32),
put_sum: isize,
get_sum: isize,
get_count: usize,
@@ -58,7 +63,7 @@ const Context = struct {
const puts_per_thread = 500;
const put_thread_count = 3;
-test "std.atomic.queue" {
+test "std.atomic.queue_mpsc" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
@@ -68,8 +73,7 @@ test "std.atomic.queue" {
var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
var a = &fixed_buffer_allocator.allocator;
- var queue: Queue(i32) = undefined;
- queue.init();
+ var queue = QueueMpsc(i32).init();
var context = Context{
.allocator = a,
.queue = &queue,
@@ -83,7 +87,7 @@ test "std.atomic.queue" {
for (putters) |*t| {
t.* = try std.os.spawnThread(&context, startPuts);
}
- var getters: [put_thread_count]*std.os.Thread = undefined;
+ var getters: [1]*std.os.Thread = undefined;
for (getters) |*t| {
t.* = try std.os.spawnThread(&context, startGets);
}
@@ -114,8 +118,10 @@ fn startPuts(ctx: *Context) u8 {
while (put_count != 0) : (put_count -= 1) {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
- const node = ctx.allocator.create(Queue(i32).Node) catch unreachable;
- node.data = x;
+ const node = ctx.allocator.create(QueueMpsc(i32).Node{
+ .next = undefined,
+ .data = x,
+ }) catch unreachable;
ctx.queue.put(node);
_ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
}
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index 9e81d89257..d74bee8e8b 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -117,8 +117,10 @@ fn startPuts(ctx: *Context) u8 {
while (put_count != 0) : (put_count -= 1) {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
- const node = ctx.allocator.create(Stack(i32).Node) catch unreachable;
- node.data = x;
+ const node = ctx.allocator.create(Stack(i32).Node{
+ .next = undefined,
+ .data = x,
+ }) catch unreachable;
ctx.stack.push(node);
_ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
}
diff --git a/std/build.zig b/std/build.zig
index 92454a183a..99de9b5197 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -158,8 +158,7 @@ pub const Builder = struct {
}
pub fn addTest(self: *Builder, root_src: []const u8) *TestStep {
- const test_step = self.allocator.create(TestStep) catch unreachable;
- test_step.* = TestStep.init(self, root_src);
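+        // Allocator.create now takes the initial value and returns a pointer
+        // to a copy of it, replacing the old create-then-assign pattern.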
+ const test_step = self.allocator.create(TestStep.init(self, root_src)) catch unreachable;
return test_step;
}
@@ -191,21 +190,18 @@ pub const Builder = struct {
}
pub fn addWriteFile(self: *Builder, file_path: []const u8, data: []const u8) *WriteFileStep {
- const write_file_step = self.allocator.create(WriteFileStep) catch unreachable;
- write_file_step.* = WriteFileStep.init(self, file_path, data);
+ const write_file_step = self.allocator.create(WriteFileStep.init(self, file_path, data)) catch unreachable;
return write_file_step;
}
pub fn addLog(self: *Builder, comptime format: []const u8, args: ...) *LogStep {
const data = self.fmt(format, args);
- const log_step = self.allocator.create(LogStep) catch unreachable;
- log_step.* = LogStep.init(self, data);
+ const log_step = self.allocator.create(LogStep.init(self, data)) catch unreachable;
return log_step;
}
pub fn addRemoveDirTree(self: *Builder, dir_path: []const u8) *RemoveDirStep {
- const remove_dir_step = self.allocator.create(RemoveDirStep) catch unreachable;
- remove_dir_step.* = RemoveDirStep.init(self, dir_path);
+ const remove_dir_step = self.allocator.create(RemoveDirStep.init(self, dir_path)) catch unreachable;
return remove_dir_step;
}
@@ -404,11 +400,10 @@ pub const Builder = struct {
}
pub fn step(self: *Builder, name: []const u8, description: []const u8) *Step {
- const step_info = self.allocator.create(TopLevelStep) catch unreachable;
- step_info.* = TopLevelStep{
+ const step_info = self.allocator.create(TopLevelStep{
.step = Step.initNoOp(name, self.allocator),
.description = description,
- };
+ }) catch unreachable;
self.top_level_steps.append(step_info) catch unreachable;
return &step_info.step;
}
@@ -598,8 +593,7 @@ pub const Builder = struct {
const full_dest_path = os.path.resolve(self.allocator, self.prefix, dest_rel_path) catch unreachable;
self.pushInstalledFile(full_dest_path);
- const install_step = self.allocator.create(InstallFileStep) catch unreachable;
- install_step.* = InstallFileStep.init(self, src_path, full_dest_path);
+ const install_step = self.allocator.create(InstallFileStep.init(self, src_path, full_dest_path)) catch unreachable;
return install_step;
}
@@ -837,51 +831,43 @@ pub const LibExeObjStep = struct {
};
pub fn createSharedLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initExtraArgs(builder, name, root_src, Kind.Lib, false, ver);
+ const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Lib, false, ver)) catch unreachable;
return self;
}
pub fn createCSharedLibrary(builder: *Builder, name: []const u8, version: *const Version) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initC(builder, name, Kind.Lib, version, false);
+ const self = builder.allocator.create(initC(builder, name, Kind.Lib, version, false)) catch unreachable;
return self;
}
pub fn createStaticLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initExtraArgs(builder, name, root_src, Kind.Lib, true, builder.version(0, 0, 0));
+ const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Lib, true, builder.version(0, 0, 0))) catch unreachable;
return self;
}
pub fn createCStaticLibrary(builder: *Builder, name: []const u8) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initC(builder, name, Kind.Lib, builder.version(0, 0, 0), true);
+ const self = builder.allocator.create(initC(builder, name, Kind.Lib, builder.version(0, 0, 0), true)) catch unreachable;
return self;
}
pub fn createObject(builder: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initExtraArgs(builder, name, root_src, Kind.Obj, false, builder.version(0, 0, 0));
+ const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Obj, false, builder.version(0, 0, 0))) catch unreachable;
return self;
}
pub fn createCObject(builder: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initC(builder, name, Kind.Obj, builder.version(0, 0, 0), false);
+ const self = builder.allocator.create(initC(builder, name, Kind.Obj, builder.version(0, 0, 0), false)) catch unreachable;
self.object_src = src;
return self;
}
pub fn createExecutable(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initExtraArgs(builder, name, root_src, Kind.Exe, false, builder.version(0, 0, 0));
+ const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Exe, false, builder.version(0, 0, 0))) catch unreachable;
return self;
}
pub fn createCExecutable(builder: *Builder, name: []const u8) *LibExeObjStep {
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = initC(builder, name, Kind.Exe, builder.version(0, 0, 0), false);
+ const self = builder.allocator.create(initC(builder, name, Kind.Exe, builder.version(0, 0, 0), false)) catch unreachable;
return self;
}
@@ -1748,14 +1734,14 @@ pub const CommandStep = struct {
/// ::argv is copied.
pub fn create(builder: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep {
- const self = builder.allocator.create(CommandStep) catch unreachable;
- self.* = CommandStep{
+ const self = builder.allocator.create(CommandStep{
.builder = builder,
.step = Step.init(argv[0], builder.allocator, make),
.argv = builder.allocator.alloc([]u8, argv.len) catch unreachable,
.cwd = cwd,
.env_map = env_map,
- };
+ }) catch unreachable;
+
mem.copy([]const u8, self.argv, argv);
self.step.name = self.argv[0];
return self;
@@ -1778,18 +1764,17 @@ const InstallArtifactStep = struct {
const Self = this;
pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
- const self = builder.allocator.create(Self) catch unreachable;
const dest_dir = switch (artifact.kind) {
LibExeObjStep.Kind.Obj => unreachable,
LibExeObjStep.Kind.Exe => builder.exe_dir,
LibExeObjStep.Kind.Lib => builder.lib_dir,
};
- self.* = Self{
+ const self = builder.allocator.create(Self{
.builder = builder,
.step = Step.init(builder.fmt("install {}", artifact.step.name), builder.allocator, make),
.artifact = artifact,
.dest_file = os.path.join(builder.allocator, dest_dir, artifact.out_filename) catch unreachable,
- };
+ }) catch unreachable;
self.step.dependOn(&artifact.step);
builder.pushInstalledFile(self.dest_file);
if (self.artifact.kind == LibExeObjStep.Kind.Lib and !self.artifact.static) {
diff --git a/std/cstr.zig b/std/cstr.zig
index d9106769c1..e83d5a39e9 100644
--- a/std/cstr.zig
+++ b/std/cstr.zig
@@ -79,7 +79,7 @@ pub const NullTerminated2DArray = struct {
errdefer allocator.free(buf);
var write_index = index_size;
- const index_buf = ([]?[*]u8)(buf);
+ const index_buf = @bytesToSlice(?[*]u8, buf);
var i: usize = 0;
for (slices) |slice| {
diff --git a/std/debug/index.zig b/std/debug/index.zig
index 198e0f90f6..57b2dfc300 100644
--- a/std/debug/index.zig
+++ b/std/debug/index.zig
@@ -249,9 +249,7 @@ fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: us
pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
switch (builtin.object_format) {
builtin.ObjectFormat.elf => {
- const st = try allocator.create(ElfStackTrace);
- errdefer allocator.destroy(st);
- st.* = ElfStackTrace{
+ const st = try allocator.create(ElfStackTrace{
.self_exe_file = undefined,
.elf = undefined,
.debug_info = undefined,
@@ -261,7 +259,8 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
.debug_ranges = null,
.abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator),
.compile_unit_list = ArrayList(CompileUnit).init(allocator),
- };
+ });
+ errdefer allocator.destroy(st);
st.self_exe_file = try os.openSelfExe();
errdefer st.self_exe_file.close();
@@ -280,11 +279,8 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
var exe_file = try os.openSelfExe();
defer exe_file.close();
- const st = try allocator.create(ElfStackTrace);
+ const st = try allocator.create(ElfStackTrace{ .symbol_table = try macho.loadSymbols(allocator, &io.FileInStream.init(&exe_file)) });
errdefer allocator.destroy(st);
-
- st.* = ElfStackTrace{ .symbol_table = try macho.loadSymbols(allocator, &io.FileInStream.init(&exe_file)) };
-
return st;
},
builtin.ObjectFormat.coff => {
@@ -974,8 +970,7 @@ fn scanAllCompileUnits(st: *ElfStackTrace) !void {
try st.self_exe_file.seekTo(compile_unit_pos);
- const compile_unit_die = try st.allocator().create(Die);
- compile_unit_die.* = try parseDie(st, abbrev_table, is_64);
+ const compile_unit_die = try st.allocator().create(try parseDie(st, abbrev_table, is_64));
if (compile_unit_die.tag_id != DW.TAG_compile_unit) return error.InvalidDebugInfo;
diff --git a/std/event.zig b/std/event.zig
index 0821c789b7..c6ac04a9d0 100644
--- a/std/event.zig
+++ b/std/event.zig
@@ -4,6 +4,8 @@ const assert = std.debug.assert;
const event = this;
const mem = std.mem;
const posix = std.os.posix;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
pub const TcpServer = struct {
handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void,
@@ -93,16 +95,56 @@ pub const TcpServer = struct {
pub const Loop = struct {
allocator: *mem.Allocator,
- epollfd: i32,
keep_running: bool,
+ next_tick_queue: std.atomic.QueueMpsc(promise),
+ os_data: OsData,
- fn init(allocator: *mem.Allocator) !Loop {
- const epollfd = try std.os.linuxEpollCreate(std.os.linux.EPOLL_CLOEXEC);
- return Loop{
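+    // Platform-specific event loop state, selected at compile time; adding an
+    // OS means adding a variant to this switch.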
+ const OsData = switch (builtin.os) {
+ builtin.Os.linux => struct {
+ epollfd: i32,
+ },
+ else => struct {},
+ };
+
+ pub const NextTickNode = std.atomic.QueueMpsc(promise).Node;
+
+ /// The allocator must be thread-safe because we use it for multiplexing
+ /// coroutines onto kernel threads.
+ pub fn init(allocator: *mem.Allocator) !Loop {
+ var self = Loop{
.keep_running = true,
.allocator = allocator,
- .epollfd = epollfd,
+ .os_data = undefined,
+ .next_tick_queue = std.atomic.QueueMpsc(promise).init(),
};
+ try self.initOsData();
+ errdefer self.deinitOsData();
+
+ return self;
+ }
+
+ /// must call stop before deinit
+ pub fn deinit(self: *Loop) void {
+ self.deinitOsData();
+ }
+
+ const InitOsDataError = std.os.LinuxEpollCreateError;
+
+ fn initOsData(self: *Loop) InitOsDataError!void {
+ switch (builtin.os) {
+ builtin.Os.linux => {
+ self.os_data.epollfd = try std.os.linuxEpollCreate(std.os.linux.EPOLL_CLOEXEC);
+ errdefer std.os.close(self.os_data.epollfd);
+ },
+ else => {},
+ }
+ }
+
+ fn deinitOsData(self: *Loop) void {
+ switch (builtin.os) {
+ builtin.Os.linux => std.os.close(self.os_data.epollfd),
+ else => {},
+ }
}
pub fn addFd(self: *Loop, fd: i32, prom: promise) !void {
@@ -110,11 +152,11 @@ pub const Loop = struct {
.events = std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | std.os.linux.EPOLLET,
.data = std.os.linux.epoll_data{ .ptr = @ptrToInt(prom) },
};
- try std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_ADD, fd, &ev);
+ try std.os.linuxEpollCtl(self.os_data.epollfd, std.os.linux.EPOLL_CTL_ADD, fd, &ev);
}
pub fn removeFd(self: *Loop, fd: i32) void {
- std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
+ std.os.linuxEpollCtl(self.os_data.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
}
async fn waitFd(self: *Loop, fd: i32) !void {
defer self.removeFd(fd);
@@ -126,21 +168,250 @@ pub const Loop = struct {
pub fn stop(self: *Loop) void {
// TODO make atomic
self.keep_running = false;
- // TODO activate an fd in the epoll set
+ // TODO activate an fd in the epoll set which should cancel all the promises
+ }
+
+ /// Bring your own linked-list node; since no allocation happens, this cannot fail.
+ pub fn onNextTick(self: *Loop, node: *NextTickNode) void {
+ self.next_tick_queue.put(node);
}
pub fn run(self: *Loop) void {
while (self.keep_running) {
- var events: [16]std.os.linux.epoll_event = undefined;
- const count = std.os.linuxEpollWait(self.epollfd, events[0..], -1);
- for (events[0..count]) |ev| {
- const p = @intToPtr(promise, ev.data.ptr);
- resume p;
+ // TODO multiplex the next tick queue and the epoll event results onto a thread pool
+ while (self.next_tick_queue.get()) |node| {
+ resume node.data;
}
+ if (!self.keep_running) break;
+
+ self.dispatchOsEvents();
+ }
+ }
+
+ fn dispatchOsEvents(self: *Loop) void {
+ switch (builtin.os) {
+ builtin.Os.linux => {
+ var events: [16]std.os.linux.epoll_event = undefined;
+ const count = std.os.linuxEpollWait(self.os_data.epollfd, events[0..], -1);
+ for (events[0..count]) |ev| {
+ const p = @intToPtr(promise, ev.data.ptr);
+ resume p;
+ }
+ },
+ else => {},
}
}
};
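Loop.onNextTick above takes a caller-owned queue node instead of allocating one, which is why it cannot fail. A minimal sketch of the intended calling pattern (not part of the diff; the coroutine and its name are illustrative, using the era's suspend |handle| syntax seen above):

async fn yieldOneTick(loop: *Loop) void {
    suspend |handle| {
        // The node lives in this coroutine's frame, which stays alive until
        // the loop resumes the handle, so no heap allocation is needed.
        var node = Loop.NextTickNode{
            .next = undefined,
            .data = handle,
        };
        loop.onNextTick(&node);
    }
}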
+/// Many-producer, many-consumer, thread-safe, lock-free, with a buffer size configured at runtime.
+/// When the buffer is empty, consumers suspend and are resumed by producers.
+/// When the buffer is full, producers suspend and are resumed by consumers.
+pub fn Channel(comptime T: type) type {
+ return struct {
+ loop: *Loop,
+
+ getters: std.atomic.QueueMpsc(GetNode),
+ putters: std.atomic.QueueMpsc(PutNode),
+ get_count: usize,
+ put_count: usize,
+ dispatch_lock: u8, // TODO make this a bool
+ need_dispatch: u8, // TODO make this a bool
+
+ // simple fixed size ring buffer
+ buffer_nodes: []T,
+ buffer_index: usize,
+ buffer_len: usize,
+
+ const SelfChannel = this;
+ const GetNode = struct {
+ ptr: *T,
+ tick_node: *Loop.NextTickNode,
+ };
+ const PutNode = struct {
+ data: T,
+ tick_node: *Loop.NextTickNode,
+ };
+
+ /// call destroy when done
+ pub fn create(loop: *Loop, capacity: usize) !*SelfChannel {
+ const buffer_nodes = try loop.allocator.alloc(T, capacity);
+ errdefer loop.allocator.free(buffer_nodes);
+
+ const self = try loop.allocator.create(SelfChannel{
+ .loop = loop,
+ .buffer_len = 0,
+ .buffer_nodes = buffer_nodes,
+ .buffer_index = 0,
+ .dispatch_lock = 0,
+ .need_dispatch = 0,
+ .getters = std.atomic.QueueMpsc(GetNode).init(),
+ .putters = std.atomic.QueueMpsc(PutNode).init(),
+ .get_count = 0,
+ .put_count = 0,
+ });
+ errdefer loop.allocator.destroy(self);
+
+ return self;
+ }
+
+ /// must be called when all calls to put and get have suspended and no more calls occur
+ pub fn destroy(self: *SelfChannel) void {
+ while (self.getters.get()) |get_node| {
+ cancel get_node.data.tick_node.data;
+ }
+ while (self.putters.get()) |put_node| {
+ cancel put_node.data.tick_node.data;
+ }
+ self.loop.allocator.free(self.buffer_nodes);
+ self.loop.allocator.destroy(self);
+ }
+
+ /// puts a data item in the channel. The promise completes when the value has been added to the
+ /// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter.
+ pub async fn put(self: *SelfChannel, data: T) void {
+ // TODO should be able to group memory allocation failure before first suspend point
+ // so that the async invocation catches it
+ var dispatch_tick_node_ptr: *Loop.NextTickNode = undefined;
+ _ = async self.dispatch(&dispatch_tick_node_ptr) catch unreachable;
+
+ suspend |handle| {
+ var my_tick_node = Loop.NextTickNode{
+ .next = undefined,
+ .data = handle,
+ };
+ var queue_node = std.atomic.QueueMpsc(PutNode).Node{
+ .data = PutNode{
+ .tick_node = &my_tick_node,
+ .data = data,
+ },
+ .next = undefined,
+ };
+ self.putters.put(&queue_node);
+ _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+
+ self.loop.onNextTick(dispatch_tick_node_ptr);
+ }
+ }
+
+ /// await this function to get an item from the channel. If the buffer is empty, the promise will
+ /// complete when the next item is put in the channel.
+ pub async fn get(self: *SelfChannel) T {
+ // TODO should be able to group memory allocation failure before first suspend point
+ // so that the async invocation catches it
+ var dispatch_tick_node_ptr: *Loop.NextTickNode = undefined;
+ _ = async self.dispatch(&dispatch_tick_node_ptr) catch unreachable;
+
+ // TODO integrate this function with named return values
+ // so we can get rid of this extra result copy
+ var result: T = undefined;
+ var debug_handle: usize = undefined;
+ suspend |handle| {
+ debug_handle = @ptrToInt(handle);
+ var my_tick_node = Loop.NextTickNode{
+ .next = undefined,
+ .data = handle,
+ };
+ var queue_node = std.atomic.QueueMpsc(GetNode).Node{
+ .data = GetNode{
+ .ptr = &result,
+ .tick_node = &my_tick_node,
+ },
+ .next = undefined,
+ };
+ self.getters.put(&queue_node);
+ _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+
+ self.loop.onNextTick(dispatch_tick_node_ptr);
+ }
+ return result;
+ }
+
+ async fn dispatch(self: *SelfChannel, tick_node_ptr: **Loop.NextTickNode) void {
+ // resumed by onNextTick
+ suspend |handle| {
+ var tick_node = Loop.NextTickNode{
+ .data = handle,
+ .next = undefined,
+ };
+ tick_node_ptr.* = &tick_node;
+ }
+
+ // set the "need dispatch" flag
+ _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+
+ lock: while (true) {
+ // set the lock flag
+ const prev_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ if (prev_lock != 0) return;
+
+ // clear the need_dispatch flag since we're about to do it
+ _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+
+ while (true) {
+ one_dispatch: {
+ // later we correct these extra subtractions
+ var get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ var put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+
+ // transfer self.buffer to self.getters
+ while (self.buffer_len != 0) {
+ if (get_count == 0) break :one_dispatch;
+
+ const get_node = &self.getters.get().?.data;
+ get_node.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
+ self.loop.onNextTick(get_node.tick_node);
+ self.buffer_len -= 1;
+
+ get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+
+ // direct transfer self.putters to self.getters
+ while (get_count != 0 and put_count != 0) {
+ const get_node = &self.getters.get().?.data;
+ const put_node = &self.putters.get().?.data;
+
+ get_node.ptr.* = put_node.data;
+ self.loop.onNextTick(get_node.tick_node);
+ self.loop.onNextTick(put_node.tick_node);
+
+ get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+
+ // transfer self.putters to self.buffer
+ while (self.buffer_len != self.buffer_nodes.len and put_count != 0) {
+ const put_node = &self.putters.get().?.data;
+
+ self.buffer_nodes[self.buffer_index] = put_node.data;
+ self.loop.onNextTick(put_node.tick_node);
+ self.buffer_index +%= 1;
+ self.buffer_len += 1;
+
+ put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ }
+ }
+
+ // undo the extra subtractions
+ _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+
+ // clear need-dispatch flag
+ const need_dispatch = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ if (need_dispatch != 0) continue;
+
+ const my_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ assert(my_lock != 0);
+
+ // we have to check again now that we unlocked
+ if (@atomicLoad(u8, &self.need_dispatch, AtomicOrder.SeqCst) != 0) continue :lock;
+
+ return;
+ }
+ }
+ }
+ };
+}
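dispatch() above implements the classic try-lock-with-recheck protocol. A reduced skeleton (my paraphrase, reusing the struct's field names; the real function also performs the three buffer/getter/putter transfers inside a retry loop):

fn dispatchSkeleton(self: *SelfChannel) void {
    // Announce that work exists.
    _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
    while (true) {
        // Try to take the lock; if another dispatcher holds it, it will see
        // need_dispatch after unlocking and redo the work on our behalf.
        if (@atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) return;
        _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
        // ... transfer items between buffer, getters, and putters here ...
        _ = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
        // Recheck after unlocking; a flag set between our clear and our
        // unlock would otherwise go unserviced.
        if (@atomicLoad(u8, &self.need_dispatch, AtomicOrder.SeqCst) == 0) return;
    }
}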
+
pub async fn connect(loop: *Loop, _address: *const std.net.Address) !std.os.File {
var address = _address.*; // TODO https://github.com/ziglang/zig/issues/733
@@ -199,6 +470,7 @@ test "listen on a port, send bytes, receive bytes" {
defer cancel p;
loop.run();
}
+
async fn doAsyncTest(loop: *Loop, address: *const std.net.Address) void {
errdefer @panic("test failure");
@@ -211,3 +483,43 @@ async fn doAsyncTest(loop: *Loop, address: *const std.net.Address) void {
assert(mem.eql(u8, msg, "hello from server\n"));
loop.stop();
}
+
+test "std.event.Channel" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ var loop = try Loop.init(allocator);
+ defer loop.deinit();
+
+ const channel = try Channel(i32).create(&loop, 0);
+ defer channel.destroy();
+
+ const handle = try async testChannelGetter(&loop, channel);
+ defer cancel handle;
+
+ const putter = try async testChannelPutter(channel);
+ defer cancel putter;
+
+ loop.run();
+}
+
+async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void {
+ errdefer @panic("test failed");
+
+ const value1_promise = try async channel.get();
+ const value1 = await value1_promise;
+ assert(value1 == 1234);
+
+ const value2_promise = try async channel.get();
+ const value2 = await value2_promise;
+ assert(value2 == 4567);
+
+ loop.stop();
+}
+
+async fn testChannelPutter(channel: *Channel(i32)) void {
+ await (async channel.put(1234) catch @panic("out of memory"));
+ await (async channel.put(4567) catch @panic("out of memory"));
+}
diff --git a/std/fmt/index.zig b/std/fmt/index.zig
index f4dfa0e324..c3c17f5322 100644
--- a/std/fmt/index.zig
+++ b/std/fmt/index.zig
@@ -130,6 +130,9 @@ pub fn formatType(
try output(context, "error.");
return output(context, @errorName(value));
},
+ builtin.TypeId.Promise => {
+ return format(context, Errors, output, "promise@{x}", @ptrToInt(value));
+ },
builtin.TypeId.Pointer => |ptr_info| switch (ptr_info.size) {
builtin.TypeInfo.Pointer.Size.One => switch (@typeInfo(ptr_info.child)) {
builtin.TypeId.Array => |info| {
@@ -327,7 +330,7 @@ pub fn formatFloatScientific(
comptime Errors: type,
output: fn (@typeOf(context), []const u8) Errors!void,
) Errors!void {
- var x = f64(value);
+ var x = @floatCast(f64, value);
// Errol doesn't handle these special cases.
if (math.signbit(x)) {
diff --git a/std/heap.zig b/std/heap.zig
index 2a2c8c0b59..2e02733da1 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -38,6 +38,7 @@ fn cFree(self: *Allocator, old_mem: []u8) void {
}
/// This allocator makes a syscall directly for every allocation and free.
+/// TODO make this thread-safe. The windows implementation will need some atomics.
pub const DirectAllocator = struct {
allocator: Allocator,
heap_handle: ?HeapHandle,
@@ -221,7 +222,7 @@ pub const ArenaAllocator = struct {
if (len >= actual_min_size) break;
}
const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
- const buf_node_slice = ([]BufNode)(buf[0..@sizeOf(BufNode)]);
+ const buf_node_slice = @bytesToSlice(BufNode, buf[0..@sizeOf(BufNode)]);
const buf_node = &buf_node_slice[0];
buf_node.* = BufNode{
.data = buf,
@@ -407,8 +408,7 @@ fn testAllocator(allocator: *mem.Allocator) !void {
var slice = try allocator.alloc(*i32, 100);
for (slice) |*item, i| {
- item.* = try allocator.create(i32);
- item.*.* = @intCast(i32, i);
+ item.* = try allocator.create(@intCast(i32, i));
}
for (slice) |item, i| {
diff --git a/std/io.zig b/std/io.zig
index cfe1a7f585..1c468f6f4f 100644
--- a/std/io.zig
+++ b/std/io.zig
@@ -414,14 +414,12 @@ pub const BufferedAtomicFile = struct {
pub fn create(allocator: *mem.Allocator, dest_path: []const u8) !*BufferedAtomicFile {
// TODO with well defined copy elision we don't need this allocation
- var self = try allocator.create(BufferedAtomicFile);
- errdefer allocator.destroy(self);
-
- self.* = BufferedAtomicFile{
+ var self = try allocator.create(BufferedAtomicFile{
.atomic_file = undefined,
.file_stream = undefined,
.buffered_stream = undefined,
- };
+ });
+ errdefer allocator.destroy(self);
self.atomic_file = try os.AtomicFile.init(allocator, dest_path, os.default_file_mode);
errdefer self.atomic_file.deinit();
diff --git a/std/json.zig b/std/json.zig
index 2930cd21bb..8986034fb4 100644
--- a/std/json.zig
+++ b/std/json.zig
@@ -180,7 +180,7 @@ pub const StreamingParser = struct {
pub fn fromInt(x: var) State {
debug.assert(x == 0 or x == 1);
const T = @TagType(State);
- return State(@intCast(T, x));
+ return @intToEnum(State, @intCast(T, x));
}
};
diff --git a/std/linked_list.zig b/std/linked_list.zig
index 9e32b7d9da..62cd5ca2bb 100644
--- a/std/linked_list.zig
+++ b/std/linked_list.zig
@@ -193,7 +193,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// A pointer to the new node.
pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
- return allocator.create(Node);
+ return allocator.create(Node(undefined));
}
/// Deallocate a node.
diff --git a/std/macho.zig b/std/macho.zig
index 64f78ae4a3..fe5409ad4d 100644
--- a/std/macho.zig
+++ b/std/macho.zig
@@ -161,7 +161,7 @@ pub fn loadSymbols(allocator: *mem.Allocator, in: *io.FileInStream) !SymbolTable
}
fn readNoEof(in: *io.FileInStream, comptime T: type, result: []T) !void {
- return in.stream.readNoEof(([]u8)(result));
+ return in.stream.readNoEof(@sliceToBytes(result));
}
fn readOneNoEof(in: *io.FileInStream, comptime T: type, result: *T) !void {
return readNoEof(in, T, (*[1]T)(result)[0..]);
diff --git a/std/math/copysign.zig b/std/math/copysign.zig
index 4ca8f82f4b..8c71dcb0bc 100644
--- a/std/math/copysign.zig
+++ b/std/math/copysign.zig
@@ -4,12 +4,22 @@ const assert = std.debug.assert;
pub fn copysign(comptime T: type, x: T, y: T) T {
return switch (T) {
+ f16 => copysign16(x, y),
f32 => copysign32(x, y),
f64 => copysign64(x, y),
else => @compileError("copysign not implemented for " ++ @typeName(T)),
};
}
+fn copysign16(x: f16, y: f16) f16 {
+ const ux = @bitCast(u16, x);
+ const uy = @bitCast(u16, y);
+
+ const h1 = ux & (@maxValue(u16) / 2);
+ const h2 = uy & (u16(1) << 15);
+ return @bitCast(f16, h1 | h2);
+}
+
fn copysign32(x: f32, y: f32) f32 {
const ux = @bitCast(u32, x);
const uy = @bitCast(u32, y);
@@ -29,10 +39,18 @@ fn copysign64(x: f64, y: f64) f64 {
}
test "math.copysign" {
+ assert(copysign(f16, 1.0, 1.0) == copysign16(1.0, 1.0));
assert(copysign(f32, 1.0, 1.0) == copysign32(1.0, 1.0));
assert(copysign(f64, 1.0, 1.0) == copysign64(1.0, 1.0));
}
+test "math.copysign16" {
+ assert(copysign16(5.0, 1.0) == 5.0);
+ assert(copysign16(5.0, -1.0) == -5.0);
+ assert(copysign16(-5.0, -1.0) == -5.0);
+ assert(copysign16(-5.0, 1.0) == 5.0);
+}
+
test "math.copysign32" {
assert(copysign32(5.0, 1.0) == 5.0);
assert(copysign32(5.0, -1.0) == -5.0);
diff --git a/std/math/expm1.zig b/std/math/expm1.zig
index 438e44ccce..6fa0194b32 100644
--- a/std/math/expm1.zig
+++ b/std/math/expm1.zig
@@ -20,6 +20,10 @@ pub fn expm1(x: var) @typeOf(x) {
fn expm1_32(x_: f32) f32 {
@setFloatMode(this, builtin.FloatMode.Strict);
+
+ if (math.isNan(x_))
+ return math.nan(f32);
+
const o_threshold: f32 = 8.8721679688e+01;
const ln2_hi: f32 = 6.9313812256e-01;
const ln2_lo: f32 = 9.0580006145e-06;
@@ -146,6 +150,10 @@ fn expm1_32(x_: f32) f32 {
fn expm1_64(x_: f64) f64 {
@setFloatMode(this, builtin.FloatMode.Strict);
+
+ if (math.isNan(x_))
+ return math.nan(f64);
+
const o_threshold: f64 = 7.09782712893383973096e+02;
const ln2_hi: f64 = 6.93147180369123816490e-01;
const ln2_lo: f64 = 1.90821492927058770002e-10;
diff --git a/std/math/fabs.zig b/std/math/fabs.zig
index 821624e1bc..ae8f9616a8 100644
--- a/std/math/fabs.zig
+++ b/std/math/fabs.zig
@@ -10,12 +10,19 @@ const assert = std.debug.assert;
pub fn fabs(x: var) @typeOf(x) {
const T = @typeOf(x);
return switch (T) {
+ f16 => fabs16(x),
f32 => fabs32(x),
f64 => fabs64(x),
else => @compileError("fabs not implemented for " ++ @typeName(T)),
};
}
+fn fabs16(x: f16) f16 {
+ var u = @bitCast(u16, x);
+ u &= 0x7FFF;
+ return @bitCast(f16, u);
+}
+
fn fabs32(x: f32) f32 {
var u = @bitCast(u32, x);
u &= 0x7FFFFFFF;
@@ -29,10 +36,16 @@ fn fabs64(x: f64) f64 {
}
test "math.fabs" {
+ assert(fabs(f16(1.0)) == fabs16(1.0));
assert(fabs(f32(1.0)) == fabs32(1.0));
assert(fabs(f64(1.0)) == fabs64(1.0));
}
+test "math.fabs16" {
+ assert(fabs16(1.0) == 1.0);
+ assert(fabs16(-1.0) == 1.0);
+}
+
test "math.fabs32" {
assert(fabs32(1.0) == 1.0);
assert(fabs32(-1.0) == 1.0);
@@ -43,6 +56,12 @@ test "math.fabs64" {
assert(fabs64(-1.0) == 1.0);
}
+test "math.fabs16.special" {
+ assert(math.isPositiveInf(fabs(math.inf(f16))));
+ assert(math.isPositiveInf(fabs(-math.inf(f16))));
+ assert(math.isNan(fabs(math.nan(f16))));
+}
+
test "math.fabs32.special" {
assert(math.isPositiveInf(fabs(math.inf(f32))));
assert(math.isPositiveInf(fabs(-math.inf(f32))));
diff --git a/std/math/floor.zig b/std/math/floor.zig
index 79d1097d08..0858598eea 100644
--- a/std/math/floor.zig
+++ b/std/math/floor.zig
@@ -12,12 +12,47 @@ const math = std.math;
pub fn floor(x: var) @typeOf(x) {
const T = @typeOf(x);
return switch (T) {
+ f16 => floor16(x),
f32 => floor32(x),
f64 => floor64(x),
else => @compileError("floor not implemented for " ++ @typeName(T)),
};
}
+fn floor16(x: f16) f16 {
+ var u = @bitCast(u16, x);
+ const e = @intCast(i16, (u >> 10) & 31) - 15;
+ var m: u16 = undefined;
+
+ // TODO: Shouldn't need this explicit check.
+ if (x == 0.0) {
+ return x;
+ }
+
+ if (e >= 10) {
+ return x;
+ }
+
+ if (e >= 0) {
+ m = u16(1023) >> @intCast(u4, e);
+ if (u & m == 0) {
+ return x;
+ }
+ math.forceEval(x + 0x1.0p120);
+ if (u >> 15 != 0) {
+ u += m;
+ }
+ return @bitCast(f16, u & ~m);
+ } else {
+ math.forceEval(x + 0x1.0p120);
+ if (u >> 15 == 0) {
+ return 0.0;
+ } else {
+ return -1.0;
+ }
+ }
+}
+
fn floor32(x: f32) f32 {
var u = @bitCast(u32, x);
const e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F;
@@ -84,10 +119,17 @@ fn floor64(x: f64) f64 {
}
test "math.floor" {
+ assert(floor(f16(1.3)) == floor16(1.3));
assert(floor(f32(1.3)) == floor32(1.3));
assert(floor(f64(1.3)) == floor64(1.3));
}
+test "math.floor16" {
+ assert(floor16(1.3) == 1.0);
+ assert(floor16(-1.3) == -2.0);
+ assert(floor16(0.2) == 0.0);
+}
+
test "math.floor32" {
assert(floor32(1.3) == 1.0);
assert(floor32(-1.3) == -2.0);
@@ -100,6 +142,14 @@ test "math.floor64" {
assert(floor64(0.2) == 0.0);
}
+test "math.floor16.special" {
+ assert(floor16(0.0) == 0.0);
+ assert(floor16(-0.0) == -0.0);
+ assert(math.isPositiveInf(floor16(math.inf(f16))));
+ assert(math.isNegativeInf(floor16(-math.inf(f16))));
+ assert(math.isNan(floor16(math.nan(f16))));
+}
+
test "math.floor32.special" {
assert(floor32(0.0) == 0.0);
assert(floor32(-0.0) == -0.0);
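A note on the math.forceEval(x + 0x1.0p120) lines in floor16 above (my reading, mirroring the musl-derived floor32/floor64): the addition exists solely to raise the inexact floating-point exception when x has a fractional part, and forceEval's volatile store keeps the optimizer from deleting the otherwise unused result:

// Illustrative expansion of what that line accomplishes:
const side_effect = x + 0x1.0p120; // raises "inexact" for fractional x
math.forceEval(side_effect); // volatile store; value discarded, not elided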
diff --git a/std/math/index.zig b/std/math/index.zig
index 176293be74..17b66f5568 100644
--- a/std/math/index.zig
+++ b/std/math/index.zig
@@ -19,6 +19,18 @@ pub const f32_max = 3.40282346638528859812e+38;
pub const f32_epsilon = 1.1920928955078125e-07;
pub const f32_toint = 1.0 / f32_epsilon;
+pub const f16_true_min = 0.000000059604644775390625; // 2**-24
+pub const f16_min = 0.00006103515625; // 2**-14
+pub const f16_max = 65504;
+pub const f16_epsilon = 0.0009765625; // 2**-10
+pub const f16_toint = 1.0 / f16_epsilon;
+
+pub const nan_u16 = u16(0x7C01);
+pub const nan_f16 = @bitCast(f16, nan_u16);
+
+pub const inf_u16 = u16(0x7C00);
+pub const inf_f16 = @bitCast(f16, inf_u16);
+
pub const nan_u32 = u32(0x7F800001);
pub const nan_f32 = @bitCast(f32, nan_u32);
@@ -44,6 +56,11 @@ pub fn approxEq(comptime T: type, x: T, y: T, epsilon: T) bool {
pub fn forceEval(value: var) void {
const T = @typeOf(value);
switch (T) {
+ f16 => {
+ var x: f16 = undefined;
+ const p = @ptrCast(*volatile f16, &x);
+ p.* = x;
+ },
f32 => {
var x: f32 = undefined;
const p = @ptrCast(*volatile f32, &x);
@@ -183,6 +200,32 @@ test "math" {
_ = @import("big/index.zig");
}
+pub fn floatMantissaBits(comptime T: type) comptime_int {
+ assert(@typeId(T) == builtin.TypeId.Float);
+
+ return switch (T.bit_count) {
+ 16 => 10,
+ 32 => 23,
+ 64 => 52,
+ 80 => 64,
+ 128 => 112,
+ else => @compileError("unknown floating point type " ++ @typeName(T)),
+ };
+}
+
+pub fn floatExponentBits(comptime T: type) comptime_int {
+ assert(@typeId(T) == builtin.TypeId.Float);
+
+ return switch (T.bit_count) {
+ 16 => 5,
+ 32 => 8,
+ 64 => 11,
+ 80 => 15,
+ 128 => 15,
+ else => @compileError("unknown floating point type " ++ @typeName(T)),
+ };
+}
+
pub fn min(x: var, y: var) @typeOf(x + y) {
return if (x < y) x else y;
}
@@ -607,4 +650,3 @@ pub fn lossyCast(comptime T: type, value: var) T {
else => @compileError("bad type"),
}
}
-
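floatMantissaBits and floatExponentBits above encode the IEEE 754 binary16/32/64/128 layouts, plus the x87 f80 format (whose 64-bit significand includes the explicit integer bit); the new extendXfYf2 compiler-rt routine later in this diff is their first consumer. A quick consistency check, as a hedged sketch:

test "float field widths" {
    comptime {
        // sign + exponent + mantissa == total bit count for the IEEE formats
        assert(1 + floatExponentBits(f16) + floatMantissaBits(f16) == 16);
        assert(1 + floatExponentBits(f32) + floatMantissaBits(f32) == 32);
        assert(1 + floatExponentBits(f64) + floatMantissaBits(f64) == 64);
        assert(1 + floatExponentBits(f128) + floatMantissaBits(f128) == 128);
    }
}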
diff --git a/std/math/inf.zig b/std/math/inf.zig
index bde90b2be1..62f5ef7c0d 100644
--- a/std/math/inf.zig
+++ b/std/math/inf.zig
@@ -1,9 +1,9 @@
const std = @import("../index.zig");
const math = std.math;
-const assert = std.debug.assert;
pub fn inf(comptime T: type) T {
return switch (T) {
+ f16 => @bitCast(f16, math.inf_u16),
f32 => @bitCast(f32, math.inf_u32),
f64 => @bitCast(f64, math.inf_u64),
else => @compileError("inf not implemented for " ++ @typeName(T)),
diff --git a/std/math/isfinite.zig b/std/math/isfinite.zig
index 37ead03bba..3a5d4f01bb 100644
--- a/std/math/isfinite.zig
+++ b/std/math/isfinite.zig
@@ -5,6 +5,10 @@ const assert = std.debug.assert;
pub fn isFinite(x: var) bool {
const T = @typeOf(x);
switch (T) {
+ f16 => {
+ const bits = @bitCast(u16, x);
+ return bits & 0x7FFF < 0x7C00;
+ },
f32 => {
const bits = @bitCast(u32, x);
return bits & 0x7FFFFFFF < 0x7F800000;
@@ -20,10 +24,14 @@ pub fn isFinite(x: var) bool {
}
test "math.isFinite" {
+ assert(isFinite(f16(0.0)));
+ assert(isFinite(f16(-0.0)));
assert(isFinite(f32(0.0)));
assert(isFinite(f32(-0.0)));
assert(isFinite(f64(0.0)));
assert(isFinite(f64(-0.0)));
+ assert(!isFinite(math.inf(f16)));
+ assert(!isFinite(-math.inf(f16)));
assert(!isFinite(math.inf(f32)));
assert(!isFinite(-math.inf(f32)));
assert(!isFinite(math.inf(f64)));
diff --git a/std/math/isinf.zig b/std/math/isinf.zig
index a976fb73d2..cf68b5769c 100644
--- a/std/math/isinf.zig
+++ b/std/math/isinf.zig
@@ -5,6 +5,10 @@ const assert = std.debug.assert;
pub fn isInf(x: var) bool {
const T = @typeOf(x);
switch (T) {
+ f16 => {
+ const bits = @bitCast(u16, x);
+ return bits & 0x7FFF == 0x7C00;
+ },
f32 => {
const bits = @bitCast(u32, x);
return bits & 0x7FFFFFFF == 0x7F800000;
@@ -22,6 +26,9 @@ pub fn isInf(x: var) bool {
pub fn isPositiveInf(x: var) bool {
const T = @typeOf(x);
switch (T) {
+ f16 => {
+ return @bitCast(u16, x) == 0x7C00;
+ },
f32 => {
return @bitCast(u32, x) == 0x7F800000;
},
@@ -37,6 +44,9 @@ pub fn isPositiveInf(x: var) bool {
pub fn isNegativeInf(x: var) bool {
const T = @typeOf(x);
switch (T) {
+ f16 => {
+ return @bitCast(u16, x) == 0xFC00;
+ },
f32 => {
return @bitCast(u32, x) == 0xFF800000;
},
@@ -50,10 +60,14 @@ pub fn isNegativeInf(x: var) bool {
}
test "math.isInf" {
+ assert(!isInf(f16(0.0)));
+ assert(!isInf(f16(-0.0)));
assert(!isInf(f32(0.0)));
assert(!isInf(f32(-0.0)));
assert(!isInf(f64(0.0)));
assert(!isInf(f64(-0.0)));
+ assert(isInf(math.inf(f16)));
+ assert(isInf(-math.inf(f16)));
assert(isInf(math.inf(f32)));
assert(isInf(-math.inf(f32)));
assert(isInf(math.inf(f64)));
@@ -61,10 +75,14 @@ test "math.isInf" {
}
test "math.isPositiveInf" {
+ assert(!isPositiveInf(f16(0.0)));
+ assert(!isPositiveInf(f16(-0.0)));
assert(!isPositiveInf(f32(0.0)));
assert(!isPositiveInf(f32(-0.0)));
assert(!isPositiveInf(f64(0.0)));
assert(!isPositiveInf(f64(-0.0)));
+ assert(isPositiveInf(math.inf(f16)));
+ assert(!isPositiveInf(-math.inf(f16)));
assert(isPositiveInf(math.inf(f32)));
assert(!isPositiveInf(-math.inf(f32)));
assert(isPositiveInf(math.inf(f64)));
@@ -72,10 +90,14 @@ test "math.isPositiveInf" {
}
test "math.isNegativeInf" {
+ assert(!isNegativeInf(f16(0.0)));
+ assert(!isNegativeInf(f16(-0.0)));
assert(!isNegativeInf(f32(0.0)));
assert(!isNegativeInf(f32(-0.0)));
assert(!isNegativeInf(f64(0.0)));
assert(!isNegativeInf(f64(-0.0)));
+ assert(!isNegativeInf(math.inf(f16)));
+ assert(isNegativeInf(-math.inf(f16)));
assert(!isNegativeInf(math.inf(f32)));
assert(isNegativeInf(-math.inf(f32)));
assert(!isNegativeInf(math.inf(f64)));
diff --git a/std/math/isnan.zig b/std/math/isnan.zig
index ca36df5106..e05c1428b0 100644
--- a/std/math/isnan.zig
+++ b/std/math/isnan.zig
@@ -5,6 +5,10 @@ const assert = std.debug.assert;
pub fn isNan(x: var) bool {
const T = @typeOf(x);
switch (T) {
+ f16 => {
+ const bits = @bitCast(u16, x);
+ return (bits & 0x7fff) > 0x7c00;
+ },
f32 => {
const bits = @bitCast(u32, x);
return bits & 0x7FFFFFFF > 0x7F800000;
@@ -26,8 +30,10 @@ pub fn isSignalNan(x: var) bool {
}
test "math.isNan" {
+ assert(isNan(math.nan(f16)));
assert(isNan(math.nan(f32)));
assert(isNan(math.nan(f64)));
+ assert(!isNan(f16(1.0)));
assert(!isNan(f32(1.0)));
assert(!isNan(f64(1.0)));
}
diff --git a/std/math/isnormal.zig b/std/math/isnormal.zig
index d5c1061cb1..22109936c4 100644
--- a/std/math/isnormal.zig
+++ b/std/math/isnormal.zig
@@ -5,6 +5,10 @@ const assert = std.debug.assert;
pub fn isNormal(x: var) bool {
const T = @typeOf(x);
switch (T) {
+ f16 => {
+ const bits = @bitCast(u16, x);
+ return (bits + 1024) & 0x7FFF >= 2048;
+ },
f32 => {
const bits = @bitCast(u32, x);
return (bits + 0x00800000) & 0x7FFFFFFF >= 0x01000000;
@@ -20,8 +24,13 @@ pub fn isNormal(x: var) bool {
}
test "math.isNormal" {
+ assert(!isNormal(math.nan(f16)));
assert(!isNormal(math.nan(f32)));
assert(!isNormal(math.nan(f64)));
+ assert(!isNormal(f16(0)));
+ assert(!isNormal(f32(0)));
+ assert(!isNormal(f64(0)));
+ assert(isNormal(f16(1.0)));
assert(isNormal(f32(1.0)));
assert(isNormal(f64(1.0)));
}
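The f16 branch of isNormal relies on a bit trick worth unpacking (my analysis, not stated in the diff): 1024 is one unit of the 5-bit exponent field sitting above the 10 mantissa bits, so bits + 1024 increments the exponent field by exactly one, and once the sign is masked off the >= 2048 comparison asks whether that incremented field is at least 2:

// exponent field 0  (zero/subnormal) -> 1 after the add:  < 2048, not normal
// exponent field 1..30 (normal)      -> 2..31:           >= 2048, normal
// exponent field 31 (inf/nan)        -> 32 carries into the sign bit, which
//                                       the 0x7FFF mask clears: not normal
test "isNormal f16 boundaries (illustrative)" {
    assert(!isNormal(@bitCast(f16, u16(0x0001)))); // smallest subnormal
    assert(isNormal(@bitCast(f16, u16(0x0400)))); // smallest normal
    assert(isNormal(@bitCast(f16, u16(0x7BFF)))); // largest normal
    assert(!isNormal(@bitCast(f16, u16(0x7C00)))); // infinity
}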
diff --git a/std/math/nan.zig b/std/math/nan.zig
index 22461711d0..2cbcbee81b 100644
--- a/std/math/nan.zig
+++ b/std/math/nan.zig
@@ -2,6 +2,7 @@ const math = @import("index.zig");
pub fn nan(comptime T: type) T {
return switch (T) {
+ f16 => @bitCast(f16, math.nan_u16),
f32 => @bitCast(f32, math.nan_u32),
f64 => @bitCast(f64, math.nan_u64),
else => @compileError("nan not implemented for " ++ @typeName(T)),
@@ -12,6 +13,7 @@ pub fn nan(comptime T: type) T {
// representation in the future when required.
pub fn snan(comptime T: type) T {
return switch (T) {
+ f16 => @bitCast(f16, math.nan_u16),
f32 => @bitCast(f32, math.nan_u32),
f64 => @bitCast(f64, math.nan_u64),
else => @compileError("snan not implemented for " ++ @typeName(T)),
diff --git a/std/math/signbit.zig b/std/math/signbit.zig
index a0191bed5c..8c6829dfcd 100644
--- a/std/math/signbit.zig
+++ b/std/math/signbit.zig
@@ -5,12 +5,18 @@ const assert = std.debug.assert;
pub fn signbit(x: var) bool {
const T = @typeOf(x);
return switch (T) {
+ f16 => signbit16(x),
f32 => signbit32(x),
f64 => signbit64(x),
else => @compileError("signbit not implemented for " ++ @typeName(T)),
};
}
+fn signbit16(x: f16) bool {
+ const bits = @bitCast(u16, x);
+ return bits >> 15 != 0;
+}
+
fn signbit32(x: f32) bool {
const bits = @bitCast(u32, x);
return bits >> 31 != 0;
@@ -22,10 +28,16 @@ fn signbit64(x: f64) bool {
}
test "math.signbit" {
+ assert(signbit(f16(4.0)) == signbit16(4.0));
assert(signbit(f32(4.0)) == signbit32(4.0));
assert(signbit(f64(4.0)) == signbit64(4.0));
}
+test "math.signbit16" {
+ assert(!signbit16(4.0));
+ assert(signbit16(-3.0));
+}
+
test "math.signbit32" {
assert(!signbit32(4.0));
assert(signbit32(-3.0));
diff --git a/std/math/sqrt.zig b/std/math/sqrt.zig
index 599008acff..e12ecf9683 100644
--- a/std/math/sqrt.zig
+++ b/std/math/sqrt.zig
@@ -31,10 +31,25 @@ pub fn sqrt(x: var) (if (@typeId(@typeOf(x)) == TypeId.Int) @IntType(false, @typ
}
test "math.sqrt" {
+ assert(sqrt(f16(0.0)) == @sqrt(f16, 0.0));
assert(sqrt(f32(0.0)) == @sqrt(f32, 0.0));
assert(sqrt(f64(0.0)) == @sqrt(f64, 0.0));
}
+test "math.sqrt16" {
+ const epsilon = 0.000001;
+
+ assert(@sqrt(f16, 0.0) == 0.0);
+ assert(math.approxEq(f16, @sqrt(f16, 2.0), 1.414214, epsilon));
+ assert(math.approxEq(f16, @sqrt(f16, 3.6), 1.897367, epsilon));
+ assert(@sqrt(f16, 4.0) == 2.0);
+ assert(math.approxEq(f16, @sqrt(f16, 7.539840), 2.745877, epsilon));
+ assert(math.approxEq(f16, @sqrt(f16, 19.230934), 4.385309, epsilon));
+ assert(@sqrt(f16, 64.0) == 8.0);
+ assert(math.approxEq(f16, @sqrt(f16, 64.1), 8.006248, epsilon));
+ assert(math.approxEq(f16, @sqrt(f16, 8942.230469), 94.563370, epsilon));
+}
+
test "math.sqrt32" {
const epsilon = 0.000001;
@@ -63,6 +78,14 @@ test "math.sqrt64" {
assert(math.approxEq(f64, @sqrt(f64, 8942.230469), 94.563367, epsilon));
}
+test "math.sqrt16.special" {
+ assert(math.isPositiveInf(@sqrt(f16, math.inf(f16))));
+ assert(@sqrt(f16, 0.0) == 0.0);
+ assert(@sqrt(f16, -0.0) == -0.0);
+ assert(math.isNan(@sqrt(f16, -1.0)));
+ assert(math.isNan(@sqrt(f16, math.nan(f16))));
+}
+
test "math.sqrt32.special" {
assert(math.isPositiveInf(@sqrt(f32, math.inf(f32))));
assert(@sqrt(f32, 0.0) == 0.0);
diff --git a/std/mem.zig b/std/mem.zig
index b02589b0dd..ba59faf711 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -31,16 +31,8 @@ pub const Allocator = struct {
/// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
freeFn: fn (self: *Allocator, old_mem: []u8) void,
- /// Call destroy with the result
- pub fn create(self: *Allocator, comptime T: type) !*T {
- if (@sizeOf(T) == 0) return *{};
- const slice = try self.alloc(T, 1);
- return &slice[0];
- }
-
- /// Call destroy with the result
- /// TODO once #733 is solved, this will replace create
- pub fn construct(self: *Allocator, init: var) Error!*@typeOf(init) {
+ /// Call `destroy` with the result
+ pub fn create(self: *Allocator, init: var) Error!*@typeOf(init) {
const T = @typeOf(init);
if (@sizeOf(T) == 0) return &{};
const slice = try self.alloc(T, 1);
@@ -49,7 +41,7 @@ pub const Allocator = struct {
return ptr;
}
- /// `ptr` should be the return value of `construct` or `create`
+ /// `ptr` should be the return value of `create`
pub fn destroy(self: *Allocator, ptr: var) void {
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
self.freeFn(self, non_const_ptr[0..@sizeOf(@typeOf(ptr).Child)]);
@@ -70,7 +62,7 @@ pub const Allocator = struct {
for (byte_slice) |*byte| {
byte.* = undefined;
}
- return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
+ return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
pub fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
@@ -86,7 +78,7 @@ pub const Allocator = struct {
return ([*]align(alignment) T)(undefined)[0..0];
}
- const old_byte_slice = ([]u8)(old_mem);
+ const old_byte_slice = @sliceToBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.reallocFn(self, old_byte_slice, byte_count, alignment);
assert(byte_slice.len == byte_count);
@@ -96,7 +88,7 @@ pub const Allocator = struct {
byte.* = undefined;
}
}
- return ([]T)(@alignCast(alignment, byte_slice));
+ return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
/// Reallocate, but `n` must be less than or equal to `old_mem.len`.
@@ -118,13 +110,13 @@ pub const Allocator = struct {
// n <= old_mem.len and the multiplication didn't overflow for that operation.
const byte_count = @sizeOf(T) * n;
- const byte_slice = self.reallocFn(self, ([]u8)(old_mem), byte_count, alignment) catch unreachable;
+ const byte_slice = self.reallocFn(self, @sliceToBytes(old_mem), byte_count, alignment) catch unreachable;
assert(byte_slice.len == byte_count);
- return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
+ return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
pub fn free(self: *Allocator, memory: var) void {
- const bytes = ([]const u8)(memory);
+ const bytes = @sliceToBytes(memory);
if (bytes.len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
self.freeFn(self, non_const_ptr[0..bytes.len]);
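This hunk is the API change the rest of the diff adapts to: the old two-step create(T) plus manual initialization collapses into construct's signature under the create name, so allocation and initialization happen in one call. A before/after sketch of a call site (the Node type is illustrative, not from the diff):

const Node = struct {
    next: ?*Node,
    data: i32,
};

fn makeNode(allocator: *Allocator) !*Node {
    // Old style: allocate uninitialized memory, then assign:
    //     const node = try allocator.create(Node);
    //     errdefer allocator.destroy(node);
    //     node.* = Node{ .next = null, .data = 42 };
    // New style: the initializer travels with the allocation; errdefer still
    // follows the create, as in the call sites updated throughout this diff.
    const node = try allocator.create(Node{ .next = null, .data = 42 });
    errdefer allocator.destroy(node);
    return node;
}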
diff --git a/std/net.zig b/std/net.zig
index f21611ff91..8c1aeb92d7 100644
--- a/std/net.zig
+++ b/std/net.zig
@@ -68,7 +68,7 @@ pub const Address = struct {
pub fn parseIp4(buf: []const u8) !u32 {
var result: u32 = undefined;
- const out_ptr = ([]u8)((*[1]u32)(&result)[0..]);
+ const out_ptr = @sliceToBytes((*[1]u32)(&result)[0..]);
var x: u8 = 0;
var index: u8 = 0;
diff --git a/std/os/child_process.zig b/std/os/child_process.zig
index 3a0fa7f461..693129eea8 100644
--- a/std/os/child_process.zig
+++ b/std/os/child_process.zig
@@ -85,10 +85,7 @@ pub const ChildProcess = struct {
/// First argument in argv is the executable.
/// On success must call deinit.
pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
- const child = try allocator.create(ChildProcess);
- errdefer allocator.destroy(child);
-
- child.* = ChildProcess{
+ const child = try allocator.create(ChildProcess{
.allocator = allocator,
.argv = argv,
.pid = undefined,
@@ -109,8 +106,8 @@ pub const ChildProcess = struct {
.stdin_behavior = StdIo.Inherit,
.stdout_behavior = StdIo.Inherit,
.stderr_behavior = StdIo.Inherit,
- };
-
+ });
+ errdefer allocator.destroy(child);
return child;
}
@@ -318,7 +315,7 @@ pub const ChildProcess = struct {
// Here we potentially return the fork child's error
// from the parent pid.
if (err_int != @maxValue(ErrInt)) {
- return SpawnError(err_int);
+ return @errSetCast(SpawnError, @intToError(err_int));
}
return statusToTerm(status);
@@ -756,7 +753,7 @@ fn destroyPipe(pipe: *const [2]i32) void {
// Child of fork calls this to report an error to the fork parent.
// Then the child exits.
fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
- _ = writeIntFd(fd, ErrInt(err));
+ _ = writeIntFd(fd, ErrInt(@errorToInt(err)));
posix.exit(1);
}
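The two child_process hunks above are halves of one round trip: the forked child serializes a SpawnError to an integer with @errorToInt before writing it down the pipe, and the parent reconstructs it with @intToError narrowed back into the error set via @errSetCast. The pair in isolation (a sketch, using the builtins exactly as the diff does):

fn errRoundTrip(err: ChildProcess.SpawnError) ChildProcess.SpawnError {
    const as_int = ErrInt(@errorToInt(err)); // child side: error -> integer
    return @errSetCast(ChildProcess.SpawnError, @intToError(as_int)); // parent side
}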
diff --git a/std/os/index.zig b/std/os/index.zig
index f1c3ab2128..52b36c351c 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -1805,7 +1805,7 @@ pub fn argsAlloc(allocator: *mem.Allocator) ![]const []u8 {
const buf = try allocator.alignedAlloc(u8, @alignOf([]u8), total_bytes);
errdefer allocator.free(buf);
- const result_slice_list = ([][]u8)(buf[0..slice_list_bytes]);
+ const result_slice_list = @bytesToSlice([]u8, buf[0..slice_list_bytes]);
const result_contents = buf[slice_list_bytes..];
mem.copy(u8, result_contents, contents_slice);
@@ -2468,7 +2468,7 @@ pub const Thread = struct {
data: Data,
pub const use_pthreads = is_posix and builtin.link_libc;
- const Data = if (use_pthreads)
+ pub const Data = if (use_pthreads)
struct {
handle: c.pthread_t,
stack_addr: usize,
@@ -2582,10 +2582,16 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) orelse return SpawnThreadError.OutOfMemory;
errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0);
const bytes = @ptrCast([*]u8, bytes_ptr)[0..byte_count];
- const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable;
- outer_context.inner = context;
- outer_context.thread.data.heap_handle = heap_handle;
- outer_context.thread.data.alloc_start = bytes_ptr;
+ const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext{
+ .thread = Thread{
+ .data = Thread.Data{
+ .heap_handle = heap_handle,
+ .alloc_start = bytes_ptr,
+ .handle = undefined,
+ },
+ },
+ .inner = context,
+ }) catch unreachable;
const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(*c_void, &outer_context.inner);
outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) orelse {
diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig
index cb4788ba17..45b205451d 100644
--- a/std/os/windows/util.zig
+++ b/std/os/windows/util.zig
@@ -79,7 +79,7 @@ pub fn windowsIsCygwinPty(handle: windows.HANDLE) bool {
const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]);
const name_bytes = name_info_bytes[size .. size + usize(name_info.FileNameLength)];
- const name_wide = ([]u16)(name_bytes);
+ const name_wide = @bytesToSlice(u16, name_bytes);
return mem.indexOf(u16, name_wide, []u16{ 'm', 's', 'y', 's', '-' }) != null or
mem.indexOf(u16, name_wide, []u16{ '-', 'p', 't', 'y' }) != null;
}
diff --git a/std/rand/index.zig b/std/rand/index.zig
index 13694f4c09..7daa558f13 100644
--- a/std/rand/index.zig
+++ b/std/rand/index.zig
@@ -116,7 +116,7 @@ pub const Random = struct {
pub fn floatNorm(r: *Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.NormDist);
switch (T) {
- f32 => return f32(value),
+ f32 => return @floatCast(f32, value),
f64 => return value,
else => @compileError("unknown floating point type"),
}
@@ -128,7 +128,7 @@ pub const Random = struct {
pub fn floatExp(r: *Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.ExpDist);
switch (T) {
- f32 => return f32(value),
+ f32 => return @floatCast(f32, value),
f64 => return value,
else => @compileError("unknown floating point type"),
}
diff --git a/std/rand/ziggurat.zig b/std/rand/ziggurat.zig
index 774d3bd52a..f7a1359f17 100644
--- a/std/rand/ziggurat.zig
+++ b/std/rand/ziggurat.zig
@@ -84,12 +84,12 @@ fn ZigTableGen(
for (tables.x[2..256]) |*entry, i| {
const last = tables.x[2 + i - 1];
- *entry = f_inv(v / last + f(last));
+ entry.* = f_inv(v / last + f(last));
}
tables.x[256] = 0;
for (tables.f[0..]) |*entry, i| {
- *entry = f(tables.x[i]);
+ entry.* = f(tables.x[i]);
}
return tables;
@@ -160,3 +160,7 @@ test "ziggurant exp dist sanity" {
_ = prng.random.floatExp(f64);
}
}
+
+test "ziggurat table gen" {
+ const table = NormDist;
+}
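The two one-line ziggurat hunks track a syntax migration of this era: prefix pointer dereference was removed in favor of the postfix form. For reference (illustrative):

// old: *entry = f(tables.x[i]);
// new: entry.* = f(tables.x[i]);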
diff --git a/std/special/builtin.zig b/std/special/builtin.zig
index 4a01658a50..deccec68b4 100644
--- a/std/special/builtin.zig
+++ b/std/special/builtin.zig
@@ -210,7 +210,9 @@ fn generic_fmod(comptime T: type, x: T, y: T) T {
}
fn isNan(comptime T: type, bits: T) bool {
- if (T == u32) {
+ if (T == u16) {
+ return (bits & 0x7fff) > 0x7c00;
+ } else if (T == u32) {
return (bits & 0x7fffffff) > 0x7f800000;
} else if (T == u64) {
return (bits & (@maxValue(u64) >> 1)) > (u64(0x7ff) << 52);
diff --git a/std/special/compiler_rt/extendXfYf2.zig b/std/special/compiler_rt/extendXfYf2.zig
new file mode 100644
index 0000000000..099e27b74a
--- /dev/null
+++ b/std/special/compiler_rt/extendXfYf2.zig
@@ -0,0 +1,89 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+pub extern fn __extenddftf2(a: f64) f128 {
+ return extendXfYf2(f128, f64, a);
+}
+
+pub extern fn __extendsftf2(a: f32) f128 {
+ return extendXfYf2(f128, f32, a);
+}
+
+pub extern fn __extendhfsf2(a: u16) f32 {
+ return extendXfYf2(f32, f16, @bitCast(f16, a));
+}
+
+const CHAR_BIT = 8;
+
+inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
+ const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits);
+ const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits);
+ const srcSigBits = std.math.floatMantissaBits(src_t);
+ const dstSigBits = std.math.floatMantissaBits(dst_t);
+ const SrcShift = std.math.Log2Int(src_rep_t);
+ const DstShift = std.math.Log2Int(dst_rep_t);
+
+ // Various constants whose values follow from the type parameters.
+ // Any reasonable optimizer will fold and propagate all of these.
+ const srcBits = @sizeOf(src_t) * CHAR_BIT;
+ const srcExpBits = srcBits - srcSigBits - 1;
+ const srcInfExp = (1 << srcExpBits) - 1;
+ const srcExpBias = srcInfExp >> 1;
+
+ const srcMinNormal = 1 << srcSigBits;
+ const srcInfinity = srcInfExp << srcSigBits;
+ const srcSignMask = 1 << (srcSigBits + srcExpBits);
+ const srcAbsMask = srcSignMask - 1;
+ const srcQNaN = 1 << (srcSigBits - 1);
+ const srcNaNCode = srcQNaN - 1;
+
+ const dstBits = @sizeOf(dst_t) * CHAR_BIT;
+ const dstExpBits = dstBits - dstSigBits - 1;
+ const dstInfExp = (1 << dstExpBits) - 1;
+ const dstExpBias = dstInfExp >> 1;
+
+ const dstMinNormal: dst_rep_t = dst_rep_t(1) << dstSigBits;
+
+ // Break a into a sign and representation of the absolute value
+ const aRep: src_rep_t = @bitCast(src_rep_t, a);
+ const aAbs: src_rep_t = aRep & srcAbsMask;
+ const sign: src_rep_t = aRep & srcSignMask;
+ var absResult: dst_rep_t = undefined;
+
+ if (aAbs -% srcMinNormal < srcInfinity - srcMinNormal) {
+ // a is a normal number.
+ // Extend to the destination type by shifting the significand and
+ // exponent into the proper position and rebiasing the exponent.
+ absResult = dst_rep_t(aAbs) << (dstSigBits - srcSigBits);
+ absResult += (dstExpBias - srcExpBias) << dstSigBits;
+ } else if (aAbs >= srcInfinity) {
+ // a is NaN or infinity.
+ // Conjure the result by beginning with infinity, then setting the qNaN
+ // bit (if needed) and right-aligning the rest of the trailing NaN
+ // payload field.
+ absResult = dstInfExp << dstSigBits;
+ absResult |= dst_rep_t(aAbs & srcQNaN) << (dstSigBits - srcSigBits);
+ absResult |= dst_rep_t(aAbs & srcNaNCode) << (dstSigBits - srcSigBits);
+ } else if (aAbs != 0) {
+ // a is denormal.
+ // renormalize the significand and clear the leading bit, then insert
+ // the correct adjusted exponent in the destination type.
+ const scale: u32 = @clz(aAbs) - @clz(src_rep_t(srcMinNormal));
+ absResult = dst_rep_t(aAbs) << @intCast(DstShift, dstSigBits - srcSigBits + scale);
+ absResult ^= dstMinNormal;
+ const resultExponent: u32 = dstExpBias - srcExpBias - scale + 1;
+ absResult |= @intCast(dst_rep_t, resultExponent) << dstSigBits;
+ } else {
+ // a is zero.
+ absResult = 0;
+ }
+
+ // Apply the signbit to (dst_t)abs(a).
+ const result: dst_rep_t align(@alignOf(dst_t)) = absResult | dst_rep_t(sign) << (dstBits - srcBits);
+ return @bitCast(dst_t, result);
+}
+
+test "import extendXfYf2" {
+ _ = @import("extendXfYf2_test.zig");
+}
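Two details in extendXfYf2.zig are easy to miss. __extendhfsf2 accepts a u16 rather than an f16 because the compiler-rt ABI passes half floats as integers, hence the @bitCast at the boundary (my reading of the wrapper). And for normal numbers the conversion is just a significand shift plus an exponent rebias; a worked case matching the 0x3c01 vector in the test file below:

// extending f16 1 + 2**-10 (bits 0x3c01) to f32:
//   srcSigBits = 10, dstSigBits = 23, srcExpBias = 15, dstExpBias = 127
//   absResult  = 0x3c01 << (23 - 10)  = 0x07802000
//   absResult += (127 - 15) << 23     = 0x38000000  -> total 0x3f802000
// 0x3f802000 is exactly the f32 bit pattern of 1 + 2**-10.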
diff --git a/std/special/compiler_rt/extendXfYf2_test.zig b/std/special/compiler_rt/extendXfYf2_test.zig
new file mode 100644
index 0000000000..185c83a0ef
--- /dev/null
+++ b/std/special/compiler_rt/extendXfYf2_test.zig
@@ -0,0 +1,155 @@
+const __extenddftf2 = @import("extendXfYf2.zig").__extenddftf2;
+const __extendhfsf2 = @import("extendXfYf2.zig").__extendhfsf2;
+const __extendsftf2 = @import("extendXfYf2.zig").__extendsftf2;
+const assert = @import("std").debug.assert;
+
+fn test__extenddftf2(a: f64, expectedHi: u64, expectedLo: u64) void {
+ const x = __extenddftf2(a);
+
+ const rep = @bitCast(u128, x);
+ const hi = @intCast(u64, rep >> 64);
+ const lo = @truncate(u64, rep);
+
+ if (hi == expectedHi and lo == expectedLo)
+ return;
+
+ // test the other possible NaN representation (signaling NaN)
+ if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
+ if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
+ ((hi & 0xffffffffffff) > 0 or lo > 0))
+ {
+ return;
+ }
+ }
+
+ @panic("__extenddftf2 test failure");
+}
+
+fn test__extendhfsf2(a: u16, expected: u32) void {
+ const x = __extendhfsf2(a);
+ const rep = @bitCast(u32, x);
+
+ if (rep == expected) {
+ if (rep & 0x7fffffff > 0x7f800000) {
+ return; // NaN is always unequal.
+ }
+ if (x == @bitCast(f32, expected)) {
+ return;
+ }
+ }
+
+ @panic("__extendhfsf2 test failure");
+}
+
+fn test__extendsftf2(a: f32, expectedHi: u64, expectedLo: u64) void {
+ const x = __extendsftf2(a);
+
+ const rep = @bitCast(u128, x);
+ const hi = @intCast(u64, rep >> 64);
+ const lo = @truncate(u64, rep);
+
+ if (hi == expectedHi and lo == expectedLo)
+ return;
+
+ // test the other possible NaN representation (signaling NaN)
+ if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
+ if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
+ ((hi & 0xffffffffffff) > 0 or lo > 0))
+ {
+ return;
+ }
+ }
+
+ @panic("__extendsftf2 test failure");
+}
+
+test "extenddftf2" {
+ // qNaN
+ test__extenddftf2(makeQNaN64(), 0x7fff800000000000, 0x0);
+
+ // NaN
+ test__extenddftf2(makeNaN64(0x7100000000000), 0x7fff710000000000, 0x0);
+
+ // inf
+ test__extenddftf2(makeInf64(), 0x7fff000000000000, 0x0);
+
+ // zero
+ test__extenddftf2(0.0, 0x0, 0x0);
+
+ test__extenddftf2(0x1.23456789abcdefp+5, 0x400423456789abcd, 0xf000000000000000);
+
+ test__extenddftf2(0x1.edcba987654321fp-9, 0x3ff6edcba9876543, 0x2000000000000000);
+
+ test__extenddftf2(0x1.23456789abcdefp+45, 0x402c23456789abcd, 0xf000000000000000);
+
+ test__extenddftf2(0x1.edcba987654321fp-45, 0x3fd2edcba9876543, 0x2000000000000000);
+}
+
+test "extendhfsf2" {
+ test__extendhfsf2(0x7e00, 0x7fc00000); // qNaN
+ test__extendhfsf2(0x7f00, 0x7fe00000); // sNaN
+ test__extendhfsf2(0x7c01, 0x7f802000); // sNaN
+
+ test__extendhfsf2(0, 0); // 0
+ test__extendhfsf2(0x8000, 0x80000000); // -0
+
+ test__extendhfsf2(0x7c00, 0x7f800000); // inf
+ test__extendhfsf2(0xfc00, 0xff800000); // -inf
+
+ test__extendhfsf2(0x0001, 0x33800000); // denormal (min), 2**-24
+ test__extendhfsf2(0x8001, 0xb3800000); // denormal (min), -2**-24
+
+ test__extendhfsf2(0x03ff, 0x387fc000); // denormal (max), 2**-14 - 2**-24
+ test__extendhfsf2(0x83ff, 0xb87fc000); // denormal (max), -2**-14 + 2**-24
+
+ test__extendhfsf2(0x0400, 0x38800000); // normal (min), 2**-14
+ test__extendhfsf2(0x8400, 0xb8800000); // normal (min), -2**-14
+
+ test__extendhfsf2(0x7bff, 0x477fe000); // normal (max), 65504
+ test__extendhfsf2(0xfbff, 0xc77fe000); // normal (max), -65504
+
+ test__extendhfsf2(0x3c01, 0x3f802000); // normal, 1 + 2**-10
+ test__extendhfsf2(0xbc01, 0xbf802000); // normal, -1 - 2**-10
+
+ test__extendhfsf2(0x3555, 0x3eaaa000); // normal, approx. 1/3
+ test__extendhfsf2(0xb555, 0xbeaaa000); // normal, approx. -1/3
+}
+
+test "extendsftf2" {
+ // qNaN
+ test__extendsftf2(makeQNaN32(), 0x7fff800000000000, 0x0);
+ // NaN
+ test__extendsftf2(makeNaN32(0x410000), 0x7fff820000000000, 0x0);
+ // inf
+ test__extendsftf2(makeInf32(), 0x7fff000000000000, 0x0);
+ // zero
+ test__extendsftf2(0.0, 0x0, 0x0);
+ test__extendsftf2(0x1.23456p+5, 0x4004234560000000, 0x0);
+ test__extendsftf2(0x1.edcbap-9, 0x3ff6edcba0000000, 0x0);
+ test__extendsftf2(0x1.23456p+45, 0x402c234560000000, 0x0);
+ test__extendsftf2(0x1.edcbap-45, 0x3fd2edcba0000000, 0x0);
+}
+
+fn makeQNaN64() f64 {
+ return @bitCast(f64, u64(0x7ff8000000000000));
+}
+
+fn makeInf64() f64 {
+ return @bitCast(f64, u64(0x7ff0000000000000));
+}
+
+fn makeNaN64(rand: u64) f64 {
+ return @bitCast(f64, 0x7ff0000000000000 | (rand & 0xfffffffffffff));
+}
+
+fn makeQNaN32() f32 {
+ return @bitCast(f32, u32(0x7fc00000));
+}
+
+fn makeNaN32(rand: u32) f32 {
+ return @bitCast(f32, 0x7f800000 | (rand & 0x7fffff));
+}
+
+fn makeInf32() f32 {
+ return @bitCast(f32, u32(0x7f800000));
+}
diff --git a/std/special/compiler_rt/floattidf.zig b/std/special/compiler_rt/floattidf.zig
new file mode 100644
index 0000000000..2a24c64efe
--- /dev/null
+++ b/std/special/compiler_rt/floattidf.zig
@@ -0,0 +1,69 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+const DBL_MANT_DIG = 53;
+
+pub extern fn __floattidf(arg: i128) f64 {
+ @setRuntimeSafety(is_test);
+
+ if (arg == 0)
+ return 0.0;
+
+ var ai = arg;
+ const N: u32 = 128;
+ const si = ai >> @intCast(u7, (N - 1));
+ ai = ((ai ^ si) -% si);
+ var a = @bitCast(u128, ai);
+
+ const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
+ var e: i32 = sd - 1; // exponent
+ if (sd > DBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit DBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit DBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ DBL_MANT_DIG + 1 => {
+ a <<= 1;
+ },
+ DBL_MANT_DIG + 2 => {},
+ else => {
+ const shift1_amt = @intCast(i32, sd - (DBL_MANT_DIG + 2));
+ const shift1_amt_u7 = @intCast(u7, shift1_amt);
+
+ const shift2_amt = @intCast(i32, N + (DBL_MANT_DIG + 2)) - sd;
+ const shift2_amt_u7 = @intCast(u7, shift2_amt);
+
+ a = (a >> shift1_amt_u7) | @boolToInt((a & (@intCast(u128, @maxValue(u128)) >> shift2_amt_u7)) != 0);
+ },
+ }
+ // finish
+ a |= @boolToInt((a & 4) != 0); // Or P into R
+ a += 1; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
+ if ((a & (u128(1) << DBL_MANT_DIG)) != 0) {
+ a >>= 1;
+ e += 1;
+ }
+ // a is now rounded to DBL_MANT_DIG bits
+ } else {
+ a <<= @intCast(u7, DBL_MANT_DIG - sd);
+ // a is now rounded to DBL_MANT_DIG bits
+ }
+
+ const s = @bitCast(u128, arg) >> (128 - 32);
+ const high: u64 = (@intCast(u64, s) & 0x80000000) | // sign
+ (@intCast(u32, (e + 1023)) << 20) | // exponent
+ (@truncate(u32, a >> 32) & 0x000fffff); // mantissa-high
+ const low: u64 = @truncate(u32, a); // mantissa-low
+
+ return @bitCast(f64, low | (high << 32));
+}
+
+test "import floattidf" {
+ _ = @import("floattidf_test.zig");
+}
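The P/Q/R comments in __floattidf describe round-to-nearest, ties-to-even done with plain integer arithmetic. After the shifts, R (bit 0) holds the sticky OR of all discarded low bits, Q (bit 1) is the half bit, and P (bit 2) is the lowest bit that will survive. Annotating the three steps (my paraphrase):

// a |= @boolToInt((a & 4) != 0);  // fold P into R: an exact half with an even
//                                 //   P keeps R == 0 and will not round up
// a += 1;                         // carries through Q into P only when Q == 1
//                                 //   and (R | P) == 1, i.e. nearest-even
// a >>= 2;                        // drop Q and R; the carry may have added a
//                                 //   significant bit, normalized just below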
diff --git a/std/special/compiler_rt/floattidf_test.zig b/std/special/compiler_rt/floattidf_test.zig
new file mode 100644
index 0000000000..25dc595052
--- /dev/null
+++ b/std/special/compiler_rt/floattidf_test.zig
@@ -0,0 +1,84 @@
+const __floattidf = @import("floattidf.zig").__floattidf;
+const assert = @import("std").debug.assert;
+
+fn test__floattidf(a: i128, expected: f64) void {
+ const x = __floattidf(a);
+ assert(x == expected);
+}
+
+test "floattidf" {
+ test__floattidf(0, 0.0);
+
+ test__floattidf(1, 1.0);
+ test__floattidf(2, 2.0);
+ test__floattidf(20, 20.0);
+ test__floattidf(-1, -1.0);
+ test__floattidf(-2, -2.0);
+ test__floattidf(-20, -20.0);
+
+ test__floattidf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ test__floattidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ test__floattidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ test__floattidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+
+ test__floattidf(make_ti(0x8000008000000000, 0), -0x1.FFFFFEp+126);
+ test__floattidf(make_ti(0x8000000000000800, 0), -0x1.FFFFFFFFFFFFEp+126);
+ test__floattidf(make_ti(0x8000010000000000, 0), -0x1.FFFFFCp+126);
+ test__floattidf(make_ti(0x8000000000001000, 0), -0x1.FFFFFFFFFFFFCp+126);
+
+ test__floattidf(make_ti(0x8000000000000000, 0), -0x1.000000p+127);
+ test__floattidf(make_ti(0x8000000000000001, 0), -0x1.000000p+127);
+
+ test__floattidf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floattidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ test__floattidf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ test__floattidf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ test__floattidf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ test__floattidf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+
+ test__floattidf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ test__floattidf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ test__floattidf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ test__floattidf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ test__floattidf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ test__floattidf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DA1, 0x1.1A3CFE870496Dp+57);
+ test__floattidf(0x023479FD0E092DB0, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DB8, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DB6, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DBF, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DC1, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DC7, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DC8, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DCF, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DD0, 0x1.1A3CFE870496Ep+57);
+ test__floattidf(0x023479FD0E092DD1, 0x1.1A3CFE870496Fp+57);
+ test__floattidf(0x023479FD0E092DD8, 0x1.1A3CFE870496Fp+57);
+ test__floattidf(0x023479FD0E092DDF, 0x1.1A3CFE870496Fp+57);
+ test__floattidf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+
+ test__floattidf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496Dp+121);
+ test__floattidf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496Ep+121);
+ test__floattidf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496Fp+121);
+ test__floattidf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496Fp+121);
+ test__floattidf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496Fp+121);
+ test__floattidf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
+}
+
+fn make_ti(high: u64, low: u64) i128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return @bitCast(i128, result);
+}
diff --git a/std/special/compiler_rt/floattisf.zig b/std/special/compiler_rt/floattisf.zig
new file mode 100644
index 0000000000..4618a86444
--- /dev/null
+++ b/std/special/compiler_rt/floattisf.zig
@@ -0,0 +1,69 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+const FLT_MANT_DIG = 24;
+
+pub extern fn __floattisf(arg: i128) f32 {
+ @setRuntimeSafety(is_test);
+
+ if (arg == 0)
+ return 0.0;
+
+ var ai = arg;
+ const N: u32 = 128;
+ const si = ai >> @intCast(u7, (N - 1));
+ ai = ((ai ^ si) -% si);
+ var a = @bitCast(u128, ai);
+
+ const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
+ var e: i32 = sd - 1; // exponent
+
+ if (sd > FLT_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit FLT_MANT_DIG-1 bits to the right of 1
+ // Q = bit FLT_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ FLT_MANT_DIG + 1 => {
+ a <<= 1;
+ },
+ FLT_MANT_DIG + 2 => {},
+ else => {
+ const shift1_amt = @intCast(i32, sd - (FLT_MANT_DIG + 2));
+ const shift1_amt_u7 = @intCast(u7, shift1_amt);
+
+ const shift2_amt = @intCast(i32, N + (FLT_MANT_DIG + 2)) - sd;
+ const shift2_amt_u7 = @intCast(u7, shift2_amt);
+
+ a = (a >> shift1_amt_u7) | @boolToInt((a & (@intCast(u128, @maxValue(u128)) >> shift2_amt_u7)) != 0);
+ },
+ }
+ // finish
+ a |= @boolToInt((a & 4) != 0); // Or P into R
+ a += 1; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
+ if ((a & (u128(1) << FLT_MANT_DIG)) != 0) {
+ a >>= 1;
+ e += 1;
+ }
+ // a is now rounded to FLT_MANT_DIG bits
+ } else {
+ a <<= @intCast(u7, FLT_MANT_DIG - sd);
+ // a is now rounded to FLT_MANT_DIG bits
+ }
+
+ const s = @bitCast(u128, arg) >> (128 - 32);
+ const r = (@intCast(u32, s) & 0x80000000) | // sign
+ (@intCast(u32, (e + 127)) << 23) | // exponent
+ (@truncate(u32, a) & 0x007fffff); // mantissa
+
+ return @bitCast(f32, r);
+}
+
+test "import floattisf" {
+ _ = @import("floattisf_test.zig");
+}
diff --git a/std/special/compiler_rt/floattisf_test.zig b/std/special/compiler_rt/floattisf_test.zig
new file mode 100644
index 0000000000..ecb8eac60a
--- /dev/null
+++ b/std/special/compiler_rt/floattisf_test.zig
@@ -0,0 +1,60 @@
+const __floattisf = @import("floattisf.zig").__floattisf;
+const assert = @import("std").debug.assert;
+
+fn test__floattisf(a: i128, expected: f32) void {
+ const x = __floattisf(a);
+ assert(x == expected);
+}
+
+test "floattisf" {
+ test__floattisf(0, 0.0);
+
+ test__floattisf(1, 1.0);
+ test__floattisf(2, 2.0);
+ test__floattisf(-1, -1.0);
+ test__floattisf(-2, -2.0);
+
+ test__floattisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ test__floattisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+
+ test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000008000000000), -0x1.FFFFFEp+62);
+ test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000010000000000), -0x1.FFFFFCp+62);
+
+ test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000000000000000), -0x1.000000p+63);
+ test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000000000000001), -0x1.000000p+63);
+
+ test__floattisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floattisf(0x0007FB72EA000000, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72EB000000, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72EBFFFFFF, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72EC000000, 0x1.FEDCBCp+50);
+ test__floattisf(0x0007FB72E8000001, 0x1.FEDCBAp+50);
+
+ test__floattisf(0x0007FB72E6000000, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72E7000000, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72E7FFFFFF, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72E4000001, 0x1.FEDCBAp+50);
+ test__floattisf(0x0007FB72E4000000, 0x1.FEDCB8p+50);
+
+ test__floattisf(make_ti(0x0007FB72E8000000, 0), 0x1.FEDCBAp+114);
+
+ test__floattisf(make_ti(0x0007FB72EA000000, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72EB000000, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72EBFFFFFF, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72EC000000, 0), 0x1.FEDCBCp+114);
+ test__floattisf(make_ti(0x0007FB72E8000001, 0), 0x1.FEDCBAp+114);
+
+ test__floattisf(make_ti(0x0007FB72E6000000, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72E7000000, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72E7FFFFFF, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72E4000001, 0), 0x1.FEDCBAp+114);
+ test__floattisf(make_ti(0x0007FB72E4000000, 0), 0x1.FEDCB8p+114);
+}
+
+fn make_ti(high: u64, low: u64) i128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return @bitCast(i128, result);
+}
diff --git a/std/special/compiler_rt/floattitf.zig b/std/special/compiler_rt/floattitf.zig
new file mode 100644
index 0000000000..4da2c145fa
--- /dev/null
+++ b/std/special/compiler_rt/floattitf.zig
@@ -0,0 +1,69 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+const LDBL_MANT_DIG = 113;
+
+pub extern fn __floattitf(arg: i128) f128 {
+ @setRuntimeSafety(is_test);
+
+ if (arg == 0)
+ return 0.0;
+
+ var ai = arg;
+ const N: u32 = 128;
+ const si = ai >> @intCast(u7, (N - 1));
+ ai = ((ai ^ si) -% si);
+ var a = @bitCast(u128, ai);
+
+ const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
+ var e: i32 = sd - 1; // exponent
+ if (sd > LDBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit LDBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit LDBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ LDBL_MANT_DIG + 1 => {
+ a <<= 1;
+ },
+ LDBL_MANT_DIG + 2 => {},
+ else => {
+ const shift1_amt = @intCast(i32, sd - (LDBL_MANT_DIG + 2));
+ const shift1_amt_u7 = @intCast(u7, shift1_amt);
+
+ const shift2_amt = @intCast(i32, N + (LDBL_MANT_DIG + 2)) - sd;
+ const shift2_amt_u7 = @intCast(u7, shift2_amt);
+
+ a = (a >> shift1_amt_u7) | @boolToInt((a & (@intCast(u128, @maxValue(u128)) >> shift2_amt_u7)) != 0);
+ },
+ }
+ // finish
+ a |= @boolToInt((a & 4) != 0); // Or P into R
+ a += 1; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
+ if ((a & (u128(1) << LDBL_MANT_DIG)) != 0) {
+ a >>= 1;
+ e += 1;
+ }
+ // a is now rounded to LDBL_MANT_DIG bits
+ } else {
+ a <<= @intCast(u7, LDBL_MANT_DIG - sd);
+ // a is now rounded to LDBL_MANT_DIG bits
+ }
+
+ const s = @bitCast(u128, arg) >> (128 - 64);
+ const high: u128 = (@intCast(u64, s) & 0x8000000000000000) | // sign
+ (@intCast(u64, (e + 16383)) << 48) | // exponent
+ (@truncate(u64, a >> 64) & 0x0000ffffffffffff); // mantissa-high
+ const low = @truncate(u64, a); // mantissa-low
+
+ return @bitCast(f128, low | (high << 64));
+}
+
+test "import floattitf" {
+ _ = @import("floattitf_test.zig");
+}
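
For reference, the final assembly targets the IEEE binary128 layout: 1 sign
bit, a 15-bit exponent (bias 16383) in bits 48..62 of the high 64-bit word,
and a 112-bit mantissa split 48/64 across the two words. A small sanity
sketch of that layout (hypothetical test, assuming the usual representation
of 1.0):

    test "binary128 layout of 1.0" {
        const assert = @import("std").debug.assert;
        // 1.0 = sign 0, exponent field 16383, all-zero mantissa.
        const repr = (u128(16383) << 48) << 64;
        assert(@bitCast(f128, repr) == 1.0);
    }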
diff --git a/std/special/compiler_rt/floattitf_test.zig b/std/special/compiler_rt/floattitf_test.zig
new file mode 100644
index 0000000000..da2ccc8b35
--- /dev/null
+++ b/std/special/compiler_rt/floattitf_test.zig
@@ -0,0 +1,96 @@
+const __floattitf = @import("floattitf.zig").__floattitf;
+const assert = @import("std").debug.assert;
+
+fn test__floattitf(a: i128, expected: f128) void {
+ const x = __floattitf(a);
+ assert(x == expected);
+}
+
+test "floattitf" {
+ test__floattitf(0, 0.0);
+
+ test__floattitf(1, 1.0);
+ test__floattitf(2, 2.0);
+ test__floattitf(20, 20.0);
+ test__floattitf(-1, -1.0);
+ test__floattitf(-2, -2.0);
+ test__floattitf(-20, -20.0);
+
+ test__floattitf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ test__floattitf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ test__floattitf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ test__floattitf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+
+ test__floattitf(make_ti(0x8000008000000000, 0), -0x1.FFFFFEp+126);
+ test__floattitf(make_ti(0x8000000000000800, 0), -0x1.FFFFFFFFFFFFEp+126);
+ test__floattitf(make_ti(0x8000010000000000, 0), -0x1.FFFFFCp+126);
+ test__floattitf(make_ti(0x8000000000001000, 0), -0x1.FFFFFFFFFFFFCp+126);
+
+ test__floattitf(make_ti(0x8000000000000000, 0), -0x1.000000p+127);
+ test__floattitf(make_ti(0x8000000000000001, 0), -0x1.FFFFFFFFFFFFFFFCp+126);
+
+ test__floattitf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floattitf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ test__floattitf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ test__floattitf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ test__floattitf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ test__floattitf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+
+ test__floattitf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ test__floattitf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ test__floattitf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ test__floattitf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ test__floattitf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ test__floattitf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ test__floattitf(0x023479FD0E092DA1, 0x1.1A3CFE870496D08p+57);
+ test__floattitf(0x023479FD0E092DB0, 0x1.1A3CFE870496D8p+57);
+ test__floattitf(0x023479FD0E092DB8, 0x1.1A3CFE870496DCp+57);
+ test__floattitf(0x023479FD0E092DB6, 0x1.1A3CFE870496DBp+57);
+ test__floattitf(0x023479FD0E092DBF, 0x1.1A3CFE870496DF8p+57);
+ test__floattitf(0x023479FD0E092DC1, 0x1.1A3CFE870496E08p+57);
+ test__floattitf(0x023479FD0E092DC7, 0x1.1A3CFE870496E38p+57);
+ test__floattitf(0x023479FD0E092DC8, 0x1.1A3CFE870496E4p+57);
+ test__floattitf(0x023479FD0E092DCF, 0x1.1A3CFE870496E78p+57);
+ test__floattitf(0x023479FD0E092DD0, 0x1.1A3CFE870496E8p+57);
+ test__floattitf(0x023479FD0E092DD1, 0x1.1A3CFE870496E88p+57);
+ test__floattitf(0x023479FD0E092DD8, 0x1.1A3CFE870496ECp+57);
+ test__floattitf(0x023479FD0E092DDF, 0x1.1A3CFE870496EF8p+57);
+ test__floattitf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+
+ test__floattitf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
+ test__floattitf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496D08p+121);
+ test__floattitf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496D8p+121);
+ test__floattitf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496DCp+121);
+ test__floattitf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496DBp+121);
+ test__floattitf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496DF8p+121);
+ test__floattitf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496E08p+121);
+ test__floattitf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496E38p+121);
+ test__floattitf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496E4p+121);
+ test__floattitf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496E78p+121);
+ test__floattitf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496E8p+121);
+ test__floattitf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496E88p+121);
+ test__floattitf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496ECp+121);
+ test__floattitf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496EF8p+121);
+ test__floattitf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
+
+ test__floattitf(make_ti(0, 0xFFFFFFFFFFFFFFFF), 0x1.FFFFFFFFFFFFFFFEp+63);
+
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC2801), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3000), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC37FF), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3800), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4000), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC47FF), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4800), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4801), 0x1.23456789ABCDEF0123456789ABC5p+124);
+ test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC57FF), 0x1.23456789ABCDEF0123456789ABC5p+124);
+}
+
+fn make_ti(high: u64, low: u64) i128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return @bitCast(i128, result);
+}
diff --git a/std/special/compiler_rt/floatunditf.zig b/std/special/compiler_rt/floatunditf.zig
new file mode 100644
index 0000000000..afc545448a
--- /dev/null
+++ b/std/special/compiler_rt/floatunditf.zig
@@ -0,0 +1,28 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+const std = @import("std");
+
+pub extern fn __floatunditf(a: u128) f128 {
+ @setRuntimeSafety(is_test);
+
+ if (a == 0) {
+ return 0;
+ }
+
+ const mantissa_bits = std.math.floatMantissaBits(f128);
+ const exponent_bits = std.math.floatExponentBits(f128);
+ const exponent_bias = (1 << (exponent_bits - 1)) - 1;
+ const implicit_bit = 1 << mantissa_bits;
+
+ const exp = (u128.bit_count - 1) - @clz(a);
+ const shift = mantissa_bits - @intCast(u7, exp);
+
+ var result: u128 align(16) = (a << shift) ^ implicit_bit;
+ result += (@intCast(u128, exp) + exponent_bias) << mantissa_bits;
+
+ return @bitCast(f128, result);
+}
+
+test "import floatunditf" {
+ _ = @import("floatunditf_test.zig");
+}
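
Unlike the ti conversions above, this one never rounds: any 64-bit integer
fits exactly in f128's 112-bit mantissa, so a single left shift normalizes
the value and the XOR clears the leading 1 that the format stores
implicitly. (The parameter is declared u128 rather than the nominal 64-bit
"di" width, presumably to sidestep calling-convention issues of the era.)
A hedged worked example for the value 5:

    test "floatunditf scheme on 5" {
        const assert = @import("std").debug.assert;
        const a: u128 = 5; // 0b101, so exp = 2
        const mantissa = (a << (112 - 2)) ^ (u128(1) << 112); // drop implicit bit
        const repr = mantissa + ((u128(2) + 16383) << 112);
        assert(@bitCast(f128, repr) == 5.0);
    }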
diff --git a/std/special/compiler_rt/floatunditf_test.zig b/std/special/compiler_rt/floatunditf_test.zig
new file mode 100644
index 0000000000..8533c75070
--- /dev/null
+++ b/std/special/compiler_rt/floatunditf_test.zig
@@ -0,0 +1,33 @@
+const __floatunditf = @import("floatunditf.zig").__floatunditf;
+const assert = @import("std").debug.assert;
+
+fn test__floatunditf(a: u128, expected_hi: u64, expected_lo: u64) void {
+ const x = __floatunditf(a);
+
+ const x_repr = @bitCast(u128, x);
+ const x_hi = @intCast(u64, x_repr >> 64);
+ const x_lo = @truncate(u64, x_repr);
+
+ if (x_hi == expected_hi and x_lo == expected_lo) {
+ return;
+ }
+ // nan repr
+ else if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
+ if ((x_hi & 0x7fff000000000000) == 0x7fff000000000000 and ((x_hi & 0xffffffffffff) > 0 or x_lo > 0)) {
+ return;
+ }
+ }
+
+ @panic("__floatunditf test failure");
+}
+
+test "floatunditf" {
+ test__floatunditf(0xffffffffffffffff, 0x403effffffffffff, 0xfffe000000000000);
+ test__floatunditf(0xfffffffffffffffe, 0x403effffffffffff, 0xfffc000000000000);
+ test__floatunditf(0x8000000000000000, 0x403e000000000000, 0x0);
+ test__floatunditf(0x7fffffffffffffff, 0x403dffffffffffff, 0xfffc000000000000);
+ test__floatunditf(0x123456789abcdef1, 0x403b23456789abcd, 0xef10000000000000);
+ test__floatunditf(0x2, 0x4000000000000000, 0x0);
+ test__floatunditf(0x1, 0x3fff000000000000, 0x0);
+ test__floatunditf(0x0, 0x0, 0x0);
+}
diff --git a/std/special/compiler_rt/floatunsitf.zig b/std/special/compiler_rt/floatunsitf.zig
new file mode 100644
index 0000000000..625f90a3d0
--- /dev/null
+++ b/std/special/compiler_rt/floatunsitf.zig
@@ -0,0 +1,29 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+const std = @import("std");
+
+pub extern fn __floatunsitf(a: u64) f128 {
+ @setRuntimeSafety(is_test);
+
+ if (a == 0) {
+ return 0;
+ }
+
+ const mantissa_bits = std.math.floatMantissaBits(f128);
+ const exponent_bits = std.math.floatExponentBits(f128);
+ const exponent_bias = (1 << (exponent_bits - 1)) - 1;
+ const implicit_bit = 1 << mantissa_bits;
+
+ const exp = (u64.bit_count - 1) - @clz(a);
+ const shift = mantissa_bits - @intCast(u7, exp);
+
+ // TODO: @bitCast alignment error
+ var result align(16) = (@intCast(u128, a) << shift) ^ implicit_bit;
+ result += (@intCast(u128, exp) + exponent_bias) << mantissa_bits;
+
+ return @bitCast(f128, result);
+}
+
+test "import floatunsitf" {
+ _ = @import("floatunsitf_test.zig");
+}
diff --git a/std/special/compiler_rt/floatunsitf_test.zig b/std/special/compiler_rt/floatunsitf_test.zig
new file mode 100644
index 0000000000..06f54cde03
--- /dev/null
+++ b/std/special/compiler_rt/floatunsitf_test.zig
@@ -0,0 +1,29 @@
+const __floatunsitf = @import("floatunsitf.zig").__floatunsitf;
+const assert = @import("std").debug.assert;
+
+fn test__floatunsitf(a: u64, expected_hi: u64, expected_lo: u64) void {
+ const x = __floatunsitf(a);
+
+ const x_repr = @bitCast(u128, x);
+ const x_hi = @intCast(u64, x_repr >> 64);
+ const x_lo = @truncate(u64, x_repr);
+
+ if (x_hi == expected_hi and x_lo == expected_lo) {
+ return;
+ }
+ // nan repr
+ else if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
+ if ((x_hi & 0x7fff000000000000) == 0x7fff000000000000 and ((x_hi & 0xffffffffffff) > 0 or x_lo > 0)) {
+ return;
+ }
+ }
+
+ @panic("__floatunsitf test failure");
+}
+
+test "floatunsitf" {
+ test__floatunsitf(0x7fffffff, 0x401dfffffffc0000, 0x0);
+ test__floatunsitf(0, 0x0, 0x0);
+ test__floatunsitf(0xffffffff, 0x401efffffffe0000, 0x0);
+ test__floatunsitf(0x12345678, 0x401b234567800000, 0x0);
+}
diff --git a/std/special/compiler_rt/floatuntidf.zig b/std/special/compiler_rt/floatuntidf.zig
new file mode 100644
index 0000000000..1101733825
--- /dev/null
+++ b/std/special/compiler_rt/floatuntidf.zig
@@ -0,0 +1,60 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+const DBL_MANT_DIG = 53;
+
+pub extern fn __floatuntidf(arg: u128) f64 {
+ @setRuntimeSafety(is_test);
+
+ if (arg == 0)
+ return 0.0;
+
+ var a = arg;
+ const N: u32 = @sizeOf(u128) * 8;
+ const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
+ var e: i32 = sd - 1; // exponent
+ if (sd > DBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit DBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit DBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ DBL_MANT_DIG + 1 => {
+ a <<= 1;
+ },
+ DBL_MANT_DIG + 2 => {},
+ else => {
+ const shift_amt = @bitCast(i32, N + (DBL_MANT_DIG + 2)) - sd;
+ const shift_amt_u7 = @intCast(u7, shift_amt);
+ a = (a >> @intCast(u7, sd - (DBL_MANT_DIG + 2))) |
+ @boolToInt((a & (u128(@maxValue(u128)) >> shift_amt_u7)) != 0);
+ },
+ }
+ // finish
+ a |= @boolToInt((a & 4) != 0); // Or P into R
+ a += 1; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
+ if ((a & (u128(1) << DBL_MANT_DIG)) != 0) {
+ a >>= 1;
+ e += 1;
+ }
+ // a is now rounded to DBL_MANT_DIG bits
+ } else {
+ a <<= @intCast(u7, DBL_MANT_DIG - sd);
+ // a is now rounded to DBL_MANT_DIG bits
+ }
+
+ const high: u64 = @bitCast(u32, (e + 1023) << 20) | // exponent
+ (@truncate(u32, a >> 32) & 0x000FFFFF); // mantissa-high
+ const low = @truncate(u32, a); // mantissa-low
+
+ return @bitCast(f64, low | (high << 32));
+}
+
+test "import floatuntidf" {
+ _ = @import("floatuntidf_test.zig");
+}
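
The branch structure here is driven entirely by sd, the number of
significant bits: sd <= 53 is exact and only needs a left shift; sd == 54
and sd == 55 position the guard and sticky bits with at most one shift;
anything wider takes the sticky-folding right shift in the else arm. A
hedged sketch of the classification (illustrative test only):

    test "significant-digit cases for f64" {
        const assert = @import("std").debug.assert;
        const a: u128 = 0xFFFFFFFFFFFFFF; // 14 hex Fs = 56 significant bits
        assert(128 - @clz(a) == 56); // > DBL_MANT_DIG + 2 == 55: sticky path
        const b: u128 = 0x1FFFFFFFFFFFFF; // 2^53 - 1 = 53 significant bits
        assert(128 - @clz(b) == 53); // <= DBL_MANT_DIG: exact left-shift path
    }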
diff --git a/std/special/compiler_rt/floatuntidf_test.zig b/std/special/compiler_rt/floatuntidf_test.zig
new file mode 100644
index 0000000000..e2c79378e2
--- /dev/null
+++ b/std/special/compiler_rt/floatuntidf_test.zig
@@ -0,0 +1,81 @@
+const __floatuntidf = @import("floatuntidf.zig").__floatuntidf;
+const assert = @import("std").debug.assert;
+
+fn test__floatuntidf(a: u128, expected: f64) void {
+ const x = __floatuntidf(a);
+ assert(x == expected);
+}
+
+test "floatuntidf" {
+ test__floatuntidf(0, 0.0);
+
+ test__floatuntidf(1, 1.0);
+ test__floatuntidf(2, 2.0);
+ test__floatuntidf(20, 20.0);
+
+ test__floatuntidf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ test__floatuntidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ test__floatuntidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ test__floatuntidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+
+ test__floatuntidf(make_ti(0x8000008000000000, 0), 0x1.000001p+127);
+ test__floatuntidf(make_ti(0x8000000000000800, 0), 0x1.0000000000001p+127);
+ test__floatuntidf(make_ti(0x8000010000000000, 0), 0x1.000002p+127);
+ test__floatuntidf(make_ti(0x8000000000001000, 0), 0x1.0000000000002p+127);
+
+ test__floatuntidf(make_ti(0x8000000000000000, 0), 0x1.000000p+127);
+ test__floatuntidf(make_ti(0x8000000000000001, 0), 0x1.0000000000000002p+127);
+
+ test__floatuntidf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floatuntidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ test__floatuntidf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ test__floatuntidf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ test__floatuntidf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ test__floatuntidf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+
+ test__floatuntidf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ test__floatuntidf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ test__floatuntidf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ test__floatuntidf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ test__floatuntidf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ test__floatuntidf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DA1, 0x1.1A3CFE870496Dp+57);
+ test__floatuntidf(0x023479FD0E092DB0, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DB8, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DB6, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DBF, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DC1, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DC7, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DC8, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DCF, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DD0, 0x1.1A3CFE870496Ep+57);
+ test__floatuntidf(0x023479FD0E092DD1, 0x1.1A3CFE870496Fp+57);
+ test__floatuntidf(0x023479FD0E092DD8, 0x1.1A3CFE870496Fp+57);
+ test__floatuntidf(0x023479FD0E092DDF, 0x1.1A3CFE870496Fp+57);
+ test__floatuntidf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+
+ test__floatuntidf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496Dp+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496Ep+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496Fp+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496Fp+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496Fp+121);
+ test__floatuntidf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
+}
+
+fn make_ti(high: u64, low: u64) u128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return result;
+}
diff --git a/std/special/compiler_rt/floatuntisf.zig b/std/special/compiler_rt/floatuntisf.zig
new file mode 100644
index 0000000000..f85c22578e
--- /dev/null
+++ b/std/special/compiler_rt/floatuntisf.zig
@@ -0,0 +1,59 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+const FLT_MANT_DIG = 24;
+
+pub extern fn __floatuntisf(arg: u128) f32 {
+ @setRuntimeSafety(is_test);
+
+ if (arg == 0)
+ return 0.0;
+
+ var a = arg;
+ const N: u32 = @sizeOf(u128) * 8;
+ const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
+ var e: i32 = sd - 1; // exponent
+ if (sd > FLT_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit FLT_MANT_DIG-1 bits to the right of 1
+ // Q = bit FLT_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ FLT_MANT_DIG + 1 => {
+ a <<= 1;
+ },
+ FLT_MANT_DIG + 2 => {},
+ else => {
+ const shift_amt = @bitCast(i32, N + (FLT_MANT_DIG + 2)) - sd;
+ const shift_amt_u7 = @intCast(u7, shift_amt);
+ a = (a >> @intCast(u7, sd - (FLT_MANT_DIG + 2))) |
+ @boolToInt((a & (u128(@maxValue(u128)) >> shift_amt_u7)) != 0);
+ },
+ }
+ // finish
+ a |= @boolToInt((a & 4) != 0); // Or P into R
+ a += 1; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
+ if ((a & (u128(1) << FLT_MANT_DIG)) != 0) {
+ a >>= 1;
+ e += 1;
+ }
+ // a is now rounded to FLT_MANT_DIG bits
+ } else {
+ a <<= @intCast(u7, FLT_MANT_DIG - sd);
+ // a is now rounded to FLT_MANT_DIG bits
+ }
+
+ const high = @bitCast(u32, (e + 127) << 23); // exponent
+ const low = @truncate(u32, a) & 0x007fffff; // mantissa
+
+ return @bitCast(f32, high | low);
+}
+
+test "import floatuntisf" {
+ _ = @import("floatuntisf_test.zig");
+}
diff --git a/std/special/compiler_rt/floatuntisf_test.zig b/std/special/compiler_rt/floatuntisf_test.zig
new file mode 100644
index 0000000000..7f84c1f963
--- /dev/null
+++ b/std/special/compiler_rt/floatuntisf_test.zig
@@ -0,0 +1,72 @@
+const __floatuntisf = @import("floatuntisf.zig").__floatuntisf;
+const assert = @import("std").debug.assert;
+
+fn test__floatuntisf(a: u128, expected: f32) void {
+ const x = __floatuntisf(a);
+ assert(x == expected);
+}
+
+test "floatuntisf" {
+ test__floatuntisf(0, 0.0);
+
+ test__floatuntisf(1, 1.0);
+ test__floatuntisf(2, 2.0);
+ test__floatuntisf(20, 20.0);
+
+ test__floatuntisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ test__floatuntisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+
+ test__floatuntisf(make_ti(0x8000008000000000, 0), 0x1.000001p+127);
+ test__floatuntisf(make_ti(0x8000000000000800, 0), 0x1.0p+127);
+ test__floatuntisf(make_ti(0x8000010000000000, 0), 0x1.000002p+127);
+
+ test__floatuntisf(make_ti(0x8000000000000000, 0), 0x1.000000p+127);
+
+ test__floatuntisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floatuntisf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ test__floatuntisf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+
+ test__floatuntisf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+
+ test__floatuntisf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ test__floatuntisf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ test__floatuntisf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ test__floatuntisf(0xFFFFFFFFFFFFFFFE, 0x1p+64);
+ test__floatuntisf(0xFFFFFFFFFFFFFFFF, 0x1p+64);
+
+ test__floatuntisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floatuntisf(0x0007FB72EA000000, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72EB000000, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72EBFFFFFF, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72EC000000, 0x1.FEDCBCp+50);
+ test__floatuntisf(0x0007FB72E8000001, 0x1.FEDCBAp+50);
+
+ test__floatuntisf(0x0007FB72E6000000, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72E7000000, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72E7FFFFFF, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72E4000001, 0x1.FEDCBAp+50);
+ test__floatuntisf(0x0007FB72E4000000, 0x1.FEDCB8p+50);
+
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCB90000000000001), 0x1.FEDCBAp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBA0000000000000), 0x1.FEDCBAp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBAFFFFFFFFFFFFF), 0x1.FEDCBAp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBB0000000000000), 0x1.FEDCBCp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBB0000000000001), 0x1.FEDCBCp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBBFFFFFFFFFFFFF), 0x1.FEDCBCp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBC0000000000000), 0x1.FEDCBCp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBC0000000000001), 0x1.FEDCBCp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBD0000000000000), 0x1.FEDCBCp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBD0000000000001), 0x1.FEDCBEp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBDFFFFFFFFFFFFF), 0x1.FEDCBEp+76);
+ test__floatuntisf(make_ti(0x0000000000001FED, 0xCBE0000000000000), 0x1.FEDCBEp+76);
+}
+
+fn make_ti(high: u64, low: u64) u128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return result;
+}
diff --git a/std/special/compiler_rt/floatuntitf.zig b/std/special/compiler_rt/floatuntitf.zig
new file mode 100644
index 0000000000..6354c89287
--- /dev/null
+++ b/std/special/compiler_rt/floatuntitf.zig
@@ -0,0 +1,60 @@
+const builtin = @import("builtin");
+const is_test = builtin.is_test;
+
+const LDBL_MANT_DIG = 113;
+
+pub extern fn __floatuntitf(arg: u128) f128 {
+ @setRuntimeSafety(is_test);
+
+ if (arg == 0)
+ return 0.0;
+
+ var a = arg;
+ const N: u32 = @sizeOf(u128) * 8;
+ const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
+ var e: i32 = sd - 1; // exponent
+ if (sd > LDBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit LDBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit LDBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ LDBL_MANT_DIG + 1 => {
+ a <<= 1;
+ },
+ LDBL_MANT_DIG + 2 => {},
+ else => {
+ const shift_amt = @bitCast(i32, N + (LDBL_MANT_DIG + 2)) - sd;
+ const shift_amt_u7 = @intCast(u7, shift_amt);
+ a = (a >> @intCast(u7, sd - (LDBL_MANT_DIG + 2))) |
+ @boolToInt((a & (u128(@maxValue(u128)) >> shift_amt_u7)) != 0);
+ },
+ }
+ // finish
+ a |= @boolToInt((a & 4) != 0); // Or P into R
+ a += 1; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
+ if ((a & (u128(1) << LDBL_MANT_DIG)) != 0) {
+ a >>= 1;
+ e += 1;
+ }
+ // a is now rounded to LDBL_MANT_DIG bits
+ } else {
+ a <<= @intCast(u7, LDBL_MANT_DIG - sd);
+ // a is now rounded to LDBL_MANT_DIG bits
+ }
+
+ const high: u128 = (@intCast(u64, (e + 16383)) << 48) | // exponent
+ (@truncate(u64, a >> 64) & 0x0000ffffffffffff); // mantissa-high
+ const low = @truncate(u64, a); // mantissa-low
+
+ return @bitCast(f128, low | (high << 64));
+}
+
+test "import floatuntitf" {
+ _ = @import("floatuntitf_test.zig");
+}
diff --git a/std/special/compiler_rt/floatuntitf_test.zig b/std/special/compiler_rt/floatuntitf_test.zig
new file mode 100644
index 0000000000..8e67fee108
--- /dev/null
+++ b/std/special/compiler_rt/floatuntitf_test.zig
@@ -0,0 +1,99 @@
+const __floatuntitf = @import("floatuntitf.zig").__floatuntitf;
+const assert = @import("std").debug.assert;
+
+fn test__floatuntitf(a: u128, expected: f128) void {
+ const x = __floatuntitf(a);
+ assert(x == expected);
+}
+
+test "floatuntitf" {
+ test__floatuntitf(0, 0.0);
+
+ test__floatuntitf(1, 1.0);
+ test__floatuntitf(2, 2.0);
+ test__floatuntitf(20, 20.0);
+
+ test__floatuntitf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
+ test__floatuntitf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
+ test__floatuntitf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
+ test__floatuntitf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
+ test__floatuntitf(0x7FFFFFFFFFFFFFFF, 0xF.FFFFFFFFFFFFFFEp+59);
+ test__floatuntitf(0xFFFFFFFFFFFFFFFE, 0xF.FFFFFFFFFFFFFFEp+60);
+ test__floatuntitf(0xFFFFFFFFFFFFFFFF, 0xF.FFFFFFFFFFFFFFFp+60);
+
+ test__floatuntitf(0x8000008000000000, 0x8.000008p+60);
+ test__floatuntitf(0x8000000000000800, 0x8.0000000000008p+60);
+ test__floatuntitf(0x8000010000000000, 0x8.00001p+60);
+ test__floatuntitf(0x8000000000001000, 0x8.000000000001p+60);
+
+ test__floatuntitf(0x8000000000000000, 0x8p+60);
+ test__floatuntitf(0x8000000000000001, 0x8.000000000000001p+60);
+
+ test__floatuntitf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
+
+ test__floatuntitf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
+ test__floatuntitf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
+ test__floatuntitf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
+ test__floatuntitf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
+ test__floatuntitf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
+
+ test__floatuntitf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
+ test__floatuntitf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
+ test__floatuntitf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
+ test__floatuntitf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
+ test__floatuntitf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
+
+ test__floatuntitf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
+ test__floatuntitf(0x023479FD0E092DA1, 0x1.1A3CFE870496D08p+57);
+ test__floatuntitf(0x023479FD0E092DB0, 0x1.1A3CFE870496D8p+57);
+ test__floatuntitf(0x023479FD0E092DB8, 0x1.1A3CFE870496DCp+57);
+ test__floatuntitf(0x023479FD0E092DB6, 0x1.1A3CFE870496DBp+57);
+ test__floatuntitf(0x023479FD0E092DBF, 0x1.1A3CFE870496DF8p+57);
+ test__floatuntitf(0x023479FD0E092DC1, 0x1.1A3CFE870496E08p+57);
+ test__floatuntitf(0x023479FD0E092DC7, 0x1.1A3CFE870496E38p+57);
+ test__floatuntitf(0x023479FD0E092DC8, 0x1.1A3CFE870496E4p+57);
+ test__floatuntitf(0x023479FD0E092DCF, 0x1.1A3CFE870496E78p+57);
+ test__floatuntitf(0x023479FD0E092DD0, 0x1.1A3CFE870496E8p+57);
+ test__floatuntitf(0x023479FD0E092DD1, 0x1.1A3CFE870496E88p+57);
+ test__floatuntitf(0x023479FD0E092DD8, 0x1.1A3CFE870496ECp+57);
+ test__floatuntitf(0x023479FD0E092DDF, 0x1.1A3CFE870496EF8p+57);
+ test__floatuntitf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
+
+ test__floatuntitf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496D08p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496D8p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496DCp+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496DBp+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496DF8p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496E08p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496E38p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496E4p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496E78p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496E8p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496E88p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496ECp+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496EF8p+121);
+ test__floatuntitf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
+
+ test__floatuntitf(make_ti(0, 0xFFFFFFFFFFFFFFFF), 0x1.FFFFFFFFFFFFFFFEp+63);
+
+ test__floatuntitf(make_ti(0xFFFFFFFFFFFFFFFF, 0x0000000000000000), 0x1.FFFFFFFFFFFFFFFEp+127);
+ test__floatuntitf(make_ti(0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF), 0x1.0000000000000000p+128);
+
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC2801), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3000), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC37FF), 0x1.23456789ABCDEF0123456789ABC3p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3800), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4000), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC47FF), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4800), 0x1.23456789ABCDEF0123456789ABC4p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4801), 0x1.23456789ABCDEF0123456789ABC5p+124);
+ test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC57FF), 0x1.23456789ABCDEF0123456789ABC5p+124);
+}
+
+fn make_ti(high: u64, low: u64) u128 {
+ var result: u128 = high;
+ result <<= 64;
+ result |= low;
+ return result;
+}
diff --git a/std/special/compiler_rt/index.zig b/std/special/compiler_rt/index.zig
index dc95aa23f2..54a461d0f1 100644
--- a/std/special/compiler_rt/index.zig
+++ b/std/special/compiler_rt/index.zig
@@ -15,10 +15,31 @@ comptime {
@export("__lttf2", @import("comparetf2.zig").__letf2, linkage);
@export("__netf2", @import("comparetf2.zig").__letf2, linkage);
@export("__gttf2", @import("comparetf2.zig").__getf2, linkage);
+ @export("__gnu_h2f_ieee", @import("extendXfYf2.zig").__extendhfsf2, linkage);
+ @export("__gnu_f2h_ieee", @import("truncXfYf2.zig").__truncsfhf2, linkage);
}
@export("__unordtf2", @import("comparetf2.zig").__unordtf2, linkage);
+ @export("__floattitf", @import("floattitf.zig").__floattitf, linkage);
+ @export("__floattidf", @import("floattidf.zig").__floattidf, linkage);
+ @export("__floattisf", @import("floattisf.zig").__floattisf, linkage);
+
+ @export("__floatunditf", @import("floatunditf.zig").__floatunditf, linkage);
+ @export("__floatunsitf", @import("floatunsitf.zig").__floatunsitf, linkage);
+
+ @export("__floatuntitf", @import("floatuntitf.zig").__floatuntitf, linkage);
+ @export("__floatuntidf", @import("floatuntidf.zig").__floatuntidf, linkage);
+ @export("__floatuntisf", @import("floatuntisf.zig").__floatuntisf, linkage);
+
+ @export("__extenddftf2", @import("extendXfYf2.zig").__extenddftf2, linkage);
+ @export("__extendsftf2", @import("extendXfYf2.zig").__extendsftf2, linkage);
+ @export("__extendhfsf2", @import("extendXfYf2.zig").__extendhfsf2, linkage);
+
+ @export("__truncsfhf2", @import("truncXfYf2.zig").__truncsfhf2, linkage);
+ @export("__trunctfdf2", @import("truncXfYf2.zig").__trunctfdf2, linkage);
+ @export("__trunctfsf2", @import("truncXfYf2.zig").__trunctfsf2, linkage);
+
@export("__fixunssfsi", @import("fixunssfsi.zig").__fixunssfsi, linkage);
@export("__fixunssfdi", @import("fixunssfdi.zig").__fixunssfdi, linkage);
@export("__fixunssfti", @import("fixunssfti.zig").__fixunssfti, linkage);
diff --git a/std/special/compiler_rt/truncXfYf2.zig b/std/special/compiler_rt/truncXfYf2.zig
new file mode 100644
index 0000000000..5cb2f61568
--- /dev/null
+++ b/std/special/compiler_rt/truncXfYf2.zig
@@ -0,0 +1,117 @@
+const std = @import("std");
+
+pub extern fn __truncsfhf2(a: f32) u16 {
+ return @bitCast(u16, truncXfYf2(f16, f32, a));
+}
+
+pub extern fn __trunctfsf2(a: f128) f32 {
+ return truncXfYf2(f32, f128, a);
+}
+
+pub extern fn __trunctfdf2(a: f128) f64 {
+ return truncXfYf2(f64, f128, a);
+}
+
+inline fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
+ const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits);
+ const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits);
+ const srcSigBits = std.math.floatMantissaBits(src_t);
+ const dstSigBits = std.math.floatMantissaBits(dst_t);
+ const SrcShift = std.math.Log2Int(src_rep_t);
+ const DstShift = std.math.Log2Int(dst_rep_t);
+
+ // Various constants whose values follow from the type parameters.
+ // Any reasonable optimizer will fold and propagate all of these.
+ const srcBits = src_t.bit_count;
+ const srcExpBits = srcBits - srcSigBits - 1;
+ const srcInfExp = (1 << srcExpBits) - 1;
+ const srcExpBias = srcInfExp >> 1;
+
+ const srcMinNormal = 1 << srcSigBits;
+ const srcSignificandMask = srcMinNormal - 1;
+ const srcInfinity = srcInfExp << srcSigBits;
+ const srcSignMask = 1 << (srcSigBits + srcExpBits);
+ const srcAbsMask = srcSignMask - 1;
+ const roundMask = (1 << (srcSigBits - dstSigBits)) - 1;
+ const halfway = 1 << (srcSigBits - dstSigBits - 1);
+ const srcQNaN = 1 << (srcSigBits - 1);
+ const srcNaNCode = srcQNaN - 1;
+
+ const dstBits = dst_t.bit_count;
+ const dstExpBits = dstBits - dstSigBits - 1;
+ const dstInfExp = (1 << dstExpBits) - 1;
+ const dstExpBias = dstInfExp >> 1;
+
+ const underflowExponent = srcExpBias + 1 - dstExpBias;
+ const overflowExponent = srcExpBias + dstInfExp - dstExpBias;
+ const underflow = underflowExponent << srcSigBits;
+ const overflow = overflowExponent << srcSigBits;
+
+ const dstQNaN = 1 << (dstSigBits - 1);
+ const dstNaNCode = dstQNaN - 1;
+
+ // Break a into a sign and representation of the absolute value
+ const aRep: src_rep_t = @bitCast(src_rep_t, a);
+ const aAbs: src_rep_t = aRep & srcAbsMask;
+ const sign: src_rep_t = aRep & srcSignMask;
+ var absResult: dst_rep_t = undefined;
+
+ if (aAbs -% underflow < aAbs -% overflow) {
+ // The exponent of a is within the range of normal numbers in the
+ // destination format. We can convert by simply right-shifting with
+ // rounding and adjusting the exponent.
+ absResult = @truncate(dst_rep_t, aAbs >> (srcSigBits - dstSigBits));
+ absResult -%= dst_rep_t(srcExpBias - dstExpBias) << dstSigBits;
+
+ const roundBits: src_rep_t = aAbs & roundMask;
+ if (roundBits > halfway) {
+ // Round to nearest
+ absResult += 1;
+ } else if (roundBits == halfway) {
+ // Ties to even
+ absResult += absResult & 1;
+ }
+ } else if (aAbs > srcInfinity) {
+ // a is NaN.
+ // Conjure the result by beginning with infinity, setting the qNaN
+ // bit and inserting the (truncated) trailing NaN field.
+ absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits;
+ absResult |= dstQNaN;
+ absResult |= @intCast(dst_rep_t, ((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode);
+ } else if (aAbs >= overflow) {
+ // a overflows to infinity.
+ absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits;
+ } else {
+ // a underflows on conversion to the destination type or is an exact
+ // zero. The result may be a denormal or zero. Extract the exponent
+ // to get the shift amount for the denormalization.
+ const aExp = @intCast(u32, aAbs >> srcSigBits);
+ const shift = @intCast(u32, srcExpBias - dstExpBias - aExp + 1);
+
+ const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal;
+
+ // Right shift by the denormalization amount with sticky.
+ if (shift > srcSigBits) {
+ absResult = 0;
+ } else {
+ const sticky: src_rep_t = significand << @intCast(SrcShift, srcBits - shift);
+ const denormalizedSignificand: src_rep_t = significand >> @intCast(SrcShift, shift) | sticky;
+ absResult = @intCast(dst_rep_t, denormalizedSignificand >> (srcSigBits - dstSigBits));
+ const roundBits: src_rep_t = denormalizedSignificand & roundMask;
+ if (roundBits > halfway) {
+ // Round to nearest
+ absResult += 1;
+ } else if (roundBits == halfway) {
+ // Ties to even
+ absResult += absResult & 1;
+ }
+ }
+ }
+
+ const result: dst_rep_t align(@alignOf(dst_t)) = absResult | @truncate(dst_rep_t, sign >> @intCast(SrcShift, srcBits - dstBits));
+ return @bitCast(dst_t, result);
+}
+
+test "import truncXfYf2" {
+ _ = @import("truncXfYf2_test.zig");
+}
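
The subtle part of the generic implementation is the range check
aAbs -% underflow < aAbs -% overflow: wrapping subtraction rotates the
representation space so that a single unsigned comparison tests membership
in [underflow, overflow), i.e. "the result is a normal number"; NaN, inf,
overflow, and underflow all fall through to the later branches. A hedged
sketch with the f32 -> f16 constants (values follow from the bias/exponent
arithmetic above):

    test "wrapping range check, f32 -> f16" {
        const assert = @import("std").debug.assert;
        // underflow = (127 + 1 - 15) << 23, overflow = (127 + 31 - 15) << 23
        const underflow: u32 = 0x38800000;
        const overflow: u32 = 0x47800000;
        const one: u32 = 0x3F800000; // 1.0 lands in the normal branch
        assert(one -% underflow < one -% overflow);
        const tiny: u32 = 0x33800000; // 2^-24 is denormal in f16
        assert(!(tiny -% underflow < tiny -% overflow));
    }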
diff --git a/std/special/compiler_rt/truncXfYf2_test.zig b/std/special/compiler_rt/truncXfYf2_test.zig
new file mode 100644
index 0000000000..c4bf2db733
--- /dev/null
+++ b/std/special/compiler_rt/truncXfYf2_test.zig
@@ -0,0 +1,134 @@
+const __truncsfhf2 = @import("truncXfYf2.zig").__truncsfhf2;
+
+fn test__truncsfhf2(a: u32, expected: u16) void {
+ const actual = __truncsfhf2(@bitCast(f32, a));
+
+ if (actual == expected) {
+ return;
+ }
+
+ @panic("__truncsfhf2 test failure");
+}
+
+test "truncsfhf2" {
+ test__truncsfhf2(0x7fc00000, 0x7e00); // qNaN
+ test__truncsfhf2(0x7fe00000, 0x7f00); // qNaN with payload (quiet bit set)
+
+ test__truncsfhf2(0, 0); // 0
+ test__truncsfhf2(0x80000000, 0x8000); // -0
+
+ test__truncsfhf2(0x7f800000, 0x7c00); // inf
+ test__truncsfhf2(0xff800000, 0xfc00); // -inf
+
+ test__truncsfhf2(0x477ff000, 0x7c00); // 65520 -> inf
+ test__truncsfhf2(0xc77ff000, 0xfc00); // -65520 -> -inf
+
+ test__truncsfhf2(0x71cc3892, 0x7c00); // 0x1.987124876876324p+100 -> inf
+ test__truncsfhf2(0xf1cc3892, 0xfc00); // -0x1.987124876876324p+100 -> -inf
+
+ test__truncsfhf2(0x38800000, 0x0400); // normal (min), 2**-14
+ test__truncsfhf2(0xb8800000, 0x8400); // normal (min), -2**-14
+
+ test__truncsfhf2(0x477fe000, 0x7bff); // normal (max), 65504
+ test__truncsfhf2(0xc77fe000, 0xfbff); // normal (max), -65504
+
+ test__truncsfhf2(0x477fe100, 0x7bff); // normal, 65505 -> 65504
+ test__truncsfhf2(0xc77fe100, 0xfbff); // normal, -65505 -> -65504
+
+ test__truncsfhf2(0x477fef00, 0x7bff); // normal, 65519 -> 65504
+ test__truncsfhf2(0xc77fef00, 0xfbff); // normal, -65519 -> -65504
+
+ test__truncsfhf2(0x3f802000, 0x3c01); // normal, 1 + 2**-10
+ test__truncsfhf2(0xbf802000, 0xbc01); // normal, -1 - 2**-10
+
+ test__truncsfhf2(0x3eaaa000, 0x3555); // normal, approx. 1/3
+ test__truncsfhf2(0xbeaaa000, 0xb555); // normal, approx. -1/3
+
+ test__truncsfhf2(0x40490fdb, 0x4248); // normal, 3.1415926535
+ test__truncsfhf2(0xc0490fdb, 0xc248); // normal, -3.1415926535
+
+ test__truncsfhf2(0x45cc3892, 0x6e62); // normal, 0x1.987124876876324p+12
+
+ test__truncsfhf2(0x3f800000, 0x3c00); // normal, 1
+ test__truncsfhf2(0x38800000, 0x0400); // normal, 0x1.0p-14
+
+ test__truncsfhf2(0x33800000, 0x0001); // denormal (min), 2**-24
+ test__truncsfhf2(0xb3800000, 0x8001); // denormal (min), -2**-24
+
+ test__truncsfhf2(0x387fc000, 0x03ff); // denormal (max), 2**-14 - 2**-24
+ test__truncsfhf2(0xb87fc000, 0x83ff); // denormal (max), -2**-14 + 2**-24
+
+ test__truncsfhf2(0x35800000, 0x0010); // denormal, 0x1.0p-20
+ test__truncsfhf2(0x33280000, 0x0001); // denormal, 0x1.5p-25 -> 0x1.0p-24
+ test__truncsfhf2(0x33000000, 0x0000); // 0x1.0p-25 -> zero
+}
+
+const __trunctfsf2 = @import("truncXfYf2.zig").__trunctfsf2;
+
+fn test__trunctfsf2(a: f128, expected: u32) void {
+ const x = __trunctfsf2(a);
+
+ const rep = @bitCast(u32, x);
+ if (rep == expected) {
+ return;
+ }
+ // test other possible NaN representation (signaling NaN)
+ else if (expected == 0x7fc00000) {
+ if ((rep & 0x7f800000) == 0x7f800000 and (rep & 0x7fffff) > 0) {
+ return;
+ }
+ }
+
+ @panic("__trunctfsf2 test failure");
+}
+
+test "trunctfsf2" {
+ // qnan
+ test__trunctfsf2(@bitCast(f128, u128(0x7fff800000000000 << 64)), 0x7fc00000);
+ // nan
+ test__trunctfsf2(@bitCast(f128, u128((0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7fc08000);
+ // inf
+ test__trunctfsf2(@bitCast(f128, u128(0x7fff000000000000 << 64)), 0x7f800000);
+ // zero
+ test__trunctfsf2(0.0, 0x0);
+
+ test__trunctfsf2(0x1.23a2abb4a2ddee355f36789abcdep+5, 0x4211d156);
+ test__trunctfsf2(0x1.e3d3c45bd3abfd98b76a54cc321fp-9, 0x3b71e9e2);
+ test__trunctfsf2(0x1.234eebb5faa678f4488693abcdefp+4534, 0x7f800000);
+ test__trunctfsf2(0x1.edcba9bb8c76a5a43dd21f334634p-435, 0x0);
+}
+
+const __trunctfdf2 = @import("truncXfYf2.zig").__trunctfdf2;
+
+fn test__trunctfdf2(a: f128, expected: u64) void {
+ const x = __trunctfdf2(a);
+
+ const rep = @bitCast(u64, x);
+ if (rep == expected) {
+ return;
+ }
+ // test other possible NaN representation (signaling NaN)
+ else if (expected == 0x7ff8000000000000) {
+ if ((rep & 0x7ff0000000000000) == 0x7ff0000000000000 and (rep & 0xfffffffffffff) > 0) {
+ return;
+ }
+ }
+
+ @panic("__trunctfsf2 test failure");
+}
+
+test "trunctfdf2" {
+ // qnan
+ test__trunctfdf2(@bitCast(f128, u128(0x7fff800000000000 << 64)), 0x7ff8000000000000);
+ // nan
+ test__trunctfdf2(@bitCast(f128, u128((0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7ff8100000000000);
+ // inf
+ test__trunctfdf2(@bitCast(f128, u128(0x7fff000000000000 << 64)), 0x7ff0000000000000);
+ // zero
+ test__trunctfdf2(0.0, 0x0);
+
+ test__trunctfdf2(0x1.af23456789bbaaab347645365cdep+5, 0x404af23456789bbb);
+ test__trunctfdf2(0x1.dedafcff354b6ae9758763545432p-9, 0x3f6dedafcff354b7);
+ test__trunctfdf2(0x1.2f34dd5f437e849b4baab754cdefp+4534, 0x7ff0000000000000);
+ test__trunctfdf2(0x1.edcbff8ad76ab5bf46463233214fp-435, 0x24cedcbff8ad76ab);
+}
diff --git a/std/zig/ast.zig b/std/zig/ast.zig
index 4246a50861..63518c5182 100644
--- a/std/zig/ast.zig
+++ b/std/zig/ast.zig
@@ -858,6 +858,7 @@ pub const Node = struct {
pub fn firstToken(self: *FnProto) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
+ if (self.async_attr) |async_attr| return async_attr.firstToken();
if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token;
assert(self.lib_name == null);
if (self.cc_token) |cc_token| return cc_token;
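
The new async_attr check covers declarations such as the following, where
the async attribute precedes both the extern/export/inline position and the
fn keyword, so firstToken must report the async token rather than fn:

    // firstToken should return the index of `async` here:
    async fn f() void {}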
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 877b81c527..9f0371d4da 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -17,7 +17,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
defer stack.deinit();
const arena = &tree_arena.allocator;
- const root_node = try arena.construct(ast.Node.Root{
+ const root_node = try arena.create(ast.Node.Root{
.base = ast.Node{ .id = ast.Node.Id.Root },
.decls = ast.Node.Root.DeclList.init(arena),
.doc_comments = null,
@@ -65,14 +65,14 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
Token.Id.Keyword_test => {
stack.append(State.TopLevel) catch unreachable;
- const block = try arena.construct(ast.Node.Block{
+ const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = undefined,
.statements = ast.Node.Block.StatementList.init(arena),
.rbrace = undefined,
});
- const test_node = try arena.construct(ast.Node.TestDecl{
+ const test_node = try arena.create(ast.Node.TestDecl{
.base = ast.Node{ .id = ast.Node.Id.TestDecl },
.doc_comments = comments,
.test_token = token_index,
@@ -109,14 +109,14 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_comptime => {
- const block = try arena.construct(ast.Node.Block{
+ const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = undefined,
.statements = ast.Node.Block.StatementList.init(arena),
.rbrace = undefined,
});
- const node = try arena.construct(ast.Node.Comptime{
+ const node = try arena.create(ast.Node.Comptime{
.base = ast.Node{ .id = ast.Node.Id.Comptime },
.comptime_token = token_index,
.expr = &block.base,
@@ -225,7 +225,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
return tree;
}
- const node = try arena.construct(ast.Node.Use{
+ const node = try arena.create(ast.Node.Use{
.base = ast.Node{ .id = ast.Node.Id.Use },
.use_token = token_index,
.visib_token = ctx.visib_token,
@@ -266,7 +266,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_fn, Token.Id.Keyword_nakedcc, Token.Id.Keyword_stdcallcc, Token.Id.Keyword_async => {
- const fn_proto = try arena.construct(ast.Node.FnProto{
+ const fn_proto = try arena.create(ast.Node.FnProto{
.base = ast.Node{ .id = ast.Node.Id.FnProto },
.doc_comments = ctx.comments,
.visib_token = ctx.visib_token,
@@ -298,7 +298,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_async => {
- const async_node = try arena.construct(ast.Node.AsyncAttribute{
+ const async_node = try arena.create(ast.Node.AsyncAttribute{
.base = ast.Node{ .id = ast.Node.Id.AsyncAttribute },
.async_token = token_index,
.allocator_type = null,
@@ -330,7 +330,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.TopLevelExternOrField => |ctx| {
if (eatToken(&tok_it, &tree, Token.Id.Identifier)) |identifier| {
- const node = try arena.construct(ast.Node.StructField{
+ const node = try arena.create(ast.Node.StructField{
.base = ast.Node{ .id = ast.Node.Id.StructField },
.doc_comments = ctx.comments,
.visib_token = ctx.visib_token,
@@ -375,7 +375,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
const token_ptr = token.ptr;
- const node = try arena.construct(ast.Node.ContainerDecl{
+ const node = try arena.create(ast.Node.ContainerDecl{
.base = ast.Node{ .id = ast.Node.Id.ContainerDecl },
.layout_token = ctx.layout_token,
.kind_token = switch (token_ptr.id) {
@@ -448,7 +448,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
Token.Id.Identifier => {
switch (tree.tokens.at(container_decl.kind_token).id) {
Token.Id.Keyword_struct => {
- const node = try arena.construct(ast.Node.StructField{
+ const node = try arena.create(ast.Node.StructField{
.base = ast.Node{ .id = ast.Node.Id.StructField },
.doc_comments = comments,
.visib_token = null,
@@ -464,7 +464,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_union => {
- const node = try arena.construct(ast.Node.UnionTag{
+ const node = try arena.create(ast.Node.UnionTag{
.base = ast.Node{ .id = ast.Node.Id.UnionTag },
.name_token = token_index,
.type_expr = null,
@@ -480,7 +480,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_enum => {
- const node = try arena.construct(ast.Node.EnumTag{
+ const node = try arena.create(ast.Node.EnumTag{
.base = ast.Node{ .id = ast.Node.Id.EnumTag },
.name_token = token_index,
.value = null,
@@ -562,7 +562,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.VarDecl => |ctx| {
- const var_decl = try arena.construct(ast.Node.VarDecl{
+ const var_decl = try arena.create(ast.Node.VarDecl{
.base = ast.Node{ .id = ast.Node.Id.VarDecl },
.doc_comments = ctx.comments,
.visib_token = ctx.visib_token,
@@ -660,7 +660,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_ptr = token.ptr;
switch (token_ptr.id) {
Token.Id.LBrace => {
- const block = try arena.construct(ast.Node.Block{
+ const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = token_index,
@@ -712,7 +712,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
// TODO: this is a special case. Remove this when #760 is fixed
if (token_ptr.id == Token.Id.Keyword_error) {
if (tok_it.peek().?.id == Token.Id.LBrace) {
- const error_type_node = try arena.construct(ast.Node.ErrorType{
+ const error_type_node = try arena.create(ast.Node.ErrorType{
.base = ast.Node{ .id = ast.Node.Id.ErrorType },
.token = token_index,
});
@@ -733,7 +733,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
if (eatToken(&tok_it, &tree, Token.Id.RParen)) |_| {
continue;
}
- const param_decl = try arena.construct(ast.Node.ParamDecl{
+ const param_decl = try arena.create(ast.Node.ParamDecl{
.base = ast.Node{ .id = ast.Node.Id.ParamDecl },
.comptime_token = null,
.noalias_token = null,
@@ -819,7 +819,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_ptr = token.ptr;
switch (token_ptr.id) {
Token.Id.LBrace => {
- const block = try arena.construct(ast.Node.Block{
+ const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = ctx.label,
.lbrace = token_index,
@@ -853,7 +853,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_suspend => {
- const node = try arena.construct(ast.Node.Suspend{
+ const node = try arena.create(ast.Node.Suspend{
.base = ast.Node{ .id = ast.Node.Id.Suspend },
.label = ctx.label,
.suspend_token = token_index,
@@ -925,7 +925,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
}
},
State.While => |ctx| {
- const node = try arena.construct(ast.Node.While{
+ const node = try arena.create(ast.Node.While{
.base = ast.Node{ .id = ast.Node.Id.While },
.label = ctx.label,
.inline_token = ctx.inline_token,
@@ -954,7 +954,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
State.For => |ctx| {
- const node = try arena.construct(ast.Node.For{
+ const node = try arena.create(ast.Node.For{
.base = ast.Node{ .id = ast.Node.Id.For },
.label = ctx.label,
.inline_token = ctx.inline_token,
@@ -975,7 +975,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.Else => |dest| {
if (eatToken(&tok_it, &tree, Token.Id.Keyword_else)) |else_token| {
- const node = try arena.construct(ast.Node.Else{
+ const node = try arena.create(ast.Node.Else{
.base = ast.Node{ .id = ast.Node.Id.Else },
.else_token = else_token,
.payload = null,
@@ -1038,7 +1038,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_defer, Token.Id.Keyword_errdefer => {
- const node = try arena.construct(ast.Node.Defer{
+ const node = try arena.create(ast.Node.Defer{
.base = ast.Node{ .id = ast.Node.Id.Defer },
.defer_token = token_index,
.kind = switch (token_ptr.id) {
@@ -1056,7 +1056,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.LBrace => {
- const inner_block = try arena.construct(ast.Node.Block{
+ const inner_block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = token_index,
@@ -1124,7 +1124,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.AsmOutput{
+ const node = try arena.create(ast.Node.AsmOutput{
.base = ast.Node{ .id = ast.Node.Id.AsmOutput },
.lbracket = lbracket_index,
.symbolic_name = undefined,
@@ -1178,7 +1178,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.AsmInput{
+ const node = try arena.create(ast.Node.AsmInput{
.base = ast.Node{ .id = ast.Node.Id.AsmInput },
.lbracket = lbracket_index,
.symbolic_name = undefined,
@@ -1243,7 +1243,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.FieldInitializer{
+ const node = try arena.create(ast.Node.FieldInitializer{
.base = ast.Node{ .id = ast.Node.Id.FieldInitializer },
.period_token = undefined,
.name_token = undefined,
@@ -1332,7 +1332,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
}
const comments = try eatDocComments(arena, &tok_it, &tree);
- const node = try arena.construct(ast.Node.SwitchCase{
+ const node = try arena.create(ast.Node.SwitchCase{
.base = ast.Node{ .id = ast.Node.Id.SwitchCase },
.items = ast.Node.SwitchCase.ItemList.init(arena),
.payload = null,
@@ -1369,7 +1369,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (token_ptr.id == Token.Id.Keyword_else) {
- const else_node = try arena.construct(ast.Node.SwitchElse{
+ const else_node = try arena.create(ast.Node.SwitchElse{
.base = ast.Node{ .id = ast.Node.Id.SwitchElse },
.token = token_index,
});
@@ -1468,7 +1468,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
State.ExternType => |ctx| {
if (eatToken(&tok_it, &tree, Token.Id.Keyword_fn)) |fn_token| {
- const fn_proto = try arena.construct(ast.Node.FnProto{
+ const fn_proto = try arena.create(ast.Node.FnProto{
.base = ast.Node{ .id = ast.Node.Id.FnProto },
.doc_comments = ctx.comments,
.visib_token = null,
@@ -1641,7 +1641,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.Payload{
+ const node = try arena.create(ast.Node.Payload{
.base = ast.Node{ .id = ast.Node.Id.Payload },
.lpipe = token_index,
.error_symbol = undefined,
@@ -1677,7 +1677,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.PointerPayload{
+ const node = try arena.create(ast.Node.PointerPayload{
.base = ast.Node{ .id = ast.Node.Id.PointerPayload },
.lpipe = token_index,
.ptr_token = null,
@@ -1720,7 +1720,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.PointerIndexPayload{
+ const node = try arena.create(ast.Node.PointerIndexPayload{
.base = ast.Node{ .id = ast.Node.Id.PointerIndexPayload },
.lpipe = token_index,
.ptr_token = null,
@@ -1754,7 +1754,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_ptr = token.ptr;
switch (token_ptr.id) {
Token.Id.Keyword_return, Token.Id.Keyword_break, Token.Id.Keyword_continue => {
- const node = try arena.construct(ast.Node.ControlFlowExpression{
+ const node = try arena.create(ast.Node.ControlFlowExpression{
.base = ast.Node{ .id = ast.Node.Id.ControlFlowExpression },
.ltoken = token_index,
.kind = undefined,
@@ -1783,7 +1783,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_try, Token.Id.Keyword_cancel, Token.Id.Keyword_resume => {
- const node = try arena.construct(ast.Node.PrefixOp{
+ const node = try arena.create(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
.op_token = token_index,
.op = switch (token_ptr.id) {
@@ -1817,7 +1817,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Ellipsis3)) |ellipsis3| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = ellipsis3,
@@ -1842,7 +1842,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToAssignment(token_ptr.id)) |ass_id| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -1872,7 +1872,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToUnwrapExpr(token_ptr.id)) |unwrap_id| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -1904,7 +1904,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Keyword_or)) |or_token| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = or_token,
@@ -1928,7 +1928,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Keyword_and)) |and_token| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = and_token,
@@ -1955,7 +1955,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToComparison(token_ptr.id)) |comp_id| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -1982,7 +1982,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Pipe)) |pipe| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = pipe,
@@ -2006,7 +2006,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Caret)) |caret| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = caret,
@@ -2030,7 +2030,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Ampersand)) |ampersand| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = ampersand,
@@ -2057,7 +2057,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToBitShift(token_ptr.id)) |bitshift_id| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -2087,7 +2087,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToAddition(token_ptr.id)) |add_id| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -2117,7 +2117,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToMultiply(token_ptr.id)) |mult_id| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -2145,7 +2145,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (tok_it.peek().?.id == Token.Id.Period) {
- const node = try arena.construct(ast.Node.SuffixOp{
+ const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op{ .StructInitializer = ast.Node.SuffixOp.Op.InitList.init(arena) },
@@ -2164,7 +2164,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.SuffixOp{
+ const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op{ .ArrayInitializer = ast.Node.SuffixOp.Op.InitList.init(arena) },
@@ -2193,7 +2193,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Bang)) |bang| {
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = bang,
@@ -2212,7 +2212,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToPrefixOp(token_ptr.id)) |prefix_id| {
- var node = try arena.construct(ast.Node.PrefixOp{
+ var node = try arena.create(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
.op_token = token_index,
.op = prefix_id,
@@ -2222,7 +2222,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
// Treat '**' token as two pointer types
if (token_ptr.id == Token.Id.AsteriskAsterisk) {
- const child = try arena.construct(ast.Node.PrefixOp{
+ const child = try arena.create(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
.op_token = token_index,
.op = prefix_id,
@@ -2246,7 +2246,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
State.SuffixOpExpressionBegin => |opt_ctx| {
if (eatToken(&tok_it, &tree, Token.Id.Keyword_async)) |async_token| {
- const async_node = try arena.construct(ast.Node.AsyncAttribute{
+ const async_node = try arena.create(ast.Node.AsyncAttribute{
.base = ast.Node{ .id = ast.Node.Id.AsyncAttribute },
.async_token = async_token,
.allocator_type = null,
@@ -2277,7 +2277,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_ptr = token.ptr;
switch (token_ptr.id) {
Token.Id.LParen => {
- const node = try arena.construct(ast.Node.SuffixOp{
+ const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op{
@@ -2301,7 +2301,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.LBracket => {
- const node = try arena.construct(ast.Node.SuffixOp{
+ const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op{ .ArrayAccess = undefined },
@@ -2316,7 +2316,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
Token.Id.Period => {
if (eatToken(&tok_it, &tree, Token.Id.Asterisk)) |asterisk_token| {
- const node = try arena.construct(ast.Node.SuffixOp{
+ const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op.Deref,
@@ -2327,7 +2327,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
if (eatToken(&tok_it, &tree, Token.Id.QuestionMark)) |question_token| {
- const node = try arena.construct(ast.Node.SuffixOp{
+ const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op.UnwrapOptional,
@@ -2337,7 +2337,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
continue;
}
- const node = try arena.construct(ast.Node.InfixOp{
+ const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@@ -2397,7 +2397,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_promise => {
- const node = try arena.construct(ast.Node.PromiseType{
+ const node = try arena.create(ast.Node.PromiseType{
.base = ast.Node{ .id = ast.Node.Id.PromiseType },
.promise_token = token.index,
.result = null,
@@ -2423,7 +2423,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.LParen => {
- const node = try arena.construct(ast.Node.GroupedExpression{
+ const node = try arena.create(ast.Node.GroupedExpression{
.base = ast.Node{ .id = ast.Node.Id.GroupedExpression },
.lparen = token.index,
.expr = undefined,
@@ -2441,7 +2441,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Builtin => {
- const node = try arena.construct(ast.Node.BuiltinCall{
+ const node = try arena.create(ast.Node.BuiltinCall{
.base = ast.Node{ .id = ast.Node.Id.BuiltinCall },
.builtin_token = token.index,
.params = ast.Node.BuiltinCall.ParamList.init(arena),
@@ -2460,7 +2460,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.LBracket => {
- const node = try arena.construct(ast.Node.PrefixOp{
+ const node = try arena.create(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
.op_token = token.index,
.op = undefined,
@@ -2519,7 +2519,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_fn => {
- const fn_proto = try arena.construct(ast.Node.FnProto{
+ const fn_proto = try arena.create(ast.Node.FnProto{
.base = ast.Node{ .id = ast.Node.Id.FnProto },
.doc_comments = null,
.visib_token = null,
@@ -2540,7 +2540,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_nakedcc, Token.Id.Keyword_stdcallcc => {
- const fn_proto = try arena.construct(ast.Node.FnProto{
+ const fn_proto = try arena.create(ast.Node.FnProto{
.base = ast.Node{ .id = ast.Node.Id.FnProto },
.doc_comments = null,
.visib_token = null,
@@ -2567,7 +2567,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_asm => {
- const node = try arena.construct(ast.Node.Asm{
+ const node = try arena.create(ast.Node.Asm{
.base = ast.Node{ .id = ast.Node.Id.Asm },
.asm_token = token.index,
.volatile_token = null,
@@ -2629,7 +2629,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
- const node = try arena.construct(ast.Node.ErrorSetDecl{
+ const node = try arena.create(ast.Node.ErrorSetDecl{
.base = ast.Node{ .id = ast.Node.Id.ErrorSetDecl },
.error_token = ctx.error_token,
.decls = ast.Node.ErrorSetDecl.DeclList.init(arena),
@@ -2695,7 +2695,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
return tree;
}
- const node = try arena.construct(ast.Node.ErrorTag{
+ const node = try arena.create(ast.Node.ErrorTag{
.base = ast.Node{ .id = ast.Node.Id.ErrorTag },
.doc_comments = comments,
.name_token = ident_token_index,
@@ -3032,7 +3032,7 @@ fn pushDocComment(arena: *mem.Allocator, line_comment: TokenIndex, result: *?*as
if (result.*) |comment_node| {
break :blk comment_node;
} else {
- const comment_node = try arena.construct(ast.Node.DocComment{
+ const comment_node = try arena.create(ast.Node.DocComment{
.base = ast.Node{ .id = ast.Node.Id.DocComment },
.lines = ast.Node.DocComment.LineList.init(arena),
});
@@ -3061,7 +3061,7 @@ fn parseStringLiteral(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterato
return &(try createLiteral(arena, ast.Node.StringLiteral, token_index)).base;
},
Token.Id.MultilineStringLiteralLine => {
- const node = try arena.construct(ast.Node.MultilineStringLiteral{
+ const node = try arena.create(ast.Node.MultilineStringLiteral{
.base = ast.Node{ .id = ast.Node.Id.MultilineStringLiteral },
.lines = ast.Node.MultilineStringLiteral.LineList.init(arena),
});
@@ -3089,7 +3089,7 @@ fn parseStringLiteral(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterato
fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *const OptionalCtx, token_ptr: *const Token, token_index: TokenIndex) !bool {
switch (token_ptr.id) {
Token.Id.Keyword_suspend => {
- const node = try arena.construct(ast.Node.Suspend{
+ const node = try arena.create(ast.Node.Suspend{
.base = ast.Node{ .id = ast.Node.Id.Suspend },
.label = null,
.suspend_token = token_index,
@@ -3103,7 +3103,7 @@ fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *con
return true;
},
Token.Id.Keyword_if => {
- const node = try arena.construct(ast.Node.If{
+ const node = try arena.create(ast.Node.If{
.base = ast.Node{ .id = ast.Node.Id.If },
.if_token = token_index,
.condition = undefined,
@@ -3144,7 +3144,7 @@ fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *con
return true;
},
Token.Id.Keyword_switch => {
- const node = try arena.construct(ast.Node.Switch{
+ const node = try arena.create(ast.Node.Switch{
.base = ast.Node{ .id = ast.Node.Id.Switch },
.switch_token = token_index,
.expr = undefined,
@@ -3166,7 +3166,7 @@ fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *con
return true;
},
Token.Id.Keyword_comptime => {
- const node = try arena.construct(ast.Node.Comptime{
+ const node = try arena.create(ast.Node.Comptime{
.base = ast.Node{ .id = ast.Node.Id.Comptime },
.comptime_token = token_index,
.expr = undefined,
@@ -3178,7 +3178,7 @@ fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *con
return true;
},
Token.Id.LBrace => {
- const block = try arena.construct(ast.Node.Block{
+ const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = token_index,
@@ -3318,7 +3318,7 @@ fn tokenIdToPrefixOp(id: @TagType(Token.Id)) ?ast.Node.PrefixOp.Op {
}
fn createLiteral(arena: *mem.Allocator, comptime T: type, token_index: TokenIndex) !*T {
- return arena.construct(T{
+ return arena.create(T{
.base = ast.Node{ .id = ast.Node.typeToId(T) },
.token = token_index,
});
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index 09ea8aa1a1..21259bec3c 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -1,3 +1,12 @@
+test "zig fmt: preserve space between async fn definitions" {
+ try testCanonical(
+ \\async fn a() void {}
+ \\
+ \\async fn b() void {}
+ \\
+ );
+}
+
test "zig fmt: comment to disable/enable zig fmt first" {
try testCanonical(
\\// Test trailing comma syntax
diff --git a/test/behavior.zig b/test/behavior.zig
index 096c07b2e0..3766ed4305 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -13,6 +13,7 @@ comptime {
_ = @import("cases/bugs/656.zig");
_ = @import("cases/bugs/828.zig");
_ = @import("cases/bugs/920.zig");
+ _ = @import("cases/bugs/1111.zig");
_ = @import("cases/byval_arg_var.zig");
_ = @import("cases/cast.zig");
_ = @import("cases/const_slice_child.zig");
@@ -34,6 +35,7 @@ comptime {
_ = @import("cases/math.zig");
_ = @import("cases/merge_error_sets.zig");
_ = @import("cases/misc.zig");
+ _ = @import("cases/optional.zig");
_ = @import("cases/namespace_depends_on_compile_var/index.zig");
_ = @import("cases/new_stack_call.zig");
_ = @import("cases/null.zig");
@@ -58,4 +60,5 @@ comptime {
_ = @import("cases/var_args.zig");
_ = @import("cases/void.zig");
_ = @import("cases/while.zig");
+ _ = @import("cases/widening.zig");
}
diff --git a/test/cases/align.zig b/test/cases/align.zig
index 682c185e86..64f0788efc 100644
--- a/test/cases/align.zig
+++ b/test/cases/align.zig
@@ -90,7 +90,7 @@ fn testBytesAlignSlice(b: u8) void {
b,
b,
};
- const slice = ([]u32)(bytes[0..]);
+ const slice: []u32 = @bytesToSlice(u32, bytes[0..]);
assert(slice[0] == 0x33333333);
}
diff --git a/test/cases/array.zig b/test/cases/array.zig
index b481261b4f..b72491bcc0 100644
--- a/test/cases/array.zig
+++ b/test/cases/array.zig
@@ -152,3 +152,11 @@ fn testImplicitCastSingleItemPtr() void {
slice[0] += 1;
assert(byte == 101);
}
+
+fn testArrayByValAtComptime(b: [2]u8) u8 {
+ return b[0];
+}
+
+test "comptime evalutating function that takes array by value" {
+ const arr = []u8{0,1};
+ _ = comptime testArrayByValAtComptime(arr);
+ _ = comptime testArrayByValAtComptime(arr);
+}
diff --git a/test/cases/bugs/1111.zig b/test/cases/bugs/1111.zig
new file mode 100644
index 0000000000..f62107f9a3
--- /dev/null
+++ b/test/cases/bugs/1111.zig
@@ -0,0 +1,12 @@
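+// regression test: switch on an extern enum whose tag value is negative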
+const Foo = extern enum {
+ Bar = -1,
+};
+
+test "issue 1111 fixed" {
+ const v = Foo.Bar;
+
+ switch (v) {
+ Foo.Bar => return,
+ else => return,
+ }
+}
diff --git a/test/cases/cast.zig b/test/cases/cast.zig
index f1e49c6d1f..5688d90e11 100644
--- a/test/cases/cast.zig
+++ b/test/cases/cast.zig
@@ -140,8 +140,8 @@ test "explicit cast from integer to error type" {
comptime testCastIntToErr(error.ItBroke);
}
fn testCastIntToErr(err: error) void {
- const x = usize(err);
- const y = error(x);
+ const x = @errorToInt(err);
+ const y = @intToError(x);
assert(error.ItBroke == y);
}
@@ -340,11 +340,26 @@ fn testPeerErrorAndArray2(x: u8) error![]const u8 {
};
}
-test "explicit cast float number literal to integer if no fraction component" {
+test "@floatToInt" {
+ testFloatToInts();
+ comptime testFloatToInts();
+}
+
+fn testFloatToInts() void {
const x = i32(1e4);
assert(x == 10000);
const y = @floatToInt(i32, f32(1e4));
assert(y == 10000);
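+ // the fractional part is truncated toward zero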
+ expectFloatToInt(f16, 255.1, u8, 255);
+ expectFloatToInt(f16, 127.2, i8, 127);
+ expectFloatToInt(f16, -128.2, i8, -128);
+ expectFloatToInt(f32, 255.1, u8, 255);
+ expectFloatToInt(f32, 127.2, i8, 127);
+ expectFloatToInt(f32, -128.2, i8, -128);
+}
+
+fn expectFloatToInt(comptime F: type, f: F, comptime I: type, i: I) void {
+ assert(@floatToInt(I, f) == i);
}
test "cast u128 to f128 and back" {
@@ -372,7 +387,7 @@ test "const slice widen cast" {
0x12,
};
- const u32_value = ([]const u32)(bytes[0..])[0];
+ const u32_value = @bytesToSlice(u32, bytes[0..])[0];
assert(u32_value == 0x12121212);
assert(@bitCast(u32, bytes) == 0x12121212);
@@ -406,17 +421,50 @@ test "@intCast comptime_int" {
}
test "@floatCast comptime_int and comptime_float" {
- const result = @floatCast(f32, 1234);
- assert(@typeOf(result) == f32);
- assert(result == 1234.0);
-
- const result2 = @floatCast(f32, 1234.0);
- assert(@typeOf(result) == f32);
- assert(result == 1234.0);
+ {
+ const result = @floatCast(f16, 1234);
+ assert(@typeOf(result) == f16);
+ assert(result == 1234.0);
+ }
+ {
+ const result = @floatCast(f16, 1234.0);
+ assert(@typeOf(result) == f16);
+ assert(result == 1234.0);
+ }
+ {
+ const result = @floatCast(f32, 1234);
+ assert(@typeOf(result) == f32);
+ assert(result == 1234.0);
+ }
+ {
+ const result = @floatCast(f32, 1234.0);
+ assert(@typeOf(result) == f32);
+ assert(result == 1234.0);
+ }
}
test "comptime_int @intToFloat" {
- const result = @intToFloat(f32, 1234);
- assert(@typeOf(result) == f32);
- assert(result == 1234.0);
+ {
+ const result = @intToFloat(f16, 1234);
+ assert(@typeOf(result) == f16);
+ assert(result == 1234.0);
+ }
+ {
+ const result = @intToFloat(f32, 1234);
+ assert(@typeOf(result) == f32);
+ assert(result == 1234.0);
+ }
+}
+
+test "@bytesToSlice keeps pointer alignment" {
+ var bytes = []u8{ 0x01, 0x02, 0x03, 0x04 };
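+ // the result slice type keeps the alignment of the source bytes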
+ const numbers = @bytesToSlice(u32, bytes[0..]);
+ comptime assert(@typeOf(numbers) == []align(@alignOf(@typeOf(bytes))) u32);
+}
+
+test "@intCast i32 to u7" {
+ var x: u128 = @maxValue(u128);
+ var y: i32 = 120;
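+ // a u128 shift amount must fit in u7 (log2(128) bits), hence the cast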
+ var z = x >> @intCast(u7, y);
+ assert(z == 0xff);
}
diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig
index 4d2aa54a69..b3899b306b 100644
--- a/test/cases/coroutines.zig
+++ b/test/cases/coroutines.zig
@@ -5,7 +5,10 @@ const assert = std.debug.assert;
var x: i32 = 1;
test "create a coroutine and cancel it" {
- const p = try async simpleAsyncFn();
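+ // the coroutine frame is allocated with an explicitly provided allocator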
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const p = try async<&da.allocator> simpleAsyncFn();
comptime assert(@typeOf(p) == promise->void);
cancel p;
assert(x == 2);
@@ -17,8 +20,11 @@ async fn simpleAsyncFn() void {
}
test "coroutine suspend, resume, cancel" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
seq('a');
- const p = try async testAsyncSeq();
+ const p = try async<&da.allocator> testAsyncSeq();
seq('c');
resume p;
seq('f');
@@ -43,7 +49,10 @@ fn seq(c: u8) void {
}
test "coroutine suspend with block" {
- const p = try async testSuspendBlock();
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const p = try async<&da.allocator> testSuspendBlock();
std.debug.assert(!result);
resume a_promise;
std.debug.assert(result);
@@ -64,8 +73,11 @@ var await_a_promise: promise = undefined;
var await_final_result: i32 = 0;
test "coroutine await" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
await_seq('a');
- const p = async await_amain() catch unreachable;
+ const p = async<&da.allocator> await_amain() catch unreachable;
await_seq('f');
resume await_a_promise;
await_seq('i');
@@ -100,8 +112,11 @@ fn await_seq(c: u8) void {
var early_final_result: i32 = 0;
test "coroutine await early return" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
early_seq('a');
- const p = async early_amain() catch unreachable;
+ const p = async<&da.allocator> early_amain() catch unreachable;
early_seq('f');
assert(early_final_result == 1234);
assert(std.mem.eql(u8, early_points, "abcdef"));
@@ -146,7 +161,9 @@ test "async function with dot syntax" {
suspend;
}
};
- const p = try async S.foo();
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+ const p = try async<&da.allocator> S.foo();
cancel p;
assert(S.y == 2);
}
@@ -157,7 +174,9 @@ test "async fn pointer in a struct field" {
bar: async<*std.mem.Allocator> fn (*i32) void,
};
var foo = Foo{ .bar = simpleAsyncFn2 };
- const p = (async foo.bar(&data)) catch unreachable;
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+ const p = (async<&da.allocator> foo.bar(&data)) catch unreachable;
assert(data == 2);
cancel p;
assert(data == 4);
@@ -169,7 +188,9 @@ async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void {
}
test "async fn with inferred error set" {
- const p = (async failing()) catch unreachable;
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+ const p = (async<&da.allocator> failing()) catch unreachable;
resume p;
cancel p;
}
@@ -181,7 +202,9 @@ async fn failing() !void {
test "error return trace across suspend points - early return" {
const p = nonFailing();
resume p;
- const p2 = try async printTrace(p);
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+ const p2 = try async<&da.allocator> printTrace(p);
cancel p2;
}
diff --git a/test/cases/enum.zig b/test/cases/enum.zig
index 6a02a47784..50edfda536 100644
--- a/test/cases/enum.zig
+++ b/test/cases/enum.zig
@@ -92,14 +92,14 @@ test "enum to int" {
}
fn shouldEqual(n: Number, expected: u3) void {
- assert(u3(n) == expected);
+ assert(@enumToInt(n) == expected);
}
test "int to enum" {
testIntToEnumEval(3);
}
fn testIntToEnumEval(x: i32) void {
- assert(IntToEnumNumber(@intCast(u3, x)) == IntToEnumNumber.Three);
+ assert(@intToEnum(IntToEnumNumber, @intCast(u3, x)) == IntToEnumNumber.Three);
}
const IntToEnumNumber = enum {
Zero,
@@ -768,7 +768,7 @@ test "casting enum to its tag type" {
}
fn testCastEnumToTagType(value: Small2) void {
- assert(u2(value) == 1);
+ assert(@enumToInt(value) == 1);
}
const MultipleChoice = enum(u32) {
@@ -784,7 +784,7 @@ test "enum with specified tag values" {
}
fn testEnumWithSpecifiedTagValues(x: MultipleChoice) void {
- assert(u32(x) == 60);
+ assert(@enumToInt(x) == 60);
assert(1234 == switch (x) {
MultipleChoice.A => 1,
MultipleChoice.B => 2,
@@ -811,7 +811,7 @@ test "enum with specified and unspecified tag values" {
}
fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) void {
- assert(u32(x) == 1000);
+ assert(@enumToInt(x) == 1000);
assert(1234 == switch (x) {
MultipleChoice2.A => 1,
MultipleChoice2.B => 2,
@@ -826,8 +826,8 @@ fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) void {
}
test "cast integer literal to enum" {
- assert(MultipleChoice2(0) == MultipleChoice2.Unspecified1);
- assert(MultipleChoice2(40) == MultipleChoice2.B);
+ assert(@intToEnum(MultipleChoice2, 0) == MultipleChoice2.Unspecified1);
+ assert(@intToEnum(MultipleChoice2, 40) == MultipleChoice2.B);
}
const EnumWithOneMember = enum {
@@ -865,7 +865,7 @@ const EnumWithTagValues = enum(u4) {
D = 1 << 3,
};
test "enum with tag values don't require parens" {
- assert(u4(EnumWithTagValues.C) == 0b0100);
+ assert(@enumToInt(EnumWithTagValues.C) == 0b0100);
}
test "enum with 1 field but explicit tag type should still have the tag type" {
diff --git a/test/cases/error.zig b/test/cases/error.zig
index 693631fe2d..45971fd40d 100644
--- a/test/cases/error.zig
+++ b/test/cases/error.zig
@@ -31,8 +31,8 @@ test "@errorName" {
}
test "error values" {
- const a = i32(error.err1);
- const b = i32(error.err2);
+ const a = @errorToInt(error.err1);
+ const b = @errorToInt(error.err2);
assert(a != b);
}
@@ -124,8 +124,8 @@ const Set2 = error{
};
fn testExplicitErrorSetCast(set1: Set1) void {
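+ // @errSetCast is safety-checked: the error must be a member of the destination set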
- var x = Set2(set1);
- var y = Set1(x);
+ var x = @errSetCast(Set2, set1);
+ var y = @errSetCast(Set1, x);
assert(y == error.A);
}
@@ -147,14 +147,14 @@ test "syntax: optional operator in front of error union operator" {
}
test "comptime err to int of error set with only 1 possible value" {
- testErrToIntWithOnePossibleValue(error.A, u32(error.A));
- comptime testErrToIntWithOnePossibleValue(error.A, u32(error.A));
+ testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
+ comptime testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
}
fn testErrToIntWithOnePossibleValue(
x: error{A},
comptime value: u32,
) void {
- if (u32(x) != value) {
+ if (@errorToInt(x) != value) {
@compileError("bad");
}
}
diff --git a/test/cases/eval.zig b/test/cases/eval.zig
index 6c919e17a6..83d2e80176 100644
--- a/test/cases/eval.zig
+++ b/test/cases/eval.zig
@@ -623,3 +623,22 @@ test "function which returns struct with type field causes implicit comptime" {
const ty = wrap(i32).T;
assert(ty == i32);
}
+
+test "call method with comptime pass-by-non-copying-value self parameter" {
+ const S = struct {
+ a: u8,
+
+ fn b(comptime s: this) u8 {
+ return s.a;
+ }
+ };
+
+ const s = S{ .a = 2 };
+ var b = s.b();
+ assert(b == 2);
+}
+
+test "@tagName of @typeId" {
+ const str = @tagName(@typeId(u8));
+ assert(std.mem.eql(u8, str, "Int"));
+}
diff --git a/test/cases/math.zig b/test/cases/math.zig
index 0bf99cff0e..195ada15dd 100644
--- a/test/cases/math.zig
+++ b/test/cases/math.zig
@@ -6,15 +6,20 @@ test "division" {
}
fn testDivision() void {
assert(div(u32, 13, 3) == 4);
+ assert(div(f16, 1.0, 2.0) == 0.5);
assert(div(f32, 1.0, 2.0) == 0.5);
assert(divExact(u32, 55, 11) == 5);
assert(divExact(i32, -55, 11) == -5);
+ assert(divExact(f16, 55.0, 11.0) == 5.0);
+ assert(divExact(f16, -55.0, 11.0) == -5.0);
assert(divExact(f32, 55.0, 11.0) == 5.0);
assert(divExact(f32, -55.0, 11.0) == -5.0);
assert(divFloor(i32, 5, 3) == 1);
assert(divFloor(i32, -5, 3) == -2);
+ assert(divFloor(f16, 5.0, 3.0) == 1.0);
+ assert(divFloor(f16, -5.0, 3.0) == -2.0);
assert(divFloor(f32, 5.0, 3.0) == 1.0);
assert(divFloor(f32, -5.0, 3.0) == -2.0);
assert(divFloor(i32, -0x80000000, -2) == 0x40000000);
@@ -24,8 +29,12 @@ fn testDivision() void {
assert(divTrunc(i32, 5, 3) == 1);
assert(divTrunc(i32, -5, 3) == -1);
+ assert(divTrunc(f16, 5.0, 3.0) == 1.0);
+ assert(divTrunc(f16, -5.0, 3.0) == -1.0);
assert(divTrunc(f32, 5.0, 3.0) == 1.0);
assert(divTrunc(f32, -5.0, 3.0) == -1.0);
+ assert(divTrunc(f64, 5.0, 3.0) == 1.0);
+ assert(divTrunc(f64, -5.0, 3.0) == -1.0);
comptime {
assert(
@@ -287,6 +296,14 @@ test "quad hex float literal parsing in range" {
const d = 0x1.edcbff8ad76ab5bf46463233214fp-435;
}
+test "quad hex float literal parsing accurate" {
+ const a: f128 = 0x1.1111222233334444555566667777p+0;
+
+ // the implied leading 1 is dropped; the biased exponent for 2^0 is 0x3fff.
+ const expected: u128 = 0x3fff1111222233334444555566667777;
+ assert(@bitCast(u128, a) == expected);
+}
+
test "hex float literal within range" {
const a = 0x1.0p16383;
const b = 0x0.1p16387;
@@ -434,11 +451,28 @@ test "comptime float rem int" {
}
}
+test "remainder division" {
+ comptime remdiv(f16);
+ comptime remdiv(f32);
+ comptime remdiv(f64);
+ comptime remdiv(f128);
+ remdiv(f16);
+ remdiv(f64);
+ remdiv(f128);
+}
+
+fn remdiv(comptime T: type) void {
+ assert(T(1) == T(1) % T(2));
+ assert(T(1) == T(7) % T(3));
+}
+
test "@sqrt" {
testSqrt(f64, 12.0);
comptime testSqrt(f64, 12.0);
testSqrt(f32, 13.0);
comptime testSqrt(f32, 13.0);
+ testSqrt(f16, 13.0);
+ comptime testSqrt(f16, 13.0);
const x = 14.0;
const y = x * x;
diff --git a/test/cases/misc.zig b/test/cases/misc.zig
index beb0d6d456..0f181a7b4e 100644
--- a/test/cases/misc.zig
+++ b/test/cases/misc.zig
@@ -53,6 +53,7 @@ test "@IntType builtin" {
}
test "floating point primitive bit counts" {
+ assert(f16.bit_count == 16);
assert(f32.bit_count == 32);
assert(f64.bit_count == 64);
}
@@ -422,14 +423,14 @@ test "cast slice to u8 slice" {
4,
};
const big_thing_slice: []i32 = big_thing_array[0..];
- const bytes = ([]u8)(big_thing_slice);
+ const bytes = @sliceToBytes(big_thing_slice);
assert(bytes.len == 4 * 4);
bytes[4] = 0;
bytes[5] = 0;
bytes[6] = 0;
bytes[7] = 0;
assert(big_thing_slice[1] == 0);
- const big_thing_again = ([]align(1) i32)(bytes);
+ const big_thing_again = @bytesToSlice(i32, bytes);
assert(big_thing_again[2] == 3);
big_thing_again[2] = -1;
assert(bytes[8] == @maxValue(u8));
@@ -701,3 +702,8 @@ test "comptime cast fn to ptr" {
const addr2 = @ptrCast(*const u8, emptyFn);
comptime assert(addr1 == addr2);
}
+
+test "equality compare fn ptrs" {
+ var a = emptyFn;
+ assert(a == a);
+}
diff --git a/test/cases/null.zig b/test/cases/null.zig
index d2a9aaed55..c86dd34b06 100644
--- a/test/cases/null.zig
+++ b/test/cases/null.zig
@@ -146,7 +146,7 @@ test "null with default unwrap" {
test "optional types" {
comptime {
- const opt_type_struct = StructWithOptionalType { .t=u8, };
+ const opt_type_struct = StructWithOptionalType{ .t = u8 };
assert(opt_type_struct.t != null and opt_type_struct.t.? == u8);
}
}
diff --git a/test/cases/optional.zig b/test/cases/optional.zig
new file mode 100644
index 0000000000..0129252dab
--- /dev/null
+++ b/test/cases/optional.zig
@@ -0,0 +1,9 @@
+const assert = @import("std").debug.assert;
+
+pub const EmptyStruct = struct {};
+
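+// a pointer to a zero-size struct carries no data, yet an optional pointer to one must still compare non-null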
+test "optional pointer to size zero struct" {
+ var e = EmptyStruct{};
+ var o: ?*EmptyStruct = &e;
+ assert(o != null);
+}
diff --git a/test/cases/struct.zig b/test/cases/struct.zig
index 94a2ba6336..2941ecb56a 100644
--- a/test/cases/struct.zig
+++ b/test/cases/struct.zig
@@ -302,7 +302,7 @@ test "packed array 24bits" {
var bytes = []u8{0} ** (@sizeOf(FooArray24Bits) + 1);
bytes[bytes.len - 1] = 0xaa;
- const ptr = &([]FooArray24Bits)(bytes[0 .. bytes.len - 1])[0];
+ const ptr = &@bytesToSlice(FooArray24Bits, bytes[0 .. bytes.len - 1])[0];
assert(ptr.a == 0);
assert(ptr.b[0].field == 0);
assert(ptr.b[1].field == 0);
@@ -351,7 +351,7 @@ test "aligned array of packed struct" {
}
var bytes = []u8{0xbb} ** @sizeOf(FooArrayOfAligned);
- const ptr = &([]FooArrayOfAligned)(bytes[0..bytes.len])[0];
+ const ptr = &@bytesToSlice(FooArrayOfAligned, bytes[0..bytes.len])[0];
assert(ptr.a[0].a == 0xbb);
assert(ptr.a[0].b == 0xbb);
diff --git a/test/cases/type_info.zig b/test/cases/type_info.zig
index 1bc58b14e1..b8fc4cf14e 100644
--- a/test/cases/type_info.zig
+++ b/test/cases/type_info.zig
@@ -107,11 +107,11 @@ test "type info: promise info" {
fn testPromise() void {
const null_promise_info = @typeInfo(promise);
assert(TypeId(null_promise_info) == TypeId.Promise);
- assert(null_promise_info.Promise.child == @typeOf(undefined));
+ assert(null_promise_info.Promise.child == null);
const promise_info = @typeInfo(promise->usize);
assert(TypeId(promise_info) == TypeId.Promise);
- assert(promise_info.Promise.child == usize);
+ assert(promise_info.Promise.child.? == usize);
}
test "type info: error set, error union info" {
@@ -130,7 +130,7 @@ fn testErrorSet() void {
assert(TypeId(error_set_info) == TypeId.ErrorSet);
assert(error_set_info.ErrorSet.errors.len == 3);
assert(mem.eql(u8, error_set_info.ErrorSet.errors[0].name, "First"));
- assert(error_set_info.ErrorSet.errors[2].value == usize(TestErrorSet.Third));
+ assert(error_set_info.ErrorSet.errors[2].value == @errorToInt(TestErrorSet.Third));
const error_union_info = @typeInfo(TestErrorSet!usize);
assert(TypeId(error_union_info) == TypeId.ErrorUnion);
@@ -165,7 +165,7 @@ fn testUnion() void {
const typeinfo_info = @typeInfo(TypeInfo);
assert(TypeId(typeinfo_info) == TypeId.Union);
assert(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto);
- assert(typeinfo_info.Union.tag_type == TypeId);
+ assert(typeinfo_info.Union.tag_type.? == TypeId);
assert(typeinfo_info.Union.fields.len == 25);
assert(typeinfo_info.Union.fields[4].enum_field != null);
assert(typeinfo_info.Union.fields[4].enum_field.?.value == 4);
@@ -179,7 +179,7 @@ fn testUnion() void {
const notag_union_info = @typeInfo(TestNoTagUnion);
assert(TypeId(notag_union_info) == TypeId.Union);
- assert(notag_union_info.Union.tag_type == @typeOf(undefined));
+ assert(notag_union_info.Union.tag_type == null);
assert(notag_union_info.Union.layout == TypeInfo.ContainerLayout.Auto);
assert(notag_union_info.Union.fields.len == 2);
assert(notag_union_info.Union.fields[0].enum_field == null);
@@ -191,7 +191,7 @@ fn testUnion() void {
const extern_union_info = @typeInfo(TestExternUnion);
assert(extern_union_info.Union.layout == TypeInfo.ContainerLayout.Extern);
- assert(extern_union_info.Union.tag_type == @typeOf(undefined));
+ assert(extern_union_info.Union.tag_type == null);
assert(extern_union_info.Union.fields[0].enum_field == null);
assert(extern_union_info.Union.fields[0].field_type == *c_void);
}
@@ -238,13 +238,13 @@ fn testFunction() void {
assert(fn_info.Fn.is_generic);
assert(fn_info.Fn.args.len == 2);
assert(fn_info.Fn.is_var_args);
- assert(fn_info.Fn.return_type == @typeOf(undefined));
- assert(fn_info.Fn.async_allocator_type == @typeOf(undefined));
+ assert(fn_info.Fn.return_type == null);
+ assert(fn_info.Fn.async_allocator_type == null);
const test_instance: TestStruct = undefined;
const bound_fn_info = @typeInfo(@typeOf(test_instance.foo));
assert(TypeId(bound_fn_info) == TypeId.BoundFn);
- assert(bound_fn_info.BoundFn.args[0].arg_type == *const TestStruct);
+ assert(bound_fn_info.BoundFn.args[0].arg_type.? == *const TestStruct);
}
fn foo(comptime a: usize, b: bool, args: ...) usize {
diff --git a/test/cases/union.zig b/test/cases/union.zig
index bdcbbdb452..78b2dc8dd7 100644
--- a/test/cases/union.zig
+++ b/test/cases/union.zig
@@ -126,7 +126,7 @@ const MultipleChoice = union(enum(u32)) {
test "simple union(enum(u32))" {
var x = MultipleChoice.C;
assert(x == MultipleChoice.C);
- assert(u32(@TagType(MultipleChoice)(x)) == 60);
+ assert(@enumToInt(@TagType(MultipleChoice)(x)) == 60);
}
const MultipleChoice2 = union(enum(u32)) {
@@ -148,7 +148,7 @@ test "union(enum(u32)) with specified and unspecified tag values" {
}
fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: *const MultipleChoice2) void {
- assert(u32(@TagType(MultipleChoice2)(x.*)) == 60);
+ assert(@enumToInt(@TagType(MultipleChoice2)(x.*)) == 60);
assert(1123 == switch (x.*) {
MultipleChoice2.A => 1,
MultipleChoice2.B => 2,
diff --git a/test/cases/widening.zig b/test/cases/widening.zig
new file mode 100644
index 0000000000..cf6ab4ca0f
--- /dev/null
+++ b/test/cases/widening.zig
@@ -0,0 +1,27 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const mem = std.mem;
+
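+// a value implicitly casts to any integer or float type that can represent all values of its type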
+test "integer widening" {
+ var a: u8 = 250;
+ var b: u16 = a;
+ var c: u32 = b;
+ var d: u64 = c;
+ var e: u64 = d;
+ var f: u128 = e;
+ assert(f == a);
+}
+
+test "implicit unsigned integer to signed integer" {
+ var a: u8 = 250;
+ var b: i16 = a;
+ assert(b == 250);
+}
+
+test "float widening" {
+ var a: f16 = 12.34;
+ var b: f32 = a;
+ var c: f64 = b;
+ var d: f128 = c;
+ assert(d == a);
+}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 8c5abaaccc..7291a48a8f 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,36 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "enum field value references enum",
+ \\pub const Foo = extern enum {
+ \\ A = Foo.B,
+ \\ C = D,
+ \\};
+ \\export fn entry() void {
+ \\ var s: Foo = Foo.E;
+ \\}
+ ,
+ ".tmp_source.zig:1:17: error: 'Foo' depends on itself",
+ );
+
+ cases.add(
+ "@floatToInt comptime safety",
+ \\comptime {
+ \\ _ = @floatToInt(i8, f32(-129.1));
+ \\}
+ \\comptime {
+ \\ _ = @floatToInt(u8, f32(-1.1));
+ \\}
+ \\comptime {
+ \\ _ = @floatToInt(u8, f32(256.1));
+ \\}
+ ,
+ ".tmp_source.zig:2:9: error: integer value '-129' cannot be stored in type 'i8'",
+ ".tmp_source.zig:5:9: error: integer value '-1' cannot be stored in type 'u8'",
+ ".tmp_source.zig:8:9: error: integer value '256' cannot be stored in type 'u8'",
+ );
+
cases.add(
"use c_void as return type of fn ptr",
\\export fn entry() void {
@@ -83,7 +113,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ var rule_set = try Foo.init();
\\}
,
- ".tmp_source.zig:2:13: error: invalid cast from type 'type' to 'i32'",
+ ".tmp_source.zig:2:13: error: expected type 'i32', found 'type'",
);
cases.add(
@@ -105,7 +135,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.add(
- "invalid deref on switch target",
+ "nested error set mismatch",
\\const NextError = error{NextError};
\\const OtherError = error{OutOfMemory};
\\
@@ -117,7 +147,9 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ return null;
\\}
,
- ".tmp_source.zig:5:34: error: expected 'NextError!i32', found 'OtherError!i32'",
+ ".tmp_source.zig:5:34: error: expected type '?NextError!i32', found '?OtherError!i32'",
+ ".tmp_source.zig:5:34: note: optional type child 'OtherError!i32' cannot cast into optional type child 'NextError!i32'",
+ ".tmp_source.zig:5:34: note: error set 'OtherError' cannot cast into error set 'NextError'",
".tmp_source.zig:2:26: note: 'error.OutOfMemory' not a member of destination error set",
);
@@ -404,10 +436,10 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\const Set2 = error {A, C};
\\comptime {
\\ var x = Set1.B;
- \\ var y = Set2(x);
+ \\ var y = @errSetCast(Set2, x);
\\}
,
- ".tmp_source.zig:5:17: error: error.B not a member of error set 'Set2'",
+ ".tmp_source.zig:5:13: error: error.B not a member of error set 'Set2'",
);
cases.add(
@@ -420,8 +452,9 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ return error.B;
\\}
,
- ".tmp_source.zig:3:35: error: expected 'SmallErrorSet!i32', found 'error!i32'",
- ".tmp_source.zig:3:35: note: unable to cast global error set into smaller set",
+ ".tmp_source.zig:3:35: error: expected type 'SmallErrorSet!i32', found 'error!i32'",
+ ".tmp_source.zig:3:35: note: error set 'error' cannot cast into error set 'SmallErrorSet'",
+ ".tmp_source.zig:3:35: note: cannot cast global error set into smaller set",
);
cases.add(
@@ -434,8 +467,8 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ return error.B;
\\}
,
- ".tmp_source.zig:3:31: error: expected 'SmallErrorSet', found 'error'",
- ".tmp_source.zig:3:31: note: unable to cast global error set into smaller set",
+ ".tmp_source.zig:3:31: error: expected type 'SmallErrorSet', found 'error'",
+ ".tmp_source.zig:3:31: note: cannot cast global error set into smaller set",
);
cases.add(
@@ -461,31 +494,40 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ var x: Set2 = set1;
\\}
,
- ".tmp_source.zig:7:19: error: expected 'Set2', found 'Set1'",
+ ".tmp_source.zig:7:19: error: expected type 'Set2', found 'Set1'",
".tmp_source.zig:1:23: note: 'error.B' not a member of destination error set",
);
cases.add(
"int to err global invalid number",
- \\const Set1 = error{A, B};
+ \\const Set1 = error{
+ \\ A,
+ \\ B,
+ \\};
\\comptime {
- \\ var x: usize = 3;
- \\ var y = error(x);
+ \\ var x: u16 = 3;
+ \\ var y = @intToError(x);
\\}
,
- ".tmp_source.zig:4:18: error: integer value 3 represents no error",
+ ".tmp_source.zig:7:13: error: integer value 3 represents no error",
);
cases.add(
"int to err non global invalid number",
- \\const Set1 = error{A, B};
- \\const Set2 = error{A, C};
+ \\const Set1 = error{
+ \\ A,
+ \\ B,
+ \\};
+ \\const Set2 = error{
+ \\ A,
+ \\ C,
+ \\};
\\comptime {
- \\ var x = usize(Set1.B);
- \\ var y = Set2(x);
+ \\ var x = @errorToInt(Set1.B);
+ \\ var y = @errSetCast(Set2, @intToError(x));
\\}
,
- ".tmp_source.zig:5:17: error: integer value 2 represents no error in 'Set2'",
+ ".tmp_source.zig:11:13: error: error.B not a member of error set 'Set2'",
);
cases.add(
@@ -1635,6 +1677,18 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
".tmp_source.zig:1:16: error: integer value 300 cannot be implicitly casted to type 'u8'",
);
+ cases.add(
+ "invalid shift amount error",
+ \\const x : u8 = 2;
+ \\fn f() u16 {
+ \\ return x << 8;
+ \\}
+ \\export fn entry() u16 { return f(); }
+ ,
+ ".tmp_source.zig:3:14: error: RHS of shift is too large for LHS type",
+ ".tmp_source.zig:3:17: note: value 8 cannot fit into type u3",
+ );
+
cases.add(
"incompatible number literals",
\\const x = 2 == 2.0;
@@ -1851,6 +1905,416 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
".tmp_source.zig:1:15: error: use of undefined value",
);
+ cases.add(
+ "div on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a / a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "div assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a /= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "mod on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a % a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "mod assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a %= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "add on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a + a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "add assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a += a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "add wrap on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a +% a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "add wrap assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a +%= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "sub on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a - a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "sub assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a -= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "sub wrap on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a -% a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "sub wrap assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a -%= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "mult on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a * a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "mult assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a *= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "mult wrap on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a *% a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "mult wrap assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a *%= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "shift left on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a << 2;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "shift left assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a <<= 2;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "shift right on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a >> 2;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "shift left assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a >>= 2;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin and on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a & a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin and assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a &= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin or on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a | a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin or assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a |= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin xor on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a ^ a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin xor assign on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ a ^= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:5: error: use of undefined value",
+ );
+
+ cases.add(
+ "equal on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a == a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "not equal on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a != a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "greater than on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a > a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "greater than equal on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a >= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "less than on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a < a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "less than equal on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = a <= a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "and on undefined value",
+ \\comptime {
+ \\ var a: bool = undefined;
+ \\ _ = a and a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "or on undefined value",
+ \\comptime {
+ \\ var a: bool = undefined;
+ \\ _ = a or a;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
+ cases.add(
+ "negate on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = -a;
+ \\}
+ ,
+ ".tmp_source.zig:3:10: error: use of undefined value",
+ );
+
+ cases.add(
+ "negate wrap on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = -%a;
+ \\}
+ ,
+ ".tmp_source.zig:3:11: error: use of undefined value",
+ );
+
+ cases.add(
+ "bin not on undefined value",
+ \\comptime {
+ \\ var a: i64 = undefined;
+ \\ _ = ~a;
+ \\}
+ ,
+ ".tmp_source.zig:3:10: error: use of undefined value",
+ );
+
+ cases.add(
+ "bool not on undefined value",
+ \\comptime {
+ \\ var a: bool = undefined;
+ \\ _ = !a;
+ \\}
+ ,
+ ".tmp_source.zig:3:10: error: use of undefined value",
+ );
+
+ cases.add(
+ "orelse on undefined value",
+ \\comptime {
+ \\ var a: ?bool = undefined;
+ \\ _ = a orelse false;
+ \\}
+ ,
+ ".tmp_source.zig:3:11: error: use of undefined value",
+ );
+
+ cases.add(
+ "catch on undefined value",
+ \\comptime {
+ \\ var a: error!bool = undefined;
+ \\ _ = a catch |err| false;
+ \\}
+ ,
+ ".tmp_source.zig:3:11: error: use of undefined value",
+ );
+
+ cases.add(
+ "deref on undefined value",
+ \\comptime {
+ \\ var a: *u8 = undefined;
+ \\ _ = a.*;
+ \\}
+ ,
+ ".tmp_source.zig:3:9: error: use of undefined value",
+ );
+
cases.add(
"endless loop in function evaluation",
\\const seventh_fib_number = fibbonaci(7);
@@ -2086,10 +2550,11 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"convert fixed size array to slice with invalid size",
\\export fn f() void {
\\ var array: [5]u8 = undefined;
- \\ var foo = ([]const u32)(array)[0];
+ \\ var foo = @bytesToSlice(u32, array)[0];
\\}
,
- ".tmp_source.zig:3:28: error: unable to convert [5]u8 to []const u32: size mismatch",
+ ".tmp_source.zig:3:15: error: unable to convert [5]u8 to []align(1) const u32: size mismatch",
+ ".tmp_source.zig:3:29: note: u32 has size 4; remaining bytes: 1",
);
cases.add(
@@ -2611,17 +3076,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
".tmp_source.zig:2:21: error: expected pointer, found 'usize'",
);
- cases.add(
- "too many error values to cast to small integer",
- \\const Error = error { A, B, C, D, E, F, G, H };
- \\fn foo(e: Error) u2 {
- \\ return u2(e);
- \\}
- \\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
- ,
- ".tmp_source.zig:3:14: error: too many error values to fit in 'u2'",
- );
-
cases.add(
"asm at compile time",
\\comptime {
@@ -3239,18 +3693,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
".tmp_source.zig:3:26: note: '*u32' has alignment 4",
);
- cases.add(
- "increase pointer alignment in slice resize",
- \\export fn entry() u32 {
- \\ var bytes = []u8{0x01, 0x02, 0x03, 0x04};
- \\ return ([]u32)(bytes[0..])[0];
- \\}
- ,
- ".tmp_source.zig:3:19: error: cast increases pointer alignment",
- ".tmp_source.zig:3:19: note: '[]u8' has alignment 1",
- ".tmp_source.zig:3:19: note: '[]u32' has alignment 4",
- );
-
cases.add(
"@alignCast expects pointer or slice",
\\export fn entry() void {
@@ -3722,22 +4164,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
".tmp_source.zig:9:22: error: expected type 'u2', found 'Small'",
);
- cases.add(
- "explicitly casting enum to non tag type",
- \\const Small = enum(u2) {
- \\ One,
- \\ Two,
- \\ Three,
- \\ Four,
- \\};
- \\
- \\export fn entry() void {
- \\ var x = u3(Small.Two);
- \\}
- ,
- ".tmp_source.zig:9:15: error: enum to integer cast to 'u3' instead of its tag type, 'u2'",
- );
-
cases.add(
"explicitly casting non tag type to enum",
\\const Small = enum(u2) {
@@ -3749,10 +4175,10 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\export fn entry() void {
\\ var y = u3(3);
- \\ var x = Small(y);
+ \\ var x = @intToEnum(Small, y);
\\}
,
- ".tmp_source.zig:10:18: error: integer to enum cast from 'u3' instead of its tag type, 'u2'",
+ ".tmp_source.zig:10:31: error: expected type 'u2', found 'u3'",
);
cases.add(
@@ -4033,10 +4459,10 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ B = 11,
\\};
\\export fn entry() void {
- \\ var x = Foo(0);
+ \\ var x = @intToEnum(Foo, 0);
\\}
,
- ".tmp_source.zig:6:16: error: enum 'Foo' has no tag matching integer value 0",
+ ".tmp_source.zig:6:13: error: enum 'Foo' has no tag matching integer value 0",
".tmp_source.zig:1:13: note: 'Foo' declared here",
);
diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig
index 96384066e5..3d58dfe748 100644
--- a/test/runtime_safety.zig
+++ b/test/runtime_safety.zig
@@ -1,6 +1,63 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompareOutputContext) void {
+ cases.addRuntimeSafety("@intToEnum - no matching tag value",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\const Foo = enum {
+ \\ A,
+ \\ B,
+ \\ C,
+ \\};
+ \\pub fn main() void {
+ \\ baz(bar(3));
+ \\}
+ \\fn bar(a: u2) Foo {
+ \\ return @intToEnum(Foo, a);
+ \\}
+ \\fn baz(a: Foo) void {}
+ );
+
+ cases.addRuntimeSafety("@floatToInt cannot fit - negative to unsigned",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\pub fn main() void {
+ \\ baz(bar(-1.1));
+ \\}
+ \\fn bar(a: f32) u8 {
+ \\ return @floatToInt(u8, a);
+ \\}
\\fn baz(a: u8) void {}
+ );
+
+ cases.addRuntimeSafety("@floatToInt cannot fit - negative out of range",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\pub fn main() void {
+ \\ baz(bar(-129.1));
+ \\}
+ \\fn bar(a: f32) i8 {
+ \\ return @floatToInt(i8, a);
+ \\}
\\fn baz(a: i8) void {}
+ );
+
+ cases.addRuntimeSafety("@floatToInt cannot fit - positive out of range",
+ \\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
+ \\ @import("std").os.exit(126);
+ \\}
+ \\pub fn main() void {
+ \\ baz(bar(256.2));
+ \\}
+ \\fn bar(a: f32) u8 {
+ \\ return @floatToInt(u8, a);
+ \\}
\\fn baz(a: u8) void {}
+ );
+
cases.addRuntimeSafety("calling panic",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
@@ -175,7 +232,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ if (x.len == 0) return error.Whatever;
\\}
\\fn widenSlice(slice: []align(1) const u8) []align(1) const i32 {
- \\ return ([]align(1) const i32)(slice);
+ \\ return @bytesToSlice(i32, slice);
\\}
);
@@ -227,12 +284,12 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\pub fn main() void {
\\ _ = bar(9999);
\\}
- \\fn bar(x: u32) error {
- \\ return error(x);
+ \\fn bar(x: u16) error {
+ \\ return @intToError(x);
\\}
);
- cases.addRuntimeSafety("cast integer to non-global error set and no match",
+ cases.addRuntimeSafety("@errSetCast error not present in destination",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
@@ -242,7 +299,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ _ = foo(Set1.B);
\\}
\\fn foo(set1: Set1) Set2 {
- \\ return Set2(set1);
+ \\ return @errSetCast(Set2, set1);
\\}
);
@@ -252,12 +309,12 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\}
\\pub fn main() !void {
\\ var array align(4) = []u32{0x11111111, 0x11111111};
- \\ const bytes = ([]u8)(array[0..]);
+ \\ const bytes = @sliceToBytes(array[0..]);
\\ if (foo(bytes) != 0x11111111) return error.Wrong;
\\}
\\fn foo(bytes: []u8) u32 {
\\ const slice4 = bytes[1..5];
- \\ const int_slice = ([]u32)(@alignCast(4, slice4));
+ \\ const int_slice = @bytesToSlice(u32, @alignCast(4, slice4));
\\ return int_slice[0];
\\}
);
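The slice reinterpretation casts become builtins too: `@sliceToBytes` flattens a slice to `[]u8`, and `@bytesToSlice(T, bytes)` goes back, with safety checks that the byte length divides evenly by `@sizeOf(T)` and that alignment holds. The first hunk's `widenSlice` trips the remainder check; the second trips the `@alignCast(4, ...)` check, since `bytes[1..5]` starts at an odd offset. A well-behaved round trip, sketched under the same era's syntax:

    pub fn main() !void {
        var array align(4) = []u32{ 0x11111111, 0x22222222 };
        const bytes = @sliceToBytes(array[0..]); // []u8 with len 8
        const ints = @bytesToSlice(u32, bytes); // back to []u32 with len 2
        if (ints[1] != 0x22222222) return error.Wrong;
    }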
diff --git a/test/tests.zig b/test/tests.zig
index b66441f628..66eb2d93a0 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -48,13 +48,12 @@ const test_targets = []TestTarget{
const max_stdout_size = 1 * 1024 * 1024; // 1 MB
pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(CompareOutputContext) catch unreachable;
- cases.* = CompareOutputContext{
+ const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-compare-output", "Run the compare output tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
compare_output.addCases(cases);
@@ -62,13 +61,12 @@ pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build
}
pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(CompareOutputContext) catch unreachable;
- cases.* = CompareOutputContext{
+ const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-runtime-safety", "Run the runtime safety tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
runtime_safety.addCases(cases);
@@ -76,13 +74,12 @@ pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build
}
pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(CompileErrorContext) catch unreachable;
- cases.* = CompileErrorContext{
+ const cases = b.allocator.create(CompileErrorContext{
.b = b,
.step = b.step("test-compile-errors", "Run the compile error tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
compile_errors.addCases(cases);
@@ -90,13 +87,12 @@ pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8) *build.
}
pub fn addBuildExampleTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(BuildExamplesContext) catch unreachable;
- cases.* = BuildExamplesContext{
+ const cases = b.allocator.create(BuildExamplesContext{
.b = b,
.step = b.step("test-build-examples", "Build the examples"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
build_examples.addCases(cases);
@@ -104,13 +100,12 @@ pub fn addBuildExampleTests(b: *build.Builder, test_filter: ?[]const u8) *build.
}
pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(CompareOutputContext) catch unreachable;
- cases.* = CompareOutputContext{
+ const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-asm-link", "Run the assemble and link tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
assemble_and_link.addCases(cases);
@@ -118,13 +113,12 @@ pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8) *bui
}
pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(TranslateCContext) catch unreachable;
- cases.* = TranslateCContext{
+ const cases = b.allocator.create(TranslateCContext{
.b = b,
.step = b.step("test-translate-c", "Run the C transation tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
translate_c.addCases(cases);
@@ -132,13 +126,12 @@ pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.St
}
pub fn addGenHTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
- const cases = b.allocator.create(GenHContext) catch unreachable;
- cases.* = GenHContext{
+ const cases = b.allocator.create(GenHContext{
.b = b,
.step = b.step("test-gen-h", "Run the C header file generation tests"),
.test_index = 0,
.test_filter = test_filter,
- };
+ }) catch unreachable;
gen_h.addCases(cases);
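Every addXTests function in this file receives the same mechanical rewrite: rather than allocating and then assigning a struct literal through the returned pointer, the literal is passed straight to `create`, which in the Allocator API this patch targets evidently copies the initializer into fresh memory and returns a pointer to the copy. The two shapes side by side, with `T`, `.field`, and `value` as placeholders:

    // Before: allocate uninitialized memory, then initialize through the pointer.
    const old_ptr = allocator.create(T) catch unreachable;
    old_ptr.* = T{ .field = value };

    // After: create copies the given initializer in one step.
    const new_ptr = allocator.create(T{ .field = value }) catch unreachable;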
@@ -240,8 +233,7 @@ pub const CompareOutputContext = struct {
pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8, expected_output: []const u8, cli_args: []const []const u8) *RunCompareOutputStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(RunCompareOutputStep) catch unreachable;
- ptr.* = RunCompareOutputStep{
+ const ptr = allocator.create(RunCompareOutputStep{
.context = context,
.exe_path = exe_path,
.name = name,
@@ -249,7 +241,7 @@ pub const CompareOutputContext = struct {
.test_index = context.test_index,
.step = build.Step.init("RunCompareOutput", allocator, make),
.cli_args = cli_args,
- };
+ }) catch unreachable;
context.test_index += 1;
return ptr;
}
@@ -328,14 +320,14 @@ pub const CompareOutputContext = struct {
pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8) *RuntimeSafetyRunStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(RuntimeSafetyRunStep) catch unreachable;
- ptr.* = RuntimeSafetyRunStep{
+ const ptr = allocator.create(RuntimeSafetyRunStep{
.context = context,
.exe_path = exe_path,
.name = name,
.test_index = context.test_index,
.step = build.Step.init("RuntimeSafetyRun", allocator, make),
- };
+ }) catch unreachable;
+
context.test_index += 1;
return ptr;
}
@@ -543,15 +535,15 @@ pub const CompileErrorContext = struct {
pub fn create(context: *CompileErrorContext, name: []const u8, case: *const TestCase, build_mode: Mode) *CompileCmpOutputStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(CompileCmpOutputStep) catch unreachable;
- ptr.* = CompileCmpOutputStep{
+ const ptr = allocator.create(CompileCmpOutputStep{
.step = build.Step.init("CompileCmpOutput", allocator, make),
.context = context,
.name = name,
.test_index = context.test_index,
.case = case,
.build_mode = build_mode,
- };
+ }) catch unreachable;
+
context.test_index += 1;
return ptr;
}
@@ -662,14 +654,14 @@ pub const CompileErrorContext = struct {
}
pub fn create(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
- const tc = self.b.allocator.create(TestCase) catch unreachable;
- tc.* = TestCase{
+ const tc = self.b.allocator.create(TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_errors = ArrayList([]const u8).init(self.b.allocator),
.link_libc = false,
.is_exe = false,
- };
+ }) catch unreachable;
+
tc.addSourceFile(".tmp_source.zig", source);
comptime var arg_i = 0;
inline while (arg_i < expected_lines.len) : (arg_i += 1) {
@@ -829,14 +821,14 @@ pub const TranslateCContext = struct {
pub fn create(context: *TranslateCContext, name: []const u8, case: *const TestCase) *TranslateCCmpOutputStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(TranslateCCmpOutputStep) catch unreachable;
- ptr.* = TranslateCCmpOutputStep{
+ const ptr = allocator.create(TranslateCCmpOutputStep{
.step = build.Step.init("ParseCCmpOutput", allocator, make),
.context = context,
.name = name,
.test_index = context.test_index,
.case = case,
- };
+ }) catch unreachable;
+
context.test_index += 1;
return ptr;
}
@@ -936,13 +928,13 @@ pub const TranslateCContext = struct {
}
pub fn create(self: *TranslateCContext, allow_warnings: bool, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
- const tc = self.b.allocator.create(TestCase) catch unreachable;
- tc.* = TestCase{
+ const tc = self.b.allocator.create(TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_lines = ArrayList([]const u8).init(self.b.allocator),
.allow_warnings = allow_warnings,
- };
+ }) catch unreachable;
+
tc.addSourceFile(filename, source);
comptime var arg_i = 0;
inline while (arg_i < expected_lines.len) : (arg_i += 1) {
@@ -1023,15 +1015,15 @@ pub const GenHContext = struct {
pub fn create(context: *GenHContext, h_path: []const u8, name: []const u8, case: *const TestCase) *GenHCmpOutputStep {
const allocator = context.b.allocator;
- const ptr = allocator.create(GenHCmpOutputStep) catch unreachable;
- ptr.* = GenHCmpOutputStep{
+ const ptr = allocator.create(GenHCmpOutputStep{
.step = build.Step.init("ParseCCmpOutput", allocator, make),
.context = context,
.h_path = h_path,
.name = name,
.test_index = context.test_index,
.case = case,
- };
+ }) catch unreachable;
+
context.test_index += 1;
return ptr;
}
@@ -1070,12 +1062,12 @@ pub const GenHContext = struct {
}
pub fn create(self: *GenHContext, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
- const tc = self.b.allocator.create(TestCase) catch unreachable;
- tc.* = TestCase{
+ const tc = self.b.allocator.create(TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_lines = ArrayList([]const u8).init(self.b.allocator),
- };
+ }) catch unreachable;
+
tc.addSourceFile(filename, source);
comptime var arg_i = 0;
inline while (arg_i < expected_lines.len) : (arg_i += 1) {
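The `expected_lines: ...` parameter on these create functions is the old-style varargs, which can only be indexed at compile time; hence the `comptime var` counter and the `inline while`, which unrolls into one call per argument. The loop bodies fall outside the hunks shown, but the pattern they complete presumably looks like this (the method name is a guess from the surrounding fields):

    comptime var arg_i = 0;
    inline while (arg_i < expected_lines.len) : (arg_i += 1) {
        // Unrolled at compile time: one append per vararg.
        tc.addExpectedLine(expected_lines[arg_i]); // hypothetical method name
    }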