Merge remote-tracking branch 'origin/master' into llvm7

This commit is contained in:
Andrew Kelley 2018-08-25 21:57:28 -04:00
commit 7109035b78
91 changed files with 7312 additions and 2407 deletions

View File

@ -463,11 +463,14 @@ set(ZIG_STD_FILES
"empty.zig" "empty.zig"
"event.zig" "event.zig"
"event/channel.zig" "event/channel.zig"
"event/fs.zig"
"event/future.zig" "event/future.zig"
"event/group.zig" "event/group.zig"
"event/lock.zig" "event/lock.zig"
"event/locked.zig" "event/locked.zig"
"event/loop.zig" "event/loop.zig"
"event/rwlock.zig"
"event/rwlocked.zig"
"event/tcp.zig" "event/tcp.zig"
"fmt/errol/enum3.zig" "fmt/errol/enum3.zig"
"fmt/errol/index.zig" "fmt/errol/index.zig"
@ -556,6 +559,7 @@ set(ZIG_STD_FILES
"math/tanh.zig" "math/tanh.zig"
"math/trunc.zig" "math/trunc.zig"
"mem.zig" "mem.zig"
"mutex.zig"
"net.zig" "net.zig"
"os/child_process.zig" "os/child_process.zig"
"os/darwin.zig" "os/darwin.zig"

View File

@ -19,7 +19,7 @@ pub fn build(b: *Builder) !void {
var docgen_cmd = b.addCommand(null, b.env_map, [][]const u8{ var docgen_cmd = b.addCommand(null, b.env_map, [][]const u8{
docgen_exe.getOutputPath(), docgen_exe.getOutputPath(),
rel_zig_exe, rel_zig_exe,
"doc/langref.html.in", "doc" ++ os.path.sep_str ++ "langref.html.in",
os.path.join(b.allocator, b.cache_root, "langref.html") catch unreachable, os.path.join(b.allocator, b.cache_root, "langref.html") catch unreachable,
}); });
docgen_cmd.step.dependOn(&docgen_exe.step); docgen_cmd.step.dependOn(&docgen_exe.step);

View File

@ -8,7 +8,7 @@
# LLVM_LIBDIRS # LLVM_LIBDIRS
find_program(LLVM_CONFIG_EXE find_program(LLVM_CONFIG_EXE
NAMES llvm-config-7.0 llvm-config NAMES llvm-config llvm-config-7.0
PATHS PATHS
"/mingw64/bin" "/mingw64/bin"
"/c/msys64/mingw64/bin" "/c/msys64/mingw64/bin"

View File

@ -34,10 +34,10 @@ pub fn main() !void {
const out_file_name = try (args_it.next(allocator) orelse @panic("expected output arg")); const out_file_name = try (args_it.next(allocator) orelse @panic("expected output arg"));
defer allocator.free(out_file_name); defer allocator.free(out_file_name);
var in_file = try os.File.openRead(allocator, in_file_name); var in_file = try os.File.openRead(in_file_name);
defer in_file.close(); defer in_file.close();
var out_file = try os.File.openWrite(allocator, out_file_name); var out_file = try os.File.openWrite(out_file_name);
defer out_file.close(); defer out_file.close();
var file_in_stream = io.FileInStream.init(&in_file); var file_in_stream = io.FileInStream.init(&in_file);
@ -370,9 +370,9 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
.n = header_stack_size, .n = header_stack_size,
}, },
}); });
if (try urls.put(urlized, tag_token)) |other_tag_token| { if (try urls.put(urlized, tag_token)) |entry| {
parseError(tokenizer, tag_token, "duplicate header url: #{}", urlized) catch {}; parseError(tokenizer, tag_token, "duplicate header url: #{}", urlized) catch {};
parseError(tokenizer, other_tag_token, "other tag here") catch {}; parseError(tokenizer, entry.value, "other tag here") catch {};
return error.ParseError; return error.ParseError;
} }
if (last_action == Action.Open) { if (last_action == Action.Open) {
@ -738,7 +738,7 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: var
try out.print("<pre><code class=\"zig\">{}</code></pre>", escaped_source); try out.print("<pre><code class=\"zig\">{}</code></pre>", escaped_source);
const name_plus_ext = try std.fmt.allocPrint(allocator, "{}.zig", code.name); const name_plus_ext = try std.fmt.allocPrint(allocator, "{}.zig", code.name);
const tmp_source_file_name = try os.path.join(allocator, tmp_dir_name, name_plus_ext); const tmp_source_file_name = try os.path.join(allocator, tmp_dir_name, name_plus_ext);
try io.writeFile(allocator, tmp_source_file_name, trimmed_raw_source); try io.writeFile(tmp_source_file_name, trimmed_raw_source);
switch (code.id) { switch (code.id) {
Code.Id.Exe => |expected_outcome| { Code.Id.Exe => |expected_outcome| {

View File

@ -247,66 +247,6 @@ pub fn main() void {
Description Description
</th> </th>
</tr> </tr>
<tr>
<td><code>i2</code></td>
<td><code>(none)</code></td>
<td>signed 2-bit integer</td>
</tr>
<tr>
<td><code>u2</code></td>
<td><code>(none)</code></td>
<td>unsigned 2-bit integer</td>
</tr>
<tr>
<td><code>i3</code></td>
<td><code>(none)</code></td>
<td>signed 3-bit integer</td>
</tr>
<tr>
<td><code>u3</code></td>
<td><code>(none)</code></td>
<td>unsigned 3-bit integer</td>
</tr>
<tr>
<td><code>i4</code></td>
<td><code>(none)</code></td>
<td>signed 4-bit integer</td>
</tr>
<tr>
<td><code>u4</code></td>
<td><code>(none)</code></td>
<td>unsigned 4-bit integer</td>
</tr>
<tr>
<td><code>i5</code></td>
<td><code>(none)</code></td>
<td>signed 5-bit integer</td>
</tr>
<tr>
<td><code>u5</code></td>
<td><code>(none)</code></td>
<td>unsigned 5-bit integer</td>
</tr>
<tr>
<td><code>i6</code></td>
<td><code>(none)</code></td>
<td>signed 6-bit integer</td>
</tr>
<tr>
<td><code>u6</code></td>
<td><code>(none)</code></td>
<td>unsigned 6-bit integer</td>
</tr>
<tr>
<td><code>i7</code></td>
<td><code>(none)</code></td>
<td>signed 7-bit integer</td>
</tr>
<tr>
<td><code>u7</code></td>
<td><code>(none)</code></td>
<td>unsigned 7-bit integer</td>
</tr>
<tr> <tr>
<td><code>i8</code></td> <td><code>i8</code></td>
<td><code>int8_t</code></td> <td><code>int8_t</code></td>
@ -476,6 +416,11 @@ pub fn main() void {
</tr> </tr>
</table> </table>
</div> </div>
<p>
In addition to the integer types above, arbitrary bit-width integers can be referenced by using
an identifier of <code>i</code> or </code>u</code> followed by digits. For example, the identifier
<code>i7</code> refers to a signed 7-bit integer.
</p>
{#see_also|Integers|Floats|void|Errors#} {#see_also|Integers|Floats|void|Errors#}
{#header_close#} {#header_close#}
{#header_open|Primitive Values#} {#header_open|Primitive Values#}
@ -744,19 +689,19 @@ const yet_another_hex_float = 0x103.70P-5;
{#code_end#} {#code_end#}
{#header_close#} {#header_close#}
{#header_open|Floating Point Operations#} {#header_open|Floating Point Operations#}
<p>By default floating point operations use <code>Optimized</code> mode, <p>By default floating point operations use <code>Strict</code> mode,
but you can switch to <code>Strict</code> mode on a per-block basis:</p> but you can switch to <code>Optimized</code> mode on a per-block basis:</p>
{#code_begin|obj|foo#} {#code_begin|obj|foo#}
{#code_release_fast#} {#code_release_fast#}
const builtin = @import("builtin"); const builtin = @import("builtin");
const big = f64(1 << 40); const big = f64(1 << 40);
export fn foo_strict(x: f64) f64 { export fn foo_strict(x: f64) f64 {
@setFloatMode(this, builtin.FloatMode.Strict);
return x + big - big; return x + big - big;
} }
export fn foo_optimized(x: f64) f64 { export fn foo_optimized(x: f64) f64 {
@setFloatMode(this, builtin.FloatMode.Optimized);
return x + big - big; return x + big - big;
} }
{#code_end#} {#code_end#}
@ -809,6 +754,8 @@ a += b</code></pre></td>
<td>Addition. <td>Addition.
<ul> <ul>
<li>Can cause {#link|overflow|Default Operations#} for integers.</li> <li>Can cause {#link|overflow|Default Operations#} for integers.</li>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
<li>See also {#link|@addWithOverflow#}.</li>
</ul> </ul>
</td> </td>
<td> <td>
@ -826,6 +773,8 @@ a +%= b</code></pre></td>
<td>Wrapping Addition. <td>Wrapping Addition.
<ul> <ul>
<li>Guaranteed to have twos-complement wrapping behavior.</li> <li>Guaranteed to have twos-complement wrapping behavior.</li>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
<li>See also {#link|@addWithOverflow#}.</li>
</ul> </ul>
</td> </td>
<td> <td>
@ -844,6 +793,8 @@ a -= b</code></pre></td>
<td>Subtraction. <td>Subtraction.
<ul> <ul>
<li>Can cause {#link|overflow|Default Operations#} for integers.</li> <li>Can cause {#link|overflow|Default Operations#} for integers.</li>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
<li>See also {#link|@subWithOverflow#}.</li>
</ul> </ul>
</td> </td>
<td> <td>
@ -861,6 +812,8 @@ a -%= b</code></pre></td>
<td>Wrapping Subtraction. <td>Wrapping Subtraction.
<ul> <ul>
<li>Guaranteed to have twos-complement wrapping behavior.</li> <li>Guaranteed to have twos-complement wrapping behavior.</li>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
<li>See also {#link|@subWithOverflow#}.</li>
</ul> </ul>
</td> </td>
<td> <td>
@ -914,6 +867,8 @@ a *= b</code></pre></td>
<td>Multiplication. <td>Multiplication.
<ul> <ul>
<li>Can cause {#link|overflow|Default Operations#} for integers.</li> <li>Can cause {#link|overflow|Default Operations#} for integers.</li>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
<li>See also {#link|@mulWithOverflow#}.</li>
</ul> </ul>
</td> </td>
<td> <td>
@ -931,6 +886,8 @@ a *%= b</code></pre></td>
<td>Wrapping Multiplication. <td>Wrapping Multiplication.
<ul> <ul>
<li>Guaranteed to have twos-complement wrapping behavior.</li> <li>Guaranteed to have twos-complement wrapping behavior.</li>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
<li>See also {#link|@mulWithOverflow#}.</li>
</ul> </ul>
</td> </td>
<td> <td>
@ -956,6 +913,7 @@ a /= b</code></pre></td>
{#link|@divFloor#}, or {#link|@divFloor#}, or
{#link|@divExact#} instead of <code>/</code>. {#link|@divExact#} instead of <code>/</code>.
</li> </li>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
</ul> </ul>
</td> </td>
<td> <td>
@ -979,6 +937,7 @@ a %= b</code></pre></td>
{#link|@rem#} or {#link|@rem#} or
{#link|@mod#} instead of <code>%</code>. {#link|@mod#} instead of <code>%</code>.
</li> </li>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
</ul> </ul>
</td> </td>
<td> <td>
@ -995,6 +954,7 @@ a &lt;&lt;= b</code></pre></td>
</td> </td>
<td>Bit Shift Left. <td>Bit Shift Left.
<ul> <ul>
<li><code>b</code> must be {#link|comptime-known|comptime#} or have a type with log2 number of bits as <code>a</code>.</li>
<li>See also {#link|@shlExact#}.</li> <li>See also {#link|@shlExact#}.</li>
<li>See also {#link|@shlWithOverflow#}.</li> <li>See also {#link|@shlWithOverflow#}.</li>
</ul> </ul>
@ -1013,6 +973,7 @@ a &gt;&gt;= b</code></pre></td>
</td> </td>
<td>Bit Shift Right. <td>Bit Shift Right.
<ul> <ul>
<li><code>b</code> must be {#link|comptime-known|comptime#} or have a type with log2 number of bits as <code>a</code>.</li>
<li>See also {#link|@shrExact#}.</li> <li>See also {#link|@shrExact#}.</li>
</ul> </ul>
</td> </td>
@ -1029,6 +990,9 @@ a &amp;= b</code></pre></td>
</ul> </ul>
</td> </td>
<td>Bitwise AND. <td>Bitwise AND.
<ul>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
</ul>
</td> </td>
<td> <td>
<pre><code class="zig">0b011 &amp; 0b101 == 0b001</code></pre> <pre><code class="zig">0b011 &amp; 0b101 == 0b001</code></pre>
@ -1043,6 +1007,9 @@ a |= b</code></pre></td>
</ul> </ul>
</td> </td>
<td>Bitwise OR. <td>Bitwise OR.
<ul>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
</ul>
</td> </td>
<td> <td>
<pre><code class="zig">0b010 | 0b100 == 0b110</code></pre> <pre><code class="zig">0b010 | 0b100 == 0b110</code></pre>
@ -1057,6 +1024,9 @@ a ^= b</code></pre></td>
</ul> </ul>
</td> </td>
<td>Bitwise XOR. <td>Bitwise XOR.
<ul>
<li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
</ul>
</td> </td>
<td> <td>
<pre><code class="zig">0b011 ^ 0b101 == 0b110</code></pre> <pre><code class="zig">0b011 ^ 0b101 == 0b110</code></pre>
@ -1186,6 +1156,7 @@ unwrapped == 1234</code></pre>
</td> </td>
<td> <td>
Returns <code>true</code> if a and b are equal, otherwise returns <code>false</code>. Returns <code>true</code> if a and b are equal, otherwise returns <code>false</code>.
Invokes {#link|Peer Type Resolution#} for the operands.
</td> </td>
<td> <td>
<pre><code class="zig">(1 == 1) == true</code></pre> <pre><code class="zig">(1 == 1) == true</code></pre>
@ -1218,6 +1189,7 @@ value == null</code></pre>
</td> </td>
<td> <td>
Returns <code>false</code> if a and b are equal, otherwise returns <code>true</code>. Returns <code>false</code> if a and b are equal, otherwise returns <code>true</code>.
Invokes {#link|Peer Type Resolution#} for the operands.
</td> </td>
<td> <td>
<pre><code class="zig">(1 != 1) == false</code></pre> <pre><code class="zig">(1 != 1) == false</code></pre>
@ -1233,6 +1205,7 @@ value == null</code></pre>
</td> </td>
<td> <td>
Returns <code>true</code> if a is greater than b, otherwise returns <code>false</code>. Returns <code>true</code> if a is greater than b, otherwise returns <code>false</code>.
Invokes {#link|Peer Type Resolution#} for the operands.
</td> </td>
<td> <td>
<pre><code class="zig">(2 &gt; 1) == true</code></pre> <pre><code class="zig">(2 &gt; 1) == true</code></pre>
@ -1248,6 +1221,7 @@ value == null</code></pre>
</td> </td>
<td> <td>
Returns <code>true</code> if a is greater than or equal to b, otherwise returns <code>false</code>. Returns <code>true</code> if a is greater than or equal to b, otherwise returns <code>false</code>.
Invokes {#link|Peer Type Resolution#} for the operands.
</td> </td>
<td> <td>
<pre><code class="zig">(2 &gt;= 1) == true</code></pre> <pre><code class="zig">(2 &gt;= 1) == true</code></pre>
@ -1263,6 +1237,7 @@ value == null</code></pre>
</td> </td>
<td> <td>
Returns <code>true</code> if a is less than b, otherwise returns <code>false</code>. Returns <code>true</code> if a is less than b, otherwise returns <code>false</code>.
Invokes {#link|Peer Type Resolution#} for the operands.
</td> </td>
<td> <td>
<pre><code class="zig">(1 &lt; 2) == true</code></pre> <pre><code class="zig">(1 &lt; 2) == true</code></pre>
@ -1278,6 +1253,7 @@ value == null</code></pre>
</td> </td>
<td> <td>
Returns <code>true</code> if a is less than or equal to b, otherwise returns <code>false</code>. Returns <code>true</code> if a is less than or equal to b, otherwise returns <code>false</code>.
Invokes {#link|Peer Type Resolution#} for the operands.
</td> </td>
<td> <td>
<pre><code class="zig">(1 &lt;= 2) == true</code></pre> <pre><code class="zig">(1 &lt;= 2) == true</code></pre>
@ -3807,6 +3783,7 @@ test "float widening" {
<p>TODO: [N]T to ?[]const T</p> <p>TODO: [N]T to ?[]const T</p>
<p>TODO: *[N]T to []T</p> <p>TODO: *[N]T to []T</p>
<p>TODO: *[N]T to [*]T</p> <p>TODO: *[N]T to [*]T</p>
<p>TODO: *[N]T to ?[*]T</p>
<p>TODO: *T to *[1]T</p> <p>TODO: *T to *[1]T</p>
<p>TODO: [N]T to E![]const T</p> <p>TODO: [N]T to E![]const T</p>
{#header_close#} {#header_close#}
@ -3877,7 +3854,106 @@ test "float widening" {
{#header_close#} {#header_close#}
{#header_open|Peer Type Resolution#} {#header_open|Peer Type Resolution#}
<p>TODO</p> <p>Peer Type Resolution occurs in these places:</p>
<ul>
<li>{#link|switch#} expressions</li>
<li>{#link|if#} expressions</li>
<li>{#link|while#} expressions</li>
<li>{#link|for#} expressions</li>
<li>Multiple break statements in a block</li>
<li>Some {#link|binary operations|Table of Operators#}</li>
</ul>
<p>
This kind of type resolution chooses a type that all peer types can implicitly cast into. Here are
some examples:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
test "peer resolve int widening" {
var a: i8 = 12;
var b: i16 = 34;
var c = a + b;
assert(c == 46);
assert(@typeOf(c) == i16);
}
test "peer resolve arrays of different size to const slice" {
assert(mem.eql(u8, boolToStr(true), "true"));
assert(mem.eql(u8, boolToStr(false), "false"));
comptime assert(mem.eql(u8, boolToStr(true), "true"));
comptime assert(mem.eql(u8, boolToStr(false), "false"));
}
fn boolToStr(b: bool) []const u8 {
return if (b) "true" else "false";
}
test "peer resolve array and const slice" {
testPeerResolveArrayConstSlice(true);
comptime testPeerResolveArrayConstSlice(true);
}
fn testPeerResolveArrayConstSlice(b: bool) void {
const value1 = if (b) "aoeu" else ([]const u8)("zz");
const value2 = if (b) ([]const u8)("zz") else "aoeu";
assert(mem.eql(u8, value1, "aoeu"));
assert(mem.eql(u8, value2, "zz"));
}
test "peer type resolution: ?T and T" {
assert(peerTypeTAndOptionalT(true, false).? == 0);
assert(peerTypeTAndOptionalT(false, false).? == 3);
comptime {
assert(peerTypeTAndOptionalT(true, false).? == 0);
assert(peerTypeTAndOptionalT(false, false).? == 3);
}
}
fn peerTypeTAndOptionalT(c: bool, b: bool) ?usize {
if (c) {
return if (b) null else usize(0);
}
return usize(3);
}
test "peer type resolution: [0]u8 and []const u8" {
assert(peerTypeEmptyArrayAndSlice(true, "hi").len == 0);
assert(peerTypeEmptyArrayAndSlice(false, "hi").len == 1);
comptime {
assert(peerTypeEmptyArrayAndSlice(true, "hi").len == 0);
assert(peerTypeEmptyArrayAndSlice(false, "hi").len == 1);
}
}
fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
if (a) {
return []const u8{};
}
return slice[0..1];
}
test "peer type resolution: [0]u8, []const u8, and error![]u8" {
{
var data = "hi";
const slice = data[0..];
assert((try peerTypeEmptyArrayAndSliceAndError(true, slice)).len == 0);
assert((try peerTypeEmptyArrayAndSliceAndError(false, slice)).len == 1);
}
comptime {
var data = "hi";
const slice = data[0..];
assert((try peerTypeEmptyArrayAndSliceAndError(true, slice)).len == 0);
assert((try peerTypeEmptyArrayAndSliceAndError(false, slice)).len == 1);
}
}
fn peerTypeEmptyArrayAndSliceAndError(a: bool, slice: []u8) error![]u8 {
if (a) {
return []u8{};
}
return slice[0..1];
}
{#code_end#}
{#header_close#} {#header_close#}
{#header_close#} {#header_close#}
@ -4705,10 +4781,7 @@ async fn testSuspendBlock() void {
<p> <p>
{#link|Await#} counts as a suspend point. {#link|Await#} counts as a suspend point.
</p> </p>
{#header_open|Breaking from Suspend Blocks#} {#header_open|Resuming from Suspend Blocks#}
<p>
Suspend blocks support labeled break, just like {#link|while#} and {#link|for#}.
</p>
<p> <p>
Upon entering a <code>suspend</code> block, the coroutine is already considered Upon entering a <code>suspend</code> block, the coroutine is already considered
suspended, and can be resumed. For example, if you started another kernel thread, suspended, and can be resumed. For example, if you started another kernel thread,
@ -4741,6 +4814,9 @@ async fn testResumeFromSuspend(my_result: *i32) void {
my_result.* += 1; my_result.* += 1;
} }
{#code_end#} {#code_end#}
<p>
This is guaranteed to be a tail call, and therefore will not cause a new stack frame.
</p>
{#header_close#} {#header_close#}
{#header_close#} {#header_close#}
{#header_open|Await#} {#header_open|Await#}
@ -5527,7 +5603,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
<p>Returns the field type of a struct or union.</p> <p>Returns the field type of a struct or union.</p>
{#header_close#} {#header_close#}
{#header_open|@memcpy#} {#header_open|@memcpy#}
<pre><code class="zig">@memcpy(noalias dest: *u8, noalias source: *const u8, byte_count: usize)</code></pre> <pre><code class="zig">@memcpy(noalias dest: [*]u8, noalias source: [*]const u8, byte_count: usize)</code></pre>
<p> <p>
This function copies bytes from one region of memory to another. <code>dest</code> and This function copies bytes from one region of memory to another. <code>dest</code> and
<code>source</code> are both pointers and must not overlap. <code>source</code> are both pointers and must not overlap.
@ -5545,7 +5621,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
mem.copy(u8, dest[0...byte_count], source[0...byte_count]);</code></pre> mem.copy(u8, dest[0...byte_count], source[0...byte_count]);</code></pre>
{#header_close#} {#header_close#}
{#header_open|@memset#} {#header_open|@memset#}
<pre><code class="zig">@memset(dest: *u8, c: u8, byte_count: usize)</code></pre> <pre><code class="zig">@memset(dest: [*]u8, c: u8, byte_count: usize)</code></pre>
<p> <p>
This function sets a region of memory to <code>c</code>. <code>dest</code> is a pointer. This function sets a region of memory to <code>c</code>. <code>dest</code> is a pointer.
</p> </p>
@ -5817,7 +5893,7 @@ pub const FloatMode = enum {
{#code_end#} {#code_end#}
<ul> <ul>
<li> <li>
<code>Optimized</code> (default) - Floating point operations may do all of the following: <code>Optimized</code> - Floating point operations may do all of the following:
<ul> <ul>
<li>Assume the arguments and result are not NaN. Optimizations are required to retain defined behavior over NaNs, but the value of the result is undefined.</li> <li>Assume the arguments and result are not NaN. Optimizations are required to retain defined behavior over NaNs, but the value of the result is undefined.</li>
<li>Assume the arguments and result are not +/-Inf. Optimizations are required to retain defined behavior over +/-Inf, but the value of the result is undefined.</li> <li>Assume the arguments and result are not +/-Inf. Optimizations are required to retain defined behavior over +/-Inf, but the value of the result is undefined.</li>
@ -5829,7 +5905,7 @@ pub const FloatMode = enum {
This is equivalent to <code>-ffast-math</code> in GCC. This is equivalent to <code>-ffast-math</code> in GCC.
</li> </li>
<li> <li>
<code>Strict</code> - Floating point operations follow strict IEEE compliance. <code>Strict</code> (default) - Floating point operations follow strict IEEE compliance.
</li> </li>
</ul> </ul>
{#see_also|Floating Point Operations#} {#see_also|Floating Point Operations#}
@ -6035,7 +6111,7 @@ pub const TypeInfo = union(TypeId) {
size: Size, size: Size,
is_const: bool, is_const: bool,
is_volatile: bool, is_volatile: bool,
alignment: u32, alignment: u29,
child: type, child: type,
pub const Size = enum { pub const Size = enum {
@ -7543,8 +7619,8 @@ hljs.registerLanguage("zig", function(t) {
}, },
a = t.IR + "\\s*\\(", a = t.IR + "\\s*\\(",
c = { c = {
keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong resume cancel await async orelse", keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong resume suspend cancel await async orelse",
built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast intCast floatCast intToFloat floatToInt boolToInt bytesToSlice sliceToBytes errSetCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz popCount import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall errorToInt intToError enumToInt intToEnum", built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast intCast floatCast intToFloat floatToInt boolToInt bytesToSlice sliceToBytes errSetCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz popCount import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall errorToInt intToError enumToInt intToEnum handle",
literal: "true false null undefined" literal: "true false null undefined"
}, },
n = [e, t.CLCM, t.CBCM, s, r]; n = [e, t.CLCM, t.CBCM, s, r];

View File

@ -20,7 +20,7 @@ pub fn main() !void {
} else if (arg[0] == '-') { } else if (arg[0] == '-') {
return usage(exe); return usage(exe);
} else { } else {
var file = os.File.openRead(allocator, arg) catch |err| { var file = os.File.openRead(arg) catch |err| {
warn("Unable to open file: {}\n", @errorName(err)); warn("Unable to open file: {}\n", @errorName(err));
return err; return err;
}; };

View File

@ -1,3 +1,12 @@
// TODO Remove this workaround
comptime {
const builtin = @import("builtin");
if (builtin.os == builtin.Os.macosx) {
@export("__mh_execute_header", _mh_execute_header, builtin.GlobalLinkage.Weak);
}
}
var _mh_execute_header = extern struct {x: usize}{.x = 0};
export fn add(a: i32, b: i32) i32 { export fn add(a: i32, b: i32) i32 {
return a + b; return a + b;
} }

View File

@ -19,8 +19,8 @@ pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code)
var output_path = try await (async comp.createRandomOutputPath(comp.target.objFileExt()) catch unreachable); var output_path = try await (async comp.createRandomOutputPath(comp.target.objFileExt()) catch unreachable);
errdefer output_path.deinit(); errdefer output_path.deinit();
const llvm_handle = try comp.event_loop_local.getAnyLlvmContext(); const llvm_handle = try comp.zig_compiler.getAnyLlvmContext();
defer llvm_handle.release(comp.event_loop_local); defer llvm_handle.release(comp.zig_compiler);
const context = llvm_handle.node.data; const context = llvm_handle.node.data;

View File

@ -30,9 +30,12 @@ const Package = @import("package.zig").Package;
const link = @import("link.zig").link; const link = @import("link.zig").link;
const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
const CInt = @import("c_int.zig").CInt; const CInt = @import("c_int.zig").CInt;
const fs = event.fs;
const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB
/// Data that is local to the event loop. /// Data that is local to the event loop.
pub const EventLoopLocal = struct { pub const ZigCompiler = struct {
loop: *event.Loop, loop: *event.Loop,
llvm_handle_pool: std.atomic.Stack(llvm.ContextRef), llvm_handle_pool: std.atomic.Stack(llvm.ContextRef),
lld_lock: event.Lock, lld_lock: event.Lock,
@ -44,7 +47,7 @@ pub const EventLoopLocal = struct {
var lazy_init_targets = std.lazyInit(void); var lazy_init_targets = std.lazyInit(void);
fn init(loop: *event.Loop) !EventLoopLocal { fn init(loop: *event.Loop) !ZigCompiler {
lazy_init_targets.get() orelse { lazy_init_targets.get() orelse {
Target.initializeAll(); Target.initializeAll();
lazy_init_targets.resolve(); lazy_init_targets.resolve();
@ -54,7 +57,7 @@ pub const EventLoopLocal = struct {
try std.os.getRandomBytes(seed_bytes[0..]); try std.os.getRandomBytes(seed_bytes[0..]);
const seed = std.mem.readInt(seed_bytes, u64, builtin.Endian.Big); const seed = std.mem.readInt(seed_bytes, u64, builtin.Endian.Big);
return EventLoopLocal{ return ZigCompiler{
.loop = loop, .loop = loop,
.lld_lock = event.Lock.init(loop), .lld_lock = event.Lock.init(loop),
.llvm_handle_pool = std.atomic.Stack(llvm.ContextRef).init(), .llvm_handle_pool = std.atomic.Stack(llvm.ContextRef).init(),
@ -64,7 +67,7 @@ pub const EventLoopLocal = struct {
} }
/// Must be called only after EventLoop.run completes. /// Must be called only after EventLoop.run completes.
fn deinit(self: *EventLoopLocal) void { fn deinit(self: *ZigCompiler) void {
self.lld_lock.deinit(); self.lld_lock.deinit();
while (self.llvm_handle_pool.pop()) |node| { while (self.llvm_handle_pool.pop()) |node| {
c.LLVMContextDispose(node.data); c.LLVMContextDispose(node.data);
@ -74,7 +77,7 @@ pub const EventLoopLocal = struct {
/// Gets an exclusive handle on any LlvmContext. /// Gets an exclusive handle on any LlvmContext.
/// Caller must release the handle when done. /// Caller must release the handle when done.
pub fn getAnyLlvmContext(self: *EventLoopLocal) !LlvmHandle { pub fn getAnyLlvmContext(self: *ZigCompiler) !LlvmHandle {
if (self.llvm_handle_pool.pop()) |node| return LlvmHandle{ .node = node }; if (self.llvm_handle_pool.pop()) |node| return LlvmHandle{ .node = node };
const context_ref = c.LLVMContextCreate() orelse return error.OutOfMemory; const context_ref = c.LLVMContextCreate() orelse return error.OutOfMemory;
@ -89,24 +92,36 @@ pub const EventLoopLocal = struct {
return LlvmHandle{ .node = node }; return LlvmHandle{ .node = node };
} }
pub async fn getNativeLibC(self: *EventLoopLocal) !*LibCInstallation { pub async fn getNativeLibC(self: *ZigCompiler) !*LibCInstallation {
if (await (async self.native_libc.start() catch unreachable)) |ptr| return ptr; if (await (async self.native_libc.start() catch unreachable)) |ptr| return ptr;
try await (async self.native_libc.data.findNative(self.loop) catch unreachable); try await (async self.native_libc.data.findNative(self.loop) catch unreachable);
self.native_libc.resolve(); self.native_libc.resolve();
return &self.native_libc.data; return &self.native_libc.data;
} }
/// Must be called only once, ever. Sets global state.
pub fn setLlvmArgv(allocator: *Allocator, llvm_argv: []const []const u8) !void {
if (llvm_argv.len != 0) {
var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(allocator, [][]const []const u8{
[][]const u8{"zig (LLVM option parsing)"},
llvm_argv,
});
defer c_compatible_args.deinit();
c.ZigLLVMParseCommandLineOptions(llvm_argv.len + 1, c_compatible_args.ptr);
}
}
}; };
pub const LlvmHandle = struct { pub const LlvmHandle = struct {
node: *std.atomic.Stack(llvm.ContextRef).Node, node: *std.atomic.Stack(llvm.ContextRef).Node,
pub fn release(self: LlvmHandle, event_loop_local: *EventLoopLocal) void { pub fn release(self: LlvmHandle, zig_compiler: *ZigCompiler) void {
event_loop_local.llvm_handle_pool.push(self.node); zig_compiler.llvm_handle_pool.push(self.node);
} }
}; };
pub const Compilation = struct { pub const Compilation = struct {
event_loop_local: *EventLoopLocal, zig_compiler: *ZigCompiler,
loop: *event.Loop, loop: *event.Loop,
name: Buffer, name: Buffer,
llvm_triple: Buffer, llvm_triple: Buffer,
@ -134,7 +149,6 @@ pub const Compilation = struct {
linker_rdynamic: bool, linker_rdynamic: bool,
clang_argv: []const []const u8, clang_argv: []const []const u8,
llvm_argv: []const []const u8,
lib_dirs: []const []const u8, lib_dirs: []const []const u8,
rpath_list: []const []const u8, rpath_list: []const []const u8,
assembly_files: []const []const u8, assembly_files: []const []const u8,
@ -214,6 +228,8 @@ pub const Compilation = struct {
deinit_group: event.Group(void), deinit_group: event.Group(void),
destroy_handle: promise, destroy_handle: promise,
main_loop_handle: promise,
main_loop_future: event.Future(void),
have_err_ret_tracing: bool, have_err_ret_tracing: bool,
@ -227,6 +243,8 @@ pub const Compilation = struct {
c_int_types: [CInt.list.len]*Type.Int, c_int_types: [CInt.list.len]*Type.Int,
fs_watch: *fs.Watch(*Scope.Root),
const IntTypeTable = std.HashMap(*const Type.Int.Key, *Type.Int, Type.Int.Key.hash, Type.Int.Key.eql); const IntTypeTable = std.HashMap(*const Type.Int.Key, *Type.Int, Type.Int.Key.hash, Type.Int.Key.eql);
const ArrayTypeTable = std.HashMap(*const Type.Array.Key, *Type.Array, Type.Array.Key.hash, Type.Array.Key.eql); const ArrayTypeTable = std.HashMap(*const Type.Array.Key, *Type.Array, Type.Array.Key.hash, Type.Array.Key.eql);
const PtrTypeTable = std.HashMap(*const Type.Pointer.Key, *Type.Pointer, Type.Pointer.Key.hash, Type.Pointer.Key.eql); const PtrTypeTable = std.HashMap(*const Type.Pointer.Key, *Type.Pointer, Type.Pointer.Key.hash, Type.Pointer.Key.eql);
@ -239,8 +257,6 @@ pub const Compilation = struct {
pub const BuildError = error{ pub const BuildError = error{
OutOfMemory, OutOfMemory,
EndOfStream, EndOfStream,
BadFd,
Io,
IsDir, IsDir,
Unexpected, Unexpected,
SystemResources, SystemResources,
@ -255,7 +271,6 @@ pub const Compilation = struct {
NameTooLong, NameTooLong,
SystemFdQuotaExceeded, SystemFdQuotaExceeded,
NoDevice, NoDevice,
PathNotFound,
NoSpaceLeft, NoSpaceLeft,
NotDir, NotDir,
FileSystem, FileSystem,
@ -282,6 +297,9 @@ pub const Compilation = struct {
LibCMissingDynamicLinker, LibCMissingDynamicLinker,
InvalidDarwinVersionString, InvalidDarwinVersionString,
UnsupportedLinkArchitecture, UnsupportedLinkArchitecture,
UserResourceLimitReached,
InvalidUtf8,
BadPathName,
}; };
pub const Event = union(enum) { pub const Event = union(enum) {
@ -318,7 +336,7 @@ pub const Compilation = struct {
}; };
pub fn create( pub fn create(
event_loop_local: *EventLoopLocal, zig_compiler: *ZigCompiler,
name: []const u8, name: []const u8,
root_src_path: ?[]const u8, root_src_path: ?[]const u8,
target: Target, target: Target,
@ -327,11 +345,45 @@ pub const Compilation = struct {
is_static: bool, is_static: bool,
zig_lib_dir: []const u8, zig_lib_dir: []const u8,
) !*Compilation { ) !*Compilation {
const loop = event_loop_local.loop; var optional_comp: ?*Compilation = null;
const comp = try event_loop_local.loop.allocator.create(Compilation{ const handle = try async<zig_compiler.loop.allocator> createAsync(
&optional_comp,
zig_compiler,
name,
root_src_path,
target,
kind,
build_mode,
is_static,
zig_lib_dir,
);
return optional_comp orelse if (getAwaitResult(
zig_compiler.loop.allocator,
handle,
)) |_| unreachable else |err| err;
}
async fn createAsync(
out_comp: *?*Compilation,
zig_compiler: *ZigCompiler,
name: []const u8,
root_src_path: ?[]const u8,
target: Target,
kind: Kind,
build_mode: builtin.Mode,
is_static: bool,
zig_lib_dir: []const u8,
) !void {
// workaround for https://github.com/ziglang/zig/issues/1194
suspend {
resume @handle();
}
const loop = zig_compiler.loop;
var comp = Compilation{
.loop = loop, .loop = loop,
.arena_allocator = std.heap.ArenaAllocator.init(loop.allocator), .arena_allocator = std.heap.ArenaAllocator.init(loop.allocator),
.event_loop_local = event_loop_local, .zig_compiler = zig_compiler,
.events = undefined, .events = undefined,
.root_src_path = root_src_path, .root_src_path = root_src_path,
.target = target, .target = target,
@ -341,6 +393,9 @@ pub const Compilation = struct {
.zig_lib_dir = zig_lib_dir, .zig_lib_dir = zig_lib_dir,
.zig_std_dir = undefined, .zig_std_dir = undefined,
.tmp_dir = event.Future(BuildError![]u8).init(loop), .tmp_dir = event.Future(BuildError![]u8).init(loop),
.destroy_handle = @handle(),
.main_loop_handle = undefined,
.main_loop_future = event.Future(void).init(loop),
.name = undefined, .name = undefined,
.llvm_triple = undefined, .llvm_triple = undefined,
@ -365,7 +420,6 @@ pub const Compilation = struct {
.is_static = is_static, .is_static = is_static,
.linker_rdynamic = false, .linker_rdynamic = false,
.clang_argv = [][]const u8{}, .clang_argv = [][]const u8{},
.llvm_argv = [][]const u8{},
.lib_dirs = [][]const u8{}, .lib_dirs = [][]const u8{},
.rpath_list = [][]const u8{}, .rpath_list = [][]const u8{},
.assembly_files = [][]const u8{}, .assembly_files = [][]const u8{},
@ -412,25 +466,26 @@ pub const Compilation = struct {
.std_package = undefined, .std_package = undefined,
.override_libc = null, .override_libc = null,
.destroy_handle = undefined,
.have_err_ret_tracing = false, .have_err_ret_tracing = false,
.primitive_type_table = undefined, .primitive_type_table = undefined,
});
errdefer { .fs_watch = undefined,
};
comp.link_libs_list = ArrayList(*LinkLib).init(comp.arena());
comp.primitive_type_table = TypeTable.init(comp.arena());
defer {
comp.int_type_table.private_data.deinit(); comp.int_type_table.private_data.deinit();
comp.array_type_table.private_data.deinit(); comp.array_type_table.private_data.deinit();
comp.ptr_type_table.private_data.deinit(); comp.ptr_type_table.private_data.deinit();
comp.fn_type_table.private_data.deinit(); comp.fn_type_table.private_data.deinit();
comp.arena_allocator.deinit(); comp.arena_allocator.deinit();
comp.loop.allocator.destroy(comp);
} }
comp.name = try Buffer.init(comp.arena(), name); comp.name = try Buffer.init(comp.arena(), name);
comp.llvm_triple = try target.getTriple(comp.arena()); comp.llvm_triple = try target.getTriple(comp.arena());
comp.llvm_target = try Target.llvmTargetFromTriple(comp.llvm_triple); comp.llvm_target = try Target.llvmTargetFromTriple(comp.llvm_triple);
comp.link_libs_list = ArrayList(*LinkLib).init(comp.arena());
comp.zig_std_dir = try std.os.path.join(comp.arena(), zig_lib_dir, "std"); comp.zig_std_dir = try std.os.path.join(comp.arena(), zig_lib_dir, "std");
comp.primitive_type_table = TypeTable.init(comp.arena());
const opt_level = switch (build_mode) { const opt_level = switch (build_mode) {
builtin.Mode.Debug => llvm.CodeGenLevelNone, builtin.Mode.Debug => llvm.CodeGenLevelNone,
@ -444,8 +499,8 @@ pub const Compilation = struct {
// As a workaround we do not use target native features on Windows. // As a workaround we do not use target native features on Windows.
var target_specific_cpu_args: ?[*]u8 = null; var target_specific_cpu_args: ?[*]u8 = null;
var target_specific_cpu_features: ?[*]u8 = null; var target_specific_cpu_features: ?[*]u8 = null;
errdefer llvm.DisposeMessage(target_specific_cpu_args); defer llvm.DisposeMessage(target_specific_cpu_args);
errdefer llvm.DisposeMessage(target_specific_cpu_features); defer llvm.DisposeMessage(target_specific_cpu_features);
if (target == Target.Native and !target.isWindows()) { if (target == Target.Native and !target.isWindows()) {
target_specific_cpu_args = llvm.GetHostCPUName() orelse return error.OutOfMemory; target_specific_cpu_args = llvm.GetHostCPUName() orelse return error.OutOfMemory;
target_specific_cpu_features = llvm.GetNativeFeatures() orelse return error.OutOfMemory; target_specific_cpu_features = llvm.GetNativeFeatures() orelse return error.OutOfMemory;
@ -460,16 +515,16 @@ pub const Compilation = struct {
reloc_mode, reloc_mode,
llvm.CodeModelDefault, llvm.CodeModelDefault,
) orelse return error.OutOfMemory; ) orelse return error.OutOfMemory;
errdefer llvm.DisposeTargetMachine(comp.target_machine); defer llvm.DisposeTargetMachine(comp.target_machine);
comp.target_data_ref = llvm.CreateTargetDataLayout(comp.target_machine) orelse return error.OutOfMemory; comp.target_data_ref = llvm.CreateTargetDataLayout(comp.target_machine) orelse return error.OutOfMemory;
errdefer llvm.DisposeTargetData(comp.target_data_ref); defer llvm.DisposeTargetData(comp.target_data_ref);
comp.target_layout_str = llvm.CopyStringRepOfTargetData(comp.target_data_ref) orelse return error.OutOfMemory; comp.target_layout_str = llvm.CopyStringRepOfTargetData(comp.target_data_ref) orelse return error.OutOfMemory;
errdefer llvm.DisposeMessage(comp.target_layout_str); defer llvm.DisposeMessage(comp.target_layout_str);
comp.events = try event.Channel(Event).create(comp.loop, 0); comp.events = try event.Channel(Event).create(comp.loop, 0);
errdefer comp.events.destroy(); defer comp.events.destroy();
if (root_src_path) |root_src| { if (root_src_path) |root_src| {
const dirname = std.os.path.dirname(root_src) orelse "."; const dirname = std.os.path.dirname(root_src) orelse ".";
@ -482,11 +537,27 @@ pub const Compilation = struct {
comp.root_package = try Package.create(comp.arena(), ".", ""); comp.root_package = try Package.create(comp.arena(), ".", "");
} }
comp.fs_watch = try fs.Watch(*Scope.Root).create(loop, 16);
defer comp.fs_watch.destroy();
try comp.initTypes(); try comp.initTypes();
defer comp.primitive_type_table.deinit();
comp.destroy_handle = try async<loop.allocator> comp.internalDeinit(); comp.main_loop_handle = async comp.mainLoop() catch unreachable;
// Set this to indicate that initialization completed successfully.
// from here on out we must not return an error.
// This must occur before the first suspend/await.
out_comp.* = &comp;
// This suspend is resumed by destroy()
suspend;
// From here on is cleanup.
return comp; await (async comp.deinit_group.wait() catch unreachable);
if (comp.tmp_dir.getOrNull()) |tmp_dir_result| if (tmp_dir_result.*) |tmp_dir| {
// TODO evented I/O?
os.deleteTree(comp.arena(), tmp_dir) catch {};
} else |_| {};
} }
/// it does ref the result because it could be an arbitrary integer size /// it does ref the result because it could be an arbitrary integer size
@ -672,55 +743,28 @@ pub const Compilation = struct {
assert((try comp.primitive_type_table.put(comp.u8_type.base.name, &comp.u8_type.base)) == null); assert((try comp.primitive_type_table.put(comp.u8_type.base.name, &comp.u8_type.base)) == null);
} }
/// This function can safely use async/await, because it manages Compilation's lifetime,
/// and EventLoopLocal.deinit will not be called until the event.Loop.run() completes.
async fn internalDeinit(self: *Compilation) void {
suspend;
await (async self.deinit_group.wait() catch unreachable);
if (self.tmp_dir.getOrNull()) |tmp_dir_result| if (tmp_dir_result.*) |tmp_dir| {
// TODO evented I/O?
os.deleteTree(self.arena(), tmp_dir) catch {};
} else |_| {};
self.events.destroy();
llvm.DisposeMessage(self.target_layout_str);
llvm.DisposeTargetData(self.target_data_ref);
llvm.DisposeTargetMachine(self.target_machine);
self.primitive_type_table.deinit();
self.arena_allocator.deinit();
self.gpa().destroy(self);
}
pub fn destroy(self: *Compilation) void { pub fn destroy(self: *Compilation) void {
cancel self.main_loop_handle;
resume self.destroy_handle; resume self.destroy_handle;
} }
pub fn build(self: *Compilation) !void { fn start(self: *Compilation) void {
if (self.llvm_argv.len != 0) { self.main_loop_future.resolve();
var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.arena(), [][]const []const u8{
[][]const u8{"zig (LLVM option parsing)"},
self.llvm_argv,
});
defer c_compatible_args.deinit();
// TODO this sets global state
c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
} }
_ = try async<self.gpa()> self.buildAsync(); async fn mainLoop(self: *Compilation) void {
} // wait until start() is called
_ = await (async self.main_loop_future.get() catch unreachable);
var build_result = await (async self.initialCompile() catch unreachable);
async fn buildAsync(self: *Compilation) void {
while (true) { while (true) {
// TODO directly awaiting async should guarantee memory allocation elision const link_result = if (build_result) blk: {
const build_result = await (async self.compileAndLink() catch unreachable); break :blk await (async self.maybeLink() catch unreachable);
} else |err| err;
// this makes a handy error return trace and stack trace in debug mode // this makes a handy error return trace and stack trace in debug mode
if (std.debug.runtime_safety) { if (std.debug.runtime_safety) {
build_result catch unreachable; link_result catch unreachable;
} }
const compile_errors = blk: { const compile_errors = blk: {
@ -729,7 +773,7 @@ pub const Compilation = struct {
break :blk held.value.toOwnedSlice(); break :blk held.value.toOwnedSlice();
}; };
if (build_result) |_| { if (link_result) |_| {
if (compile_errors.len == 0) { if (compile_errors.len == 0) {
await (async self.events.put(Event.Ok) catch unreachable); await (async self.events.put(Event.Ok) catch unreachable);
} else { } else {
@ -742,25 +786,45 @@ pub const Compilation = struct {
await (async self.events.put(Event{ .Error = err }) catch unreachable); await (async self.events.put(Event{ .Error = err }) catch unreachable);
} }
// for now we stop after 1 // First, get an item from the watch channel, waiting on the channel.
return; var group = event.Group(BuildError!void).init(self.loop);
} {
} const ev = (await (async self.fs_watch.channel.get() catch unreachable)) catch |err| {
build_result = err;
async fn compileAndLink(self: *Compilation) !void { continue;
if (self.root_src_path) |root_src_path| {
// TODO async/await os.path.real
const root_src_real_path = os.path.real(self.gpa(), root_src_path) catch |err| {
try printError("unable to get real path '{}': {}", root_src_path, err);
return err;
}; };
const root_scope = blk: { const root_scope = ev.data;
errdefer self.gpa().free(root_src_real_path); group.call(rebuildFile, self, root_scope) catch |err| {
build_result = err;
continue;
};
}
// Next, get all the items from the channel that are buffered up.
while (await (async self.fs_watch.channel.getOrNull() catch unreachable)) |ev_or_err| {
if (ev_or_err) |ev| {
const root_scope = ev.data;
group.call(rebuildFile, self, root_scope) catch |err| {
build_result = err;
continue;
};
} else |err| {
build_result = err;
continue;
}
}
build_result = await (async group.wait() catch unreachable);
}
}
// TODO async/await readFileAlloc() async fn rebuildFile(self: *Compilation, root_scope: *Scope.Root) !void {
const source_code = io.readFileAlloc(self.gpa(), root_src_real_path) catch |err| { const tree_scope = blk: {
try printError("unable to open '{}': {}", root_src_real_path, err); const source_code = (await (async fs.readFile(
return err; self.loop,
root_scope.realpath,
max_src_size,
) catch unreachable)) catch |err| {
try self.addCompileErrorCli(root_scope.realpath, "unable to open: {}", @errorName(err));
return;
}; };
errdefer self.gpa().free(source_code); errdefer self.gpa().free(source_code);
@ -771,76 +835,146 @@ pub const Compilation = struct {
self.gpa().destroy(tree); self.gpa().destroy(tree);
} }
break :blk try Scope.Root.create(self, tree, root_src_real_path); break :blk try Scope.AstTree.create(self, tree, root_scope);
}; };
defer root_scope.base.deref(self); defer tree_scope.base.deref(self);
const tree = root_scope.tree;
var error_it = tree.errors.iterator(0); var error_it = tree_scope.tree.errors.iterator(0);
while (error_it.next()) |parse_error| { while (error_it.next()) |parse_error| {
const msg = try Msg.createFromParseErrorAndScope(self, root_scope, parse_error); const msg = try Msg.createFromParseErrorAndScope(self, tree_scope, parse_error);
errdefer msg.destroy(); errdefer msg.destroy();
try await (async self.addCompileErrorAsync(msg) catch unreachable); try await (async self.addCompileErrorAsync(msg) catch unreachable);
} }
if (tree.errors.len != 0) { if (tree_scope.tree.errors.len != 0) {
return; return;
} }
const decls = try Scope.Decls.create(self, &root_scope.base); const locked_table = await (async root_scope.decls.table.acquireWrite() catch unreachable);
defer decls.base.deref(self); defer locked_table.release();
var decl_group = event.Group(BuildError!void).init(self.loop); var decl_group = event.Group(BuildError!void).init(self.loop);
var decl_group_consumed = false; defer decl_group.deinit();
errdefer if (!decl_group_consumed) decl_group.cancelAll();
var it = tree.root_node.decls.iterator(0); try await try async self.rebuildChangedDecls(
while (it.next()) |decl_ptr| { &decl_group,
locked_table.value,
root_scope.decls,
&tree_scope.tree.root_node.decls,
tree_scope,
);
try await (async decl_group.wait() catch unreachable);
}
async fn rebuildChangedDecls(
self: *Compilation,
group: *event.Group(BuildError!void),
locked_table: *Decl.Table,
decl_scope: *Scope.Decls,
ast_decls: *ast.Node.Root.DeclList,
tree_scope: *Scope.AstTree,
) !void {
var existing_decls = try locked_table.clone();
defer existing_decls.deinit();
var ast_it = ast_decls.iterator(0);
while (ast_it.next()) |decl_ptr| {
const decl = decl_ptr.*; const decl = decl_ptr.*;
switch (decl.id) { switch (decl.id) {
ast.Node.Id.Comptime => { ast.Node.Id.Comptime => {
const comptime_node = @fieldParentPtr(ast.Node.Comptime, "base", decl); const comptime_node = @fieldParentPtr(ast.Node.Comptime, "base", decl);
try self.prelink_group.call(addCompTimeBlock, self, &decls.base, comptime_node); // TODO connect existing comptime decls to updated source files
try self.prelink_group.call(addCompTimeBlock, self, tree_scope, &decl_scope.base, comptime_node);
}, },
ast.Node.Id.VarDecl => @panic("TODO"), ast.Node.Id.VarDecl => @panic("TODO"),
ast.Node.Id.FnProto => { ast.Node.Id.FnProto => {
const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl); const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else { const name = if (fn_proto.name_token) |name_token| tree_scope.tree.tokenSlice(name_token) else {
try self.addCompileError(root_scope, Span{ try self.addCompileError(tree_scope, Span{
.first = fn_proto.fn_token, .first = fn_proto.fn_token,
.last = fn_proto.fn_token + 1, .last = fn_proto.fn_token + 1,
}, "missing function name"); }, "missing function name");
continue; continue;
}; };
if (existing_decls.remove(name)) |entry| {
// compare new code to existing
if (entry.value.cast(Decl.Fn)) |existing_fn_decl| {
// Just compare the old bytes to the new bytes of the top level decl.
// Even if the AST is technically the same, we want error messages to display
// from the most recent source.
const old_decl_src = existing_fn_decl.base.tree_scope.tree.getNodeSource(
&existing_fn_decl.fn_proto.base,
);
const new_decl_src = tree_scope.tree.getNodeSource(&fn_proto.base);
if (mem.eql(u8, old_decl_src, new_decl_src)) {
// it's the same, we can skip this decl
continue;
} else {
@panic("TODO decl changed implementation");
// Add the new thing before dereferencing the old thing. This way we don't end
// up pointlessly re-creating things we end up using in the new thing.
}
} else {
@panic("TODO decl changed kind");
}
} else {
// add new decl
const fn_decl = try self.gpa().create(Decl.Fn{ const fn_decl = try self.gpa().create(Decl.Fn{
.base = Decl{ .base = Decl{
.id = Decl.Id.Fn, .id = Decl.Id.Fn,
.name = name, .name = name,
.visib = parseVisibToken(tree, fn_proto.visib_token), .visib = parseVisibToken(tree_scope.tree, fn_proto.visib_token),
.resolution = event.Future(BuildError!void).init(self.loop), .resolution = event.Future(BuildError!void).init(self.loop),
.parent_scope = &decls.base, .parent_scope = &decl_scope.base,
.tree_scope = tree_scope,
}, },
.value = Decl.Fn.Val{ .Unresolved = {} }, .value = Decl.Fn.Val{ .Unresolved = {} },
.fn_proto = fn_proto, .fn_proto = fn_proto,
}); });
tree_scope.base.ref();
errdefer self.gpa().destroy(fn_decl); errdefer self.gpa().destroy(fn_decl);
try decl_group.call(addTopLevelDecl, self, decls, &fn_decl.base); try group.call(addTopLevelDecl, self, &fn_decl.base, locked_table);
}
}, },
ast.Node.Id.TestDecl => @panic("TODO"), ast.Node.Id.TestDecl => @panic("TODO"),
else => unreachable, else => unreachable,
} }
} }
decl_group_consumed = true;
try await (async decl_group.wait() catch unreachable);
// Now other code can rely on the decls scope having a complete list of names. var existing_decl_it = existing_decls.iterator();
decls.name_future.resolve(); while (existing_decl_it.next()) |entry| {
// this decl was deleted
const existing_decl = entry.value;
@panic("TODO handle decl deletion");
}
} }
async fn initialCompile(self: *Compilation) !void {
if (self.root_src_path) |root_src_path| {
const root_scope = blk: {
// TODO async/await os.path.real
const root_src_real_path = os.path.realAlloc(self.gpa(), root_src_path) catch |err| {
try self.addCompileErrorCli(root_src_path, "unable to open: {}", @errorName(err));
return;
};
errdefer self.gpa().free(root_src_real_path);
break :blk try Scope.Root.create(self, root_src_real_path);
};
defer root_scope.base.deref(self);
assert((try await try async self.fs_watch.addFile(root_scope.realpath, root_scope)) == null);
try await try async self.rebuildFile(root_scope);
}
}
async fn maybeLink(self: *Compilation) !void {
(await (async self.prelink_group.wait() catch unreachable)) catch |err| switch (err) { (await (async self.prelink_group.wait() catch unreachable)) catch |err| switch (err) {
error.SemanticAnalysisFailed => {}, error.SemanticAnalysisFailed => {},
else => return err, else => return err,
@ -861,6 +995,7 @@ pub const Compilation = struct {
/// caller takes ownership of resulting Code /// caller takes ownership of resulting Code
async fn genAndAnalyzeCode( async fn genAndAnalyzeCode(
comp: *Compilation, comp: *Compilation,
tree_scope: *Scope.AstTree,
scope: *Scope, scope: *Scope,
node: *ast.Node, node: *ast.Node,
expected_type: ?*Type, expected_type: ?*Type,
@ -868,6 +1003,7 @@ pub const Compilation = struct {
const unanalyzed_code = try await (async ir.gen( const unanalyzed_code = try await (async ir.gen(
comp, comp,
node, node,
tree_scope,
scope, scope,
) catch unreachable); ) catch unreachable);
defer unanalyzed_code.destroy(comp.gpa()); defer unanalyzed_code.destroy(comp.gpa());
@ -894,6 +1030,7 @@ pub const Compilation = struct {
async fn addCompTimeBlock( async fn addCompTimeBlock(
comp: *Compilation, comp: *Compilation,
tree_scope: *Scope.AstTree,
scope: *Scope, scope: *Scope,
comptime_node: *ast.Node.Comptime, comptime_node: *ast.Node.Comptime,
) !void { ) !void {
@ -902,6 +1039,7 @@ pub const Compilation = struct {
const analyzed_code = (await (async genAndAnalyzeCode( const analyzed_code = (await (async genAndAnalyzeCode(
comp, comp,
tree_scope,
scope, scope,
comptime_node.expr, comptime_node.expr,
&void_type.base, &void_type.base,
@ -914,38 +1052,42 @@ pub const Compilation = struct {
analyzed_code.destroy(comp.gpa()); analyzed_code.destroy(comp.gpa());
} }
async fn addTopLevelDecl(self: *Compilation, decls: *Scope.Decls, decl: *Decl) !void { async fn addTopLevelDecl(
const tree = decl.findRootScope().tree; self: *Compilation,
const is_export = decl.isExported(tree); decl: *Decl,
locked_table: *Decl.Table,
var add_to_table_resolved = false; ) !void {
const add_to_table = async self.addDeclToTable(decls, decl) catch unreachable; const is_export = decl.isExported(decl.tree_scope.tree);
errdefer if (!add_to_table_resolved) cancel add_to_table; // TODO https://github.com/ziglang/zig/issues/1261
if (is_export) { if (is_export) {
try self.prelink_group.call(verifyUniqueSymbol, self, decl); try self.prelink_group.call(verifyUniqueSymbol, self, decl);
try self.prelink_group.call(resolveDecl, self, decl); try self.prelink_group.call(resolveDecl, self, decl);
} }
add_to_table_resolved = true; const gop = try locked_table.getOrPut(decl.name);
try await add_to_table; if (gop.found_existing) {
} try self.addCompileError(decl.tree_scope, decl.getSpan(), "redefinition of '{}'", decl.name);
async fn addDeclToTable(self: *Compilation, decls: *Scope.Decls, decl: *Decl) !void {
const held = await (async decls.table.acquire() catch unreachable);
defer held.release();
if (try held.value.put(decl.name, decl)) |other_decl| {
try self.addCompileError(decls.base.findRoot(), decl.getSpan(), "redefinition of '{}'", decl.name);
// TODO note: other definition here // TODO note: other definition here
} else {
gop.kv.value = decl;
} }
} }
fn addCompileError(self: *Compilation, root: *Scope.Root, span: Span, comptime fmt: []const u8, args: ...) !void { fn addCompileError(self: *Compilation, tree_scope: *Scope.AstTree, span: Span, comptime fmt: []const u8, args: ...) !void {
const text = try std.fmt.allocPrint(self.gpa(), fmt, args); const text = try std.fmt.allocPrint(self.gpa(), fmt, args);
errdefer self.gpa().free(text); errdefer self.gpa().free(text);
const msg = try Msg.createFromScope(self, root, span, text); const msg = try Msg.createFromScope(self, tree_scope, span, text);
errdefer msg.destroy();
try self.prelink_group.call(addCompileErrorAsync, self, msg);
}
fn addCompileErrorCli(self: *Compilation, realpath: []const u8, comptime fmt: []const u8, args: ...) !void {
const text = try std.fmt.allocPrint(self.gpa(), fmt, args);
errdefer self.gpa().free(text);
const msg = try Msg.createFromCli(self, realpath, text);
errdefer msg.destroy(); errdefer msg.destroy();
try self.prelink_group.call(addCompileErrorAsync, self, msg); try self.prelink_group.call(addCompileErrorAsync, self, msg);
@ -969,7 +1111,7 @@ pub const Compilation = struct {
if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| { if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
try self.addCompileError( try self.addCompileError(
decl.findRootScope(), decl.tree_scope,
decl.getSpan(), decl.getSpan(),
"exported symbol collision: '{}'", "exported symbol collision: '{}'",
decl.name, decl.name,
@ -1019,7 +1161,7 @@ pub const Compilation = struct {
async fn startFindingNativeLibC(self: *Compilation) void { async fn startFindingNativeLibC(self: *Compilation) void {
await (async self.loop.yield() catch unreachable); await (async self.loop.yield() catch unreachable);
// we don't care if it fails, we're just trying to kick off the future resolution // we don't care if it fails, we're just trying to kick off the future resolution
_ = (await (async self.event_loop_local.getNativeLibC() catch unreachable)) catch return; _ = (await (async self.zig_compiler.getNativeLibC() catch unreachable)) catch return;
} }
/// General Purpose Allocator. Must free when done. /// General Purpose Allocator. Must free when done.
@ -1077,7 +1219,7 @@ pub const Compilation = struct {
var rand_bytes: [9]u8 = undefined; var rand_bytes: [9]u8 = undefined;
{ {
const held = await (async self.event_loop_local.prng.acquire() catch unreachable); const held = await (async self.zig_compiler.prng.acquire() catch unreachable);
defer held.release(); defer held.release();
held.value.random.bytes(rand_bytes[0..]); held.value.random.bytes(rand_bytes[0..]);
@ -1093,18 +1235,24 @@ pub const Compilation = struct {
} }
/// Returns a value which has been ref()'d once /// Returns a value which has been ref()'d once
async fn analyzeConstValue(comp: *Compilation, scope: *Scope, node: *ast.Node, expected_type: *Type) !*Value { async fn analyzeConstValue(
const analyzed_code = try await (async comp.genAndAnalyzeCode(scope, node, expected_type) catch unreachable); comp: *Compilation,
tree_scope: *Scope.AstTree,
scope: *Scope,
node: *ast.Node,
expected_type: *Type,
) !*Value {
const analyzed_code = try await (async comp.genAndAnalyzeCode(tree_scope, scope, node, expected_type) catch unreachable);
defer analyzed_code.destroy(comp.gpa()); defer analyzed_code.destroy(comp.gpa());
return analyzed_code.getCompTimeResult(comp); return analyzed_code.getCompTimeResult(comp);
} }
async fn analyzeTypeExpr(comp: *Compilation, scope: *Scope, node: *ast.Node) !*Type { async fn analyzeTypeExpr(comp: *Compilation, tree_scope: *Scope.AstTree, scope: *Scope, node: *ast.Node) !*Type {
const meta_type = &Type.MetaType.get(comp).base; const meta_type = &Type.MetaType.get(comp).base;
defer meta_type.base.deref(comp); defer meta_type.base.deref(comp);
const result_val = try await (async comp.analyzeConstValue(scope, node, meta_type) catch unreachable); const result_val = try await (async comp.analyzeConstValue(tree_scope, scope, node, meta_type) catch unreachable);
errdefer result_val.base.deref(comp); errdefer result_val.base.deref(comp);
return result_val.cast(Type).?; return result_val.cast(Type).?;
@ -1120,13 +1268,6 @@ pub const Compilation = struct {
} }
}; };
fn printError(comptime format: []const u8, args: ...) !void {
var stderr_file = try std.io.getStdErr();
var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
const out_stream = &stderr_file_out_stream.stream;
try out_stream.print(format, args);
}
fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib { fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib {
if (optional_token_index) |token_index| { if (optional_token_index) |token_index| {
const token = tree.tokens.at(token_index); const token = tree.tokens.at(token_index);
@ -1150,12 +1291,14 @@ async fn generateDecl(comp: *Compilation, decl: *Decl) !void {
} }
async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void { async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
const tree_scope = fn_decl.base.tree_scope;
const body_node = fn_decl.fn_proto.body_node orelse return await (async generateDeclFnProto(comp, fn_decl) catch unreachable); const body_node = fn_decl.fn_proto.body_node orelse return await (async generateDeclFnProto(comp, fn_decl) catch unreachable);
const fndef_scope = try Scope.FnDef.create(comp, fn_decl.base.parent_scope); const fndef_scope = try Scope.FnDef.create(comp, fn_decl.base.parent_scope);
defer fndef_scope.base.deref(comp); defer fndef_scope.base.deref(comp);
const fn_type = try await (async analyzeFnType(comp, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable); const fn_type = try await (async analyzeFnType(comp, tree_scope, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
defer fn_type.base.base.deref(comp); defer fn_type.base.base.deref(comp);
var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name); var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name);
@ -1168,18 +1311,17 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
symbol_name_consumed = true; symbol_name_consumed = true;
// Define local parameter variables // Define local parameter variables
const root_scope = fn_decl.base.findRootScope();
for (fn_type.key.data.Normal.params) |param, i| { for (fn_type.key.data.Normal.params) |param, i| {
//AstNode *param_decl_node = get_param_decl_node(fn_table_entry, i); //AstNode *param_decl_node = get_param_decl_node(fn_table_entry, i);
const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", fn_decl.fn_proto.params.at(i).*); const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", fn_decl.fn_proto.params.at(i).*);
const name_token = param_decl.name_token orelse { const name_token = param_decl.name_token orelse {
try comp.addCompileError(root_scope, Span{ try comp.addCompileError(tree_scope, Span{
.first = param_decl.firstToken(), .first = param_decl.firstToken(),
.last = param_decl.type_node.firstToken(), .last = param_decl.type_node.firstToken(),
}, "missing parameter name"); }, "missing parameter name");
return error.SemanticAnalysisFailed; return error.SemanticAnalysisFailed;
}; };
const param_name = root_scope.tree.tokenSlice(name_token); const param_name = tree_scope.tree.tokenSlice(name_token);
// if (is_noalias && get_codegen_ptr_type(param_type) == nullptr) { // if (is_noalias && get_codegen_ptr_type(param_type) == nullptr) {
// add_node_error(g, param_decl_node, buf_sprintf("noalias on non-pointer parameter")); // add_node_error(g, param_decl_node, buf_sprintf("noalias on non-pointer parameter"));
@ -1201,6 +1343,7 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
} }
const analyzed_code = try await (async comp.genAndAnalyzeCode( const analyzed_code = try await (async comp.genAndAnalyzeCode(
tree_scope,
fn_val.child_scope, fn_val.child_scope,
body_node, body_node,
fn_type.key.data.Normal.return_type, fn_type.key.data.Normal.return_type,
@ -1231,12 +1374,17 @@ fn getZigDir(allocator: *mem.Allocator) ![]u8 {
return os.getAppDataDir(allocator, "zig"); return os.getAppDataDir(allocator, "zig");
} }
async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.FnProto) !*Type.Fn { async fn analyzeFnType(
comp: *Compilation,
tree_scope: *Scope.AstTree,
scope: *Scope,
fn_proto: *ast.Node.FnProto,
) !*Type.Fn {
const return_type_node = switch (fn_proto.return_type) { const return_type_node = switch (fn_proto.return_type) {
ast.Node.FnProto.ReturnType.Explicit => |n| n, ast.Node.FnProto.ReturnType.Explicit => |n| n,
ast.Node.FnProto.ReturnType.InferErrorSet => |n| n, ast.Node.FnProto.ReturnType.InferErrorSet => |n| n,
}; };
const return_type = try await (async comp.analyzeTypeExpr(scope, return_type_node) catch unreachable); const return_type = try await (async comp.analyzeTypeExpr(tree_scope, scope, return_type_node) catch unreachable);
return_type.base.deref(comp); return_type.base.deref(comp);
var params = ArrayList(Type.Fn.Param).init(comp.gpa()); var params = ArrayList(Type.Fn.Param).init(comp.gpa());
@ -1252,7 +1400,7 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn
var it = fn_proto.params.iterator(0); var it = fn_proto.params.iterator(0);
while (it.next()) |param_node_ptr| { while (it.next()) |param_node_ptr| {
const param_node = param_node_ptr.*.cast(ast.Node.ParamDecl).?; const param_node = param_node_ptr.*.cast(ast.Node.ParamDecl).?;
const param_type = try await (async comp.analyzeTypeExpr(scope, param_node.type_node) catch unreachable); const param_type = try await (async comp.analyzeTypeExpr(tree_scope, scope, param_node.type_node) catch unreachable);
errdefer param_type.base.deref(comp); errdefer param_type.base.deref(comp);
try params.append(Type.Fn.Param{ try params.append(Type.Fn.Param{
.typ = param_type, .typ = param_type,
@ -1289,7 +1437,12 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn
} }
async fn generateDeclFnProto(comp: *Compilation, fn_decl: *Decl.Fn) !void { async fn generateDeclFnProto(comp: *Compilation, fn_decl: *Decl.Fn) !void {
const fn_type = try await (async analyzeFnType(comp, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable); const fn_type = try await (async analyzeFnType(
comp,
fn_decl.base.tree_scope,
fn_decl.base.parent_scope,
fn_decl.fn_proto,
) catch unreachable);
defer fn_type.base.base.deref(comp); defer fn_type.base.base.deref(comp);
var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name); var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name);
@ -1301,3 +1454,14 @@ async fn generateDeclFnProto(comp: *Compilation, fn_decl: *Decl.Fn) !void {
fn_decl.value = Decl.Fn.Val{ .FnProto = fn_proto_val }; fn_decl.value = Decl.Fn.Val{ .FnProto = fn_proto_val };
symbol_name_consumed = true; symbol_name_consumed = true;
} }
// TODO these are hacks which should probably be solved by the language
fn getAwaitResult(allocator: *Allocator, handle: var) @typeInfo(@typeOf(handle)).Promise.child.? {
var result: ?@typeInfo(@typeOf(handle)).Promise.child.? = null;
cancel (async<allocator> getAwaitResultAsync(handle, &result) catch unreachable);
return result.?;
}
async fn getAwaitResultAsync(handle: var, out: *?@typeInfo(@typeOf(handle)).Promise.child.?) void {
out.* = await handle;
}

View File

@ -17,8 +17,16 @@ pub const Decl = struct {
resolution: event.Future(Compilation.BuildError!void), resolution: event.Future(Compilation.BuildError!void),
parent_scope: *Scope, parent_scope: *Scope,
// TODO when we destroy the decl, deref the tree scope
tree_scope: *Scope.AstTree,
pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8); pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8);
pub fn cast(base: *Decl, comptime T: type) ?*T {
if (base.id != @field(Id, @typeName(T))) return null;
return @fieldParentPtr(T, "base", base);
}
pub fn isExported(base: *const Decl, tree: *ast.Tree) bool { pub fn isExported(base: *const Decl, tree: *ast.Tree) bool {
switch (base.id) { switch (base.id) {
Id.Fn => { Id.Fn => {
@ -95,4 +103,3 @@ pub const Decl = struct {
base: Decl, base: Decl,
}; };
}; };

View File

@ -33,35 +33,48 @@ pub const Span = struct {
}; };
pub const Msg = struct { pub const Msg = struct {
span: Span,
text: []u8, text: []u8,
realpath: []u8,
data: Data, data: Data,
const Data = union(enum) { const Data = union(enum) {
Cli: Cli,
PathAndTree: PathAndTree, PathAndTree: PathAndTree,
ScopeAndComp: ScopeAndComp, ScopeAndComp: ScopeAndComp,
}; };
const PathAndTree = struct { const PathAndTree = struct {
realpath: []const u8, span: Span,
tree: *ast.Tree, tree: *ast.Tree,
allocator: *mem.Allocator, allocator: *mem.Allocator,
}; };
const ScopeAndComp = struct { const ScopeAndComp = struct {
root_scope: *Scope.Root, span: Span,
tree_scope: *Scope.AstTree,
compilation: *Compilation, compilation: *Compilation,
}; };
const Cli = struct {
allocator: *mem.Allocator,
};
pub fn destroy(self: *Msg) void { pub fn destroy(self: *Msg) void {
switch (self.data) { switch (self.data) {
Data.Cli => |cli| {
cli.allocator.free(self.text);
cli.allocator.free(self.realpath);
cli.allocator.destroy(self);
},
Data.PathAndTree => |path_and_tree| { Data.PathAndTree => |path_and_tree| {
path_and_tree.allocator.free(self.text); path_and_tree.allocator.free(self.text);
path_and_tree.allocator.free(self.realpath);
path_and_tree.allocator.destroy(self); path_and_tree.allocator.destroy(self);
}, },
Data.ScopeAndComp => |scope_and_comp| { Data.ScopeAndComp => |scope_and_comp| {
scope_and_comp.root_scope.base.deref(scope_and_comp.compilation); scope_and_comp.tree_scope.base.deref(scope_and_comp.compilation);
scope_and_comp.compilation.gpa().free(self.text); scope_and_comp.compilation.gpa().free(self.text);
scope_and_comp.compilation.gpa().free(self.realpath);
scope_and_comp.compilation.gpa().destroy(self); scope_and_comp.compilation.gpa().destroy(self);
}, },
} }
@ -69,6 +82,7 @@ pub const Msg = struct {
fn getAllocator(self: *const Msg) *mem.Allocator { fn getAllocator(self: *const Msg) *mem.Allocator {
switch (self.data) { switch (self.data) {
Data.Cli => |cli| return cli.allocator,
Data.PathAndTree => |path_and_tree| { Data.PathAndTree => |path_and_tree| {
return path_and_tree.allocator; return path_and_tree.allocator;
}, },
@ -78,71 +92,93 @@ pub const Msg = struct {
} }
} }
pub fn getRealPath(self: *const Msg) []const u8 {
switch (self.data) {
Data.PathAndTree => |path_and_tree| {
return path_and_tree.realpath;
},
Data.ScopeAndComp => |scope_and_comp| {
return scope_and_comp.root_scope.realpath;
},
}
}
pub fn getTree(self: *const Msg) *ast.Tree { pub fn getTree(self: *const Msg) *ast.Tree {
switch (self.data) { switch (self.data) {
Data.Cli => unreachable,
Data.PathAndTree => |path_and_tree| { Data.PathAndTree => |path_and_tree| {
return path_and_tree.tree; return path_and_tree.tree;
}, },
Data.ScopeAndComp => |scope_and_comp| { Data.ScopeAndComp => |scope_and_comp| {
return scope_and_comp.root_scope.tree; return scope_and_comp.tree_scope.tree;
}, },
} }
} }
pub fn getSpan(self: *const Msg) Span {
return switch (self.data) {
Data.Cli => unreachable,
Data.PathAndTree => |path_and_tree| path_and_tree.span,
Data.ScopeAndComp => |scope_and_comp| scope_and_comp.span,
};
}
/// Takes ownership of text /// Takes ownership of text
/// References root_scope, and derefs when the msg is freed /// References tree_scope, and derefs when the msg is freed
pub fn createFromScope(comp: *Compilation, root_scope: *Scope.Root, span: Span, text: []u8) !*Msg { pub fn createFromScope(comp: *Compilation, tree_scope: *Scope.AstTree, span: Span, text: []u8) !*Msg {
const realpath = try mem.dupe(comp.gpa(), u8, tree_scope.root().realpath);
errdefer comp.gpa().free(realpath);
const msg = try comp.gpa().create(Msg{ const msg = try comp.gpa().create(Msg{
.text = text, .text = text,
.span = span, .realpath = realpath,
.data = Data{ .data = Data{
.ScopeAndComp = ScopeAndComp{ .ScopeAndComp = ScopeAndComp{
.root_scope = root_scope, .tree_scope = tree_scope,
.compilation = comp, .compilation = comp,
.span = span,
}, },
}, },
}); });
root_scope.base.ref(); tree_scope.base.ref();
return msg;
}
/// Caller owns returned Msg and must free with `allocator`
/// allocator will additionally be used for printing messages later.
pub fn createFromCli(comp: *Compilation, realpath: []const u8, text: []u8) !*Msg {
const realpath_copy = try mem.dupe(comp.gpa(), u8, realpath);
errdefer comp.gpa().free(realpath_copy);
const msg = try comp.gpa().create(Msg{
.text = text,
.realpath = realpath_copy,
.data = Data{
.Cli = Cli{ .allocator = comp.gpa() },
},
});
return msg; return msg;
} }
pub fn createFromParseErrorAndScope( pub fn createFromParseErrorAndScope(
comp: *Compilation, comp: *Compilation,
root_scope: *Scope.Root, tree_scope: *Scope.AstTree,
parse_error: *const ast.Error, parse_error: *const ast.Error,
) !*Msg { ) !*Msg {
const loc_token = parse_error.loc(); const loc_token = parse_error.loc();
var text_buf = try std.Buffer.initSize(comp.gpa(), 0); var text_buf = try std.Buffer.initSize(comp.gpa(), 0);
defer text_buf.deinit(); defer text_buf.deinit();
const realpath_copy = try mem.dupe(comp.gpa(), u8, tree_scope.root().realpath);
errdefer comp.gpa().free(realpath_copy);
var out_stream = &std.io.BufferOutStream.init(&text_buf).stream; var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
try parse_error.render(&root_scope.tree.tokens, out_stream); try parse_error.render(&tree_scope.tree.tokens, out_stream);
const msg = try comp.gpa().create(Msg{ const msg = try comp.gpa().create(Msg{
.text = undefined, .text = undefined,
.realpath = realpath_copy,
.data = Data{
.ScopeAndComp = ScopeAndComp{
.tree_scope = tree_scope,
.compilation = comp,
.span = Span{ .span = Span{
.first = loc_token, .first = loc_token,
.last = loc_token, .last = loc_token,
}, },
.data = Data{
.ScopeAndComp = ScopeAndComp{
.root_scope = root_scope,
.compilation = comp,
}, },
}, },
}); });
root_scope.base.ref(); tree_scope.base.ref();
msg.text = text_buf.toOwnedSlice(); msg.text = text_buf.toOwnedSlice();
return msg; return msg;
} }
@ -161,22 +197,25 @@ pub const Msg = struct {
var text_buf = try std.Buffer.initSize(allocator, 0); var text_buf = try std.Buffer.initSize(allocator, 0);
defer text_buf.deinit(); defer text_buf.deinit();
const realpath_copy = try mem.dupe(allocator, u8, realpath);
errdefer allocator.free(realpath_copy);
var out_stream = &std.io.BufferOutStream.init(&text_buf).stream; var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
try parse_error.render(&tree.tokens, out_stream); try parse_error.render(&tree.tokens, out_stream);
const msg = try allocator.create(Msg{ const msg = try allocator.create(Msg{
.text = undefined, .text = undefined,
.realpath = realpath_copy,
.data = Data{ .data = Data{
.PathAndTree = PathAndTree{ .PathAndTree = PathAndTree{
.allocator = allocator, .allocator = allocator,
.realpath = realpath,
.tree = tree, .tree = tree,
},
},
.span = Span{ .span = Span{
.first = loc_token, .first = loc_token,
.last = loc_token, .last = loc_token,
}, },
},
},
}); });
msg.text = text_buf.toOwnedSlice(); msg.text = text_buf.toOwnedSlice();
errdefer allocator.destroy(msg); errdefer allocator.destroy(msg);
@ -185,20 +224,28 @@ pub const Msg = struct {
} }
pub fn printToStream(msg: *const Msg, stream: var, color_on: bool) !void { pub fn printToStream(msg: *const Msg, stream: var, color_on: bool) !void {
switch (msg.data) {
Data.Cli => {
try stream.print("{}:-:-: error: {}\n", msg.realpath, msg.text);
return;
},
else => {},
}
const allocator = msg.getAllocator(); const allocator = msg.getAllocator();
const realpath = msg.getRealPath();
const tree = msg.getTree(); const tree = msg.getTree();
const cwd = try os.getCwd(allocator); const cwd = try os.getCwdAlloc(allocator);
defer allocator.free(cwd); defer allocator.free(cwd);
const relpath = try os.path.relative(allocator, cwd, realpath); const relpath = try os.path.relative(allocator, cwd, msg.realpath);
defer allocator.free(relpath); defer allocator.free(relpath);
const path = if (relpath.len < realpath.len) relpath else realpath; const path = if (relpath.len < msg.realpath.len) relpath else msg.realpath;
const span = msg.getSpan();
const first_token = tree.tokens.at(msg.span.first); const first_token = tree.tokens.at(span.first);
const last_token = tree.tokens.at(msg.span.last); const last_token = tree.tokens.at(span.last);
const start_loc = tree.tokenLocationPtr(0, first_token); const start_loc = tree.tokenLocationPtr(0, first_token);
const end_loc = tree.tokenLocationPtr(first_token.end, last_token); const end_loc = tree.tokenLocationPtr(first_token.end, last_token);
if (!color_on) { if (!color_on) {

View File

@ -14,7 +14,7 @@ pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![
const test_index_file = try os.path.join(allocator, test_zig_dir, "std", "index.zig"); const test_index_file = try os.path.join(allocator, test_zig_dir, "std", "index.zig");
defer allocator.free(test_index_file); defer allocator.free(test_index_file);
var file = try os.File.openRead(allocator, test_index_file); var file = try os.File.openRead(test_index_file);
file.close(); file.close();
return test_zig_dir; return test_zig_dir;
@ -22,7 +22,7 @@ pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![
/// Caller must free result /// Caller must free result
pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 { pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
const self_exe_path = try os.selfExeDirPath(allocator); const self_exe_path = try os.selfExeDirPathAlloc(allocator);
defer allocator.free(self_exe_path); defer allocator.free(self_exe_path);
var cur_path: []const u8 = self_exe_path; var cur_path: []const u8 = self_exe_path;

View File

@ -961,6 +961,7 @@ pub const Code = struct {
basic_block_list: std.ArrayList(*BasicBlock), basic_block_list: std.ArrayList(*BasicBlock),
arena: std.heap.ArenaAllocator, arena: std.heap.ArenaAllocator,
return_type: ?*Type, return_type: ?*Type,
tree_scope: *Scope.AstTree,
/// allocator is comp.gpa() /// allocator is comp.gpa()
pub fn destroy(self: *Code, allocator: *Allocator) void { pub fn destroy(self: *Code, allocator: *Allocator) void {
@ -990,14 +991,14 @@ pub const Code = struct {
return ret_value.val.KnownValue.getRef(); return ret_value.val.KnownValue.getRef();
} }
try comp.addCompileError( try comp.addCompileError(
ret_value.scope.findRoot(), self.tree_scope,
ret_value.span, ret_value.span,
"unable to evaluate constant expression", "unable to evaluate constant expression",
); );
return error.SemanticAnalysisFailed; return error.SemanticAnalysisFailed;
} else if (inst.hasSideEffects()) { } else if (inst.hasSideEffects()) {
try comp.addCompileError( try comp.addCompileError(
inst.scope.findRoot(), self.tree_scope,
inst.span, inst.span,
"unable to evaluate constant expression", "unable to evaluate constant expression",
); );
@ -1013,25 +1014,24 @@ pub const Builder = struct {
code: *Code, code: *Code,
current_basic_block: *BasicBlock, current_basic_block: *BasicBlock,
next_debug_id: usize, next_debug_id: usize,
root_scope: *Scope.Root,
is_comptime: bool, is_comptime: bool,
is_async: bool, is_async: bool,
begin_scope: ?*Scope, begin_scope: ?*Scope,
pub const Error = Analyze.Error; pub const Error = Analyze.Error;
pub fn init(comp: *Compilation, root_scope: *Scope.Root, begin_scope: ?*Scope) !Builder { pub fn init(comp: *Compilation, tree_scope: *Scope.AstTree, begin_scope: ?*Scope) !Builder {
const code = try comp.gpa().create(Code{ const code = try comp.gpa().create(Code{
.basic_block_list = undefined, .basic_block_list = undefined,
.arena = std.heap.ArenaAllocator.init(comp.gpa()), .arena = std.heap.ArenaAllocator.init(comp.gpa()),
.return_type = null, .return_type = null,
.tree_scope = tree_scope,
}); });
code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator); code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator);
errdefer code.destroy(comp.gpa()); errdefer code.destroy(comp.gpa());
return Builder{ return Builder{
.comp = comp, .comp = comp,
.root_scope = root_scope,
.current_basic_block = undefined, .current_basic_block = undefined,
.code = code, .code = code,
.next_debug_id = 0, .next_debug_id = 0,
@ -1292,6 +1292,7 @@ pub const Builder = struct {
Scope.Id.FnDef => return false, Scope.Id.FnDef => return false,
Scope.Id.Decls => unreachable, Scope.Id.Decls => unreachable,
Scope.Id.Root => unreachable, Scope.Id.Root => unreachable,
Scope.Id.AstTree => unreachable,
Scope.Id.Block, Scope.Id.Block,
Scope.Id.Defer, Scope.Id.Defer,
Scope.Id.DeferExpr, Scope.Id.DeferExpr,
@ -1302,7 +1303,7 @@ pub const Builder = struct {
} }
pub fn genIntLit(irb: *Builder, int_lit: *ast.Node.IntegerLiteral, scope: *Scope) !*Inst { pub fn genIntLit(irb: *Builder, int_lit: *ast.Node.IntegerLiteral, scope: *Scope) !*Inst {
const int_token = irb.root_scope.tree.tokenSlice(int_lit.token); const int_token = irb.code.tree_scope.tree.tokenSlice(int_lit.token);
var base: u8 = undefined; var base: u8 = undefined;
var rest: []const u8 = undefined; var rest: []const u8 = undefined;
@ -1341,7 +1342,7 @@ pub const Builder = struct {
} }
pub async fn genStrLit(irb: *Builder, str_lit: *ast.Node.StringLiteral, scope: *Scope) !*Inst { pub async fn genStrLit(irb: *Builder, str_lit: *ast.Node.StringLiteral, scope: *Scope) !*Inst {
const str_token = irb.root_scope.tree.tokenSlice(str_lit.token); const str_token = irb.code.tree_scope.tree.tokenSlice(str_lit.token);
const src_span = Span.token(str_lit.token); const src_span = Span.token(str_lit.token);
var bad_index: usize = undefined; var bad_index: usize = undefined;
@ -1349,7 +1350,7 @@ pub const Builder = struct {
error.OutOfMemory => return error.OutOfMemory, error.OutOfMemory => return error.OutOfMemory,
error.InvalidCharacter => { error.InvalidCharacter => {
try irb.comp.addCompileError( try irb.comp.addCompileError(
irb.root_scope, irb.code.tree_scope,
src_span, src_span,
"invalid character in string literal: '{c}'", "invalid character in string literal: '{c}'",
str_token[bad_index], str_token[bad_index],
@ -1427,7 +1428,7 @@ pub const Builder = struct {
if (statement_node.cast(ast.Node.Defer)) |defer_node| { if (statement_node.cast(ast.Node.Defer)) |defer_node| {
// defer starts a new scope // defer starts a new scope
const defer_token = irb.root_scope.tree.tokens.at(defer_node.defer_token); const defer_token = irb.code.tree_scope.tree.tokens.at(defer_node.defer_token);
const kind = switch (defer_token.id) { const kind = switch (defer_token.id) {
Token.Id.Keyword_defer => Scope.Defer.Kind.ScopeExit, Token.Id.Keyword_defer => Scope.Defer.Kind.ScopeExit,
Token.Id.Keyword_errdefer => Scope.Defer.Kind.ErrorExit, Token.Id.Keyword_errdefer => Scope.Defer.Kind.ErrorExit,
@ -1513,7 +1514,7 @@ pub const Builder = struct {
const src_span = Span.token(control_flow_expr.ltoken); const src_span = Span.token(control_flow_expr.ltoken);
if (scope.findFnDef() == null) { if (scope.findFnDef() == null) {
try irb.comp.addCompileError( try irb.comp.addCompileError(
irb.root_scope, irb.code.tree_scope,
src_span, src_span,
"return expression outside function definition", "return expression outside function definition",
); );
@ -1523,7 +1524,7 @@ pub const Builder = struct {
if (scope.findDeferExpr()) |scope_defer_expr| { if (scope.findDeferExpr()) |scope_defer_expr| {
if (!scope_defer_expr.reported_err) { if (!scope_defer_expr.reported_err) {
try irb.comp.addCompileError( try irb.comp.addCompileError(
irb.root_scope, irb.code.tree_scope,
src_span, src_span,
"cannot return from defer expression", "cannot return from defer expression",
); );
@ -1599,7 +1600,7 @@ pub const Builder = struct {
pub async fn genIdentifier(irb: *Builder, identifier: *ast.Node.Identifier, scope: *Scope, lval: LVal) !*Inst { pub async fn genIdentifier(irb: *Builder, identifier: *ast.Node.Identifier, scope: *Scope, lval: LVal) !*Inst {
const src_span = Span.token(identifier.token); const src_span = Span.token(identifier.token);
const name = irb.root_scope.tree.tokenSlice(identifier.token); const name = irb.code.tree_scope.tree.tokenSlice(identifier.token);
//if (buf_eql_str(variable_name, "_") && lval == LValPtr) { //if (buf_eql_str(variable_name, "_") && lval == LValPtr) {
// IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, node); // IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, node);
@ -1622,7 +1623,7 @@ pub const Builder = struct {
} }
} else |err| switch (err) { } else |err| switch (err) {
error.Overflow => { error.Overflow => {
try irb.comp.addCompileError(irb.root_scope, src_span, "integer too large"); try irb.comp.addCompileError(irb.code.tree_scope, src_span, "integer too large");
return error.SemanticAnalysisFailed; return error.SemanticAnalysisFailed;
}, },
error.OutOfMemory => return error.OutOfMemory, error.OutOfMemory => return error.OutOfMemory,
@ -1656,7 +1657,7 @@ pub const Builder = struct {
// TODO put a variable of same name with invalid type in global scope // TODO put a variable of same name with invalid type in global scope
// so that future references to this same name will find a variable with an invalid type // so that future references to this same name will find a variable with an invalid type
try irb.comp.addCompileError(irb.root_scope, src_span, "unknown identifier '{}'", name); try irb.comp.addCompileError(irb.code.tree_scope, src_span, "unknown identifier '{}'", name);
return error.SemanticAnalysisFailed; return error.SemanticAnalysisFailed;
} }
@ -1689,6 +1690,7 @@ pub const Builder = struct {
=> scope = scope.parent orelse break, => scope = scope.parent orelse break,
Scope.Id.DeferExpr => unreachable, Scope.Id.DeferExpr => unreachable,
Scope.Id.AstTree => unreachable,
} }
} }
return result; return result;
@ -1740,6 +1742,7 @@ pub const Builder = struct {
=> scope = scope.parent orelse return is_noreturn, => scope = scope.parent orelse return is_noreturn,
Scope.Id.DeferExpr => unreachable, Scope.Id.DeferExpr => unreachable,
Scope.Id.AstTree => unreachable,
} }
} }
} }
@ -1929,8 +1932,9 @@ pub const Builder = struct {
Scope.Id.Root => return Ident.NotFound, Scope.Id.Root => return Ident.NotFound,
Scope.Id.Decls => { Scope.Id.Decls => {
const decls = @fieldParentPtr(Scope.Decls, "base", s); const decls = @fieldParentPtr(Scope.Decls, "base", s);
const table = await (async decls.getTableReadOnly() catch unreachable); const locked_table = await (async decls.table.acquireRead() catch unreachable);
if (table.get(name)) |entry| { defer locked_table.release();
if (locked_table.value.get(name)) |entry| {
return Ident{ .Decl = entry.value }; return Ident{ .Decl = entry.value };
} }
}, },
@ -1967,8 +1971,8 @@ const Analyze = struct {
OutOfMemory, OutOfMemory,
}; };
pub fn init(comp: *Compilation, root_scope: *Scope.Root, explicit_return_type: ?*Type) !Analyze { pub fn init(comp: *Compilation, tree_scope: *Scope.AstTree, explicit_return_type: ?*Type) !Analyze {
var irb = try Builder.init(comp, root_scope, null); var irb = try Builder.init(comp, tree_scope, null);
errdefer irb.abort(); errdefer irb.abort();
return Analyze{ return Analyze{
@ -2046,7 +2050,7 @@ const Analyze = struct {
} }
fn addCompileError(self: *Analyze, span: Span, comptime fmt: []const u8, args: ...) !void { fn addCompileError(self: *Analyze, span: Span, comptime fmt: []const u8, args: ...) !void {
return self.irb.comp.addCompileError(self.irb.root_scope, span, fmt, args); return self.irb.comp.addCompileError(self.irb.code.tree_scope, span, fmt, args);
} }
fn resolvePeerTypes(self: *Analyze, expected_type: ?*Type, peers: []const *Inst) Analyze.Error!*Type { fn resolvePeerTypes(self: *Analyze, expected_type: ?*Type, peers: []const *Inst) Analyze.Error!*Type {
@ -2534,9 +2538,10 @@ const Analyze = struct {
pub async fn gen( pub async fn gen(
comp: *Compilation, comp: *Compilation,
body_node: *ast.Node, body_node: *ast.Node,
tree_scope: *Scope.AstTree,
scope: *Scope, scope: *Scope,
) !*Code { ) !*Code {
var irb = try Builder.init(comp, scope.findRoot(), scope); var irb = try Builder.init(comp, tree_scope, scope);
errdefer irb.abort(); errdefer irb.abort();
const entry_block = try irb.createBasicBlock(scope, c"Entry"); const entry_block = try irb.createBasicBlock(scope, c"Entry");
@ -2554,9 +2559,8 @@ pub async fn gen(
pub async fn analyze(comp: *Compilation, old_code: *Code, expected_type: ?*Type) !*Code { pub async fn analyze(comp: *Compilation, old_code: *Code, expected_type: ?*Type) !*Code {
const old_entry_bb = old_code.basic_block_list.at(0); const old_entry_bb = old_code.basic_block_list.at(0);
const root_scope = old_entry_bb.scope.findRoot();
var ira = try Analyze.init(comp, root_scope, expected_type); var ira = try Analyze.init(comp, old_code.tree_scope, expected_type);
errdefer ira.abort(); errdefer ira.abort();
const new_entry_bb = try ira.getNewBasicBlock(old_entry_bb, null); const new_entry_bb = try ira.getNewBasicBlock(old_entry_bb, null);

View File

@ -143,7 +143,7 @@ pub const LibCInstallation = struct {
pub async fn findNative(self: *LibCInstallation, loop: *event.Loop) !void { pub async fn findNative(self: *LibCInstallation, loop: *event.Loop) !void {
self.initEmpty(); self.initEmpty();
var group = event.Group(FindError!void).init(loop); var group = event.Group(FindError!void).init(loop);
errdefer group.cancelAll(); errdefer group.deinit();
var windows_sdk: ?*c.ZigWindowsSDK = null; var windows_sdk: ?*c.ZigWindowsSDK = null;
errdefer if (windows_sdk) |sdk| c.zig_free_windows_sdk(@ptrCast(?[*]c.ZigWindowsSDK, sdk)); errdefer if (windows_sdk) |sdk| c.zig_free_windows_sdk(@ptrCast(?[*]c.ZigWindowsSDK, sdk));
@ -233,7 +233,7 @@ pub const LibCInstallation = struct {
const stdlib_path = try std.os.path.join(loop.allocator, search_path, "stdlib.h"); const stdlib_path = try std.os.path.join(loop.allocator, search_path, "stdlib.h");
defer loop.allocator.free(stdlib_path); defer loop.allocator.free(stdlib_path);
if (try fileExists(loop.allocator, stdlib_path)) { if (try fileExists(stdlib_path)) {
self.include_dir = try std.mem.dupe(loop.allocator, u8, search_path); self.include_dir = try std.mem.dupe(loop.allocator, u8, search_path);
return; return;
} }
@ -257,7 +257,7 @@ pub const LibCInstallation = struct {
const stdlib_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "stdlib.h"); const stdlib_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "stdlib.h");
defer loop.allocator.free(stdlib_path); defer loop.allocator.free(stdlib_path);
if (try fileExists(loop.allocator, stdlib_path)) { if (try fileExists(stdlib_path)) {
self.include_dir = result_buf.toOwnedSlice(); self.include_dir = result_buf.toOwnedSlice();
return; return;
} }
@ -285,7 +285,7 @@ pub const LibCInstallation = struct {
} }
const ucrt_lib_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "ucrt.lib"); const ucrt_lib_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "ucrt.lib");
defer loop.allocator.free(ucrt_lib_path); defer loop.allocator.free(ucrt_lib_path);
if (try fileExists(loop.allocator, ucrt_lib_path)) { if (try fileExists(ucrt_lib_path)) {
self.lib_dir = result_buf.toOwnedSlice(); self.lib_dir = result_buf.toOwnedSlice();
return; return;
} }
@ -313,7 +313,7 @@ pub const LibCInstallation = struct {
}, },
}; };
var group = event.Group(FindError!void).init(loop); var group = event.Group(FindError!void).init(loop);
errdefer group.cancelAll(); errdefer group.deinit();
for (dyn_tests) |*dyn_test| { for (dyn_tests) |*dyn_test| {
try group.call(testNativeDynamicLinker, self, loop, dyn_test); try group.call(testNativeDynamicLinker, self, loop, dyn_test);
} }
@ -341,7 +341,6 @@ pub const LibCInstallation = struct {
} }
} }
async fn findNativeKernel32LibDir(self: *LibCInstallation, loop: *event.Loop, sdk: *c.ZigWindowsSDK) FindError!void { async fn findNativeKernel32LibDir(self: *LibCInstallation, loop: *event.Loop, sdk: *c.ZigWindowsSDK) FindError!void {
var search_buf: [2]Search = undefined; var search_buf: [2]Search = undefined;
const searches = fillSearch(&search_buf, sdk); const searches = fillSearch(&search_buf, sdk);
@ -361,7 +360,7 @@ pub const LibCInstallation = struct {
} }
const kernel32_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "kernel32.lib"); const kernel32_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "kernel32.lib");
defer loop.allocator.free(kernel32_path); defer loop.allocator.free(kernel32_path);
if (try fileExists(loop.allocator, kernel32_path)) { if (try fileExists(kernel32_path)) {
self.kernel32_lib_dir = result_buf.toOwnedSlice(); self.kernel32_lib_dir = result_buf.toOwnedSlice();
return; return;
} }
@ -450,13 +449,11 @@ fn fillSearch(search_buf: *[2]Search, sdk: *c.ZigWindowsSDK) []Search {
return search_buf[0..search_end]; return search_buf[0..search_end];
} }
fn fileExists(path: []const u8) !bool {
fn fileExists(allocator: *std.mem.Allocator, path: []const u8) !bool { if (std.os.File.access(path)) |_| {
if (std.os.File.access(allocator, path)) |_| {
return true; return true;
} else |err| switch (err) { } else |err| switch (err) {
error.NotFound, error.PermissionDenied => return false, error.FileNotFound, error.PermissionDenied => return false,
error.OutOfMemory => return error.OutOfMemory,
else => return error.FileSystem, else => return error.FileSystem,
} }
} }

View File

@ -61,7 +61,7 @@ pub async fn link(comp: *Compilation) !void {
ctx.libc = ctx.comp.override_libc orelse blk: { ctx.libc = ctx.comp.override_libc orelse blk: {
switch (comp.target) { switch (comp.target) {
Target.Native => { Target.Native => {
break :blk (await (async comp.event_loop_local.getNativeLibC() catch unreachable)) catch return error.LibCRequiredButNotProvidedOrFound; break :blk (await (async comp.zig_compiler.getNativeLibC() catch unreachable)) catch return error.LibCRequiredButNotProvidedOrFound;
}, },
else => return error.LibCRequiredButNotProvidedOrFound, else => return error.LibCRequiredButNotProvidedOrFound,
} }
@ -83,7 +83,7 @@ pub async fn link(comp: *Compilation) !void {
{ {
// LLD is not thread-safe, so we grab a global lock. // LLD is not thread-safe, so we grab a global lock.
const held = await (async comp.event_loop_local.lld_lock.acquire() catch unreachable); const held = await (async comp.zig_compiler.lld_lock.acquire() catch unreachable);
defer held.release(); defer held.release();
// Not evented I/O. LLD does its own multithreading internally. // Not evented I/O. LLD does its own multithreading internally.

View File

@ -14,7 +14,7 @@ const c = @import("c.zig");
const introspect = @import("introspect.zig"); const introspect = @import("introspect.zig");
const Args = arg.Args; const Args = arg.Args;
const Flag = arg.Flag; const Flag = arg.Flag;
const EventLoopLocal = @import("compilation.zig").EventLoopLocal; const ZigCompiler = @import("compilation.zig").ZigCompiler;
const Compilation = @import("compilation.zig").Compilation; const Compilation = @import("compilation.zig").Compilation;
const Target = @import("target.zig").Target; const Target = @import("target.zig").Target;
const errmsg = @import("errmsg.zig"); const errmsg = @import("errmsg.zig");
@ -24,6 +24,8 @@ var stderr_file: os.File = undefined;
var stderr: *io.OutStream(io.FileOutStream.Error) = undefined; var stderr: *io.OutStream(io.FileOutStream.Error) = undefined;
var stdout: *io.OutStream(io.FileOutStream.Error) = undefined; var stdout: *io.OutStream(io.FileOutStream.Error) = undefined;
const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB
const usage = const usage =
\\usage: zig [command] [options] \\usage: zig [command] [options]
\\ \\
@ -371,6 +373,16 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
os.exit(1); os.exit(1);
} }
var clang_argv_buf = ArrayList([]const u8).init(allocator);
defer clang_argv_buf.deinit();
const mllvm_flags = flags.many("mllvm");
for (mllvm_flags) |mllvm| {
try clang_argv_buf.append("-mllvm");
try clang_argv_buf.append(mllvm);
}
try ZigCompiler.setLlvmArgv(allocator, mllvm_flags);
const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch os.exit(1); const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch os.exit(1);
defer allocator.free(zig_lib_dir); defer allocator.free(zig_lib_dir);
@ -380,11 +392,11 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
try loop.initMultiThreaded(allocator); try loop.initMultiThreaded(allocator);
defer loop.deinit(); defer loop.deinit();
var event_loop_local = try EventLoopLocal.init(&loop); var zig_compiler = try ZigCompiler.init(&loop);
defer event_loop_local.deinit(); defer zig_compiler.deinit();
var comp = try Compilation.create( var comp = try Compilation.create(
&event_loop_local, &zig_compiler,
root_name, root_name,
root_source_file, root_source_file,
Target.Native, Target.Native,
@ -413,16 +425,6 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
comp.linker_script = flags.single("linker-script"); comp.linker_script = flags.single("linker-script");
comp.each_lib_rpath = flags.present("each-lib-rpath"); comp.each_lib_rpath = flags.present("each-lib-rpath");
var clang_argv_buf = ArrayList([]const u8).init(allocator);
defer clang_argv_buf.deinit();
const mllvm_flags = flags.many("mllvm");
for (mllvm_flags) |mllvm| {
try clang_argv_buf.append("-mllvm");
try clang_argv_buf.append(mllvm);
}
comp.llvm_argv = mllvm_flags;
comp.clang_argv = clang_argv_buf.toSliceConst(); comp.clang_argv = clang_argv_buf.toSliceConst();
comp.strip = flags.present("strip"); comp.strip = flags.present("strip");
@ -465,25 +467,28 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
comp.link_out_file = flags.single("output"); comp.link_out_file = flags.single("output");
comp.link_objects = link_objects; comp.link_objects = link_objects;
try comp.build(); comp.start();
const process_build_events_handle = try async<loop.allocator> processBuildEvents(comp, color); const process_build_events_handle = try async<loop.allocator> processBuildEvents(comp, color);
defer cancel process_build_events_handle; defer cancel process_build_events_handle;
loop.run(); loop.run();
} }
async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void { async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void {
var count: usize = 0;
while (true) {
// TODO directly awaiting async should guarantee memory allocation elision // TODO directly awaiting async should guarantee memory allocation elision
const build_event = await (async comp.events.get() catch unreachable); const build_event = await (async comp.events.get() catch unreachable);
count += 1;
switch (build_event) { switch (build_event) {
Compilation.Event.Ok => { Compilation.Event.Ok => {
return; stderr.print("Build {} succeeded\n", count) catch os.exit(1);
}, },
Compilation.Event.Error => |err| { Compilation.Event.Error => |err| {
std.debug.warn("build failed: {}\n", @errorName(err)); stderr.print("Build {} failed: {}\n", count, @errorName(err)) catch os.exit(1);
os.exit(1);
}, },
Compilation.Event.Fail => |msgs| { Compilation.Event.Fail => |msgs| {
stderr.print("Build {} compile errors:\n", count) catch os.exit(1);
for (msgs) |msg| { for (msgs) |msg| {
defer msg.destroy(); defer msg.destroy();
msg.printToFile(&stderr_file, color) catch os.exit(1); msg.printToFile(&stderr_file, color) catch os.exit(1);
@ -491,6 +496,7 @@ async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void {
}, },
} }
} }
}
fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void { fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
return buildOutputType(allocator, args, Compilation.Kind.Exe); return buildOutputType(allocator, args, Compilation.Kind.Exe);
@ -528,33 +534,12 @@ const args_fmt_spec = []Flag{
}; };
const Fmt = struct { const Fmt = struct {
seen: std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8), seen: event.Locked(SeenMap),
queue: std.LinkedList([]const u8),
any_error: bool, any_error: bool,
color: errmsg.Color,
loop: *event.Loop,
// file_path must outlive Fmt const SeenMap = std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8);
fn addToQueue(self: *Fmt, file_path: []const u8) !void {
const new_node = try self.seen.allocator.create(std.LinkedList([]const u8).Node{
.prev = undefined,
.next = undefined,
.data = file_path,
});
if (try self.seen.put(file_path, {})) |_| return;
self.queue.append(new_node);
}
fn addDirToQueue(self: *Fmt, file_path: []const u8) !void {
var dir = try std.os.Dir.open(self.seen.allocator, file_path);
defer dir.close();
while (try dir.next()) |entry| {
if (entry.kind == std.os.Dir.Entry.Kind.Directory or mem.endsWith(u8, entry.name, ".zig")) {
const full_path = try os.path.join(self.seen.allocator, file_path, entry.name);
try self.addToQueue(full_path);
}
}
}
}; };
fn parseLibcPaths(allocator: *Allocator, libc: *LibCInstallation, libc_paths_file: []const u8) void { fn parseLibcPaths(allocator: *Allocator, libc: *LibCInstallation, libc_paths_file: []const u8) void {
@ -587,17 +572,17 @@ fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void {
try loop.initMultiThreaded(allocator); try loop.initMultiThreaded(allocator);
defer loop.deinit(); defer loop.deinit();
var event_loop_local = try EventLoopLocal.init(&loop); var zig_compiler = try ZigCompiler.init(&loop);
defer event_loop_local.deinit(); defer zig_compiler.deinit();
const handle = try async<loop.allocator> findLibCAsync(&event_loop_local); const handle = try async<loop.allocator> findLibCAsync(&zig_compiler);
defer cancel handle; defer cancel handle;
loop.run(); loop.run();
} }
async fn findLibCAsync(event_loop_local: *EventLoopLocal) void { async fn findLibCAsync(zig_compiler: *ZigCompiler) void {
const libc = (await (async event_loop_local.getNativeLibC() catch unreachable)) catch |err| { const libc = (await (async zig_compiler.getNativeLibC() catch unreachable)) catch |err| {
stderr.print("unable to find libc: {}\n", @errorName(err)) catch os.exit(1); stderr.print("unable to find libc: {}\n", @errorName(err)) catch os.exit(1);
os.exit(1); os.exit(1);
}; };
@ -636,7 +621,7 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
var stdin_file = try io.getStdIn(); var stdin_file = try io.getStdIn();
var stdin = io.FileInStream.init(&stdin_file); var stdin = io.FileInStream.init(&stdin_file);
const source_code = try stdin.stream.readAllAlloc(allocator, @maxValue(usize)); const source_code = try stdin.stream.readAllAlloc(allocator, max_src_size);
defer allocator.free(source_code); defer allocator.free(source_code);
var tree = std.zig.parse(allocator, source_code) catch |err| { var tree = std.zig.parse(allocator, source_code) catch |err| {
@ -665,69 +650,146 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
os.exit(1); os.exit(1);
} }
var fmt = Fmt{ var loop: event.Loop = undefined;
.seen = std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8).init(allocator), try loop.initMultiThreaded(allocator);
.queue = std.LinkedList([]const u8).init(), defer loop.deinit();
.any_error = false,
};
for (flags.positionals.toSliceConst()) |file_path| { var result: FmtError!void = undefined;
try fmt.addToQueue(file_path); const main_handle = try async<allocator> asyncFmtMainChecked(
&result,
&loop,
flags,
color,
);
defer cancel main_handle;
loop.run();
return result;
} }
while (fmt.queue.popFirst()) |node| { async fn asyncFmtMainChecked(
const file_path = node.data; result: *(FmtError!void),
loop: *event.Loop,
flags: *const Args,
color: errmsg.Color,
) void {
result.* = await (async asyncFmtMain(loop, flags, color) catch unreachable);
}
var file = try os.File.openRead(allocator, file_path); const FmtError = error{
defer file.close(); SystemResources,
OperationAborted,
IoPending,
BrokenPipe,
Unexpected,
WouldBlock,
FileClosed,
DestinationAddressRequired,
DiskQuota,
FileTooBig,
InputOutput,
NoSpaceLeft,
AccessDenied,
OutOfMemory,
RenameAcrossMountPoints,
ReadOnlyFileSystem,
LinkQuotaExceeded,
FileBusy,
} || os.File.OpenError;
const source_code = io.readFileAlloc(allocator, file_path) catch |err| switch (err) { async fn asyncFmtMain(
loop: *event.Loop,
flags: *const Args,
color: errmsg.Color,
) FmtError!void {
suspend {
resume @handle();
}
var fmt = Fmt{
.seen = event.Locked(Fmt.SeenMap).init(loop, Fmt.SeenMap.init(loop.allocator)),
.any_error = false,
.color = color,
.loop = loop,
};
var group = event.Group(FmtError!void).init(loop);
for (flags.positionals.toSliceConst()) |file_path| {
try group.call(fmtPath, &fmt, file_path);
}
try await (async group.wait() catch unreachable);
if (fmt.any_error) {
os.exit(1);
}
}
async fn fmtPath(fmt: *Fmt, file_path_ref: []const u8) FmtError!void {
const file_path = try std.mem.dupe(fmt.loop.allocator, u8, file_path_ref);
defer fmt.loop.allocator.free(file_path);
{
const held = await (async fmt.seen.acquire() catch unreachable);
defer held.release();
if (try held.value.put(file_path, {})) |_| return;
}
const source_code = (await try async event.fs.readFile(
fmt.loop,
file_path,
max_src_size,
)) catch |err| switch (err) {
error.IsDir => { error.IsDir => {
try fmt.addDirToQueue(file_path); // TODO make event based (and dir.next())
continue; var dir = try std.os.Dir.open(fmt.loop.allocator, file_path);
defer dir.close();
var group = event.Group(FmtError!void).init(fmt.loop);
while (try dir.next()) |entry| {
if (entry.kind == std.os.Dir.Entry.Kind.Directory or mem.endsWith(u8, entry.name, ".zig")) {
const full_path = try os.path.join(fmt.loop.allocator, file_path, entry.name);
try group.call(fmtPath, fmt, full_path);
}
}
return await (async group.wait() catch unreachable);
}, },
else => { else => {
// TODO lock stderr printing
try stderr.print("unable to open '{}': {}\n", file_path, err); try stderr.print("unable to open '{}': {}\n", file_path, err);
fmt.any_error = true; fmt.any_error = true;
continue; return;
}, },
}; };
defer allocator.free(source_code); defer fmt.loop.allocator.free(source_code);
var tree = std.zig.parse(allocator, source_code) catch |err| { var tree = std.zig.parse(fmt.loop.allocator, source_code) catch |err| {
try stderr.print("error parsing file '{}': {}\n", file_path, err); try stderr.print("error parsing file '{}': {}\n", file_path, err);
fmt.any_error = true; fmt.any_error = true;
continue; return;
}; };
defer tree.deinit(); defer tree.deinit();
var error_it = tree.errors.iterator(0); var error_it = tree.errors.iterator(0);
while (error_it.next()) |parse_error| { while (error_it.next()) |parse_error| {
const msg = try errmsg.Msg.createFromParseError(allocator, parse_error, &tree, file_path); const msg = try errmsg.Msg.createFromParseError(fmt.loop.allocator, parse_error, &tree, file_path);
defer msg.destroy(); defer fmt.loop.allocator.destroy(msg);
try msg.printToFile(&stderr_file, color); try msg.printToFile(&stderr_file, fmt.color);
} }
if (tree.errors.len != 0) { if (tree.errors.len != 0) {
fmt.any_error = true; fmt.any_error = true;
continue; return;
} }
const baf = try io.BufferedAtomicFile.create(allocator, file_path); // TODO make this evented
const baf = try io.BufferedAtomicFile.create(fmt.loop.allocator, file_path);
defer baf.destroy(); defer baf.destroy();
const anything_changed = try std.zig.render(allocator, baf.stream(), &tree); const anything_changed = try std.zig.render(fmt.loop.allocator, baf.stream(), &tree);
if (anything_changed) { if (anything_changed) {
try stderr.print("{}\n", file_path); try stderr.print("{}\n", file_path);
try baf.finish(); try baf.finish();
} }
} }
if (fmt.any_error) {
os.exit(1);
}
}
// cmd:targets ///////////////////////////////////////////////////////////////////////////////////// // cmd:targets /////////////////////////////////////////////////////////////////////////////////////
fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void { fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {

View File

@ -36,6 +36,7 @@ pub const Scope = struct {
Id.Defer => @fieldParentPtr(Defer, "base", base).destroy(comp), Id.Defer => @fieldParentPtr(Defer, "base", base).destroy(comp),
Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(comp), Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(comp),
Id.Var => @fieldParentPtr(Var, "base", base).destroy(comp), Id.Var => @fieldParentPtr(Var, "base", base).destroy(comp),
Id.AstTree => @fieldParentPtr(AstTree, "base", base).destroy(comp),
} }
} }
} }
@ -62,6 +63,8 @@ pub const Scope = struct {
Id.CompTime, Id.CompTime,
Id.Var, Id.Var,
=> scope = scope.parent.?, => scope = scope.parent.?,
Id.AstTree => unreachable,
} }
} }
} }
@ -82,6 +85,8 @@ pub const Scope = struct {
Id.Root, Id.Root,
Id.Var, Id.Var,
=> scope = scope.parent orelse return null, => scope = scope.parent orelse return null,
Id.AstTree => unreachable,
} }
} }
} }
@ -97,6 +102,7 @@ pub const Scope = struct {
pub const Id = enum { pub const Id = enum {
Root, Root,
AstTree,
Decls, Decls,
Block, Block,
FnDef, FnDef,
@ -108,13 +114,12 @@ pub const Scope = struct {
pub const Root = struct { pub const Root = struct {
base: Scope, base: Scope,
tree: *ast.Tree,
realpath: []const u8, realpath: []const u8,
decls: *Decls,
/// Creates a Root scope with 1 reference /// Creates a Root scope with 1 reference
/// Takes ownership of realpath /// Takes ownership of realpath
/// Takes ownership of tree, will deinit and destroy when done. pub fn create(comp: *Compilation, realpath: []u8) !*Root {
pub fn create(comp: *Compilation, tree: *ast.Tree, realpath: []u8) !*Root {
const self = try comp.gpa().createOne(Root); const self = try comp.gpa().createOne(Root);
self.* = Root{ self.* = Root{
.base = Scope{ .base = Scope{
@ -122,41 +127,65 @@ pub const Scope = struct {
.parent = null, .parent = null,
.ref_count = std.atomic.Int(usize).init(1), .ref_count = std.atomic.Int(usize).init(1),
}, },
.tree = tree,
.realpath = realpath, .realpath = realpath,
.decls = undefined,
}; };
errdefer comp.gpa().destroy(self);
self.decls = try Decls.create(comp, &self.base);
return self; return self;
} }
pub fn destroy(self: *Root, comp: *Compilation) void { pub fn destroy(self: *Root, comp: *Compilation) void {
// TODO comp.fs_watch.removeFile(self.realpath);
self.decls.base.deref(comp);
comp.gpa().free(self.realpath);
comp.gpa().destroy(self);
}
};
pub const AstTree = struct {
base: Scope,
tree: *ast.Tree,
/// Creates a scope with 1 reference
/// Takes ownership of tree, will deinit and destroy when done.
pub fn create(comp: *Compilation, tree: *ast.Tree, root_scope: *Root) !*AstTree {
const self = try comp.gpa().createOne(AstTree);
self.* = AstTree{
.base = undefined,
.tree = tree,
};
self.base.init(Id.AstTree, &root_scope.base);
return self;
}
pub fn destroy(self: *AstTree, comp: *Compilation) void {
comp.gpa().free(self.tree.source); comp.gpa().free(self.tree.source);
self.tree.deinit(); self.tree.deinit();
comp.gpa().destroy(self.tree); comp.gpa().destroy(self.tree);
comp.gpa().free(self.realpath);
comp.gpa().destroy(self); comp.gpa().destroy(self);
} }
pub fn root(self: *AstTree) *Root {
return self.base.findRoot();
}
}; };
pub const Decls = struct { pub const Decls = struct {
base: Scope, base: Scope,
/// The lock must be respected for writing. However once name_future resolves, /// This table remains Write Locked when the names are incomplete or possibly outdated.
/// readers can freely access it. /// So if a reader manages to grab a lock, it can be sure that the set of names is complete
table: event.Locked(Decl.Table), /// and correct.
table: event.RwLocked(Decl.Table),
/// Once this future is resolved, the table is complete and available for unlocked
/// read-only access. It does not mean all the decls are resolved; it means only that
/// the table has all the names. Each decl in the table has its own resolution state.
name_future: event.Future(void),
/// Creates a Decls scope with 1 reference /// Creates a Decls scope with 1 reference
pub fn create(comp: *Compilation, parent: *Scope) !*Decls { pub fn create(comp: *Compilation, parent: *Scope) !*Decls {
const self = try comp.gpa().createOne(Decls); const self = try comp.gpa().createOne(Decls);
self.* = Decls{ self.* = Decls{
.base = undefined, .base = undefined,
.table = event.Locked(Decl.Table).init(comp.loop, Decl.Table.init(comp.gpa())), .table = event.RwLocked(Decl.Table).init(comp.loop, Decl.Table.init(comp.gpa())),
.name_future = event.Future(void).init(comp.loop),
}; };
self.base.init(Id.Decls, parent); self.base.init(Id.Decls, parent);
return self; return self;
@ -166,11 +195,6 @@ pub const Scope = struct {
self.table.deinit(); self.table.deinit();
comp.gpa().destroy(self); comp.gpa().destroy(self);
} }
pub async fn getTableReadOnly(self: *Decls) *Decl.Table {
_ = await (async self.name_future.get() catch unreachable);
return &self.table.private_data;
}
}; };
pub const Block = struct { pub const Block = struct {

View File

@ -6,7 +6,7 @@ const Compilation = @import("compilation.zig").Compilation;
const introspect = @import("introspect.zig"); const introspect = @import("introspect.zig");
const assertOrPanic = std.debug.assertOrPanic; const assertOrPanic = std.debug.assertOrPanic;
const errmsg = @import("errmsg.zig"); const errmsg = @import("errmsg.zig");
const EventLoopLocal = @import("compilation.zig").EventLoopLocal; const ZigCompiler = @import("compilation.zig").ZigCompiler;
var ctx: TestContext = undefined; var ctx: TestContext = undefined;
@ -25,7 +25,7 @@ const allocator = std.heap.c_allocator;
pub const TestContext = struct { pub const TestContext = struct {
loop: std.event.Loop, loop: std.event.Loop,
event_loop_local: EventLoopLocal, zig_compiler: ZigCompiler,
zig_lib_dir: []u8, zig_lib_dir: []u8,
file_index: std.atomic.Int(usize), file_index: std.atomic.Int(usize),
group: std.event.Group(error!void), group: std.event.Group(error!void),
@ -37,20 +37,20 @@ pub const TestContext = struct {
self.* = TestContext{ self.* = TestContext{
.any_err = {}, .any_err = {},
.loop = undefined, .loop = undefined,
.event_loop_local = undefined, .zig_compiler = undefined,
.zig_lib_dir = undefined, .zig_lib_dir = undefined,
.group = undefined, .group = undefined,
.file_index = std.atomic.Int(usize).init(0), .file_index = std.atomic.Int(usize).init(0),
}; };
try self.loop.initMultiThreaded(allocator); try self.loop.initSingleThreaded(allocator);
errdefer self.loop.deinit(); errdefer self.loop.deinit();
self.event_loop_local = try EventLoopLocal.init(&self.loop); self.zig_compiler = try ZigCompiler.init(&self.loop);
errdefer self.event_loop_local.deinit(); errdefer self.zig_compiler.deinit();
self.group = std.event.Group(error!void).init(&self.loop); self.group = std.event.Group(error!void).init(&self.loop);
errdefer self.group.cancelAll(); errdefer self.group.deinit();
self.zig_lib_dir = try introspect.resolveZigLibDir(allocator); self.zig_lib_dir = try introspect.resolveZigLibDir(allocator);
errdefer allocator.free(self.zig_lib_dir); errdefer allocator.free(self.zig_lib_dir);
@ -62,7 +62,7 @@ pub const TestContext = struct {
fn deinit(self: *TestContext) void { fn deinit(self: *TestContext) void {
std.os.deleteTree(allocator, tmp_dir_name) catch {}; std.os.deleteTree(allocator, tmp_dir_name) catch {};
allocator.free(self.zig_lib_dir); allocator.free(self.zig_lib_dir);
self.event_loop_local.deinit(); self.zig_compiler.deinit();
self.loop.deinit(); self.loop.deinit();
} }
@ -94,10 +94,10 @@ pub const TestContext = struct {
} }
// TODO async I/O // TODO async I/O
try std.io.writeFile(allocator, file1_path, source); try std.io.writeFile(file1_path, source);
var comp = try Compilation.create( var comp = try Compilation.create(
&self.event_loop_local, &self.zig_compiler,
"test", "test",
file1_path, file1_path,
Target.Native, Target.Native,
@ -108,7 +108,7 @@ pub const TestContext = struct {
); );
errdefer comp.destroy(); errdefer comp.destroy();
try comp.build(); comp.start();
try self.group.call(getModuleEvent, comp, source, path, line, column, msg); try self.group.call(getModuleEvent, comp, source, path, line, column, msg);
} }
@ -128,10 +128,10 @@ pub const TestContext = struct {
} }
// TODO async I/O // TODO async I/O
try std.io.writeFile(allocator, file1_path, source); try std.io.writeFile(file1_path, source);
var comp = try Compilation.create( var comp = try Compilation.create(
&self.event_loop_local, &self.zig_compiler,
"test", "test",
file1_path, file1_path,
Target.Native, Target.Native,
@ -144,7 +144,7 @@ pub const TestContext = struct {
_ = try comp.addLinkLib("c", true); _ = try comp.addLinkLib("c", true);
comp.link_out_file = output_file; comp.link_out_file = output_file;
try comp.build(); comp.start();
try self.group.call(getModuleEventSuccess, comp, output_file, expected_output); try self.group.call(getModuleEventSuccess, comp, output_file, expected_output);
} }
@ -212,9 +212,10 @@ pub const TestContext = struct {
Compilation.Event.Fail => |msgs| { Compilation.Event.Fail => |msgs| {
assertOrPanic(msgs.len != 0); assertOrPanic(msgs.len != 0);
for (msgs) |msg| { for (msgs) |msg| {
if (mem.endsWith(u8, msg.getRealPath(), path) and mem.eql(u8, msg.text, text)) { if (mem.endsWith(u8, msg.realpath, path) and mem.eql(u8, msg.text, text)) {
const first_token = msg.getTree().tokens.at(msg.span.first); const span = msg.getSpan();
const last_token = msg.getTree().tokens.at(msg.span.first); const first_token = msg.getTree().tokens.at(span.first);
const last_token = msg.getTree().tokens.at(span.first);
const start_loc = msg.getTree().tokenLocationPtr(0, first_token); const start_loc = msg.getTree().tokenLocationPtr(0, first_token);
if (start_loc.line + 1 == line and start_loc.column + 1 == column) { if (start_loc.line + 1 == line and start_loc.column + 1 == column) {
return; return;

View File

@ -184,8 +184,8 @@ pub const Type = struct {
if (await (async base.abi_alignment.start() catch unreachable)) |ptr| return ptr.*; if (await (async base.abi_alignment.start() catch unreachable)) |ptr| return ptr.*;
{ {
const held = try comp.event_loop_local.getAnyLlvmContext(); const held = try comp.zig_compiler.getAnyLlvmContext();
defer held.release(comp.event_loop_local); defer held.release(comp.zig_compiler);
const llvm_context = held.node.data; const llvm_context = held.node.data;

View File

@ -1850,7 +1850,7 @@ struct ScopeDecls {
HashMap<Buf *, Tld *, buf_hash, buf_eql_buf> decl_table; HashMap<Buf *, Tld *, buf_hash, buf_eql_buf> decl_table;
bool safety_off; bool safety_off;
AstNode *safety_set_node; AstNode *safety_set_node;
bool fast_math_off; bool fast_math_on;
AstNode *fast_math_set_node; AstNode *fast_math_set_node;
ImportTableEntry *import; ImportTableEntry *import;
// If this is a scope from a container, this is the type entry, otherwise null // If this is a scope from a container, this is the type entry, otherwise null
@ -1870,7 +1870,7 @@ struct ScopeBlock {
bool safety_off; bool safety_off;
AstNode *safety_set_node; AstNode *safety_set_node;
bool fast_math_off; bool fast_math_on;
AstNode *fast_math_set_node; AstNode *fast_math_set_node;
}; };

View File

@ -19,12 +19,12 @@
static const size_t default_backward_branch_quota = 1000; static const size_t default_backward_branch_quota = 1000;
static void resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type); static Error resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type);
static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type); static Error resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type);
static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type); static Error ATTRIBUTE_MUST_USE resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type);
static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type); static Error ATTRIBUTE_MUST_USE resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type);
static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type); static Error ATTRIBUTE_MUST_USE resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type);
static void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry); static void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry);
ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg) { ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg) {
@ -370,15 +370,20 @@ uint64_t type_size_bits(CodeGen *g, TypeTableEntry *type_entry) {
return LLVMSizeOfTypeInBits(g->target_data_ref, type_entry->type_ref); return LLVMSizeOfTypeInBits(g->target_data_ref, type_entry->type_ref);
} }
bool type_is_copyable(CodeGen *g, TypeTableEntry *type_entry) { Result<bool> type_is_copyable(CodeGen *g, TypeTableEntry *type_entry) {
type_ensure_zero_bits_known(g, type_entry); Error err;
if ((err = type_ensure_zero_bits_known(g, type_entry)))
return err;
if (!type_has_bits(type_entry)) if (!type_has_bits(type_entry))
return true; return true;
if (!handle_is_ptr(type_entry)) if (!handle_is_ptr(type_entry))
return true; return true;
ensure_complete_type(g, type_entry); if ((err = ensure_complete_type(g, type_entry)))
return err;
return type_entry->is_copyable; return type_entry->is_copyable;
} }
@ -447,7 +452,7 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
} }
} }
type_ensure_zero_bits_known(g, child_type); assertNoError(type_ensure_zero_bits_known(g, child_type));
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdPointer); TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdPointer);
entry->is_copyable = true; entry->is_copyable = true;
@ -554,11 +559,11 @@ TypeTableEntry *get_optional_type(CodeGen *g, TypeTableEntry *child_type) {
TypeTableEntry *entry = child_type->optional_parent; TypeTableEntry *entry = child_type->optional_parent;
return entry; return entry;
} else { } else {
ensure_complete_type(g, child_type); assertNoError(ensure_complete_type(g, child_type));
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdOptional); TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdOptional);
assert(child_type->type_ref || child_type->zero_bits); assert(child_type->type_ref || child_type->zero_bits);
entry->is_copyable = type_is_copyable(g, child_type); entry->is_copyable = type_is_copyable(g, child_type).unwrap();
buf_resize(&entry->name, 0); buf_resize(&entry->name, 0);
buf_appendf(&entry->name, "?%s", buf_ptr(&child_type->name)); buf_appendf(&entry->name, "?%s", buf_ptr(&child_type->name));
@ -650,7 +655,7 @@ TypeTableEntry *get_error_union_type(CodeGen *g, TypeTableEntry *err_set_type, T
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdErrorUnion); TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdErrorUnion);
entry->is_copyable = true; entry->is_copyable = true;
assert(payload_type->di_type); assert(payload_type->di_type);
ensure_complete_type(g, payload_type); assertNoError(ensure_complete_type(g, payload_type));
buf_resize(&entry->name, 0); buf_resize(&entry->name, 0);
buf_appendf(&entry->name, "%s!%s", buf_ptr(&err_set_type->name), buf_ptr(&payload_type->name)); buf_appendf(&entry->name, "%s!%s", buf_ptr(&err_set_type->name), buf_ptr(&payload_type->name));
@ -739,7 +744,7 @@ TypeTableEntry *get_array_type(CodeGen *g, TypeTableEntry *child_type, uint64_t
return entry; return entry;
} }
ensure_complete_type(g, child_type); assertNoError(ensure_complete_type(g, child_type));
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdArray); TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdArray);
entry->zero_bits = (array_size == 0) || child_type->zero_bits; entry->zero_bits = (array_size == 0) || child_type->zero_bits;
@ -1050,13 +1055,13 @@ TypeTableEntry *get_ptr_to_stack_trace_type(CodeGen *g) {
} }
TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
Error err;
auto table_entry = g->fn_type_table.maybe_get(fn_type_id); auto table_entry = g->fn_type_table.maybe_get(fn_type_id);
if (table_entry) { if (table_entry) {
return table_entry->value; return table_entry->value;
} }
if (fn_type_id->return_type != nullptr) { if (fn_type_id->return_type != nullptr) {
ensure_complete_type(g, fn_type_id->return_type); if ((err = ensure_complete_type(g, fn_type_id->return_type)))
if (type_is_invalid(fn_type_id->return_type))
return g->builtin_types.entry_invalid; return g->builtin_types.entry_invalid;
assert(fn_type_id->return_type->id != TypeTableEntryIdOpaque); assert(fn_type_id->return_type->id != TypeTableEntryIdOpaque);
} else { } else {
@ -1172,8 +1177,7 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
gen_param_info->src_index = i; gen_param_info->src_index = i;
gen_param_info->gen_index = SIZE_MAX; gen_param_info->gen_index = SIZE_MAX;
ensure_complete_type(g, type_entry); if ((err = ensure_complete_type(g, type_entry)))
if (type_is_invalid(type_entry))
return g->builtin_types.entry_invalid; return g->builtin_types.entry_invalid;
if (type_has_bits(type_entry)) { if (type_has_bits(type_entry)) {
@ -1493,6 +1497,7 @@ TypeTableEntry *get_auto_err_set_type(CodeGen *g, FnTableEntry *fn_entry) {
static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_scope, FnTableEntry *fn_entry) { static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_scope, FnTableEntry *fn_entry) {
assert(proto_node->type == NodeTypeFnProto); assert(proto_node->type == NodeTypeFnProto);
AstNodeFnProto *fn_proto = &proto_node->data.fn_proto; AstNodeFnProto *fn_proto = &proto_node->data.fn_proto;
Error err;
FnTypeId fn_type_id = {0}; FnTypeId fn_type_id = {0};
init_fn_type_id(&fn_type_id, proto_node, proto_node->data.fn_proto.params.length); init_fn_type_id(&fn_type_id, proto_node, proto_node->data.fn_proto.params.length);
@ -1550,7 +1555,8 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
return g->builtin_types.entry_invalid; return g->builtin_types.entry_invalid;
} }
if (!calling_convention_allows_zig_types(fn_type_id.cc)) { if (!calling_convention_allows_zig_types(fn_type_id.cc)) {
type_ensure_zero_bits_known(g, type_entry); if ((err = type_ensure_zero_bits_known(g, type_entry)))
return g->builtin_types.entry_invalid;
if (!type_has_bits(type_entry)) { if (!type_has_bits(type_entry)) {
add_node_error(g, param_node->data.param_decl.type, add_node_error(g, param_node->data.param_decl.type,
buf_sprintf("parameter of type '%s' has 0 bits; not allowed in function with calling convention '%s'", buf_sprintf("parameter of type '%s' has 0 bits; not allowed in function with calling convention '%s'",
@ -1598,7 +1604,8 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
case TypeTableEntryIdUnion: case TypeTableEntryIdUnion:
case TypeTableEntryIdFn: case TypeTableEntryIdFn:
case TypeTableEntryIdPromise: case TypeTableEntryIdPromise:
type_ensure_zero_bits_known(g, type_entry); if ((err = type_ensure_zero_bits_known(g, type_entry)))
return g->builtin_types.entry_invalid;
if (type_requires_comptime(type_entry)) { if (type_requires_comptime(type_entry)) {
add_node_error(g, param_node->data.param_decl.type, add_node_error(g, param_node->data.param_decl.type,
buf_sprintf("parameter of type '%s' must be declared comptime", buf_sprintf("parameter of type '%s' must be declared comptime",
@ -1729,24 +1736,28 @@ bool type_is_invalid(TypeTableEntry *type_entry) {
} }
static void resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type) { static Error resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type) {
assert(enum_type->id == TypeTableEntryIdEnum); assert(enum_type->id == TypeTableEntryIdEnum);
if (enum_type->data.enumeration.complete) if (enum_type->data.enumeration.is_invalid)
return; return ErrorSemanticAnalyzeFail;
resolve_enum_zero_bits(g, enum_type); if (enum_type->data.enumeration.complete)
if (type_is_invalid(enum_type)) return ErrorNone;
return;
Error err;
if ((err = resolve_enum_zero_bits(g, enum_type)))
return err;
AstNode *decl_node = enum_type->data.enumeration.decl_node; AstNode *decl_node = enum_type->data.enumeration.decl_node;
if (enum_type->data.enumeration.embedded_in_current) { if (enum_type->data.enumeration.embedded_in_current) {
if (!enum_type->data.enumeration.reported_infinite_err) { if (!enum_type->data.enumeration.reported_infinite_err) {
enum_type->data.enumeration.is_invalid = true;
enum_type->data.enumeration.reported_infinite_err = true; enum_type->data.enumeration.reported_infinite_err = true;
add_node_error(g, decl_node, buf_sprintf("enum '%s' contains itself", buf_ptr(&enum_type->name))); add_node_error(g, decl_node, buf_sprintf("enum '%s' contains itself", buf_ptr(&enum_type->name)));
} }
return; return ErrorSemanticAnalyzeFail;
} }
assert(!enum_type->data.enumeration.zero_bits_loop_flag); assert(!enum_type->data.enumeration.zero_bits_loop_flag);
@ -1778,7 +1789,7 @@ static void resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type) {
enum_type->data.enumeration.complete = true; enum_type->data.enumeration.complete = true;
if (enum_type->data.enumeration.is_invalid) if (enum_type->data.enumeration.is_invalid)
return; return ErrorSemanticAnalyzeFail;
if (enum_type->zero_bits) { if (enum_type->zero_bits) {
enum_type->type_ref = LLVMVoidType(); enum_type->type_ref = LLVMVoidType();
@ -1797,7 +1808,7 @@ static void resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type) {
ZigLLVMReplaceTemporary(g->dbuilder, enum_type->di_type, replacement_di_type); ZigLLVMReplaceTemporary(g->dbuilder, enum_type->di_type, replacement_di_type);
enum_type->di_type = replacement_di_type; enum_type->di_type = replacement_di_type;
return; return ErrorNone;
} }
TypeTableEntry *tag_int_type = enum_type->data.enumeration.tag_int_type; TypeTableEntry *tag_int_type = enum_type->data.enumeration.tag_int_type;
@ -1815,6 +1826,7 @@ static void resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type) {
ZigLLVMReplaceTemporary(g->dbuilder, enum_type->di_type, tag_di_type); ZigLLVMReplaceTemporary(g->dbuilder, enum_type->di_type, tag_di_type);
enum_type->di_type = tag_di_type; enum_type->di_type = tag_di_type;
return ErrorNone;
} }
@ -1897,15 +1909,15 @@ TypeTableEntry *get_struct_type(CodeGen *g, const char *type_name, const char *f
return struct_type; return struct_type;
} }
static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) { static Error resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
assert(struct_type->id == TypeTableEntryIdStruct); assert(struct_type->id == TypeTableEntryIdStruct);
if (struct_type->data.structure.complete) if (struct_type->data.structure.complete)
return; return ErrorNone;
resolve_struct_zero_bits(g, struct_type); Error err;
if (struct_type->data.structure.is_invalid) if ((err = resolve_struct_zero_bits(g, struct_type)))
return; return err;
AstNode *decl_node = struct_type->data.structure.decl_node; AstNode *decl_node = struct_type->data.structure.decl_node;
@ -1916,7 +1928,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
add_node_error(g, decl_node, add_node_error(g, decl_node,
buf_sprintf("struct '%s' contains itself", buf_ptr(&struct_type->name))); buf_sprintf("struct '%s' contains itself", buf_ptr(&struct_type->name)));
} }
return; return ErrorSemanticAnalyzeFail;
} }
assert(!struct_type->data.structure.zero_bits_loop_flag); assert(!struct_type->data.structure.zero_bits_loop_flag);
@ -1943,8 +1955,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
TypeStructField *type_struct_field = &struct_type->data.structure.fields[i]; TypeStructField *type_struct_field = &struct_type->data.structure.fields[i];
TypeTableEntry *field_type = type_struct_field->type_entry; TypeTableEntry *field_type = type_struct_field->type_entry;
ensure_complete_type(g, field_type); if ((err = ensure_complete_type(g, field_type))) {
if (type_is_invalid(field_type)) {
struct_type->data.structure.is_invalid = true; struct_type->data.structure.is_invalid = true;
break; break;
} }
@ -2026,7 +2037,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
struct_type->data.structure.complete = true; struct_type->data.structure.complete = true;
if (struct_type->data.structure.is_invalid) if (struct_type->data.structure.is_invalid)
return; return ErrorSemanticAnalyzeFail;
if (struct_type->zero_bits) { if (struct_type->zero_bits) {
struct_type->type_ref = LLVMVoidType(); struct_type->type_ref = LLVMVoidType();
@ -2045,7 +2056,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
0, nullptr, di_element_types, (int)debug_field_count, 0, nullptr, ""); 0, nullptr, di_element_types, (int)debug_field_count, 0, nullptr, "");
ZigLLVMReplaceTemporary(g->dbuilder, struct_type->di_type, replacement_di_type); ZigLLVMReplaceTemporary(g->dbuilder, struct_type->di_type, replacement_di_type);
struct_type->di_type = replacement_di_type; struct_type->di_type = replacement_di_type;
return; return ErrorNone;
} }
assert(struct_type->di_type); assert(struct_type->di_type);
@ -2128,17 +2139,19 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
ZigLLVMReplaceTemporary(g->dbuilder, struct_type->di_type, replacement_di_type); ZigLLVMReplaceTemporary(g->dbuilder, struct_type->di_type, replacement_di_type);
struct_type->di_type = replacement_di_type; struct_type->di_type = replacement_di_type;
return ErrorNone;
} }
static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) { static Error resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
assert(union_type->id == TypeTableEntryIdUnion); assert(union_type->id == TypeTableEntryIdUnion);
if (union_type->data.unionation.complete) if (union_type->data.unionation.complete)
return; return ErrorNone;
resolve_union_zero_bits(g, union_type); Error err;
if (type_is_invalid(union_type)) if ((err = resolve_union_zero_bits(g, union_type)))
return; return err;
AstNode *decl_node = union_type->data.unionation.decl_node; AstNode *decl_node = union_type->data.unionation.decl_node;
@ -2148,7 +2161,7 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
union_type->data.unionation.is_invalid = true; union_type->data.unionation.is_invalid = true;
add_node_error(g, decl_node, buf_sprintf("union '%s' contains itself", buf_ptr(&union_type->name))); add_node_error(g, decl_node, buf_sprintf("union '%s' contains itself", buf_ptr(&union_type->name)));
} }
return; return ErrorSemanticAnalyzeFail;
} }
assert(!union_type->data.unionation.zero_bits_loop_flag); assert(!union_type->data.unionation.zero_bits_loop_flag);
@ -2179,8 +2192,7 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
TypeUnionField *union_field = &union_type->data.unionation.fields[i]; TypeUnionField *union_field = &union_type->data.unionation.fields[i];
TypeTableEntry *field_type = union_field->type_entry; TypeTableEntry *field_type = union_field->type_entry;
ensure_complete_type(g, field_type); if ((err = ensure_complete_type(g, field_type))) {
if (type_is_invalid(field_type)) {
union_type->data.unionation.is_invalid = true; union_type->data.unionation.is_invalid = true;
continue; continue;
} }
@ -2219,7 +2231,7 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
union_type->data.unionation.most_aligned_union_member = most_aligned_union_member; union_type->data.unionation.most_aligned_union_member = most_aligned_union_member;
if (union_type->data.unionation.is_invalid) if (union_type->data.unionation.is_invalid)
return; return ErrorSemanticAnalyzeFail;
if (union_type->zero_bits) { if (union_type->zero_bits) {
union_type->type_ref = LLVMVoidType(); union_type->type_ref = LLVMVoidType();
@ -2238,7 +2250,7 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
ZigLLVMReplaceTemporary(g->dbuilder, union_type->di_type, replacement_di_type); ZigLLVMReplaceTemporary(g->dbuilder, union_type->di_type, replacement_di_type);
union_type->di_type = replacement_di_type; union_type->di_type = replacement_di_type;
return; return ErrorNone;
} }
uint64_t padding_in_bits = biggest_size_in_bits - size_of_most_aligned_member_in_bits; uint64_t padding_in_bits = biggest_size_in_bits - size_of_most_aligned_member_in_bits;
@ -2274,7 +2286,7 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
ZigLLVMReplaceTemporary(g->dbuilder, union_type->di_type, replacement_di_type); ZigLLVMReplaceTemporary(g->dbuilder, union_type->di_type, replacement_di_type);
union_type->di_type = replacement_di_type; union_type->di_type = replacement_di_type;
return; return ErrorNone;
} }
LLVMTypeRef union_type_ref; LLVMTypeRef union_type_ref;
@ -2293,7 +2305,7 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
ZigLLVMReplaceTemporary(g->dbuilder, union_type->di_type, tag_type->di_type); ZigLLVMReplaceTemporary(g->dbuilder, union_type->di_type, tag_type->di_type);
union_type->di_type = tag_type->di_type; union_type->di_type = tag_type->di_type;
return; return ErrorNone;
} else { } else {
union_type_ref = most_aligned_union_member->type_ref; union_type_ref = most_aligned_union_member->type_ref;
} }
@ -2367,19 +2379,21 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
ZigLLVMReplaceTemporary(g->dbuilder, union_type->di_type, replacement_di_type); ZigLLVMReplaceTemporary(g->dbuilder, union_type->di_type, replacement_di_type);
union_type->di_type = replacement_di_type; union_type->di_type = replacement_di_type;
return ErrorNone;
} }
static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) { static Error resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) {
assert(enum_type->id == TypeTableEntryIdEnum); assert(enum_type->id == TypeTableEntryIdEnum);
if (enum_type->data.enumeration.zero_bits_known) if (enum_type->data.enumeration.zero_bits_known)
return; return ErrorNone;
if (enum_type->data.enumeration.zero_bits_loop_flag) { if (enum_type->data.enumeration.zero_bits_loop_flag) {
add_node_error(g, enum_type->data.enumeration.decl_node, add_node_error(g, enum_type->data.enumeration.decl_node,
buf_sprintf("'%s' depends on itself", buf_ptr(&enum_type->name))); buf_sprintf("'%s' depends on itself", buf_ptr(&enum_type->name)));
enum_type->data.enumeration.is_invalid = true; enum_type->data.enumeration.is_invalid = true;
return; return ErrorSemanticAnalyzeFail;
} }
enum_type->data.enumeration.zero_bits_loop_flag = true; enum_type->data.enumeration.zero_bits_loop_flag = true;
@ -2398,7 +2412,7 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) {
enum_type->data.enumeration.is_invalid = true; enum_type->data.enumeration.is_invalid = true;
enum_type->data.enumeration.zero_bits_loop_flag = false; enum_type->data.enumeration.zero_bits_loop_flag = false;
enum_type->data.enumeration.zero_bits_known = true; enum_type->data.enumeration.zero_bits_known = true;
return; return ErrorSemanticAnalyzeFail;
} }
enum_type->data.enumeration.src_field_count = field_count; enum_type->data.enumeration.src_field_count = field_count;
@ -2525,13 +2539,23 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) {
enum_type->data.enumeration.zero_bits_loop_flag = false; enum_type->data.enumeration.zero_bits_loop_flag = false;
enum_type->zero_bits = !type_has_bits(tag_int_type); enum_type->zero_bits = !type_has_bits(tag_int_type);
enum_type->data.enumeration.zero_bits_known = true; enum_type->data.enumeration.zero_bits_known = true;
if (enum_type->data.enumeration.is_invalid)
return ErrorSemanticAnalyzeFail;
return ErrorNone;
} }
static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) { static Error resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) {
assert(struct_type->id == TypeTableEntryIdStruct); assert(struct_type->id == TypeTableEntryIdStruct);
Error err;
if (struct_type->data.structure.is_invalid)
return ErrorSemanticAnalyzeFail;
if (struct_type->data.structure.zero_bits_known) if (struct_type->data.structure.zero_bits_known)
return; return ErrorNone;
if (struct_type->data.structure.zero_bits_loop_flag) { if (struct_type->data.structure.zero_bits_loop_flag) {
// If we get here it's due to recursion. This is a design flaw in the compiler, // If we get here it's due to recursion. This is a design flaw in the compiler,
@ -2547,7 +2571,7 @@ static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) {
struct_type->data.structure.abi_alignment = LLVMABIAlignmentOfType(g->target_data_ref, LLVMPointerType(LLVMInt8Type(), 0)); struct_type->data.structure.abi_alignment = LLVMABIAlignmentOfType(g->target_data_ref, LLVMPointerType(LLVMInt8Type(), 0));
} }
} }
return; return ErrorNone;
} }
struct_type->data.structure.zero_bits_loop_flag = true; struct_type->data.structure.zero_bits_loop_flag = true;
@ -2596,8 +2620,7 @@ static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) {
buf_sprintf("enums, not structs, support field assignment")); buf_sprintf("enums, not structs, support field assignment"));
} }
type_ensure_zero_bits_known(g, field_type); if ((err = type_ensure_zero_bits_known(g, field_type))) {
if (type_is_invalid(field_type)) {
struct_type->data.structure.is_invalid = true; struct_type->data.structure.is_invalid = true;
continue; continue;
} }
@ -2634,16 +2657,27 @@ static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) {
struct_type->data.structure.gen_field_count = (uint32_t)gen_field_index; struct_type->data.structure.gen_field_count = (uint32_t)gen_field_index;
struct_type->zero_bits = (gen_field_index == 0); struct_type->zero_bits = (gen_field_index == 0);
struct_type->data.structure.zero_bits_known = true; struct_type->data.structure.zero_bits_known = true;
if (struct_type->data.structure.is_invalid) {
return ErrorSemanticAnalyzeFail;
} }
static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) { return ErrorNone;
}
static Error resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
assert(union_type->id == TypeTableEntryIdUnion); assert(union_type->id == TypeTableEntryIdUnion);
Error err;
if (union_type->data.unionation.is_invalid)
return ErrorSemanticAnalyzeFail;
if (union_type->data.unionation.zero_bits_known) if (union_type->data.unionation.zero_bits_known)
return; return ErrorNone;
if (type_is_invalid(union_type)) if (type_is_invalid(union_type))
return; return ErrorSemanticAnalyzeFail;
if (union_type->data.unionation.zero_bits_loop_flag) { if (union_type->data.unionation.zero_bits_loop_flag) {
// If we get here it's due to recursion. From this we conclude that the struct is // If we get here it's due to recursion. From this we conclude that the struct is
@ -2660,7 +2694,7 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
LLVMPointerType(LLVMInt8Type(), 0)); LLVMPointerType(LLVMInt8Type(), 0));
} }
} }
return; return ErrorNone;
} }
union_type->data.unionation.zero_bits_loop_flag = true; union_type->data.unionation.zero_bits_loop_flag = true;
@ -2679,7 +2713,7 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
union_type->data.unionation.is_invalid = true; union_type->data.unionation.is_invalid = true;
union_type->data.unionation.zero_bits_loop_flag = false; union_type->data.unionation.zero_bits_loop_flag = false;
union_type->data.unionation.zero_bits_known = true; union_type->data.unionation.zero_bits_known = true;
return; return ErrorSemanticAnalyzeFail;
} }
union_type->data.unionation.src_field_count = field_count; union_type->data.unionation.src_field_count = field_count;
union_type->data.unionation.fields = allocate<TypeUnionField>(field_count); union_type->data.unionation.fields = allocate<TypeUnionField>(field_count);
@ -2711,13 +2745,13 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
tag_int_type = analyze_type_expr(g, scope, enum_type_node); tag_int_type = analyze_type_expr(g, scope, enum_type_node);
if (type_is_invalid(tag_int_type)) { if (type_is_invalid(tag_int_type)) {
union_type->data.unionation.is_invalid = true; union_type->data.unionation.is_invalid = true;
return; return ErrorSemanticAnalyzeFail;
} }
if (tag_int_type->id != TypeTableEntryIdInt) { if (tag_int_type->id != TypeTableEntryIdInt) {
add_node_error(g, enum_type_node, add_node_error(g, enum_type_node,
buf_sprintf("expected integer tag type, found '%s'", buf_ptr(&tag_int_type->name))); buf_sprintf("expected integer tag type, found '%s'", buf_ptr(&tag_int_type->name)));
union_type->data.unionation.is_invalid = true; union_type->data.unionation.is_invalid = true;
return; return ErrorSemanticAnalyzeFail;
} }
} else { } else {
tag_int_type = get_smallest_unsigned_int_type(g, field_count - 1); tag_int_type = get_smallest_unsigned_int_type(g, field_count - 1);
@ -2744,13 +2778,13 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
TypeTableEntry *enum_type = analyze_type_expr(g, scope, enum_type_node); TypeTableEntry *enum_type = analyze_type_expr(g, scope, enum_type_node);
if (type_is_invalid(enum_type)) { if (type_is_invalid(enum_type)) {
union_type->data.unionation.is_invalid = true; union_type->data.unionation.is_invalid = true;
return; return ErrorSemanticAnalyzeFail;
} }
if (enum_type->id != TypeTableEntryIdEnum) { if (enum_type->id != TypeTableEntryIdEnum) {
union_type->data.unionation.is_invalid = true; union_type->data.unionation.is_invalid = true;
add_node_error(g, enum_type_node, add_node_error(g, enum_type_node,
buf_sprintf("expected enum tag type, found '%s'", buf_ptr(&enum_type->name))); buf_sprintf("expected enum tag type, found '%s'", buf_ptr(&enum_type->name)));
return; return ErrorSemanticAnalyzeFail;
} }
tag_type = enum_type; tag_type = enum_type;
abi_alignment_so_far = get_abi_alignment(g, enum_type); // this populates src_field_count abi_alignment_so_far = get_abi_alignment(g, enum_type); // this populates src_field_count
@ -2789,8 +2823,7 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
} }
} else { } else {
field_type = analyze_type_expr(g, scope, field_node->data.struct_field.type); field_type = analyze_type_expr(g, scope, field_node->data.struct_field.type);
type_ensure_zero_bits_known(g, field_type); if ((err = type_ensure_zero_bits_known(g, field_type))) {
if (type_is_invalid(field_type)) {
union_type->data.unionation.is_invalid = true; union_type->data.unionation.is_invalid = true;
continue; continue;
} }
@ -2883,7 +2916,7 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
union_type->data.unionation.abi_alignment = abi_alignment_so_far; union_type->data.unionation.abi_alignment = abi_alignment_so_far;
if (union_type->data.unionation.is_invalid) if (union_type->data.unionation.is_invalid)
return; return ErrorSemanticAnalyzeFail;
bool src_have_tag = decl_node->data.container_decl.auto_enum || bool src_have_tag = decl_node->data.container_decl.auto_enum ||
decl_node->data.container_decl.init_arg_expr != nullptr; decl_node->data.container_decl.init_arg_expr != nullptr;
@ -2905,7 +2938,7 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
add_node_error(g, source_node, add_node_error(g, source_node,
buf_sprintf("%s union does not support enum tag type", qual_str)); buf_sprintf("%s union does not support enum tag type", qual_str));
union_type->data.unionation.is_invalid = true; union_type->data.unionation.is_invalid = true;
return; return ErrorSemanticAnalyzeFail;
} }
if (create_enum_type) { if (create_enum_type) {
@ -2970,6 +3003,11 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
union_type->data.unionation.gen_field_count = gen_field_index; union_type->data.unionation.gen_field_count = gen_field_index;
union_type->zero_bits = (gen_field_index == 0 && (field_count < 2 || !src_have_tag)); union_type->zero_bits = (gen_field_index == 0 && (field_count < 2 || !src_have_tag));
union_type->data.unionation.zero_bits_known = true; union_type->data.unionation.zero_bits_known = true;
if (union_type->data.unionation.is_invalid)
return ErrorSemanticAnalyzeFail;
return ErrorNone;
} }
static void get_fully_qualified_decl_name_internal(Buf *buf, Scope *scope, uint8_t sep) { static void get_fully_qualified_decl_name_internal(Buf *buf, Scope *scope, uint8_t sep) {
@ -3035,7 +3073,7 @@ static bool scope_is_root_decls(Scope *scope) {
static void wrong_panic_prototype(CodeGen *g, AstNode *proto_node, TypeTableEntry *fn_type) { static void wrong_panic_prototype(CodeGen *g, AstNode *proto_node, TypeTableEntry *fn_type) {
add_node_error(g, proto_node, add_node_error(g, proto_node,
buf_sprintf("expected 'fn([]const u8, ?&builtin.StackTrace) unreachable', found '%s'", buf_sprintf("expected 'fn([]const u8, ?*builtin.StackTrace) noreturn', found '%s'",
buf_ptr(&fn_type->name))); buf_ptr(&fn_type->name)));
} }
@ -3463,13 +3501,13 @@ VariableTableEntry *add_variable(CodeGen *g, AstNode *source_node, Scope *parent
variable_entry->shadowable = false; variable_entry->shadowable = false;
variable_entry->mem_slot_index = SIZE_MAX; variable_entry->mem_slot_index = SIZE_MAX;
variable_entry->src_arg_index = SIZE_MAX; variable_entry->src_arg_index = SIZE_MAX;
variable_entry->align_bytes = get_abi_alignment(g, value->type);
assert(name); assert(name);
buf_init_from_buf(&variable_entry->name, name); buf_init_from_buf(&variable_entry->name, name);
if (value->type->id != TypeTableEntryIdInvalid) { if (!type_is_invalid(value->type)) {
variable_entry->align_bytes = get_abi_alignment(g, value->type);
VariableTableEntry *existing_var = find_variable(g, parent_scope, name); VariableTableEntry *existing_var = find_variable(g, parent_scope, name);
if (existing_var && !existing_var->shadowable) { if (existing_var && !existing_var->shadowable) {
ErrorMsg *msg = add_node_error(g, source_node, ErrorMsg *msg = add_node_error(g, source_node,
@ -5311,13 +5349,13 @@ ConstExprValue *create_const_arg_tuple(CodeGen *g, size_t arg_index_start, size_
void init_const_undefined(CodeGen *g, ConstExprValue *const_val) { void init_const_undefined(CodeGen *g, ConstExprValue *const_val) {
Error err;
TypeTableEntry *wanted_type = const_val->type; TypeTableEntry *wanted_type = const_val->type;
if (wanted_type->id == TypeTableEntryIdArray) { if (wanted_type->id == TypeTableEntryIdArray) {
const_val->special = ConstValSpecialStatic; const_val->special = ConstValSpecialStatic;
const_val->data.x_array.special = ConstArraySpecialUndef; const_val->data.x_array.special = ConstArraySpecialUndef;
} else if (wanted_type->id == TypeTableEntryIdStruct) { } else if (wanted_type->id == TypeTableEntryIdStruct) {
ensure_complete_type(g, wanted_type); if ((err = ensure_complete_type(g, wanted_type))) {
if (type_is_invalid(wanted_type)) {
return; return;
} }
@ -5350,27 +5388,33 @@ ConstExprValue *create_const_vals(size_t count) {
return vals; return vals;
} }
void ensure_complete_type(CodeGen *g, TypeTableEntry *type_entry) { Error ensure_complete_type(CodeGen *g, TypeTableEntry *type_entry) {
if (type_is_invalid(type_entry))
return ErrorSemanticAnalyzeFail;
if (type_entry->id == TypeTableEntryIdStruct) { if (type_entry->id == TypeTableEntryIdStruct) {
if (!type_entry->data.structure.complete) if (!type_entry->data.structure.complete)
resolve_struct_type(g, type_entry); return resolve_struct_type(g, type_entry);
} else if (type_entry->id == TypeTableEntryIdEnum) { } else if (type_entry->id == TypeTableEntryIdEnum) {
if (!type_entry->data.enumeration.complete) if (!type_entry->data.enumeration.complete)
resolve_enum_type(g, type_entry); return resolve_enum_type(g, type_entry);
} else if (type_entry->id == TypeTableEntryIdUnion) { } else if (type_entry->id == TypeTableEntryIdUnion) {
if (!type_entry->data.unionation.complete) if (!type_entry->data.unionation.complete)
resolve_union_type(g, type_entry); return resolve_union_type(g, type_entry);
} }
return ErrorNone;
} }
void type_ensure_zero_bits_known(CodeGen *g, TypeTableEntry *type_entry) { Error type_ensure_zero_bits_known(CodeGen *g, TypeTableEntry *type_entry) {
if (type_is_invalid(type_entry))
return ErrorSemanticAnalyzeFail;
if (type_entry->id == TypeTableEntryIdStruct) { if (type_entry->id == TypeTableEntryIdStruct) {
resolve_struct_zero_bits(g, type_entry); return resolve_struct_zero_bits(g, type_entry);
} else if (type_entry->id == TypeTableEntryIdEnum) { } else if (type_entry->id == TypeTableEntryIdEnum) {
resolve_enum_zero_bits(g, type_entry); return resolve_enum_zero_bits(g, type_entry);
} else if (type_entry->id == TypeTableEntryIdUnion) { } else if (type_entry->id == TypeTableEntryIdUnion) {
resolve_union_zero_bits(g, type_entry); return resolve_union_zero_bits(g, type_entry);
} }
return ErrorNone;
} }
bool ir_get_var_is_comptime(VariableTableEntry *var) { bool ir_get_var_is_comptime(VariableTableEntry *var) {
@ -6213,7 +6257,7 @@ LinkLib *add_link_lib(CodeGen *g, Buf *name) {
} }
uint32_t get_abi_alignment(CodeGen *g, TypeTableEntry *type_entry) { uint32_t get_abi_alignment(CodeGen *g, TypeTableEntry *type_entry) {
type_ensure_zero_bits_known(g, type_entry); assertNoError(type_ensure_zero_bits_known(g, type_entry));
if (type_entry->zero_bits) return 0; if (type_entry->zero_bits) return 0;
// We need to make this function work without requiring ensure_complete_type // We need to make this function work without requiring ensure_complete_type

View File

@ -9,6 +9,7 @@
#define ZIG_ANALYZE_HPP #define ZIG_ANALYZE_HPP
#include "all_types.hpp" #include "all_types.hpp"
#include "result.hpp"
void semantic_analyze(CodeGen *g); void semantic_analyze(CodeGen *g);
ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg); ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg);
@ -88,8 +89,8 @@ void init_fn_type_id(FnTypeId *fn_type_id, AstNode *proto_node, size_t param_cou
AstNode *get_param_decl_node(FnTableEntry *fn_entry, size_t index); AstNode *get_param_decl_node(FnTableEntry *fn_entry, size_t index);
FnTableEntry *scope_get_fn_if_root(Scope *scope); FnTableEntry *scope_get_fn_if_root(Scope *scope);
bool type_requires_comptime(TypeTableEntry *type_entry); bool type_requires_comptime(TypeTableEntry *type_entry);
void ensure_complete_type(CodeGen *g, TypeTableEntry *type_entry); Error ATTRIBUTE_MUST_USE ensure_complete_type(CodeGen *g, TypeTableEntry *type_entry);
void type_ensure_zero_bits_known(CodeGen *g, TypeTableEntry *type_entry); Error ATTRIBUTE_MUST_USE type_ensure_zero_bits_known(CodeGen *g, TypeTableEntry *type_entry);
void complete_enum(CodeGen *g, TypeTableEntry *enum_type); void complete_enum(CodeGen *g, TypeTableEntry *enum_type);
bool ir_get_var_is_comptime(VariableTableEntry *var); bool ir_get_var_is_comptime(VariableTableEntry *var);
bool const_values_equal(ConstExprValue *a, ConstExprValue *b); bool const_values_equal(ConstExprValue *a, ConstExprValue *b);
@ -178,7 +179,7 @@ TypeTableEntryId type_id_at_index(size_t index);
size_t type_id_len(); size_t type_id_len();
size_t type_id_index(TypeTableEntry *entry); size_t type_id_index(TypeTableEntry *entry);
TypeTableEntry *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id); TypeTableEntry *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id);
bool type_is_copyable(CodeGen *g, TypeTableEntry *type_entry); Result<bool> type_is_copyable(CodeGen *g, TypeTableEntry *type_entry);
LinkLib *create_link_lib(Buf *name); LinkLib *create_link_lib(Buf *name);
bool calling_convention_does_first_arg_return(CallingConvention cc); bool calling_convention_does_first_arg_return(CallingConvention cc);
LinkLib *add_link_lib(CodeGen *codegen, Buf *lib); LinkLib *add_link_lib(CodeGen *codegen, Buf *lib);

View File

@ -829,15 +829,15 @@ static bool ir_want_fast_math(CodeGen *g, IrInstruction *instruction) {
if (scope->id == ScopeIdBlock) { if (scope->id == ScopeIdBlock) {
ScopeBlock *block_scope = (ScopeBlock *)scope; ScopeBlock *block_scope = (ScopeBlock *)scope;
if (block_scope->fast_math_set_node) if (block_scope->fast_math_set_node)
return !block_scope->fast_math_off; return block_scope->fast_math_on;
} else if (scope->id == ScopeIdDecls) { } else if (scope->id == ScopeIdDecls) {
ScopeDecls *decls_scope = (ScopeDecls *)scope; ScopeDecls *decls_scope = (ScopeDecls *)scope;
if (decls_scope->fast_math_set_node) if (decls_scope->fast_math_set_node)
return !decls_scope->fast_math_off; return decls_scope->fast_math_on;
} }
scope = scope->parent; scope = scope->parent;
} }
return true; return false;
} }
static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) { static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
@ -5131,13 +5131,13 @@ static bool is_llvm_value_unnamed_type(TypeTableEntry *type_entry, LLVMValueRef
} }
static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, const char *name) { static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, const char *name) {
render_const_val_global(g, const_val, name);
switch (const_val->data.x_ptr.special) { switch (const_val->data.x_ptr.special) {
case ConstPtrSpecialInvalid: case ConstPtrSpecialInvalid:
case ConstPtrSpecialDiscard: case ConstPtrSpecialDiscard:
zig_unreachable(); zig_unreachable();
case ConstPtrSpecialRef: case ConstPtrSpecialRef:
{ {
render_const_val_global(g, const_val, name);
ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee; ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee;
render_const_val(g, pointee, ""); render_const_val(g, pointee, "");
render_const_val_global(g, pointee, ""); render_const_val_global(g, pointee, "");
@ -5148,6 +5148,7 @@ static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, con
} }
case ConstPtrSpecialBaseArray: case ConstPtrSpecialBaseArray:
{ {
render_const_val_global(g, const_val, name);
ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val; ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val;
size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index; size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;
assert(array_const_val->type->id == TypeTableEntryIdArray); assert(array_const_val->type->id == TypeTableEntryIdArray);
@ -5168,6 +5169,7 @@ static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, con
} }
case ConstPtrSpecialBaseStruct: case ConstPtrSpecialBaseStruct:
{ {
render_const_val_global(g, const_val, name);
ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val; ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val;
assert(struct_const_val->type->id == TypeTableEntryIdStruct); assert(struct_const_val->type->id == TypeTableEntryIdStruct);
if (struct_const_val->type->zero_bits) { if (struct_const_val->type->zero_bits) {
@ -5190,6 +5192,7 @@ static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, con
} }
case ConstPtrSpecialHardCodedAddr: case ConstPtrSpecialHardCodedAddr:
{ {
render_const_val_global(g, const_val, name);
uint64_t addr_value = const_val->data.x_ptr.data.hard_coded_addr.addr; uint64_t addr_value = const_val->data.x_ptr.data.hard_coded_addr.addr;
TypeTableEntry *usize = g->builtin_types.entry_usize; TypeTableEntry *usize = g->builtin_types.entry_usize;
const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false), const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false),
@ -5720,12 +5723,16 @@ static void do_code_gen(CodeGen *g) {
LLVMValueRef global_value; LLVMValueRef global_value;
if (var->linkage == VarLinkageExternal) { if (var->linkage == VarLinkageExternal) {
LLVMValueRef existing_llvm_var = LLVMGetNamedGlobal(g->module, buf_ptr(&var->name));
if (existing_llvm_var) {
global_value = LLVMConstBitCast(existing_llvm_var, LLVMPointerType(var->value->type->type_ref, 0));
} else {
global_value = LLVMAddGlobal(g->module, var->value->type->type_ref, buf_ptr(&var->name)); global_value = LLVMAddGlobal(g->module, var->value->type->type_ref, buf_ptr(&var->name));
// TODO debug info for the extern variable // TODO debug info for the extern variable
LLVMSetLinkage(global_value, LLVMExternalLinkage); LLVMSetLinkage(global_value, LLVMExternalLinkage);
LLVMSetAlignment(global_value, var->align_bytes); LLVMSetAlignment(global_value, var->align_bytes);
}
} else { } else {
bool exported = (var->linkage == VarLinkageExport); bool exported = (var->linkage == VarLinkageExport);
const char *mangled_name = buf_ptr(get_mangled_name(g, &var->name, exported)); const char *mangled_name = buf_ptr(get_mangled_name(g, &var->name, exported));

File diff suppressed because it is too large Load Diff

36
src/result.hpp Normal file
View File

@ -0,0 +1,36 @@
/*
* Copyright (c) 2018 Andrew Kelley
*
* This file is part of zig, which is MIT licensed.
* See http://opensource.org/licenses/MIT
*/
#ifndef ZIG_RESULT_HPP
#define ZIG_RESULT_HPP
#include "error.hpp"
#include <assert.h>
static inline void assertNoError(Error err) {
assert(err == ErrorNone);
}
template<typename T>
struct Result {
T data;
Error err;
Result(T x) : data(x), err(ErrorNone) {}
Result(Error err) : err(err) {
assert(err != ErrorNone);
}
T unwrap() {
assert(err == ErrorNone);
return data;
}
};
#endif

View File

@ -2759,8 +2759,10 @@ static AstNode *trans_do_loop(Context *c, TransScope *parent_scope, const DoStmt
AstNode *child_statement; AstNode *child_statement;
child_scope = trans_stmt(c, &child_block_scope->base, stmt->getBody(), &child_statement); child_scope = trans_stmt(c, &child_block_scope->base, stmt->getBody(), &child_statement);
if (child_scope == nullptr) return nullptr; if (child_scope == nullptr) return nullptr;
if (child_statement != nullptr) {
body_node->data.block.statements.append(child_statement); body_node->data.block.statements.append(child_statement);
} }
}
// if (!cond) break; // if (!cond) break;
AstNode *condition_node = trans_expr(c, ResultUsedYes, child_scope, stmt->getCond(), TransRValue); AstNode *condition_node = trans_expr(c, ResultUsedYes, child_scope, stmt->getCond(), TransRValue);
@ -2769,6 +2771,7 @@ static AstNode *trans_do_loop(Context *c, TransScope *parent_scope, const DoStmt
terminator_node->data.if_bool_expr.condition = trans_create_node_prefix_op(c, PrefixOpBoolNot, condition_node); terminator_node->data.if_bool_expr.condition = trans_create_node_prefix_op(c, PrefixOpBoolNot, condition_node);
terminator_node->data.if_bool_expr.then_block = trans_create_node(c, NodeTypeBreak); terminator_node->data.if_bool_expr.then_block = trans_create_node(c, NodeTypeBreak);
assert(terminator_node != nullptr);
body_node->data.block.statements.append(terminator_node); body_node->data.block.statements.append(terminator_node);
while_scope->node->data.while_expr.body = body_node; while_scope->node->data.while_expr.body = body_node;
@ -2832,7 +2835,12 @@ static AstNode *trans_for_loop(Context *c, TransScope *parent_scope, const ForSt
TransScope *body_scope = trans_stmt(c, &while_scope->base, stmt->getBody(), &body_statement); TransScope *body_scope = trans_stmt(c, &while_scope->base, stmt->getBody(), &body_statement);
if (body_scope == nullptr) if (body_scope == nullptr)
return nullptr; return nullptr;
if (body_statement == nullptr) {
while_scope->node->data.while_expr.body = trans_create_node(c, NodeTypeBlock);
} else {
while_scope->node->data.while_expr.body = body_statement; while_scope->node->data.while_expr.body = body_statement;
}
return loop_block_node; return loop_block_node;
} }
@ -3067,9 +3075,14 @@ static int trans_stmt_extra(Context *c, TransScope *scope, const Stmt *stmt,
trans_unary_operator(c, result_used, scope, (const UnaryOperator *)stmt)); trans_unary_operator(c, result_used, scope, (const UnaryOperator *)stmt));
case Stmt::DeclStmtClass: case Stmt::DeclStmtClass:
return trans_local_declaration(c, scope, (const DeclStmt *)stmt, out_node, out_child_scope); return trans_local_declaration(c, scope, (const DeclStmt *)stmt, out_node, out_child_scope);
case Stmt::WhileStmtClass: case Stmt::WhileStmtClass: {
return wrap_stmt(out_node, out_child_scope, scope, AstNode *while_node = trans_while_loop(c, scope, (const WhileStmt *)stmt);
trans_while_loop(c, scope, (const WhileStmt *)stmt)); assert(while_node->type == NodeTypeWhileExpr);
if (while_node->data.while_expr.body == nullptr) {
while_node->data.while_expr.body = trans_create_node(c, NodeTypeBlock);
}
return wrap_stmt(out_node, out_child_scope, scope, while_node);
}
case Stmt::IfStmtClass: case Stmt::IfStmtClass:
return wrap_stmt(out_node, out_child_scope, scope, return wrap_stmt(out_node, out_child_scope, scope,
trans_if_statement(c, scope, (const IfStmt *)stmt)); trans_if_statement(c, scope, (const IfStmt *)stmt));
@ -3092,12 +3105,18 @@ static int trans_stmt_extra(Context *c, TransScope *scope, const Stmt *stmt,
case Stmt::UnaryExprOrTypeTraitExprClass: case Stmt::UnaryExprOrTypeTraitExprClass:
return wrap_stmt(out_node, out_child_scope, scope, return wrap_stmt(out_node, out_child_scope, scope,
trans_unary_expr_or_type_trait_expr(c, scope, (const UnaryExprOrTypeTraitExpr *)stmt)); trans_unary_expr_or_type_trait_expr(c, scope, (const UnaryExprOrTypeTraitExpr *)stmt));
case Stmt::DoStmtClass: case Stmt::DoStmtClass: {
return wrap_stmt(out_node, out_child_scope, scope, AstNode *while_node = trans_do_loop(c, scope, (const DoStmt *)stmt);
trans_do_loop(c, scope, (const DoStmt *)stmt)); assert(while_node->type == NodeTypeWhileExpr);
case Stmt::ForStmtClass: if (while_node->data.while_expr.body == nullptr) {
return wrap_stmt(out_node, out_child_scope, scope, while_node->data.while_expr.body = trans_create_node(c, NodeTypeBlock);
trans_for_loop(c, scope, (const ForStmt *)stmt)); }
return wrap_stmt(out_node, out_child_scope, scope, while_node);
}
case Stmt::ForStmtClass: {
AstNode *node = trans_for_loop(c, scope, (const ForStmt *)stmt);
return wrap_stmt(out_node, out_child_scope, scope, node);
}
case Stmt::StringLiteralClass: case Stmt::StringLiteralClass:
return wrap_stmt(out_node, out_child_scope, scope, return wrap_stmt(out_node, out_child_scope, scope,
trans_string_literal(c, scope, (const StringLiteral *)stmt)); trans_string_literal(c, scope, (const StringLiteral *)stmt));

View File

@ -21,6 +21,7 @@
#define ATTRIBUTE_PRINTF(a, b) #define ATTRIBUTE_PRINTF(a, b)
#define ATTRIBUTE_RETURNS_NOALIAS __declspec(restrict) #define ATTRIBUTE_RETURNS_NOALIAS __declspec(restrict)
#define ATTRIBUTE_NORETURN __declspec(noreturn) #define ATTRIBUTE_NORETURN __declspec(noreturn)
#define ATTRIBUTE_MUST_USE
#else #else
@ -28,6 +29,7 @@
#define ATTRIBUTE_PRINTF(a, b) __attribute__((format(printf, a, b))) #define ATTRIBUTE_PRINTF(a, b) __attribute__((format(printf, a, b)))
#define ATTRIBUTE_RETURNS_NOALIAS __attribute__((__malloc__)) #define ATTRIBUTE_RETURNS_NOALIAS __attribute__((__malloc__))
#define ATTRIBUTE_NORETURN __attribute__((noreturn)) #define ATTRIBUTE_NORETURN __attribute__((noreturn))
#define ATTRIBUTE_MUST_USE __attribute__((warn_unused_result))
#endif #endif

View File

@ -1,40 +1,38 @@
const std = @import("../index.zig");
const builtin = @import("builtin"); const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder; const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp; const AtomicRmwOp = builtin.AtomicRmwOp;
const assert = std.debug.assert;
/// Many producer, many consumer, non-allocating, thread-safe. /// Many producer, many consumer, non-allocating, thread-safe.
/// Uses a spinlock to protect get() and put(). /// Uses a mutex to protect access.
pub fn Queue(comptime T: type) type { pub fn Queue(comptime T: type) type {
return struct { return struct {
head: ?*Node, head: ?*Node,
tail: ?*Node, tail: ?*Node,
lock: u8, mutex: std.Mutex,
pub const Self = this; pub const Self = this;
pub const Node = std.LinkedList(T).Node;
pub const Node = struct {
next: ?*Node,
data: T,
};
pub fn init() Self { pub fn init() Self {
return Self{ return Self{
.head = null, .head = null,
.tail = null, .tail = null,
.lock = 0, .mutex = std.Mutex.init(),
}; };
} }
pub fn put(self: *Self, node: *Node) void { pub fn put(self: *Self, node: *Node) void {
node.next = null; node.next = null;
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} const held = self.mutex.acquire();
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); defer held.release();
const opt_tail = self.tail; node.prev = self.tail;
self.tail = node; self.tail = node;
if (opt_tail) |tail| { if (node.prev) |prev_tail| {
tail.next = node; prev_tail.next = node;
} else { } else {
assert(self.head == null); assert(self.head == null);
self.head = node; self.head = node;
@ -42,18 +40,27 @@ pub fn Queue(comptime T: type) type {
} }
pub fn get(self: *Self) ?*Node { pub fn get(self: *Self) ?*Node {
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} const held = self.mutex.acquire();
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); defer held.release();
const head = self.head orelse return null; const head = self.head orelse return null;
self.head = head.next; self.head = head.next;
if (head.next == null) self.tail = null; if (head.next) |new_head| {
new_head.prev = null;
} else {
self.tail = null;
}
// This way, a get() and a remove() are thread-safe with each other.
head.prev = null;
head.next = null;
return head; return head;
} }
pub fn unget(self: *Self, node: *Node) void { pub fn unget(self: *Self, node: *Node) void {
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} node.prev = null;
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
const held = self.mutex.acquire();
defer held.release();
const opt_head = self.head; const opt_head = self.head;
self.head = node; self.head = node;
@ -65,13 +72,39 @@ pub fn Queue(comptime T: type) type {
} }
} }
/// Thread-safe with get() and remove(). Returns whether node was actually removed.
pub fn remove(self: *Self, node: *Node) bool {
const held = self.mutex.acquire();
defer held.release();
if (node.prev == null and node.next == null and self.head != node) {
return false;
}
if (node.prev) |prev| {
prev.next = node.next;
} else {
self.head = node.next;
}
if (node.next) |next| {
next.prev = node.prev;
} else {
self.tail = node.prev;
}
node.prev = null;
node.next = null;
return true;
}
pub fn isEmpty(self: *Self) bool { pub fn isEmpty(self: *Self) bool {
return @atomicLoad(?*Node, &self.head, builtin.AtomicOrder.SeqCst) != null; const held = self.mutex.acquire();
defer held.release();
return self.head != null;
} }
pub fn dump(self: *Self) void { pub fn dump(self: *Self) void {
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} const held = self.mutex.acquire();
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); defer held.release();
std.debug.warn("head: "); std.debug.warn("head: ");
dumpRecursive(self.head, 0); dumpRecursive(self.head, 0);
@ -93,9 +126,6 @@ pub fn Queue(comptime T: type) type {
}; };
} }
const std = @import("../index.zig");
const assert = std.debug.assert;
const Context = struct { const Context = struct {
allocator: *std.mem.Allocator, allocator: *std.mem.Allocator,
queue: *Queue(i32), queue: *Queue(i32),
@ -169,6 +199,7 @@ fn startPuts(ctx: *Context) u8 {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32)); const x = @bitCast(i32, r.random.scalar(u32));
const node = ctx.allocator.create(Queue(i32).Node{ const node = ctx.allocator.create(Queue(i32).Node{
.prev = undefined,
.next = undefined, .next = undefined,
.data = x, .data = x,
}) catch unreachable; }) catch unreachable;
@ -198,12 +229,14 @@ test "std.atomic.Queue single-threaded" {
var node_0 = Queue(i32).Node{ var node_0 = Queue(i32).Node{
.data = 0, .data = 0,
.next = undefined, .next = undefined,
.prev = undefined,
}; };
queue.put(&node_0); queue.put(&node_0);
var node_1 = Queue(i32).Node{ var node_1 = Queue(i32).Node{
.data = 1, .data = 1,
.next = undefined, .next = undefined,
.prev = undefined,
}; };
queue.put(&node_1); queue.put(&node_1);
@ -212,12 +245,14 @@ test "std.atomic.Queue single-threaded" {
var node_2 = Queue(i32).Node{ var node_2 = Queue(i32).Node{
.data = 2, .data = 2,
.next = undefined, .next = undefined,
.prev = undefined,
}; };
queue.put(&node_2); queue.put(&node_2);
var node_3 = Queue(i32).Node{ var node_3 = Queue(i32).Node{
.data = 3, .data = 3,
.next = undefined, .next = undefined,
.prev = undefined,
}; };
queue.put(&node_3); queue.put(&node_3);
@ -228,6 +263,7 @@ test "std.atomic.Queue single-threaded" {
var node_4 = Queue(i32).Node{ var node_4 = Queue(i32).Node{
.data = 4, .data = 4,
.next = undefined, .next = undefined,
.prev = undefined,
}; };
queue.put(&node_4); queue.put(&node_4);

View File

@ -267,7 +267,7 @@ pub const Builder = struct {
if (self.verbose) { if (self.verbose) {
warn("rm {}\n", installed_file); warn("rm {}\n", installed_file);
} }
_ = os.deleteFile(self.allocator, installed_file); _ = os.deleteFile(installed_file);
} }
// TODO remove empty directories // TODO remove empty directories
@ -424,14 +424,19 @@ pub const Builder = struct {
return mode; return mode;
} }
pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) bool { pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) !bool {
if (self.user_input_options.put(name, UserInputOption{ const gop = try self.user_input_options.getOrPut(name);
if (!gop.found_existing) {
gop.kv.value = UserInputOption{
.name = name, .name = name,
.value = UserValue{ .Scalar = value }, .value = UserValue{ .Scalar = value },
.used = false, .used = false,
}) catch unreachable) |*prev_value| { };
return false;
}
// option already exists // option already exists
switch (prev_value.value) { switch (gop.kv.value.value) {
UserValue.Scalar => |s| { UserValue.Scalar => |s| {
// turn it into a list // turn it into a list
var list = ArrayList([]const u8).init(self.allocator); var list = ArrayList([]const u8).init(self.allocator);
@ -457,17 +462,22 @@ pub const Builder = struct {
return true; return true;
}, },
} }
}
return false; return false;
} }
pub fn addUserInputFlag(self: *Builder, name: []const u8) bool { pub fn addUserInputFlag(self: *Builder, name: []const u8) !bool {
if (self.user_input_options.put(name, UserInputOption{ const gop = try self.user_input_options.getOrPut(name);
if (!gop.found_existing) {
gop.kv.value = UserInputOption{
.name = name, .name = name,
.value = UserValue{ .Flag = {} }, .value = UserValue{ .Flag = {} },
.used = false, .used = false,
}) catch unreachable) |*prev_value| { };
switch (prev_value.value) { return false;
}
// option already exists
switch (gop.kv.value.value) {
UserValue.Scalar => |s| { UserValue.Scalar => |s| {
warn("Flag '-D{}' conflicts with option '-D{}={}'.\n", name, name, s); warn("Flag '-D{}' conflicts with option '-D{}={}'.\n", name, name, s);
return true; return true;
@ -478,7 +488,6 @@ pub const Builder = struct {
}, },
UserValue.Flag => {}, UserValue.Flag => {},
} }
}
return false; return false;
} }
@ -603,10 +612,10 @@ pub const Builder = struct {
} }
fn copyFile(self: *Builder, source_path: []const u8, dest_path: []const u8) !void { fn copyFile(self: *Builder, source_path: []const u8, dest_path: []const u8) !void {
return self.copyFileMode(source_path, dest_path, os.default_file_mode); return self.copyFileMode(source_path, dest_path, os.File.default_mode);
} }
fn copyFileMode(self: *Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void { fn copyFileMode(self: *Builder, source_path: []const u8, dest_path: []const u8, mode: os.File.Mode) !void {
if (self.verbose) { if (self.verbose) {
warn("cp {} {}\n", source_path, dest_path); warn("cp {} {}\n", source_path, dest_path);
} }
@ -1173,7 +1182,7 @@ pub const LibExeObjStep = struct {
if (self.build_options_contents.len() > 0) { if (self.build_options_contents.len() > 0) {
const build_options_file = try os.path.join(builder.allocator, builder.cache_root, builder.fmt("{}_build_options.zig", self.name)); const build_options_file = try os.path.join(builder.allocator, builder.cache_root, builder.fmt("{}_build_options.zig", self.name));
try std.io.writeFile(builder.allocator, build_options_file, self.build_options_contents.toSliceConst()); try std.io.writeFile(build_options_file, self.build_options_contents.toSliceConst());
try zig_args.append("--pkg-begin"); try zig_args.append("--pkg-begin");
try zig_args.append("build_options"); try zig_args.append("build_options");
try zig_args.append(builder.pathFromRoot(build_options_file)); try zig_args.append(builder.pathFromRoot(build_options_file));
@ -1482,11 +1491,14 @@ pub const LibExeObjStep = struct {
} }
if (!is_darwin) { if (!is_darwin) {
const rpath_arg = builder.fmt("-Wl,-rpath,{}", os.path.real(builder.allocator, builder.pathFromRoot(builder.cache_root)) catch unreachable); const rpath_arg = builder.fmt("-Wl,-rpath,{}", try os.path.realAlloc(
builder.allocator,
builder.pathFromRoot(builder.cache_root),
));
defer builder.allocator.free(rpath_arg); defer builder.allocator.free(rpath_arg);
cc_args.append(rpath_arg) catch unreachable; try cc_args.append(rpath_arg);
cc_args.append("-rdynamic") catch unreachable; try cc_args.append("-rdynamic");
} }
for (self.full_path_libs.toSliceConst()) |full_path_lib| { for (self.full_path_libs.toSliceConst()) |full_path_lib| {
@ -1557,11 +1569,14 @@ pub const LibExeObjStep = struct {
cc_args.append("-o") catch unreachable; cc_args.append("-o") catch unreachable;
cc_args.append(output_path) catch unreachable; cc_args.append(output_path) catch unreachable;
const rpath_arg = builder.fmt("-Wl,-rpath,{}", os.path.real(builder.allocator, builder.pathFromRoot(builder.cache_root)) catch unreachable); const rpath_arg = builder.fmt("-Wl,-rpath,{}", try os.path.realAlloc(
builder.allocator,
builder.pathFromRoot(builder.cache_root),
));
defer builder.allocator.free(rpath_arg); defer builder.allocator.free(rpath_arg);
cc_args.append(rpath_arg) catch unreachable; try cc_args.append(rpath_arg);
cc_args.append("-rdynamic") catch unreachable; try cc_args.append("-rdynamic");
{ {
var it = self.link_libs.iterator(); var it = self.link_libs.iterator();
@ -1908,7 +1923,7 @@ pub const WriteFileStep = struct {
warn("unable to make path {}: {}\n", full_path_dir, @errorName(err)); warn("unable to make path {}: {}\n", full_path_dir, @errorName(err));
return err; return err;
}; };
io.writeFile(self.builder.allocator, full_path, self.data) catch |err| { io.writeFile(full_path, self.data) catch |err| {
warn("unable to write {}: {}\n", full_path, @errorName(err)); warn("unable to write {}: {}\n", full_path, @errorName(err));
return err; return err;
}; };

View File

@ -1,5 +1,8 @@
const macho = @import("../macho.zig");
extern "c" fn __error() *c_int; extern "c" fn __error() *c_int;
pub extern "c" fn _NSGetExecutablePath(buf: [*]u8, bufsize: *u32) c_int; pub extern "c" fn _NSGetExecutablePath(buf: [*]u8, bufsize: *u32) c_int;
pub extern "c" fn _dyld_get_image_header(image_index: u32) ?*mach_header;
pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: [*]u8, buf_len: usize, basep: *i64) usize; pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: [*]u8, buf_len: usize, basep: *i64) usize;
@ -30,10 +33,45 @@ pub extern "c" fn sysctl(name: [*]c_int, namelen: c_uint, oldp: ?*c_void, oldlen
pub extern "c" fn sysctlbyname(name: [*]const u8, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) c_int; pub extern "c" fn sysctlbyname(name: [*]const u8, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) c_int;
pub extern "c" fn sysctlnametomib(name: [*]const u8, mibp: ?*c_int, sizep: ?*usize) c_int; pub extern "c" fn sysctlnametomib(name: [*]const u8, mibp: ?*c_int, sizep: ?*usize) c_int;
pub extern "c" fn bind(socket: c_int, address: ?*const sockaddr, address_len: socklen_t) c_int;
pub extern "c" fn socket(domain: c_int, type: c_int, protocol: c_int) c_int;
/// The value of the link editor defined symbol _MH_EXECUTE_SYM is the address
/// of the mach header in a Mach-O executable file type. It does not appear in
/// any file type other than a MH_EXECUTE file type. The type of the symbol is
/// absolute as the header is not part of any section.
pub extern "c" var _mh_execute_header: if (@sizeOf(usize) == 8) mach_header_64 else mach_header;
pub const mach_header_64 = macho.mach_header_64;
pub const mach_header = macho.mach_header;
pub use @import("../os/darwin/errno.zig"); pub use @import("../os/darwin/errno.zig");
pub const _errno = __error; pub const _errno = __error;
pub const in_port_t = u16;
pub const sa_family_t = u8;
pub const socklen_t = u32;
pub const sockaddr = extern union {
in: sockaddr_in,
in6: sockaddr_in6,
};
pub const sockaddr_in = extern struct {
len: u8,
family: sa_family_t,
port: in_port_t,
addr: u32,
zero: [8]u8,
};
pub const sockaddr_in6 = extern struct {
len: u8,
family: sa_family_t,
port: in_port_t,
flowinfo: u32,
addr: [16]u8,
scope_id: u32,
};
pub const timeval = extern struct { pub const timeval = extern struct {
tv_sec: isize, tv_sec: isize,
tv_usec: isize, tv_usec: isize,
@ -98,14 +136,6 @@ pub const dirent = extern struct {
d_name: u8, // field address is address of first byte of name d_name: u8, // field address is address of first byte of name
}; };
pub const sockaddr = extern struct {
sa_len: u8,
sa_family: sa_family_t,
sa_data: [14]u8,
};
pub const sa_family_t = u8;
pub const pthread_attr_t = extern struct { pub const pthread_attr_t = extern struct {
__sig: c_long, __sig: c_long,
__opaque: [56]u8, __opaque: [56]u8,

View File

@ -21,8 +21,10 @@ pub extern "c" fn lseek(fd: c_int, offset: isize, whence: c_int) isize;
pub extern "c" fn open(path: [*]const u8, oflag: c_int, ...) c_int; pub extern "c" fn open(path: [*]const u8, oflag: c_int, ...) c_int;
pub extern "c" fn raise(sig: c_int) c_int; pub extern "c" fn raise(sig: c_int) c_int;
pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize; pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize;
pub extern "c" fn pread(fd: c_int, buf: *c_void, nbyte: usize, offset: u64) isize;
pub extern "c" fn stat(noalias path: [*]const u8, noalias buf: *Stat) c_int; pub extern "c" fn stat(noalias path: [*]const u8, noalias buf: *Stat) c_int;
pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize; pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize;
pub extern "c" fn pwrite(fd: c_int, buf: *const c_void, nbyte: usize, offset: u64) isize;
pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void; pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void;
pub extern "c" fn munmap(addr: *c_void, len: usize) c_int; pub extern "c" fn munmap(addr: *c_void, len: usize) c_int;
pub extern "c" fn unlink(path: [*]const u8) c_int; pub extern "c" fn unlink(path: [*]const u8) c_int;
@ -58,6 +60,7 @@ pub extern "pthread" fn pthread_create(noalias newthread: *pthread_t, noalias at
pub extern "pthread" fn pthread_attr_init(attr: *pthread_attr_t) c_int; pub extern "pthread" fn pthread_attr_init(attr: *pthread_attr_t) c_int;
pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int; pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int;
pub extern "pthread" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int; pub extern "pthread" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int;
pub extern "pthread" fn pthread_self() pthread_t;
pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int; pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int;
pub const pthread_t = *@OpaqueType(); pub const pthread_t = *@OpaqueType();

View File

@ -8,3 +8,6 @@ pub const pthread_attr_t = extern struct {
__size: [56]u8, __size: [56]u8,
__align: c_long, __align: c_long,
}; };
/// See std.elf for constants for this
pub extern fn getauxval(__type: c_ulong) c_ulong;

View File

@ -9,10 +9,9 @@ pub const line_sep = switch (builtin.os) {
else => "\n", else => "\n",
}; };
/// Deprecated, use mem.len
pub fn len(ptr: [*]const u8) usize { pub fn len(ptr: [*]const u8) usize {
var count: usize = 0; return mem.len(u8, ptr);
while (ptr[count] != 0) : (count += 1) {}
return count;
} }
pub fn cmp(a: [*]const u8, b: [*]const u8) i8 { pub fn cmp(a: [*]const u8, b: [*]const u8) i8 {
@ -27,12 +26,14 @@ pub fn cmp(a: [*]const u8, b: [*]const u8) i8 {
} }
} }
/// Deprecated, use mem.toSliceConst
pub fn toSliceConst(str: [*]const u8) []const u8 { pub fn toSliceConst(str: [*]const u8) []const u8 {
return str[0..len(str)]; return mem.toSliceConst(u8, str);
} }
/// Deprecated, use mem.toSlice
pub fn toSlice(str: [*]u8) []u8 { pub fn toSlice(str: [*]u8) []u8 {
return str[0..len(str)]; return mem.toSlice(u8, str);
} }
test "cstr fns" { test "cstr fns" {

View File

@ -4,8 +4,8 @@ const mem = std.mem;
const io = std.io; const io = std.io;
const os = std.os; const os = std.os;
const elf = std.elf; const elf = std.elf;
const DW = std.dwarf;
const macho = std.macho; const macho = std.macho;
const DW = std.dwarf;
const ArrayList = std.ArrayList; const ArrayList = std.ArrayList;
const builtin = @import("builtin"); const builtin = @import("builtin");
@ -19,14 +19,19 @@ pub const runtime_safety = switch (builtin.mode) {
/// Tries to write to stderr, unbuffered, and ignores any error returned. /// Tries to write to stderr, unbuffered, and ignores any error returned.
/// Does not append a newline. /// Does not append a newline.
/// TODO atomic/multithread support
var stderr_file: os.File = undefined; var stderr_file: os.File = undefined;
var stderr_file_out_stream: io.FileOutStream = undefined; var stderr_file_out_stream: io.FileOutStream = undefined;
/// TODO multithreaded awareness
var stderr_stream: ?*io.OutStream(io.FileOutStream.Error) = null; var stderr_stream: ?*io.OutStream(io.FileOutStream.Error) = null;
var stderr_mutex = std.Mutex.init();
pub fn warn(comptime fmt: []const u8, args: ...) void { pub fn warn(comptime fmt: []const u8, args: ...) void {
const held = stderr_mutex.acquire();
defer held.release();
const stderr = getStderrStream() catch return; const stderr = getStderrStream() catch return;
stderr.print(fmt, args) catch return; stderr.print(fmt, args) catch return;
} }
pub fn getStderrStream() !*io.OutStream(io.FileOutStream.Error) { pub fn getStderrStream() !*io.OutStream(io.FileOutStream.Error) {
if (stderr_stream) |st| { if (stderr_stream) |st| {
return st; return st;
@ -39,14 +44,15 @@ pub fn getStderrStream() !*io.OutStream(io.FileOutStream.Error) {
} }
} }
var self_debug_info: ?*ElfStackTrace = null; /// TODO multithreaded awareness
pub fn getSelfDebugInfo() !*ElfStackTrace { var self_debug_info: ?DebugInfo = null;
if (self_debug_info) |info| {
pub fn getSelfDebugInfo() !*DebugInfo {
if (self_debug_info) |*info| {
return info; return info;
} else { } else {
const info = try openSelfDebugInfo(getDebugInfoAllocator()); self_debug_info = try openSelfDebugInfo(getDebugInfoAllocator());
self_debug_info = info; return &self_debug_info.?;
return info;
} }
} }
@ -57,6 +63,7 @@ fn wantTtyColor() bool {
} }
/// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned. /// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned.
/// TODO multithreaded awareness
pub fn dumpCurrentStackTrace(start_addr: ?usize) void { pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
const stderr = getStderrStream() catch return; const stderr = getStderrStream() catch return;
const debug_info = getSelfDebugInfo() catch |err| { const debug_info = getSelfDebugInfo() catch |err| {
@ -70,6 +77,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
} }
/// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned. /// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned.
/// TODO multithreaded awareness
pub fn dumpStackTrace(stack_trace: *const builtin.StackTrace) void { pub fn dumpStackTrace(stack_trace: *const builtin.StackTrace) void {
const stderr = getStderrStream() catch return; const stderr = getStderrStream() catch return;
const debug_info = getSelfDebugInfo() catch |err| { const debug_info = getSelfDebugInfo() catch |err| {
@ -124,6 +132,7 @@ pub fn panic(comptime format: []const u8, args: ...) noreturn {
panicExtra(null, first_trace_addr, format, args); panicExtra(null, first_trace_addr, format, args);
} }
/// TODO multithreaded awareness
var panicking: u8 = 0; // TODO make this a bool var panicking: u8 = 0; // TODO make this a bool
pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) noreturn { pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) noreturn {
@ -152,7 +161,7 @@ const WHITE = "\x1b[37;1m";
const DIM = "\x1b[2m"; const DIM = "\x1b[2m";
const RESET = "\x1b[0m"; const RESET = "\x1b[0m";
pub fn writeStackTrace(stack_trace: *const builtin.StackTrace, out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool) !void { pub fn writeStackTrace(stack_trace: *const builtin.StackTrace, out_stream: var, allocator: *mem.Allocator, debug_info: *DebugInfo, tty_color: bool) !void {
var frame_index: usize = undefined; var frame_index: usize = undefined;
var frames_left: usize = undefined; var frames_left: usize = undefined;
if (stack_trace.index < stack_trace.instruction_addresses.len) { if (stack_trace.index < stack_trace.instruction_addresses.len) {
@ -182,7 +191,7 @@ pub inline fn getReturnAddress(frame_count: usize) usize {
return @intToPtr(*const usize, fp + @sizeOf(usize)).*; return @intToPtr(*const usize, fp + @sizeOf(usize)).*;
} }
pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool, start_addr: ?usize) !void { pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_info: *DebugInfo, tty_color: bool, start_addr: ?usize) !void {
const AddressState = union(enum) { const AddressState = union(enum) {
NotLookingForStartAddress, NotLookingForStartAddress,
LookingForStartAddress: usize, LookingForStartAddress: usize,
@ -215,44 +224,115 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_
} }
} }
pub fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: usize, tty_color: bool) !void { pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: var, address: usize, tty_color: bool) !void {
switch (builtin.os) { switch (builtin.os) {
builtin.Os.windows => return error.UnsupportedDebugInfo, builtin.Os.macosx => return printSourceAtAddressMacOs(debug_info, out_stream, address, tty_color),
builtin.Os.macosx => { builtin.Os.linux => return printSourceAtAddressLinux(debug_info, out_stream, address, tty_color),
// TODO(bnoordhuis) It's theoretically possible to obtain the builtin.Os.windows => {
// compilation unit from the symbtab but it's not that useful // TODO https://github.com/ziglang/zig/issues/721
// in practice because the compiler dumps everything in a single return error.UnsupportedOperatingSystem;
// object file. Future improvement: use external dSYM data when
// available.
const unknown = macho.Symbol{
.name = "???",
.address = address,
};
const symbol = debug_info.symbol_table.search(address) orelse &unknown;
try out_stream.print(WHITE ++ "{}" ++ RESET ++ ": " ++ DIM ++ "0x{x}" ++ " in ??? (???)" ++ RESET ++ "\n", symbol.name, address);
}, },
else => { else => return error.UnsupportedOperatingSystem,
}
}
fn machoSearchSymbols(symbols: []const MachoSymbol, address: usize) ?*const MachoSymbol {
var min: usize = 0;
var max: usize = symbols.len - 1; // Exclude sentinel.
while (min < max) {
const mid = min + (max - min) / 2;
const curr = &symbols[mid];
const next = &symbols[mid + 1];
if (address >= next.address()) {
min = mid + 1;
} else if (address < curr.address()) {
max = mid;
} else {
return curr;
}
}
return null;
}
fn printSourceAtAddressMacOs(di: *DebugInfo, out_stream: var, address: usize, tty_color: bool) !void {
const base_addr = @ptrToInt(&std.c._mh_execute_header);
const adjusted_addr = 0x100000000 + (address - base_addr);
const symbol = machoSearchSymbols(di.symbols, adjusted_addr) orelse {
if (tty_color) {
try out_stream.print("???:?:?: " ++ DIM ++ "0x{x} in ??? (???)" ++ RESET ++ "\n\n\n", address);
} else {
try out_stream.print("???:?:?: 0x{x} in ??? (???)\n\n\n", address);
}
return;
};
const symbol_name = mem.toSliceConst(u8, di.strings.ptr + symbol.nlist.n_strx);
const compile_unit_name = if (symbol.ofile) |ofile| blk: {
const ofile_path = mem.toSliceConst(u8, di.strings.ptr + ofile.n_strx);
break :blk os.path.basename(ofile_path);
} else "???";
if (getLineNumberInfoMacOs(di, symbol.*, adjusted_addr)) |line_info| {
defer line_info.deinit();
try printLineInfo(di, out_stream, line_info, address, symbol_name, compile_unit_name, tty_color);
} else |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => {
if (tty_color) {
try out_stream.print("???:?:?: " ++ DIM ++ "0x{x} in {} ({})" ++ RESET ++ "\n\n\n", address, symbol_name, compile_unit_name);
} else {
try out_stream.print("???:?:?: 0x{x} in {} ({})\n\n\n", address, symbol_name, compile_unit_name);
}
},
else => return err,
}
}
pub fn printSourceAtAddressLinux(debug_info: *DebugInfo, out_stream: var, address: usize, tty_color: bool) !void {
const compile_unit = findCompileUnit(debug_info, address) catch { const compile_unit = findCompileUnit(debug_info, address) catch {
if (tty_color) { if (tty_color) {
try out_stream.print("???:?:?: " ++ DIM ++ "0x{x} in ??? (???)" ++ RESET ++ "\n ???\n\n", address); try out_stream.print("???:?:?: " ++ DIM ++ "0x{x} in ??? (???)" ++ RESET ++ "\n\n\n", address);
} else { } else {
try out_stream.print("???:?:?: 0x{x} in ??? (???)\n ???\n\n", address); try out_stream.print("???:?:?: 0x{x} in ??? (???)\n\n\n", address);
} }
return; return;
}; };
const compile_unit_name = try compile_unit.die.getAttrString(debug_info, DW.AT_name); const compile_unit_name = try compile_unit.die.getAttrString(debug_info, DW.AT_name);
if (getLineNumberInfo(debug_info, compile_unit, address - 1)) |line_info| { if (getLineNumberInfoLinux(debug_info, compile_unit, address - 1)) |line_info| {
defer line_info.deinit(); defer line_info.deinit();
const symbol_name = "???";
try printLineInfo(debug_info, out_stream, line_info, address, symbol_name, compile_unit_name, tty_color);
} else |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => {
if (tty_color) {
try out_stream.print("???:?:?: " ++ DIM ++ "0x{x} in ??? ({})" ++ RESET ++ "\n\n\n", address, compile_unit_name);
} else {
try out_stream.print("???:?:?: 0x{x} in ??? ({})\n\n\n", address, compile_unit_name);
}
},
else => return err,
}
}
fn printLineInfo(
debug_info: *DebugInfo,
out_stream: var,
line_info: LineInfo,
address: usize,
symbol_name: []const u8,
compile_unit_name: []const u8,
tty_color: bool,
) !void {
if (tty_color) { if (tty_color) {
try out_stream.print( try out_stream.print(
WHITE ++ "{}:{}:{}" ++ RESET ++ ": " ++ DIM ++ "0x{x} in ??? ({})" ++ RESET ++ "\n", WHITE ++ "{}:{}:{}" ++ RESET ++ ": " ++ DIM ++ "0x{x} in {} ({})" ++ RESET ++ "\n",
line_info.file_name, line_info.file_name,
line_info.line, line_info.line,
line_info.column, line_info.column,
address, address,
symbol_name,
compile_unit_name, compile_unit_name,
); );
if (printLineFromFile(debug_info.allocator(), out_stream, line_info)) { if (printLineFromFile(out_stream, line_info)) {
if (line_info.column == 0) { if (line_info.column == 0) {
try out_stream.write("\n"); try out_stream.write("\n");
} else { } else {
@ -270,28 +350,38 @@ pub fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address
} }
} else { } else {
try out_stream.print( try out_stream.print(
"{}:{}:{}: 0x{x} in ??? ({})\n", "{}:{}:{}: 0x{x} in {} ({})\n",
line_info.file_name, line_info.file_name,
line_info.line, line_info.line,
line_info.column, line_info.column,
address, address,
symbol_name,
compile_unit_name, compile_unit_name,
); );
} }
} else |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => {
try out_stream.print("0x{x} in ??? ({})\n", address, compile_unit_name);
},
else => return err,
} }
// TODO use this
pub const OpenSelfDebugInfoError = error{
MissingDebugInfo,
OutOfMemory,
UnsupportedOperatingSystem,
};
pub fn openSelfDebugInfo(allocator: *mem.Allocator) !DebugInfo {
switch (builtin.os) {
builtin.Os.linux => return openSelfDebugInfoLinux(allocator),
builtin.Os.macosx, builtin.Os.ios => return openSelfDebugInfoMacOs(allocator),
builtin.Os.windows => {
// TODO: https://github.com/ziglang/zig/issues/721
return error.UnsupportedOperatingSystem;
}, },
else => return error.UnsupportedOperatingSystem,
} }
} }
pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace { fn openSelfDebugInfoLinux(allocator: *mem.Allocator) !DebugInfo {
switch (builtin.object_format) { var di = DebugInfo{
builtin.ObjectFormat.elf => {
const st = try allocator.create(ElfStackTrace{
.self_exe_file = undefined, .self_exe_file = undefined,
.elf = undefined, .elf = undefined,
.debug_info = undefined, .debug_info = undefined,
@ -301,44 +391,125 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
.debug_ranges = null, .debug_ranges = null,
.abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator), .abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator),
.compile_unit_list = ArrayList(CompileUnit).init(allocator), .compile_unit_list = ArrayList(CompileUnit).init(allocator),
}); };
errdefer allocator.destroy(st); di.self_exe_file = try os.openSelfExe();
st.self_exe_file = try os.openSelfExe(); errdefer di.self_exe_file.close();
errdefer st.self_exe_file.close();
try st.elf.openFile(allocator, &st.self_exe_file); try di.elf.openFile(allocator, &di.self_exe_file);
errdefer st.elf.close(); errdefer di.elf.close();
st.debug_info = (try st.elf.findSection(".debug_info")) orelse return error.MissingDebugInfo; di.debug_info = (try di.elf.findSection(".debug_info")) orelse return error.MissingDebugInfo;
st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) orelse return error.MissingDebugInfo; di.debug_abbrev = (try di.elf.findSection(".debug_abbrev")) orelse return error.MissingDebugInfo;
st.debug_str = (try st.elf.findSection(".debug_str")) orelse return error.MissingDebugInfo; di.debug_str = (try di.elf.findSection(".debug_str")) orelse return error.MissingDebugInfo;
st.debug_line = (try st.elf.findSection(".debug_line")) orelse return error.MissingDebugInfo; di.debug_line = (try di.elf.findSection(".debug_line")) orelse return error.MissingDebugInfo;
st.debug_ranges = (try st.elf.findSection(".debug_ranges")); di.debug_ranges = (try di.elf.findSection(".debug_ranges"));
try scanAllCompileUnits(st); try scanAllCompileUnits(&di);
return st; return di;
}, }
builtin.ObjectFormat.macho => {
var exe_file = try os.openSelfExe();
defer exe_file.close();
const st = try allocator.create(ElfStackTrace{ .symbol_table = try macho.loadSymbols(allocator, &io.FileInStream.init(&exe_file)) }); pub fn findElfSection(elf: *Elf, name: []const u8) ?*elf.Shdr {
errdefer allocator.destroy(st); var file_stream = io.FileInStream.init(elf.in_file);
return st; const in = &file_stream.stream;
},
builtin.ObjectFormat.coff => { section_loop: for (elf.section_headers) |*elf_section| {
return error.TodoSupportCoffDebugInfo; if (elf_section.sh_type == SHT_NULL) continue;
},
builtin.ObjectFormat.wasm => { const name_offset = elf.string_section.offset + elf_section.name;
return error.TodoSupportCOFFDebugInfo; try elf.in_file.seekTo(name_offset);
},
builtin.ObjectFormat.unknown => { for (name) |expected_c| {
return error.UnknownObjectFormat; const target_c = try in.readByte();
}, if (target_c == 0 or expected_c != target_c) continue :section_loop;
}
{
const null_byte = try in.readByte();
if (null_byte == 0) return elf_section;
} }
} }
fn printLineFromFile(allocator: *mem.Allocator, out_stream: var, line_info: *const LineInfo) !void { return null;
var f = try os.File.openRead(allocator, line_info.file_name); }
fn openSelfDebugInfoMacOs(allocator: *mem.Allocator) !DebugInfo {
const hdr = &std.c._mh_execute_header;
assert(hdr.magic == std.macho.MH_MAGIC_64);
const hdr_base = @ptrCast([*]u8, hdr);
var ptr = hdr_base + @sizeOf(macho.mach_header_64);
var ncmd: u32 = hdr.ncmds;
const symtab = while (ncmd != 0) : (ncmd -= 1) {
const lc = @ptrCast(*std.macho.load_command, ptr);
switch (lc.cmd) {
std.macho.LC_SYMTAB => break @ptrCast(*std.macho.symtab_command, ptr),
else => {},
}
ptr += lc.cmdsize; // TODO https://github.com/ziglang/zig/issues/1403
} else {
return error.MissingDebugInfo;
};
const syms = @ptrCast([*]macho.nlist_64, hdr_base + symtab.symoff)[0..symtab.nsyms];
const strings = @ptrCast([*]u8, hdr_base + symtab.stroff)[0..symtab.strsize];
const symbols_buf = try allocator.alloc(MachoSymbol, syms.len);
var ofile: ?*macho.nlist_64 = null;
var reloc: u64 = 0;
var symbol_index: usize = 0;
var last_len: u64 = 0;
for (syms) |*sym| {
if (sym.n_type & std.macho.N_STAB != 0) {
switch (sym.n_type) {
std.macho.N_OSO => {
ofile = sym;
reloc = 0;
},
std.macho.N_FUN => {
if (sym.n_sect == 0) {
last_len = sym.n_value;
} else {
symbols_buf[symbol_index] = MachoSymbol{
.nlist = sym,
.ofile = ofile,
.reloc = reloc,
};
symbol_index += 1;
}
},
std.macho.N_BNSYM => {
if (reloc == 0) {
reloc = sym.n_value;
}
},
else => continue,
}
}
}
const sentinel = try allocator.createOne(macho.nlist_64);
sentinel.* = macho.nlist_64{
.n_strx = 0,
.n_type = 36,
.n_sect = 0,
.n_desc = 0,
.n_value = symbols_buf[symbol_index - 1].nlist.n_value + last_len,
};
const symbols = allocator.shrink(MachoSymbol, symbols_buf, symbol_index);
// Even though lld emits symbols in ascending order, this debug code
// should work for programs linked in any valid way.
// This sort is so that we can binary search later.
std.sort.sort(MachoSymbol, symbols, MachoSymbol.addressLessThan);
return DebugInfo{
.ofiles = DebugInfo.OFileTable.init(allocator),
.symbols = symbols,
.strings = strings,
};
}
fn printLineFromFile(out_stream: var, line_info: *const LineInfo) !void {
var f = try os.File.openRead(line_info.file_name);
defer f.close(); defer f.close();
// TODO fstat and make sure that the file has the correct size // TODO fstat and make sure that the file has the correct size
@ -369,12 +540,42 @@ fn printLineFromFile(allocator: *mem.Allocator, out_stream: var, line_info: *con
} }
} }
pub const ElfStackTrace = switch (builtin.os) { const MachoSymbol = struct {
builtin.Os.macosx => struct { nlist: *macho.nlist_64,
symbol_table: macho.SymbolTable, ofile: ?*macho.nlist_64,
reloc: u64,
pub fn close(self: *ElfStackTrace) void { /// Returns the address from the macho file
self.symbol_table.deinit(); fn address(self: MachoSymbol) u64 {
return self.nlist.n_value;
}
fn addressLessThan(lhs: MachoSymbol, rhs: MachoSymbol) bool {
return lhs.address() < rhs.address();
}
};
const MachOFile = struct {
bytes: []align(@alignOf(macho.mach_header_64)) const u8,
sect_debug_info: ?*const macho.section_64,
sect_debug_line: ?*const macho.section_64,
};
pub const DebugInfo = switch (builtin.os) {
builtin.Os.macosx => struct {
symbols: []const MachoSymbol,
strings: []const u8,
ofiles: OFileTable,
const OFileTable = std.HashMap(
*macho.nlist_64,
MachOFile,
std.hash_map.getHashPtrAddrFn(*macho.nlist_64),
std.hash_map.getTrivialEqlFn(*macho.nlist_64),
);
pub fn allocator(self: DebugInfo) *mem.Allocator {
return self.ofiles.allocator;
} }
}, },
else => struct { else => struct {
@ -388,17 +589,17 @@ pub const ElfStackTrace = switch (builtin.os) {
abbrev_table_list: ArrayList(AbbrevTableHeader), abbrev_table_list: ArrayList(AbbrevTableHeader),
compile_unit_list: ArrayList(CompileUnit), compile_unit_list: ArrayList(CompileUnit),
pub fn allocator(self: *const ElfStackTrace) *mem.Allocator { pub fn allocator(self: DebugInfo) *mem.Allocator {
return self.abbrev_table_list.allocator; return self.abbrev_table_list.allocator;
} }
pub fn readString(self: *ElfStackTrace) ![]u8 { pub fn readString(self: *DebugInfo) ![]u8 {
var in_file_stream = io.FileInStream.init(&self.self_exe_file); var in_file_stream = io.FileInStream.init(&self.self_exe_file);
const in_stream = &in_file_stream.stream; const in_stream = &in_file_stream.stream;
return readStringRaw(self.allocator(), in_stream); return readStringRaw(self.allocator(), in_stream);
} }
pub fn close(self: *ElfStackTrace) void { pub fn close(self: *DebugInfo) void {
self.self_exe_file.close(); self.self_exe_file.close();
self.elf.close(); self.elf.close();
} }
@ -505,7 +706,7 @@ const Die = struct {
}; };
} }
fn getAttrString(self: *const Die, st: *ElfStackTrace, id: u64) ![]u8 { fn getAttrString(self: *const Die, st: *DebugInfo, id: u64) ![]u8 {
const form_value = self.getAttr(id) orelse return error.MissingDebugInfo; const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) { return switch (form_value.*) {
FormValue.String => |value| value, FormValue.String => |value| value,
@ -620,7 +821,7 @@ fn readStringRaw(allocator: *mem.Allocator, in_stream: var) ![]u8 {
return buf.toSlice(); return buf.toSlice();
} }
fn getString(st: *ElfStackTrace, offset: u64) ![]u8 { fn getString(st: *DebugInfo, offset: u64) ![]u8 {
const pos = st.debug_str.offset + offset; const pos = st.debug_str.offset + offset;
try st.self_exe_file.seekTo(pos); try st.self_exe_file.seekTo(pos);
return st.readString(); return st.readString();
@ -672,14 +873,10 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, comptime T: type
const ParseFormValueError = error{ const ParseFormValueError = error{
EndOfStream, EndOfStream,
Io,
BadFd,
Unexpected,
InvalidDebugInfo, InvalidDebugInfo,
EndOfFile, EndOfFile,
IsDir,
OutOfMemory, OutOfMemory,
}; } || std.os.File.ReadError;
fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue { fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue {
return switch (form_id) { return switch (form_id) {
@ -731,7 +928,7 @@ fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64
}; };
} }
fn parseAbbrevTable(st: *ElfStackTrace) !AbbrevTable { fn parseAbbrevTable(st: *DebugInfo) !AbbrevTable {
const in_file = &st.self_exe_file; const in_file = &st.self_exe_file;
var in_file_stream = io.FileInStream.init(in_file); var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream; const in_stream = &in_file_stream.stream;
@ -761,7 +958,7 @@ fn parseAbbrevTable(st: *ElfStackTrace) !AbbrevTable {
/// Gets an already existing AbbrevTable given the abbrev_offset, or if not found, /// Gets an already existing AbbrevTable given the abbrev_offset, or if not found,
/// seeks in the stream and parses it. /// seeks in the stream and parses it.
fn getAbbrevTable(st: *ElfStackTrace, abbrev_offset: u64) !*const AbbrevTable { fn getAbbrevTable(st: *DebugInfo, abbrev_offset: u64) !*const AbbrevTable {
for (st.abbrev_table_list.toSlice()) |*header| { for (st.abbrev_table_list.toSlice()) |*header| {
if (header.offset == abbrev_offset) { if (header.offset == abbrev_offset) {
return &header.table; return &header.table;
@ -782,7 +979,7 @@ fn getAbbrevTableEntry(abbrev_table: *const AbbrevTable, abbrev_code: u64) ?*con
return null; return null;
} }
fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) !Die { fn parseDie(st: *DebugInfo, abbrev_table: *const AbbrevTable, is_64: bool) !Die {
const in_file = &st.self_exe_file; const in_file = &st.self_exe_file;
var in_file_stream = io.FileInStream.init(in_file); var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream; const in_stream = &in_file_stream.stream;
@ -804,12 +1001,210 @@ fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) !
return result; return result;
} }
fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, target_address: usize) !LineInfo { fn getLineNumberInfoMacOs(di: *DebugInfo, symbol: MachoSymbol, target_address: usize) !LineInfo {
const compile_unit_cwd = try compile_unit.die.getAttrString(st, DW.AT_comp_dir); const ofile = symbol.ofile orelse return error.MissingDebugInfo;
const gop = try di.ofiles.getOrPut(ofile);
const mach_o_file = if (gop.found_existing) &gop.kv.value else blk: {
errdefer _ = di.ofiles.remove(ofile);
const ofile_path = mem.toSliceConst(u8, di.strings.ptr + ofile.n_strx);
const in_file = &st.self_exe_file; gop.kv.value = MachOFile{
const debug_line_end = st.debug_line.offset + st.debug_line.size; .bytes = try std.io.readFileAllocAligned(di.ofiles.allocator, ofile_path, @alignOf(macho.mach_header_64)),
var this_offset = st.debug_line.offset; .sect_debug_info = null,
.sect_debug_line = null,
};
const hdr = @ptrCast(*const macho.mach_header_64, gop.kv.value.bytes.ptr);
if (hdr.magic != std.macho.MH_MAGIC_64) return error.InvalidDebugInfo;
const hdr_base = @ptrCast([*]const u8, hdr);
var ptr = hdr_base + @sizeOf(macho.mach_header_64);
var ncmd: u32 = hdr.ncmds;
const segcmd = while (ncmd != 0) : (ncmd -= 1) {
const lc = @ptrCast(*const std.macho.load_command, ptr);
switch (lc.cmd) {
std.macho.LC_SEGMENT_64 => break @ptrCast(*const std.macho.segment_command_64, ptr),
else => {},
}
ptr += lc.cmdsize; // TODO https://github.com/ziglang/zig/issues/1403
} else {
return error.MissingDebugInfo;
};
const sections = @alignCast(@alignOf(macho.section_64), @ptrCast([*]const macho.section_64, ptr + @sizeOf(std.macho.segment_command_64)))[0..segcmd.nsects];
for (sections) |*sect| {
if (sect.flags & macho.SECTION_TYPE == macho.S_REGULAR and
(sect.flags & macho.SECTION_ATTRIBUTES) & macho.S_ATTR_DEBUG == macho.S_ATTR_DEBUG)
{
const sect_name = mem.toSliceConst(u8, &sect.sectname);
if (mem.eql(u8, sect_name, "__debug_line")) {
gop.kv.value.sect_debug_line = sect;
} else if (mem.eql(u8, sect_name, "__debug_info")) {
gop.kv.value.sect_debug_info = sect;
}
}
}
break :blk &gop.kv.value;
};
const sect_debug_line = mach_o_file.sect_debug_line orelse return error.MissingDebugInfo;
var ptr = mach_o_file.bytes.ptr + sect_debug_line.offset;
var is_64: bool = undefined;
const unit_length = try readInitialLengthMem(&ptr, &is_64);
if (unit_length == 0) return error.MissingDebugInfo;
const version = readIntMem(&ptr, u16, builtin.Endian.Little);
// TODO support 3 and 5
if (version != 2 and version != 4) return error.InvalidDebugInfo;
const prologue_length = if (is_64)
readIntMem(&ptr, u64, builtin.Endian.Little)
else
readIntMem(&ptr, u32, builtin.Endian.Little);
const prog_start = ptr + prologue_length;
const minimum_instruction_length = readByteMem(&ptr);
if (minimum_instruction_length == 0) return error.InvalidDebugInfo;
if (version >= 4) {
// maximum_operations_per_instruction
ptr += 1;
}
const default_is_stmt = readByteMem(&ptr) != 0;
const line_base = readByteSignedMem(&ptr);
const line_range = readByteMem(&ptr);
if (line_range == 0) return error.InvalidDebugInfo;
const opcode_base = readByteMem(&ptr);
const standard_opcode_lengths = ptr[0 .. opcode_base - 1];
ptr += opcode_base - 1;
var include_directories = ArrayList([]const u8).init(di.allocator());
try include_directories.append("");
while (true) {
const dir = readStringMem(&ptr);
if (dir.len == 0) break;
try include_directories.append(dir);
}
var file_entries = ArrayList(FileEntry).init(di.allocator());
var prog = LineNumberProgram.init(default_is_stmt, include_directories.toSliceConst(), &file_entries, target_address);
while (true) {
const file_name = readStringMem(&ptr);
if (file_name.len == 0) break;
const dir_index = try readULeb128Mem(&ptr);
const mtime = try readULeb128Mem(&ptr);
const len_bytes = try readULeb128Mem(&ptr);
try file_entries.append(FileEntry{
.file_name = file_name,
.dir_index = dir_index,
.mtime = mtime,
.len_bytes = len_bytes,
});
}
ptr = prog_start;
while (true) {
const opcode = readByteMem(&ptr);
if (opcode == DW.LNS_extended_op) {
const op_size = try readULeb128Mem(&ptr);
if (op_size < 1) return error.InvalidDebugInfo;
var sub_op = readByteMem(&ptr);
switch (sub_op) {
DW.LNE_end_sequence => {
prog.end_sequence = true;
if (try prog.checkLineMatch()) |info| return info;
return error.MissingDebugInfo;
},
DW.LNE_set_address => {
const addr = readIntMem(&ptr, usize, builtin.Endian.Little);
prog.address = symbol.reloc + addr;
},
DW.LNE_define_file => {
const file_name = readStringMem(&ptr);
const dir_index = try readULeb128Mem(&ptr);
const mtime = try readULeb128Mem(&ptr);
const len_bytes = try readULeb128Mem(&ptr);
try file_entries.append(FileEntry{
.file_name = file_name,
.dir_index = dir_index,
.mtime = mtime,
.len_bytes = len_bytes,
});
},
else => {
ptr += op_size - 1;
},
}
} else if (opcode >= opcode_base) {
// special opcodes
const adjusted_opcode = opcode - opcode_base;
const inc_addr = minimum_instruction_length * (adjusted_opcode / line_range);
const inc_line = i32(line_base) + i32(adjusted_opcode % line_range);
prog.line += inc_line;
prog.address += inc_addr;
if (try prog.checkLineMatch()) |info| return info;
prog.basic_block = false;
} else {
switch (opcode) {
DW.LNS_copy => {
if (try prog.checkLineMatch()) |info| return info;
prog.basic_block = false;
},
DW.LNS_advance_pc => {
const arg = try readULeb128Mem(&ptr);
prog.address += arg * minimum_instruction_length;
},
DW.LNS_advance_line => {
const arg = try readILeb128Mem(&ptr);
prog.line += arg;
},
DW.LNS_set_file => {
const arg = try readULeb128Mem(&ptr);
prog.file = arg;
},
DW.LNS_set_column => {
const arg = try readULeb128Mem(&ptr);
prog.column = arg;
},
DW.LNS_negate_stmt => {
prog.is_stmt = !prog.is_stmt;
},
DW.LNS_set_basic_block => {
prog.basic_block = true;
},
DW.LNS_const_add_pc => {
const inc_addr = minimum_instruction_length * ((255 - opcode_base) / line_range);
prog.address += inc_addr;
},
DW.LNS_fixed_advance_pc => {
const arg = readIntMem(&ptr, u16, builtin.Endian.Little);
prog.address += arg;
},
DW.LNS_set_prologue_end => {},
else => {
if (opcode - 1 >= standard_opcode_lengths.len) return error.InvalidDebugInfo;
const len_bytes = standard_opcode_lengths[opcode - 1];
ptr += len_bytes;
},
}
}
}
return error.MissingDebugInfo;
}
fn getLineNumberInfoLinux(di: *DebugInfo, compile_unit: *const CompileUnit, target_address: usize) !LineInfo {
const compile_unit_cwd = try compile_unit.die.getAttrString(di, DW.AT_comp_dir);
const in_file = &di.self_exe_file;
const debug_line_end = di.debug_line.offset + di.debug_line.size;
var this_offset = di.debug_line.offset;
var this_index: usize = 0; var this_index: usize = 0;
var in_file_stream = io.FileInStream.init(in_file); var in_file_stream = io.FileInStream.init(in_file);
@ -828,11 +1223,11 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
continue; continue;
} }
const version = try in_stream.readInt(st.elf.endian, u16); const version = try in_stream.readInt(di.elf.endian, u16);
// TODO support 3 and 5 // TODO support 3 and 5
if (version != 2 and version != 4) return error.InvalidDebugInfo; if (version != 2 and version != 4) return error.InvalidDebugInfo;
const prologue_length = if (is_64) try in_stream.readInt(st.elf.endian, u64) else try in_stream.readInt(st.elf.endian, u32); const prologue_length = if (is_64) try in_stream.readInt(di.elf.endian, u64) else try in_stream.readInt(di.elf.endian, u32);
const prog_start_offset = (try in_file.getPos()) + prologue_length; const prog_start_offset = (try in_file.getPos()) + prologue_length;
const minimum_instruction_length = try in_stream.readByte(); const minimum_instruction_length = try in_stream.readByte();
@ -851,7 +1246,7 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
const opcode_base = try in_stream.readByte(); const opcode_base = try in_stream.readByte();
const standard_opcode_lengths = try st.allocator().alloc(u8, opcode_base - 1); const standard_opcode_lengths = try di.allocator().alloc(u8, opcode_base - 1);
{ {
var i: usize = 0; var i: usize = 0;
@ -860,19 +1255,19 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
} }
} }
var include_directories = ArrayList([]u8).init(st.allocator()); var include_directories = ArrayList([]u8).init(di.allocator());
try include_directories.append(compile_unit_cwd); try include_directories.append(compile_unit_cwd);
while (true) { while (true) {
const dir = try st.readString(); const dir = try di.readString();
if (dir.len == 0) break; if (dir.len == 0) break;
try include_directories.append(dir); try include_directories.append(dir);
} }
var file_entries = ArrayList(FileEntry).init(st.allocator()); var file_entries = ArrayList(FileEntry).init(di.allocator());
var prog = LineNumberProgram.init(default_is_stmt, include_directories.toSliceConst(), &file_entries, target_address); var prog = LineNumberProgram.init(default_is_stmt, include_directories.toSliceConst(), &file_entries, target_address);
while (true) { while (true) {
const file_name = try st.readString(); const file_name = try di.readString();
if (file_name.len == 0) break; if (file_name.len == 0) break;
const dir_index = try readULeb128(in_stream); const dir_index = try readULeb128(in_stream);
const mtime = try readULeb128(in_stream); const mtime = try readULeb128(in_stream);
@ -890,11 +1285,10 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
while (true) { while (true) {
const opcode = try in_stream.readByte(); const opcode = try in_stream.readByte();
var sub_op: u8 = undefined; // TODO move this to the correct scope and fix the compiler crash
if (opcode == DW.LNS_extended_op) { if (opcode == DW.LNS_extended_op) {
const op_size = try readULeb128(in_stream); const op_size = try readULeb128(in_stream);
if (op_size < 1) return error.InvalidDebugInfo; if (op_size < 1) return error.InvalidDebugInfo;
sub_op = try in_stream.readByte(); var sub_op = try in_stream.readByte();
switch (sub_op) { switch (sub_op) {
DW.LNE_end_sequence => { DW.LNE_end_sequence => {
prog.end_sequence = true; prog.end_sequence = true;
@ -902,11 +1296,11 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
return error.MissingDebugInfo; return error.MissingDebugInfo;
}, },
DW.LNE_set_address => { DW.LNE_set_address => {
const addr = try in_stream.readInt(st.elf.endian, usize); const addr = try in_stream.readInt(di.elf.endian, usize);
prog.address = addr; prog.address = addr;
}, },
DW.LNE_define_file => { DW.LNE_define_file => {
const file_name = try st.readString(); const file_name = try di.readString();
const dir_index = try readULeb128(in_stream); const dir_index = try readULeb128(in_stream);
const mtime = try readULeb128(in_stream); const mtime = try readULeb128(in_stream);
const len_bytes = try readULeb128(in_stream); const len_bytes = try readULeb128(in_stream);
@ -964,7 +1358,7 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
prog.address += inc_addr; prog.address += inc_addr;
}, },
DW.LNS_fixed_advance_pc => { DW.LNS_fixed_advance_pc => {
const arg = try in_stream.readInt(st.elf.endian, u16); const arg = try in_stream.readInt(di.elf.endian, u16);
prog.address += arg; prog.address += arg;
}, },
DW.LNS_set_prologue_end => {}, DW.LNS_set_prologue_end => {},
@ -983,7 +1377,7 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
return error.MissingDebugInfo; return error.MissingDebugInfo;
} }
fn scanAllCompileUnits(st: *ElfStackTrace) !void { fn scanAllCompileUnits(st: *DebugInfo) !void {
const debug_info_end = st.debug_info.offset + st.debug_info.size; const debug_info_end = st.debug_info.offset + st.debug_info.size;
var this_unit_offset = st.debug_info.offset; var this_unit_offset = st.debug_info.offset;
var cu_index: usize = 0; var cu_index: usize = 0;
@ -1053,7 +1447,7 @@ fn scanAllCompileUnits(st: *ElfStackTrace) !void {
} }
} }
fn findCompileUnit(st: *ElfStackTrace, target_address: u64) !*const CompileUnit { fn findCompileUnit(st: *DebugInfo, target_address: u64) !*const CompileUnit {
var in_file_stream = io.FileInStream.init(&st.self_exe_file); var in_file_stream = io.FileInStream.init(&st.self_exe_file);
const in_stream = &in_file_stream.stream; const in_stream = &in_file_stream.stream;
for (st.compile_unit_list.toSlice()) |*compile_unit| { for (st.compile_unit_list.toSlice()) |*compile_unit| {
@ -1087,6 +1481,89 @@ fn findCompileUnit(st: *ElfStackTrace, target_address: u64) !*const CompileUnit
return error.MissingDebugInfo; return error.MissingDebugInfo;
} }
fn readIntMem(ptr: *[*]const u8, comptime T: type, endian: builtin.Endian) T {
const result = mem.readInt(ptr.*[0..@sizeOf(T)], T, endian);
ptr.* += @sizeOf(T);
return result;
}
fn readByteMem(ptr: *[*]const u8) u8 {
const result = ptr.*[0];
ptr.* += 1;
return result;
}
fn readByteSignedMem(ptr: *[*]const u8) i8 {
return @bitCast(i8, readByteMem(ptr));
}
fn readInitialLengthMem(ptr: *[*]const u8, is_64: *bool) !u64 {
const first_32_bits = mem.readIntLE(u32, ptr.*[0..4]);
is_64.* = (first_32_bits == 0xffffffff);
if (is_64.*) {
ptr.* += 4;
const result = mem.readIntLE(u64, ptr.*[0..8]);
ptr.* += 8;
return result;
} else {
if (first_32_bits >= 0xfffffff0) return error.InvalidDebugInfo;
ptr.* += 4;
return u64(first_32_bits);
}
}
fn readStringMem(ptr: *[*]const u8) []const u8 {
const result = mem.toSliceConst(u8, ptr.*);
ptr.* += result.len + 1;
return result;
}
fn readULeb128Mem(ptr: *[*]const u8) !u64 {
var result: u64 = 0;
var shift: usize = 0;
var i: usize = 0;
while (true) {
const byte = ptr.*[i];
i += 1;
var operand: u64 = undefined;
if (@shlWithOverflow(u64, byte & 0b01111111, @intCast(u6, shift), &operand)) return error.InvalidDebugInfo;
result |= operand;
if ((byte & 0b10000000) == 0) {
ptr.* += i;
return result;
}
shift += 7;
}
}
fn readILeb128Mem(ptr: *[*]const u8) !i64 {
var result: i64 = 0;
var shift: usize = 0;
var i: usize = 0;
while (true) {
const byte = ptr.*[i];
i += 1;
var operand: i64 = undefined;
if (@shlWithOverflow(i64, byte & 0b01111111, @intCast(u6, shift), &operand)) return error.InvalidDebugInfo;
result |= operand;
shift += 7;
if ((byte & 0b10000000) == 0) {
if (shift < @sizeOf(i64) * 8 and (byte & 0b01000000) != 0) result |= -(i64(1) << @intCast(u6, shift));
ptr.* += i;
return result;
}
}
}
fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool) !u64 { fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool) !u64 {
const first_32_bits = try in_stream.readIntLe(u32); const first_32_bits = try in_stream.readIntLe(u32);
is_64.* = (first_32_bits == 0xffffffff); is_64.* = (first_32_bits == 0xffffffff);
@ -1143,7 +1620,7 @@ pub const global_allocator = &global_fixed_allocator.allocator;
var global_fixed_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(global_allocator_mem[0..]); var global_fixed_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(global_allocator_mem[0..]);
var global_allocator_mem: [100 * 1024]u8 = undefined; var global_allocator_mem: [100 * 1024]u8 = undefined;
// TODO make thread safe /// TODO multithreaded awareness
var debug_info_allocator: ?*mem.Allocator = null; var debug_info_allocator: ?*mem.Allocator = null;
var debug_info_direct_allocator: std.heap.DirectAllocator = undefined; var debug_info_direct_allocator: std.heap.DirectAllocator = undefined;
var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined; var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;

View File

@ -869,6 +869,11 @@ pub const Phdr = switch (@sizeOf(usize)) {
8 => Elf64_Phdr, 8 => Elf64_Phdr,
else => @compileError("expected pointer size of 32 or 64"), else => @compileError("expected pointer size of 32 or 64"),
}; };
pub const Shdr = switch (@sizeOf(usize)) {
4 => Elf32_Shdr,
8 => Elf64_Shdr,
else => @compileError("expected pointer size of 32 or 64"),
};
pub const Sym = switch (@sizeOf(usize)) { pub const Sym = switch (@sizeOf(usize)) {
4 => Elf32_Sym, 4 => Elf32_Sym,
8 => Elf64_Sym, 8 => Elf64_Sym,

View File

@ -1,17 +1,23 @@
pub const Locked = @import("event/locked.zig").Locked;
pub const Loop = @import("event/loop.zig").Loop;
pub const Lock = @import("event/lock.zig").Lock;
pub const tcp = @import("event/tcp.zig");
pub const Channel = @import("event/channel.zig").Channel; pub const Channel = @import("event/channel.zig").Channel;
pub const Group = @import("event/group.zig").Group;
pub const Future = @import("event/future.zig").Future; pub const Future = @import("event/future.zig").Future;
pub const Group = @import("event/group.zig").Group;
pub const Lock = @import("event/lock.zig").Lock;
pub const Locked = @import("event/locked.zig").Locked;
pub const RwLock = @import("event/rwlock.zig").RwLock;
pub const RwLocked = @import("event/rwlocked.zig").RwLocked;
pub const Loop = @import("event/loop.zig").Loop;
pub const fs = @import("event/fs.zig");
pub const tcp = @import("event/tcp.zig");
test "import event tests" { test "import event tests" {
_ = @import("event/locked.zig");
_ = @import("event/loop.zig");
_ = @import("event/lock.zig");
_ = @import("event/tcp.zig");
_ = @import("event/channel.zig"); _ = @import("event/channel.zig");
_ = @import("event/group.zig"); _ = @import("event/fs.zig");
_ = @import("event/future.zig"); _ = @import("event/future.zig");
_ = @import("event/group.zig");
_ = @import("event/lock.zig");
_ = @import("event/locked.zig");
_ = @import("event/rwlock.zig");
_ = @import("event/rwlocked.zig");
_ = @import("event/loop.zig");
_ = @import("event/tcp.zig");
} }

View File

@ -5,7 +5,7 @@ const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder; const AtomicOrder = builtin.AtomicOrder;
const Loop = std.event.Loop; const Loop = std.event.Loop;
/// many producer, many consumer, thread-safe, lock-free, runtime configurable buffer size /// many producer, many consumer, thread-safe, runtime configurable buffer size
/// when buffer is empty, consumers suspend and are resumed by producers /// when buffer is empty, consumers suspend and are resumed by producers
/// when buffer is full, producers suspend and are resumed by consumers /// when buffer is full, producers suspend and are resumed by consumers
pub fn Channel(comptime T: type) type { pub fn Channel(comptime T: type) type {
@ -13,6 +13,7 @@ pub fn Channel(comptime T: type) type {
loop: *Loop, loop: *Loop,
getters: std.atomic.Queue(GetNode), getters: std.atomic.Queue(GetNode),
or_null_queue: std.atomic.Queue(*std.atomic.Queue(GetNode).Node),
putters: std.atomic.Queue(PutNode), putters: std.atomic.Queue(PutNode),
get_count: usize, get_count: usize,
put_count: usize, put_count: usize,
@ -26,8 +27,22 @@ pub fn Channel(comptime T: type) type {
const SelfChannel = this; const SelfChannel = this;
const GetNode = struct { const GetNode = struct {
ptr: *T,
tick_node: *Loop.NextTickNode, tick_node: *Loop.NextTickNode,
data: Data,
const Data = union(enum) {
Normal: Normal,
OrNull: OrNull,
};
const Normal = struct {
ptr: *T,
};
const OrNull = struct {
ptr: *?T,
or_null: *std.atomic.Queue(*std.atomic.Queue(GetNode).Node).Node,
};
}; };
const PutNode = struct { const PutNode = struct {
data: T, data: T,
@ -48,6 +63,7 @@ pub fn Channel(comptime T: type) type {
.need_dispatch = 0, .need_dispatch = 0,
.getters = std.atomic.Queue(GetNode).init(), .getters = std.atomic.Queue(GetNode).init(),
.putters = std.atomic.Queue(PutNode).init(), .putters = std.atomic.Queue(PutNode).init(),
.or_null_queue = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).init(),
.get_count = 0, .get_count = 0,
.put_count = 0, .put_count = 0,
}); });
@ -71,18 +87,29 @@ pub fn Channel(comptime T: type) type {
/// puts a data item in the channel. The promise completes when the value has been added to the /// puts a data item in the channel. The promise completes when the value has been added to the
/// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter. /// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter.
pub async fn put(self: *SelfChannel, data: T) void { pub async fn put(self: *SelfChannel, data: T) void {
// TODO fix this workaround
suspend { suspend {
var my_tick_node = Loop.NextTickNode{ resume @handle();
.next = undefined, }
.data = @handle(),
}; var my_tick_node = Loop.NextTickNode.init(@handle());
var queue_node = std.atomic.Queue(PutNode).Node{ var queue_node = std.atomic.Queue(PutNode).Node.init(PutNode{
.data = PutNode{
.tick_node = &my_tick_node, .tick_node = &my_tick_node,
.data = data, .data = data,
}, });
.next = undefined,
}; // TODO test canceling a put()
errdefer {
_ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
const need_dispatch = !self.putters.remove(&queue_node);
self.loop.cancelOnNextTick(&my_tick_node);
if (need_dispatch) {
// oops we made the put_count incorrect for a period of time. fix by dispatching.
_ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
self.dispatch();
}
}
suspend {
self.putters.put(&queue_node); self.putters.put(&queue_node);
_ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
@ -93,21 +120,35 @@ pub fn Channel(comptime T: type) type {
/// await this function to get an item from the channel. If the buffer is empty, the promise will /// await this function to get an item from the channel. If the buffer is empty, the promise will
/// complete when the next item is put in the channel. /// complete when the next item is put in the channel.
pub async fn get(self: *SelfChannel) T { pub async fn get(self: *SelfChannel) T {
// TODO fix this workaround
suspend {
resume @handle();
}
// TODO integrate this function with named return values // TODO integrate this function with named return values
// so we can get rid of this extra result copy // so we can get rid of this extra result copy
var result: T = undefined; var result: T = undefined;
suspend { var my_tick_node = Loop.NextTickNode.init(@handle());
var my_tick_node = Loop.NextTickNode{ var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode{
.next = undefined,
.data = @handle(),
};
var queue_node = std.atomic.Queue(GetNode).Node{
.data = GetNode{
.ptr = &result,
.tick_node = &my_tick_node, .tick_node = &my_tick_node,
.data = GetNode.Data{
.Normal = GetNode.Normal{ .ptr = &result },
}, },
.next = undefined, });
};
// TODO test canceling a get()
errdefer {
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
const need_dispatch = !self.getters.remove(&queue_node);
self.loop.cancelOnNextTick(&my_tick_node);
if (need_dispatch) {
// oops we made the get_count incorrect for a period of time. fix by dispatching.
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
self.dispatch();
}
}
suspend {
self.getters.put(&queue_node); self.getters.put(&queue_node);
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
@ -116,6 +157,64 @@ pub fn Channel(comptime T: type) type {
return result; return result;
} }
//pub async fn select(comptime EnumUnion: type, channels: ...) EnumUnion {
// assert(@memberCount(EnumUnion) == channels.len); // enum union and channels mismatch
// assert(channels.len != 0); // enum unions cannot have 0 fields
// if (channels.len == 1) {
// const result = await (async channels[0].get() catch unreachable);
// return @unionInit(EnumUnion, @memberName(EnumUnion, 0), result);
// }
//}
/// Await this function to get an item from the channel. If the buffer is empty and there are no
/// puts waiting, this returns null.
/// Await is necessary for locking purposes. The function will be resumed after checking the channel
/// for data and will not wait for data to be available.
pub async fn getOrNull(self: *SelfChannel) ?T {
// TODO fix this workaround
suspend {
resume @handle();
}
// TODO integrate this function with named return values
// so we can get rid of this extra result copy
var result: ?T = null;
var my_tick_node = Loop.NextTickNode.init(@handle());
var or_null_node = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).Node.init(undefined);
var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode{
.tick_node = &my_tick_node,
.data = GetNode.Data{
.OrNull = GetNode.OrNull{
.ptr = &result,
.or_null = &or_null_node,
},
},
});
or_null_node.data = &queue_node;
// TODO test canceling getOrNull
errdefer {
_ = self.or_null_queue.remove(&or_null_node);
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
const need_dispatch = !self.getters.remove(&queue_node);
self.loop.cancelOnNextTick(&my_tick_node);
if (need_dispatch) {
// oops we made the get_count incorrect for a period of time. fix by dispatching.
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
self.dispatch();
}
}
suspend {
self.getters.put(&queue_node);
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
self.or_null_queue.put(&or_null_node);
self.dispatch();
}
return result;
}
fn dispatch(self: *SelfChannel) void { fn dispatch(self: *SelfChannel) void {
// set the "need dispatch" flag // set the "need dispatch" flag
_ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
@ -139,7 +238,15 @@ pub fn Channel(comptime T: type) type {
if (get_count == 0) break :one_dispatch; if (get_count == 0) break :one_dispatch;
const get_node = &self.getters.get().?.data; const get_node = &self.getters.get().?.data;
get_node.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len]; switch (get_node.data) {
GetNode.Data.Normal => |info| {
info.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
},
GetNode.Data.OrNull => |info| {
_ = self.or_null_queue.remove(info.or_null);
info.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
},
}
self.loop.onNextTick(get_node.tick_node); self.loop.onNextTick(get_node.tick_node);
self.buffer_len -= 1; self.buffer_len -= 1;
@ -151,7 +258,15 @@ pub fn Channel(comptime T: type) type {
const get_node = &self.getters.get().?.data; const get_node = &self.getters.get().?.data;
const put_node = &self.putters.get().?.data; const put_node = &self.putters.get().?.data;
get_node.ptr.* = put_node.data; switch (get_node.data) {
GetNode.Data.Normal => |info| {
info.ptr.* = put_node.data;
},
GetNode.Data.OrNull => |info| {
_ = self.or_null_queue.remove(info.or_null);
info.ptr.* = put_node.data;
},
}
self.loop.onNextTick(get_node.tick_node); self.loop.onNextTick(get_node.tick_node);
self.loop.onNextTick(put_node.tick_node); self.loop.onNextTick(put_node.tick_node);
@ -176,6 +291,16 @@ pub fn Channel(comptime T: type) type {
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
_ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
// All the "get or null" functions should resume now.
var remove_count: usize = 0;
while (self.or_null_queue.get()) |or_null_node| {
remove_count += @boolToInt(self.getters.remove(or_null_node.data));
self.loop.onNextTick(or_null_node.data.data.tick_node);
}
if (remove_count != 0) {
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, remove_count, AtomicOrder.SeqCst);
}
// clear need-dispatch flag // clear need-dispatch flag
const need_dispatch = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst); const need_dispatch = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
if (need_dispatch != 0) continue; if (need_dispatch != 0) continue;
@ -226,6 +351,15 @@ async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void {
const value2_promise = try async channel.get(); const value2_promise = try async channel.get();
const value2 = await value2_promise; const value2 = await value2_promise;
assert(value2 == 4567); assert(value2 == 4567);
const value3_promise = try async channel.getOrNull();
const value3 = await value3_promise;
assert(value3 == null);
const last_put = try async testPut(channel, 4444);
const value4 = await try async channel.getOrNull();
assert(value4.? == 4444);
await last_put;
} }
async fn testChannelPutter(channel: *Channel(i32)) void { async fn testChannelPutter(channel: *Channel(i32)) void {
@ -233,3 +367,6 @@ async fn testChannelPutter(channel: *Channel(i32)) void {
await (async channel.put(4567) catch @panic("out of memory")); await (async channel.put(4567) catch @panic("out of memory"));
} }
async fn testPut(channel: *Channel(i32), value: i32) void {
await (async channel.put(value) catch @panic("out of memory"));
}

1347
std/event/fs.zig Normal file

File diff suppressed because it is too large Load Diff

View File

@ -29,6 +29,17 @@ pub fn Group(comptime ReturnType: type) type {
}; };
} }
/// Cancel all the outstanding promises. Can be called even if wait was already called.
pub fn deinit(self: *Self) void {
while (self.coro_stack.pop()) |node| {
cancel node.data;
}
while (self.alloc_stack.pop()) |node| {
cancel node.data;
self.lock.loop.allocator.destroy(node);
}
}
/// Add a promise to the group. Thread-safe. /// Add a promise to the group. Thread-safe.
pub fn add(self: *Self, handle: promise->ReturnType) (error{OutOfMemory}!void) { pub fn add(self: *Self, handle: promise->ReturnType) (error{OutOfMemory}!void) {
const node = try self.lock.loop.allocator.create(Stack.Node{ const node = try self.lock.loop.allocator.create(Stack.Node{
@ -88,7 +99,7 @@ pub fn Group(comptime ReturnType: type) type {
await node.data; await node.data;
} else { } else {
(await node.data) catch |err| { (await node.data) catch |err| {
self.cancelAll(); self.deinit();
return err; return err;
}; };
} }
@ -100,25 +111,12 @@ pub fn Group(comptime ReturnType: type) type {
await handle; await handle;
} else { } else {
(await handle) catch |err| { (await handle) catch |err| {
self.cancelAll(); self.deinit();
return err; return err;
}; };
} }
} }
} }
/// Cancel all the outstanding promises. May only be called if wait was never called.
/// TODO These should be `cancelasync` not `cancel`.
/// See https://github.com/ziglang/zig/issues/1261
pub fn cancelAll(self: *Self) void {
while (self.coro_stack.pop()) |node| {
cancel node.data;
}
while (self.alloc_stack.pop()) |node| {
cancel node.data;
self.lock.loop.allocator.destroy(node);
}
}
}; };
} }

View File

@ -9,6 +9,7 @@ const Loop = std.event.Loop;
/// Thread-safe async/await lock. /// Thread-safe async/await lock.
/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and /// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
/// are resumed when the lock is released, in order. /// are resumed when the lock is released, in order.
/// Allows only one actor to hold the lock.
pub const Lock = struct { pub const Lock = struct {
loop: *Loop, loop: *Loop,
shared_bit: u8, // TODO make this a bool shared_bit: u8, // TODO make this a bool
@ -90,13 +91,14 @@ pub const Lock = struct {
} }
pub async fn acquire(self: *Lock) Held { pub async fn acquire(self: *Lock) Held {
suspend {
// TODO explicitly put this memory in the coroutine frame #1194 // TODO explicitly put this memory in the coroutine frame #1194
var my_tick_node = Loop.NextTickNode{ suspend {
.data = @handle(), resume @handle();
.next = undefined, }
}; var my_tick_node = Loop.NextTickNode.init(@handle());
errdefer _ = self.queue.remove(&my_tick_node); // TODO test canceling an acquire
suspend {
self.queue.put(&my_tick_node); self.queue.put(&my_tick_node);
// At this point, we are in the queue, so we might have already been resumed and this coroutine // At this point, we are in the queue, so we might have already been resumed and this coroutine
@ -146,6 +148,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void {
} }
const handle1 = async lockRunner(lock) catch @panic("out of memory"); const handle1 = async lockRunner(lock) catch @panic("out of memory");
var tick_node1 = Loop.NextTickNode{ var tick_node1 = Loop.NextTickNode{
.prev = undefined,
.next = undefined, .next = undefined,
.data = handle1, .data = handle1,
}; };
@ -153,6 +156,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void {
const handle2 = async lockRunner(lock) catch @panic("out of memory"); const handle2 = async lockRunner(lock) catch @panic("out of memory");
var tick_node2 = Loop.NextTickNode{ var tick_node2 = Loop.NextTickNode{
.prev = undefined,
.next = undefined, .next = undefined,
.data = handle2, .data = handle2,
}; };
@ -160,6 +164,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void {
const handle3 = async lockRunner(lock) catch @panic("out of memory"); const handle3 = async lockRunner(lock) catch @panic("out of memory");
var tick_node3 = Loop.NextTickNode{ var tick_node3 = Loop.NextTickNode{
.prev = undefined,
.next = undefined, .next = undefined,
.data = handle3, .data = handle3,
}; };

View File

@ -2,10 +2,12 @@ const std = @import("../index.zig");
const builtin = @import("builtin"); const builtin = @import("builtin");
const assert = std.debug.assert; const assert = std.debug.assert;
const mem = std.mem; const mem = std.mem;
const posix = std.os.posix;
const windows = std.os.windows;
const AtomicRmwOp = builtin.AtomicRmwOp; const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder; const AtomicOrder = builtin.AtomicOrder;
const fs = std.event.fs;
const os = std.os;
const posix = os.posix;
const windows = os.windows;
pub const Loop = struct { pub const Loop = struct {
allocator: *mem.Allocator, allocator: *mem.Allocator,
@ -13,7 +15,7 @@ pub const Loop = struct {
os_data: OsData, os_data: OsData,
final_resume_node: ResumeNode, final_resume_node: ResumeNode,
pending_event_count: usize, pending_event_count: usize,
extra_threads: []*std.os.Thread, extra_threads: []*os.Thread,
// pre-allocated eventfds. all permanently active. // pre-allocated eventfds. all permanently active.
// this is how we send promises to be resumed on other threads. // this is how we send promises to be resumed on other threads.
@ -50,6 +52,22 @@ pub const Loop = struct {
base: ResumeNode, base: ResumeNode,
kevent: posix.Kevent, kevent: posix.Kevent,
}; };
pub const Basic = switch (builtin.os) {
builtin.Os.macosx => MacOsBasic,
builtin.Os.linux => struct {
base: ResumeNode,
},
builtin.Os.windows => struct {
base: ResumeNode,
},
else => @compileError("unsupported OS"),
};
const MacOsBasic = struct {
base: ResumeNode,
kev: posix.Kevent,
};
}; };
/// After initialization, call run(). /// After initialization, call run().
@ -65,7 +83,7 @@ pub const Loop = struct {
/// TODO copy elision / named return values so that the threads referencing *Loop /// TODO copy elision / named return values so that the threads referencing *Loop
/// have the correct pointer value. /// have the correct pointer value.
pub fn initMultiThreaded(self: *Loop, allocator: *mem.Allocator) !void { pub fn initMultiThreaded(self: *Loop, allocator: *mem.Allocator) !void {
const core_count = try std.os.cpuCount(allocator); const core_count = try os.cpuCount(allocator);
return self.initInternal(allocator, core_count); return self.initInternal(allocator, core_count);
} }
@ -92,7 +110,7 @@ pub const Loop = struct {
); );
errdefer self.allocator.free(self.eventfd_resume_nodes); errdefer self.allocator.free(self.eventfd_resume_nodes);
self.extra_threads = try self.allocator.alloc(*std.os.Thread, extra_thread_count); self.extra_threads = try self.allocator.alloc(*os.Thread, extra_thread_count);
errdefer self.allocator.free(self.extra_threads); errdefer self.allocator.free(self.extra_threads);
try self.initOsData(extra_thread_count); try self.initOsData(extra_thread_count);
@ -104,17 +122,30 @@ pub const Loop = struct {
self.allocator.free(self.extra_threads); self.allocator.free(self.extra_threads);
} }
const InitOsDataError = std.os.LinuxEpollCreateError || mem.Allocator.Error || std.os.LinuxEventFdError || const InitOsDataError = os.LinuxEpollCreateError || mem.Allocator.Error || os.LinuxEventFdError ||
std.os.SpawnThreadError || std.os.LinuxEpollCtlError || std.os.BsdKEventError || os.SpawnThreadError || os.LinuxEpollCtlError || os.BsdKEventError ||
std.os.WindowsCreateIoCompletionPortError; os.WindowsCreateIoCompletionPortError;
const wakeup_bytes = []u8{0x1} ** 8; const wakeup_bytes = []u8{0x1} ** 8;
fn initOsData(self: *Loop, extra_thread_count: usize) InitOsDataError!void { fn initOsData(self: *Loop, extra_thread_count: usize) InitOsDataError!void {
switch (builtin.os) { switch (builtin.os) {
builtin.Os.linux => { builtin.Os.linux => {
self.os_data.fs_queue = std.atomic.Queue(fs.Request).init();
self.os_data.fs_queue_item = 0;
// we need another thread for the file system because Linux does not have an async
// file system I/O API.
self.os_data.fs_end_request = fs.RequestNode{
.prev = undefined,
.next = undefined,
.data = fs.Request{
.msg = fs.Request.Msg.End,
.finish = fs.Request.Finish.NoAction,
},
};
errdefer { errdefer {
while (self.available_eventfd_resume_nodes.pop()) |node| std.os.close(node.data.eventfd); while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
} }
for (self.eventfd_resume_nodes) |*eventfd_node| { for (self.eventfd_resume_nodes) |*eventfd_node| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{ eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
@ -123,7 +154,7 @@ pub const Loop = struct {
.id = ResumeNode.Id.EventFd, .id = ResumeNode.Id.EventFd,
.handle = undefined, .handle = undefined,
}, },
.eventfd = try std.os.linuxEventFd(1, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK), .eventfd = try os.linuxEventFd(1, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK),
.epoll_op = posix.EPOLL_CTL_ADD, .epoll_op = posix.EPOLL_CTL_ADD,
}, },
.next = undefined, .next = undefined,
@ -131,44 +162,62 @@ pub const Loop = struct {
self.available_eventfd_resume_nodes.push(eventfd_node); self.available_eventfd_resume_nodes.push(eventfd_node);
} }
self.os_data.epollfd = try std.os.linuxEpollCreate(posix.EPOLL_CLOEXEC); self.os_data.epollfd = try os.linuxEpollCreate(posix.EPOLL_CLOEXEC);
errdefer std.os.close(self.os_data.epollfd); errdefer os.close(self.os_data.epollfd);
self.os_data.final_eventfd = try std.os.linuxEventFd(0, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK); self.os_data.final_eventfd = try os.linuxEventFd(0, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK);
errdefer std.os.close(self.os_data.final_eventfd); errdefer os.close(self.os_data.final_eventfd);
self.os_data.final_eventfd_event = posix.epoll_event{ self.os_data.final_eventfd_event = posix.epoll_event{
.events = posix.EPOLLIN, .events = posix.EPOLLIN,
.data = posix.epoll_data{ .ptr = @ptrToInt(&self.final_resume_node) }, .data = posix.epoll_data{ .ptr = @ptrToInt(&self.final_resume_node) },
}; };
try std.os.linuxEpollCtl( try os.linuxEpollCtl(
self.os_data.epollfd, self.os_data.epollfd,
posix.EPOLL_CTL_ADD, posix.EPOLL_CTL_ADD,
self.os_data.final_eventfd, self.os_data.final_eventfd,
&self.os_data.final_eventfd_event, &self.os_data.final_eventfd_event,
); );
self.os_data.fs_thread = try os.spawnThread(self, posixFsRun);
errdefer {
self.posixFsRequest(&self.os_data.fs_end_request);
self.os_data.fs_thread.wait();
}
var extra_thread_index: usize = 0; var extra_thread_index: usize = 0;
errdefer { errdefer {
// writing 8 bytes to an eventfd cannot fail // writing 8 bytes to an eventfd cannot fail
std.os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable; os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
while (extra_thread_index != 0) { while (extra_thread_index != 0) {
extra_thread_index -= 1; extra_thread_index -= 1;
self.extra_threads[extra_thread_index].wait(); self.extra_threads[extra_thread_index].wait();
} }
} }
while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) { while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun); self.extra_threads[extra_thread_index] = try os.spawnThread(self, workerRun);
} }
}, },
builtin.Os.macosx => { builtin.Os.macosx => {
self.os_data.kqfd = try std.os.bsdKQueue(); self.os_data.kqfd = try os.bsdKQueue();
errdefer std.os.close(self.os_data.kqfd); errdefer os.close(self.os_data.kqfd);
self.os_data.kevents = try self.allocator.alloc(posix.Kevent, extra_thread_count); self.os_data.fs_kqfd = try os.bsdKQueue();
errdefer self.allocator.free(self.os_data.kevents); errdefer os.close(self.os_data.fs_kqfd);
const eventlist = ([*]posix.Kevent)(undefined)[0..0]; self.os_data.fs_queue = std.atomic.Queue(fs.Request).init();
// we need another thread for the file system because Darwin does not have an async
// file system I/O API.
self.os_data.fs_end_request = fs.RequestNode{
.prev = undefined,
.next = undefined,
.data = fs.Request{
.msg = fs.Request.Msg.End,
.finish = fs.Request.Finish.NoAction,
},
};
const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
for (self.eventfd_resume_nodes) |*eventfd_node, i| { for (self.eventfd_resume_nodes) |*eventfd_node, i| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{ eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
@ -191,18 +240,9 @@ pub const Loop = struct {
}; };
self.available_eventfd_resume_nodes.push(eventfd_node); self.available_eventfd_resume_nodes.push(eventfd_node);
const kevent_array = (*[1]posix.Kevent)(&eventfd_node.data.kevent); const kevent_array = (*[1]posix.Kevent)(&eventfd_node.data.kevent);
_ = try std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null); _ = try os.bsdKEvent(self.os_data.kqfd, kevent_array, empty_kevs, null);
eventfd_node.data.kevent.flags = posix.EV_CLEAR | posix.EV_ENABLE; eventfd_node.data.kevent.flags = posix.EV_CLEAR | posix.EV_ENABLE;
eventfd_node.data.kevent.fflags = posix.NOTE_TRIGGER; eventfd_node.data.kevent.fflags = posix.NOTE_TRIGGER;
// this one is for waiting for events
self.os_data.kevents[i] = posix.Kevent{
.ident = i,
.filter = posix.EVFILT_USER,
.flags = 0,
.fflags = 0,
.data = 0,
.udata = @ptrToInt(&eventfd_node.data.base),
};
} }
// Pre-add so that we cannot get error.SystemResources // Pre-add so that we cannot get error.SystemResources
@ -215,31 +255,55 @@ pub const Loop = struct {
.data = 0, .data = 0,
.udata = @ptrToInt(&self.final_resume_node), .udata = @ptrToInt(&self.final_resume_node),
}; };
const kevent_array = (*[1]posix.Kevent)(&self.os_data.final_kevent); const final_kev_arr = (*[1]posix.Kevent)(&self.os_data.final_kevent);
_ = try std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null); _ = try os.bsdKEvent(self.os_data.kqfd, final_kev_arr, empty_kevs, null);
self.os_data.final_kevent.flags = posix.EV_ENABLE; self.os_data.final_kevent.flags = posix.EV_ENABLE;
self.os_data.final_kevent.fflags = posix.NOTE_TRIGGER; self.os_data.final_kevent.fflags = posix.NOTE_TRIGGER;
self.os_data.fs_kevent_wake = posix.Kevent{
.ident = 0,
.filter = posix.EVFILT_USER,
.flags = posix.EV_ADD | posix.EV_ENABLE,
.fflags = posix.NOTE_TRIGGER,
.data = 0,
.udata = undefined,
};
self.os_data.fs_kevent_wait = posix.Kevent{
.ident = 0,
.filter = posix.EVFILT_USER,
.flags = posix.EV_ADD | posix.EV_CLEAR,
.fflags = 0,
.data = 0,
.udata = undefined,
};
self.os_data.fs_thread = try os.spawnThread(self, posixFsRun);
errdefer {
self.posixFsRequest(&self.os_data.fs_end_request);
self.os_data.fs_thread.wait();
}
var extra_thread_index: usize = 0; var extra_thread_index: usize = 0;
errdefer { errdefer {
_ = std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null) catch unreachable; _ = os.bsdKEvent(self.os_data.kqfd, final_kev_arr, empty_kevs, null) catch unreachable;
while (extra_thread_index != 0) { while (extra_thread_index != 0) {
extra_thread_index -= 1; extra_thread_index -= 1;
self.extra_threads[extra_thread_index].wait(); self.extra_threads[extra_thread_index].wait();
} }
} }
while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) { while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun); self.extra_threads[extra_thread_index] = try os.spawnThread(self, workerRun);
} }
}, },
builtin.Os.windows => { builtin.Os.windows => {
self.os_data.io_port = try std.os.windowsCreateIoCompletionPort( self.os_data.io_port = try os.windowsCreateIoCompletionPort(
windows.INVALID_HANDLE_VALUE, windows.INVALID_HANDLE_VALUE,
null, null,
undefined, undefined,
undefined, @maxValue(windows.DWORD),
); );
errdefer std.os.close(self.os_data.io_port); errdefer os.close(self.os_data.io_port);
for (self.eventfd_resume_nodes) |*eventfd_node, i| { for (self.eventfd_resume_nodes) |*eventfd_node, i| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{ eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
@ -262,7 +326,7 @@ pub const Loop = struct {
while (i < extra_thread_index) : (i += 1) { while (i < extra_thread_index) : (i += 1) {
while (true) { while (true) {
const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1); const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1);
std.os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue; os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue;
break; break;
} }
} }
@ -272,7 +336,7 @@ pub const Loop = struct {
} }
} }
while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) { while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun); self.extra_threads[extra_thread_index] = try os.spawnThread(self, workerRun);
} }
}, },
else => {}, else => {},
@ -282,65 +346,115 @@ pub const Loop = struct {
fn deinitOsData(self: *Loop) void { fn deinitOsData(self: *Loop) void {
switch (builtin.os) { switch (builtin.os) {
builtin.Os.linux => { builtin.Os.linux => {
std.os.close(self.os_data.final_eventfd); os.close(self.os_data.final_eventfd);
while (self.available_eventfd_resume_nodes.pop()) |node| std.os.close(node.data.eventfd); while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
std.os.close(self.os_data.epollfd); os.close(self.os_data.epollfd);
self.allocator.free(self.eventfd_resume_nodes); self.allocator.free(self.eventfd_resume_nodes);
}, },
builtin.Os.macosx => { builtin.Os.macosx => {
self.allocator.free(self.os_data.kevents); os.close(self.os_data.kqfd);
std.os.close(self.os_data.kqfd); os.close(self.os_data.fs_kqfd);
}, },
builtin.Os.windows => { builtin.Os.windows => {
std.os.close(self.os_data.io_port); os.close(self.os_data.io_port);
}, },
else => {}, else => {},
} }
} }
/// resume_node must live longer than the promise that it holds a reference to. /// resume_node must live longer than the promise that it holds a reference to.
pub fn addFd(self: *Loop, fd: i32, resume_node: *ResumeNode) !void { /// flags must contain EPOLLET
_ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); pub fn linuxAddFd(self: *Loop, fd: i32, resume_node: *ResumeNode, flags: u32) !void {
errdefer { assert(flags & posix.EPOLLET == posix.EPOLLET);
self.finishOneEvent(); self.beginOneEvent();
} errdefer self.finishOneEvent();
try self.modFd( try self.linuxModFd(
fd, fd,
posix.EPOLL_CTL_ADD, posix.EPOLL_CTL_ADD,
std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | std.os.linux.EPOLLET, flags,
resume_node, resume_node,
); );
} }
pub fn modFd(self: *Loop, fd: i32, op: u32, events: u32, resume_node: *ResumeNode) !void { pub fn linuxModFd(self: *Loop, fd: i32, op: u32, flags: u32, resume_node: *ResumeNode) !void {
var ev = std.os.linux.epoll_event{ assert(flags & posix.EPOLLET == posix.EPOLLET);
.events = events, var ev = os.linux.epoll_event{
.data = std.os.linux.epoll_data{ .ptr = @ptrToInt(resume_node) }, .events = flags,
.data = os.linux.epoll_data{ .ptr = @ptrToInt(resume_node) },
}; };
try std.os.linuxEpollCtl(self.os_data.epollfd, op, fd, &ev); try os.linuxEpollCtl(self.os_data.epollfd, op, fd, &ev);
} }
pub fn removeFd(self: *Loop, fd: i32) void { pub fn linuxRemoveFd(self: *Loop, fd: i32) void {
self.removeFdNoCounter(fd); os.linuxEpollCtl(self.os_data.epollfd, os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
self.finishOneEvent(); self.finishOneEvent();
} }
fn removeFdNoCounter(self: *Loop, fd: i32) void { pub async fn linuxWaitFd(self: *Loop, fd: i32, flags: u32) !void {
std.os.linuxEpollCtl(self.os_data.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {}; defer self.linuxRemoveFd(fd);
}
pub async fn waitFd(self: *Loop, fd: i32) !void {
defer self.removeFd(fd);
suspend { suspend {
// TODO explicitly put this memory in the coroutine frame #1194 // TODO explicitly put this memory in the coroutine frame #1194
var resume_node = ResumeNode{ var resume_node = ResumeNode.Basic{
.base = ResumeNode{
.id = ResumeNode.Id.Basic, .id = ResumeNode.Id.Basic,
.handle = @handle(), .handle = @handle(),
},
}; };
try self.addFd(fd, &resume_node); try self.linuxAddFd(fd, &resume_node.base, flags);
} }
} }
pub async fn bsdWaitKev(self: *Loop, ident: usize, filter: i16, fflags: u32) !posix.Kevent {
// TODO #1194
suspend {
resume @handle();
}
var resume_node = ResumeNode.Basic{
.base = ResumeNode{
.id = ResumeNode.Id.Basic,
.handle = @handle(),
},
.kev = undefined,
};
defer self.bsdRemoveKev(ident, filter);
suspend {
try self.bsdAddKev(&resume_node, ident, filter, fflags);
}
return resume_node.kev;
}
/// resume_node must live longer than the promise that it holds a reference to.
pub fn bsdAddKev(self: *Loop, resume_node: *ResumeNode.Basic, ident: usize, filter: i16, fflags: u32) !void {
self.beginOneEvent();
errdefer self.finishOneEvent();
var kev = posix.Kevent{
.ident = ident,
.filter = filter,
.flags = posix.EV_ADD | posix.EV_ENABLE | posix.EV_CLEAR,
.fflags = fflags,
.data = 0,
.udata = @ptrToInt(&resume_node.base),
};
const kevent_array = (*[1]posix.Kevent)(&kev);
const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
_ = try os.bsdKEvent(self.os_data.kqfd, kevent_array, empty_kevs, null);
}
pub fn bsdRemoveKev(self: *Loop, ident: usize, filter: i16) void {
var kev = posix.Kevent{
.ident = ident,
.filter = filter,
.flags = posix.EV_DELETE,
.fflags = 0,
.data = 0,
.udata = 0,
};
const kevent_array = (*[1]posix.Kevent)(&kev);
const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
_ = os.bsdKEvent(self.os_data.kqfd, kevent_array, empty_kevs, null) catch undefined;
self.finishOneEvent();
}
fn dispatch(self: *Loop) void { fn dispatch(self: *Loop) void {
while (self.available_eventfd_resume_nodes.pop()) |resume_stack_node| { while (self.available_eventfd_resume_nodes.pop()) |resume_stack_node| {
const next_tick_node = self.next_tick_queue.get() orelse { const next_tick_node = self.next_tick_queue.get() orelse {
@ -352,8 +466,8 @@ pub const Loop = struct {
switch (builtin.os) { switch (builtin.os) {
builtin.Os.macosx => { builtin.Os.macosx => {
const kevent_array = (*[1]posix.Kevent)(&eventfd_node.kevent); const kevent_array = (*[1]posix.Kevent)(&eventfd_node.kevent);
const eventlist = ([*]posix.Kevent)(undefined)[0..0]; const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
_ = std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null) catch { _ = os.bsdKEvent(self.os_data.kqfd, kevent_array, empty_kevs, null) catch {
self.next_tick_queue.unget(next_tick_node); self.next_tick_queue.unget(next_tick_node);
self.available_eventfd_resume_nodes.push(resume_stack_node); self.available_eventfd_resume_nodes.push(resume_stack_node);
return; return;
@ -361,9 +475,9 @@ pub const Loop = struct {
}, },
builtin.Os.linux => { builtin.Os.linux => {
// the pending count is already accounted for // the pending count is already accounted for
const epoll_events = posix.EPOLLONESHOT | std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | const epoll_events = posix.EPOLLONESHOT | os.linux.EPOLLIN | os.linux.EPOLLOUT |
std.os.linux.EPOLLET; os.linux.EPOLLET;
self.modFd( self.linuxModFd(
eventfd_node.eventfd, eventfd_node.eventfd,
eventfd_node.epoll_op, eventfd_node.epoll_op,
epoll_events, epoll_events,
@ -379,7 +493,7 @@ pub const Loop = struct {
// the consumer code can decide whether to read the completion key. // the consumer code can decide whether to read the completion key.
// it has to do this for normal I/O, so we match that behavior here. // it has to do this for normal I/O, so we match that behavior here.
const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1); const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1);
std.os.windowsPostQueuedCompletionStatus( os.windowsPostQueuedCompletionStatus(
self.os_data.io_port, self.os_data.io_port,
undefined, undefined,
eventfd_node.completion_key, eventfd_node.completion_key,
@ -397,15 +511,29 @@ pub const Loop = struct {
/// Bring your own linked list node. This means it can't fail. /// Bring your own linked list node. This means it can't fail.
pub fn onNextTick(self: *Loop, node: *NextTickNode) void { pub fn onNextTick(self: *Loop, node: *NextTickNode) void {
_ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); self.beginOneEvent(); // finished in dispatch()
self.next_tick_queue.put(node); self.next_tick_queue.put(node);
self.dispatch(); self.dispatch();
} }
pub fn cancelOnNextTick(self: *Loop, node: *NextTickNode) void {
if (self.next_tick_queue.remove(node)) {
self.finishOneEvent();
}
}
pub fn run(self: *Loop) void { pub fn run(self: *Loop) void {
self.finishOneEvent(); // the reference we start with self.finishOneEvent(); // the reference we start with
self.workerRun(); self.workerRun();
switch (builtin.os) {
builtin.Os.linux,
builtin.Os.macosx,
=> self.os_data.fs_thread.wait(),
else => {},
}
for (self.extra_threads) |extra_thread| { for (self.extra_threads) |extra_thread| {
extra_thread.wait(); extra_thread.wait();
} }
@ -420,6 +548,7 @@ pub const Loop = struct {
suspend { suspend {
handle.* = @handle(); handle.* = @handle();
var my_tick_node = Loop.NextTickNode{ var my_tick_node = Loop.NextTickNode{
.prev = undefined,
.next = undefined, .next = undefined,
.data = @handle(), .data = @handle(),
}; };
@ -441,6 +570,7 @@ pub const Loop = struct {
pub async fn yield(self: *Loop) void { pub async fn yield(self: *Loop) void {
suspend { suspend {
var my_tick_node = Loop.NextTickNode{ var my_tick_node = Loop.NextTickNode{
.prev = undefined,
.next = undefined, .next = undefined,
.data = @handle(), .data = @handle(),
}; };
@ -448,20 +578,28 @@ pub const Loop = struct {
} }
} }
fn finishOneEvent(self: *Loop) void { /// call finishOneEvent when done
if (@atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst) == 1) { pub fn beginOneEvent(self: *Loop) void {
_ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
}
pub fn finishOneEvent(self: *Loop) void {
const prev = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
if (prev == 1) {
// cause all the threads to stop // cause all the threads to stop
switch (builtin.os) { switch (builtin.os) {
builtin.Os.linux => { builtin.Os.linux => {
self.posixFsRequest(&self.os_data.fs_end_request);
// writing 8 bytes to an eventfd cannot fail // writing 8 bytes to an eventfd cannot fail
std.os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable; os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
return; return;
}, },
builtin.Os.macosx => { builtin.Os.macosx => {
self.posixFsRequest(&self.os_data.fs_end_request);
const final_kevent = (*[1]posix.Kevent)(&self.os_data.final_kevent); const final_kevent = (*[1]posix.Kevent)(&self.os_data.final_kevent);
const eventlist = ([*]posix.Kevent)(undefined)[0..0]; const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
// cannot fail because we already added it and this just enables it // cannot fail because we already added it and this just enables it
_ = std.os.bsdKEvent(self.os_data.kqfd, final_kevent, eventlist, null) catch unreachable; _ = os.bsdKEvent(self.os_data.kqfd, final_kevent, empty_kevs, null) catch unreachable;
return; return;
}, },
builtin.Os.windows => { builtin.Os.windows => {
@ -469,7 +607,7 @@ pub const Loop = struct {
while (i < self.extra_threads.len + 1) : (i += 1) { while (i < self.extra_threads.len + 1) : (i += 1) {
while (true) { while (true) {
const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1); const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1);
std.os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue; os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue;
break; break;
} }
} }
@ -492,8 +630,8 @@ pub const Loop = struct {
switch (builtin.os) { switch (builtin.os) {
builtin.Os.linux => { builtin.Os.linux => {
// only process 1 event so we don't steal from other threads // only process 1 event so we don't steal from other threads
var events: [1]std.os.linux.epoll_event = undefined; var events: [1]os.linux.epoll_event = undefined;
const count = std.os.linuxEpollWait(self.os_data.epollfd, events[0..], -1); const count = os.linuxEpollWait(self.os_data.epollfd, events[0..], -1);
for (events[0..count]) |ev| { for (events[0..count]) |ev| {
const resume_node = @intToPtr(*ResumeNode, ev.data.ptr); const resume_node = @intToPtr(*ResumeNode, ev.data.ptr);
const handle = resume_node.handle; const handle = resume_node.handle;
@ -516,13 +654,17 @@ pub const Loop = struct {
}, },
builtin.Os.macosx => { builtin.Os.macosx => {
var eventlist: [1]posix.Kevent = undefined; var eventlist: [1]posix.Kevent = undefined;
const count = std.os.bsdKEvent(self.os_data.kqfd, self.os_data.kevents, eventlist[0..], null) catch unreachable; const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
const count = os.bsdKEvent(self.os_data.kqfd, empty_kevs, eventlist[0..], null) catch unreachable;
for (eventlist[0..count]) |ev| { for (eventlist[0..count]) |ev| {
const resume_node = @intToPtr(*ResumeNode, ev.udata); const resume_node = @intToPtr(*ResumeNode, ev.udata);
const handle = resume_node.handle; const handle = resume_node.handle;
const resume_node_id = resume_node.id; const resume_node_id = resume_node.id;
switch (resume_node_id) { switch (resume_node_id) {
ResumeNode.Id.Basic => {}, ResumeNode.Id.Basic => {
const basic_node = @fieldParentPtr(ResumeNode.Basic, "base", resume_node);
basic_node.kev = ev;
},
ResumeNode.Id.Stop => return, ResumeNode.Id.Stop => return,
ResumeNode.Id.EventFd => { ResumeNode.Id.EventFd => {
const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node); const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
@ -541,9 +683,10 @@ pub const Loop = struct {
while (true) { while (true) {
var nbytes: windows.DWORD = undefined; var nbytes: windows.DWORD = undefined;
var overlapped: ?*windows.OVERLAPPED = undefined; var overlapped: ?*windows.OVERLAPPED = undefined;
switch (std.os.windowsGetQueuedCompletionStatus(self.os_data.io_port, &nbytes, &completion_key, &overlapped, windows.INFINITE)) { switch (os.windowsGetQueuedCompletionStatus(self.os_data.io_port, &nbytes, &completion_key, &overlapped, windows.INFINITE)) {
std.os.WindowsWaitResult.Aborted => return, os.WindowsWaitResult.Aborted => return,
std.os.WindowsWaitResult.Normal => {}, os.WindowsWaitResult.Normal => {},
os.WindowsWaitResult.Cancelled => continue,
} }
if (overlapped != null) break; if (overlapped != null) break;
} }
@ -560,21 +703,101 @@ pub const Loop = struct {
}, },
} }
resume handle; resume handle;
if (resume_node_id == ResumeNode.Id.EventFd) {
self.finishOneEvent(); self.finishOneEvent();
}
}, },
else => @compileError("unsupported OS"), else => @compileError("unsupported OS"),
} }
} }
} }
const OsData = switch (builtin.os) { fn posixFsRequest(self: *Loop, request_node: *fs.RequestNode) void {
builtin.Os.linux => struct { self.beginOneEvent(); // finished in posixFsRun after processing the msg
epollfd: i32, self.os_data.fs_queue.put(request_node);
final_eventfd: i32, switch (builtin.os) {
final_eventfd_event: std.os.linux.epoll_event, builtin.Os.macosx => {
const fs_kevs = (*[1]posix.Kevent)(&self.os_data.fs_kevent_wake);
const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
_ = os.bsdKEvent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable;
}, },
builtin.Os.linux => {
_ = @atomicRmw(u8, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
const rc = os.linux.futex_wake(@ptrToInt(&self.os_data.fs_queue_item), os.linux.FUTEX_WAKE, 1);
switch (os.linux.getErrno(rc)) {
0 => {},
posix.EINVAL => unreachable,
else => unreachable,
}
},
else => @compileError("Unsupported OS"),
}
}
fn posixFsCancel(self: *Loop, request_node: *fs.RequestNode) void {
if (self.os_data.fs_queue.remove(request_node)) {
self.finishOneEvent();
}
}
fn posixFsRun(self: *Loop) void {
while (true) {
if (builtin.os == builtin.Os.linux) {
_ = @atomicRmw(u8, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
}
while (self.os_data.fs_queue.get()) |node| {
switch (node.data.msg) {
@TagType(fs.Request.Msg).End => return,
@TagType(fs.Request.Msg).PWriteV => |*msg| {
msg.result = os.posix_pwritev(msg.fd, msg.iov.ptr, msg.iov.len, msg.offset);
},
@TagType(fs.Request.Msg).PReadV => |*msg| {
msg.result = os.posix_preadv(msg.fd, msg.iov.ptr, msg.iov.len, msg.offset);
},
@TagType(fs.Request.Msg).Open => |*msg| {
msg.result = os.posixOpenC(msg.path.ptr, msg.flags, msg.mode);
},
@TagType(fs.Request.Msg).Close => |*msg| os.close(msg.fd),
@TagType(fs.Request.Msg).WriteFile => |*msg| blk: {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT |
posix.O_CLOEXEC | posix.O_TRUNC;
const fd = os.posixOpenC(msg.path.ptr, flags, msg.mode) catch |err| {
msg.result = err;
break :blk;
};
defer os.close(fd);
msg.result = os.posixWrite(fd, msg.contents);
},
}
switch (node.data.finish) {
@TagType(fs.Request.Finish).TickNode => |*tick_node| self.onNextTick(tick_node),
@TagType(fs.Request.Finish).DeallocCloseOperation => |close_op| {
self.allocator.destroy(close_op);
},
@TagType(fs.Request.Finish).NoAction => {},
}
self.finishOneEvent();
}
switch (builtin.os) {
builtin.Os.linux => {
const rc = os.linux.futex_wait(@ptrToInt(&self.os_data.fs_queue_item), os.linux.FUTEX_WAIT, 0, null);
switch (os.linux.getErrno(rc)) {
0 => continue,
posix.EINTR => continue,
posix.EAGAIN => continue,
else => unreachable,
}
},
builtin.Os.macosx => {
const fs_kevs = (*[1]posix.Kevent)(&self.os_data.fs_kevent_wait);
var out_kevs: [1]posix.Kevent = undefined;
_ = os.bsdKEvent(self.os_data.fs_kqfd, fs_kevs, out_kevs[0..], null) catch unreachable;
},
else => @compileError("Unsupported OS"),
}
}
}
const OsData = switch (builtin.os) {
builtin.Os.linux => LinuxOsData,
builtin.Os.macosx => MacOsData, builtin.Os.macosx => MacOsData,
builtin.Os.windows => struct { builtin.Os.windows => struct {
io_port: windows.HANDLE, io_port: windows.HANDLE,
@ -586,7 +809,22 @@ pub const Loop = struct {
const MacOsData = struct { const MacOsData = struct {
kqfd: i32, kqfd: i32,
final_kevent: posix.Kevent, final_kevent: posix.Kevent,
kevents: []posix.Kevent, fs_kevent_wake: posix.Kevent,
fs_kevent_wait: posix.Kevent,
fs_thread: *os.Thread,
fs_kqfd: i32,
fs_queue: std.atomic.Queue(fs.Request),
fs_end_request: fs.RequestNode,
};
const LinuxOsData = struct {
epollfd: i32,
final_eventfd: i32,
final_eventfd_event: os.linux.epoll_event,
fs_thread: *os.Thread,
fs_queue_item: u8,
fs_queue: std.atomic.Queue(fs.Request),
fs_end_request: fs.RequestNode,
}; };
}; };

296
std/event/rwlock.zig Normal file
View File

@ -0,0 +1,296 @@
const std = @import("../index.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
const Loop = std.event.Loop;
/// Thread-safe async/await lock.
/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
/// are resumed when the lock is released, in order.
/// Many readers can hold the lock at the same time; however locking for writing is exclusive.
/// When a read lock is held, it will not be released until the reader queue is empty.
/// When a write lock is held, it will not be released until the writer queue is empty.
pub const RwLock = struct {
// Event loop used to schedule resumption of waiting coroutines via onNextTick().
loop: *Loop,
shared_state: u8, // TODO make this an enum
writer_queue: Queue,
reader_queue: Queue,
writer_queue_empty_bit: u8, // TODO make this a bool
reader_queue_empty_bit: u8, // TODO make this a bool
// Number of readers that have requested (or currently hold) the read lock.
reader_lock_count: usize,
// Values stored in shared_state; kept as plain integers in a u8 so the
// atomic builtins (@atomicRmw / @cmpxchgStrong) can operate on it directly.
const State = struct {
const Unlocked = 0;
const WriteLock = 1;
const ReadLock = 2;
};
// FIFO of suspended coroutine handles waiting to acquire the lock.
const Queue = std.atomic.Queue(promise);
/// Handle returned by acquireRead(). Call release() exactly once when done reading.
pub const HeldRead = struct {
lock: *RwLock,
pub fn release(self: HeldRead) void {
// If other readers still hold the lock, we're done.
if (@atomicRmw(usize, &self.lock.reader_lock_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst) != 1) {
return;
}
// We were the last reader: flag the reader queue as empty, then try to
// transition ReadLock -> Unlocked.
_ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
if (@cmpxchgStrong(u8, &self.lock.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
}
self.lock.commonPostUnlock();
}
};
/// Handle returned by acquireWrite(). Call release() exactly once when done writing.
pub const HeldWrite = struct {
lock: *RwLock,
pub fn release(self: HeldWrite) void {
// See if we can leave it locked for writing, and pass the lock to the next writer
// in the queue to grab the lock.
if (self.lock.writer_queue.get()) |node| {
self.lock.loop.onNextTick(node);
return;
}
// We need to release the write lock. Check if any readers are waiting to grab the lock.
if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
// Switch to a read lock.
_ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.ReadLock, AtomicOrder.SeqCst);
// Wake every queued reader; they all share the read lock.
while (self.lock.reader_queue.get()) |node| {
self.lock.loop.onNextTick(node);
}
return;
}
// No waiters observed: mark the writer queue empty and fully unlock.
_ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
_ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst);
self.lock.commonPostUnlock();
}
};
/// Initialize an unlocked RwLock bound to the given event loop.
pub fn init(loop: *Loop) RwLock {
return RwLock{
.loop = loop,
.shared_state = State.Unlocked,
.writer_queue = Queue.init(),
.writer_queue_empty_bit = 1,
.reader_queue = Queue.init(),
.reader_queue_empty_bit = 1,
.reader_lock_count = 0,
};
}
/// Must be called when not locked. Not thread safe.
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *RwLock) void {
assert(self.shared_state == State.Unlocked);
while (self.writer_queue.get()) |node| cancel node.data;
while (self.reader_queue.get()) |node| cancel node.data;
}
/// Suspends until a read lock is held. Many readers may hold the lock at once.
pub async fn acquireRead(self: *RwLock) HeldRead {
// Register ourselves as a reader before enqueueing, so release() accounting is correct.
_ = @atomicRmw(usize, &self.reader_lock_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
suspend {
// TODO explicitly put this memory in the coroutine frame #1194
var my_tick_node = Loop.NextTickNode{
.data = @handle(),
.prev = undefined,
.next = undefined,
};
self.reader_queue.put(&my_tick_node);
// At this point, we are in the reader_queue, so we might have already been resumed and this coroutine
// frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame.
// We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1,
// some actor will attempt to grab the lock.
_ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
// Here we don't care if we are the one to do the locking or if it was already locked for reading.
const have_read_lock = if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |old_state| old_state == State.ReadLock else true;
if (have_read_lock) {
// Give out all the read locks.
if (self.reader_queue.get()) |first_node| {
while (self.reader_queue.get()) |node| {
self.loop.onNextTick(node);
}
// Tail-resume the first waiter directly (which may be ourselves).
resume first_node.data;
}
}
}
return HeldRead{ .lock = self };
}
/// Suspends until an exclusive write lock is held.
pub async fn acquireWrite(self: *RwLock) HeldWrite {
suspend {
// TODO explicitly put this memory in the coroutine frame #1194
var my_tick_node = Loop.NextTickNode{
.data = @handle(),
.prev = undefined,
.next = undefined,
};
self.writer_queue.put(&my_tick_node);
// At this point, we are in the writer_queue, so we might have already been resumed and this coroutine
// frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame.
// We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1,
// some actor will attempt to grab the lock.
_ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
// Here we must be the one to acquire the write lock. It cannot already be locked.
if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null) {
// We now have a write lock.
if (self.writer_queue.get()) |node| {
// Whether this node is us or someone else, we tail resume it.
resume node.data;
}
}
}
return HeldWrite{ .lock = self };
}
// After an unlock, re-check both queues: a waiter may have been enqueued
// concurrently with the unlock, in which case this actor is responsible for
// re-acquiring the lock and handing it off. Writers are preferred over readers.
fn commonPostUnlock(self: *RwLock) void {
while (true) {
// There might be a writer_queue item or a reader_queue item
// If we check and both are empty, we can be done, because the other actors will try to
// obtain the lock.
// But if there's a writer_queue item or a reader_queue item,
// we are the actor which must loop and attempt to grab the lock again.
if (@atomicLoad(u8, &self.writer_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
}
// If there's an item in the writer queue, give them the lock, and we're done.
if (self.writer_queue.get()) |node| {
self.loop.onNextTick(node);
return;
}
// Release the lock again.
_ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
_ = @atomicRmw(u8, &self.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst);
continue;
}
if (@atomicLoad(u8, &self.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
}
// If there are any items in the reader queue, give out all the reader locks, and we're done.
if (self.reader_queue.get()) |first_node| {
self.loop.onNextTick(first_node);
while (self.reader_queue.get()) |node| {
self.loop.onNextTick(node);
}
return;
}
// Release the lock again.
_ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
if (@cmpxchgStrong(u8, &self.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
}
continue;
}
return;
}
}
};
// Integration test: run many concurrent readers and a batch of writers against
// one RwLock on a multi-threaded event loop, then verify that every element of
// the shared array was incremented exactly shared_it_count * len times in total.
test "std.event.RwLock" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
const allocator = &da.allocator;
var loop: Loop = undefined;
try loop.initMultiThreaded(allocator);
defer loop.deinit();
var lock = RwLock.init(&loop);
defer lock.deinit();
// Start the driver coroutine; loop.run() blocks until all work completes.
const handle = try async<allocator> testLock(&loop, &lock);
defer cancel handle;
loop.run();
// Each writer pass adds 1 to every slot; shared_it_count writers each do
// shared_test_data.len passes.
const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
assert(mem.eql(i32, shared_test_data, expected_result));
}
// Driver coroutine: spawns 100 reader coroutines and shared_it_count writer
// coroutines, schedules them all on the loop, then awaits their completion.
async fn testLock(loop: *Loop, lock: *RwLock) void {
// TODO explicitly put next tick node memory in the coroutine frame #1194
// Immediately self-resume so the caller's `async` returns and loop.run() proceeds.
suspend {
resume @handle();
}
var read_nodes: [100]Loop.NextTickNode = undefined;
for (read_nodes) |*read_node| {
read_node.data = async readRunner(lock) catch @panic("out of memory");
loop.onNextTick(read_node);
}
var write_nodes: [shared_it_count]Loop.NextTickNode = undefined;
for (write_nodes) |*write_node| {
write_node.data = async writeRunner(lock) catch @panic("out of memory");
loop.onNextTick(write_node);
}
// Await writers first, then readers; node.data holds each coroutine's promise.
for (write_nodes) |*write_node| {
await @ptrCast(promise->void, write_node.data);
}
for (read_nodes) |*read_node| {
await @ptrCast(promise->void, read_node.data);
}
}
// Shared mutable state exercised by the RwLock test. Writers mutate it under
// the write lock; readers assert invariants about it under the read lock.
const shared_it_count = 10;
var shared_test_data = [1]i32{0} ** 10;
var shared_test_index: usize = 0;
var shared_count: usize = 0;
// Writer coroutine: under the write lock, bumps shared_count and increments
// every element of shared_test_data by 1, repeating shared_test_data.len times.
async fn writeRunner(lock: *RwLock) void {
suspend; // resumed by onNextTick
var i: usize = 0;
while (i < shared_test_data.len) : (i += 1) {
// Small sleep to encourage interleaving with the reader coroutines.
std.os.time.sleep(0, 100000);
const lock_promise = async lock.acquireWrite() catch @panic("out of memory");
const handle = await lock_promise;
defer handle.release();
shared_count += 1;
// Increment every slot; leaving shared_test_index at 0 afterwards is the
// invariant readRunner checks under the read lock.
while (shared_test_index < shared_test_data.len) : (shared_test_index += 1) {
shared_test_data[shared_test_index] = shared_test_data[shared_test_index] + 1;
}
shared_test_index = 0;
}
}
// Reader coroutine: under the read lock, checks that writers never leave the
// shared state half-updated (index reset to 0, every visited slot equal to the
// number of completed writer passes).
async fn readRunner(lock: *RwLock) void {
suspend; // resumed by onNextTick
std.os.time.sleep(0, 1);
var i: usize = 0;
while (i < shared_test_data.len) : (i += 1) {
const lock_promise = async lock.acquireRead() catch @panic("out of memory");
const handle = await lock_promise;
defer handle.release();
assert(shared_test_index == 0);
assert(shared_test_data[i] == @intCast(i32, shared_count));
}
}

58
std/event/rwlocked.zig Normal file
View File

@ -0,0 +1,58 @@
const std = @import("../index.zig");
const RwLock = std.event.RwLock;
const Loop = std.event.Loop;
/// Thread-safe async/await RW lock that protects one piece of data.
/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
/// are resumed when the lock is released, in order.
/// Returns a wrapper type that pairs a value of type T with an RwLock guarding it.
/// Acquiring yields a handle exposing a pointer to the data: *const T for readers,
/// *T for writers. The pointer is only valid until release() is called on the handle.
pub fn RwLocked(comptime T: type) type {
return struct {
lock: RwLock,
locked_data: T,
const Self = this;
/// Read access handle; holds the underlying RwLock.HeldRead.
pub const HeldReadLock = struct {
value: *const T,
held: RwLock.HeldRead,
pub fn release(self: HeldReadLock) void {
self.held.release();
}
};
/// Write access handle; holds the underlying RwLock.HeldWrite.
pub const HeldWriteLock = struct {
value: *T,
held: RwLock.HeldWrite,
pub fn release(self: HeldWriteLock) void {
self.held.release();
}
};
pub fn init(loop: *Loop, data: T) Self {
return Self{
.lock = RwLock.init(loop),
.locked_data = data,
};
}
/// Same restrictions as RwLock.deinit: must be unlocked, not thread safe.
pub fn deinit(self: *Self) void {
self.lock.deinit();
}
/// Suspends until shared read access is granted.
pub async fn acquireRead(self: *Self) HeldReadLock {
return HeldReadLock{
.held = await (async self.lock.acquireRead() catch unreachable),
.value = &self.locked_data,
};
}
/// Suspends until exclusive write access is granted.
pub async fn acquireWrite(self: *Self) HeldWriteLock {
return HeldWriteLock{
.held = await (async self.lock.acquireWrite() catch unreachable),
.value = &self.locked_data,
};
}
};
}

View File

@ -55,13 +55,13 @@ pub const Server = struct {
errdefer cancel self.accept_coro.?; errdefer cancel self.accept_coro.?;
self.listen_resume_node.handle = self.accept_coro.?; self.listen_resume_node.handle = self.accept_coro.?;
try self.loop.addFd(sockfd, &self.listen_resume_node); try self.loop.linuxAddFd(sockfd, &self.listen_resume_node, posix.EPOLLIN | posix.EPOLLOUT | posix.EPOLLET);
errdefer self.loop.removeFd(sockfd); errdefer self.loop.removeFd(sockfd);
} }
/// Stop listening /// Stop listening
pub fn close(self: *Server) void { pub fn close(self: *Server) void {
self.loop.removeFd(self.sockfd.?); self.loop.linuxRemoveFd(self.sockfd.?);
std.os.close(self.sockfd.?); std.os.close(self.sockfd.?);
} }
@ -116,7 +116,7 @@ pub async fn connect(loop: *Loop, _address: *const std.net.Address) !std.os.File
errdefer std.os.close(sockfd); errdefer std.os.close(sockfd);
try std.os.posixConnectAsync(sockfd, &address.os_addr); try std.os.posixConnectAsync(sockfd, &address.os_addr);
try await try async loop.waitFd(sockfd); try await try async loop.linuxWaitFd(sockfd, posix.EPOLLIN | posix.EPOLLOUT | posix.EPOLLET);
try std.os.posixGetSockOptConnectError(sockfd); try std.os.posixGetSockOptConnectError(sockfd);
return std.os.File.openHandle(sockfd); return std.os.File.openHandle(sockfd);
@ -181,4 +181,3 @@ async fn doAsyncTest(loop: *Loop, address: *const std.net.Address, server: *Serv
assert(mem.eql(u8, msg, "hello from server\n")); assert(mem.eql(u8, msg, "hello from server\n"));
server.close(); server.close();
} }

View File

@ -253,11 +253,7 @@ fn gethi(in: f64) f64 {
/// Normalize the number by factoring in the error. /// Normalize the number by factoring in the error.
/// @hp: The float pair. /// @hp: The float pair.
fn hpNormalize(hp: *HP) void { fn hpNormalize(hp: *HP) void {
// Required to avoid segfaults causing buffer overrun during errol3 digit output termination.
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const val = hp.val; const val = hp.val;
hp.val += hp.off; hp.val += hp.off;
hp.off += val - hp.val; hp.off += val - hp.val;
} }

View File

@ -146,17 +146,9 @@ pub fn formatType(
builtin.TypeId.Promise => { builtin.TypeId.Promise => {
return format(context, Errors, output, "promise@{x}", @ptrToInt(value)); return format(context, Errors, output, "promise@{x}", @ptrToInt(value));
}, },
builtin.TypeId.Pointer => |ptr_info| switch (ptr_info.size) {
builtin.TypeInfo.Pointer.Size.One => switch (@typeInfo(ptr_info.child)) {
builtin.TypeId.Array => |info| {
if (info.child == u8) {
return formatText(value, fmt, context, Errors, output);
}
return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
},
builtin.TypeId.Enum, builtin.TypeId.Union, builtin.TypeId.Struct => { builtin.TypeId.Enum, builtin.TypeId.Union, builtin.TypeId.Struct => {
const has_cust_fmt = comptime cf: { const has_cust_fmt = comptime cf: {
const info = @typeInfo(T.Child); const info = @typeInfo(T);
const defs = switch (info) { const defs = switch (info) {
builtin.TypeId.Struct => |s| s.defs, builtin.TypeId.Struct => |s| s.defs,
builtin.TypeId.Union => |u| u.defs, builtin.TypeId.Union => |u| u.defs,
@ -173,13 +165,42 @@ pub fn formatType(
}; };
if (has_cust_fmt) return value.format(fmt, context, Errors, output); if (has_cust_fmt) return value.format(fmt, context, Errors, output);
try output(context, @typeName(T));
if (comptime @typeId(T) == builtin.TypeId.Enum) {
try output(context, ".");
try formatType(@tagName(value), "", context, Errors, output);
return;
}
comptime var field_i = 0;
inline while (field_i < @memberCount(T)) : (field_i += 1) {
if (field_i == 0) {
try output(context, "{ .");
} else {
try output(context, ", .");
}
try output(context, @memberName(T, field_i));
try output(context, " = ");
try formatType(@field(value, @memberName(T, field_i)), "", context, Errors, output);
}
try output(context, " }");
return;
},
builtin.TypeId.Pointer => |ptr_info| switch (ptr_info.size) {
builtin.TypeInfo.Pointer.Size.One => switch (@typeInfo(ptr_info.child)) {
builtin.TypeId.Array => |info| {
if (info.child == u8) {
return formatText(value, fmt, context, Errors, output);
}
return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)); return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
}, },
builtin.TypeId.Enum, builtin.TypeId.Union, builtin.TypeId.Struct => {
return formatType(value.*, fmt, context, Errors, output);
},
else => return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)), else => return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)),
}, },
builtin.TypeInfo.Pointer.Size.Many => { builtin.TypeInfo.Pointer.Size.Many => {
if (ptr_info.child == u8) { if (ptr_info.child == u8) {
if (fmt[0] == 's') { if (fmt.len > 0 and fmt[0] == 's') {
const len = std.cstr.len(value); const len = std.cstr.len(value);
return formatText(value[0..len], fmt, context, Errors, output); return formatText(value[0..len], fmt, context, Errors, output);
} }
@ -911,14 +932,21 @@ test "fmt.format" {
try testFmt("file size: 63MiB\n", "file size: {Bi}\n", usize(63 * 1024 * 1024)); try testFmt("file size: 63MiB\n", "file size: {Bi}\n", usize(63 * 1024 * 1024));
try testFmt("file size: 66.06MB\n", "file size: {B2}\n", usize(63 * 1024 * 1024)); try testFmt("file size: 66.06MB\n", "file size: {B2}\n", usize(63 * 1024 * 1024));
{ {
// Dummy field because of https://github.com/ziglang/zig/issues/557.
const Struct = struct { const Struct = struct {
unused: u8, field: u8,
}; };
var buf1: [32]u8 = undefined; const value = Struct{ .field = 42 };
const value = Struct{ .unused = 42 }; try testFmt("struct: Struct{ .field = 42 }\n", "struct: {}\n", value);
const result = try bufPrint(buf1[0..], "pointer: {}\n", &value); try testFmt("struct: Struct{ .field = 42 }\n", "struct: {}\n", &value);
assert(mem.startsWith(u8, result, "pointer: Struct@")); }
{
const Enum = enum {
One,
Two,
};
const value = Enum.Two;
try testFmt("enum: Enum.Two\n", "enum: {}\n", value);
try testFmt("enum: Enum.Two\n", "enum: {}\n", &value);
} }
{ {
var buf1: [32]u8 = undefined; var buf1: [32]u8 = undefined;
@ -941,6 +969,7 @@ test "fmt.format" {
{ {
// This fails on release due to a minor rounding difference. // This fails on release due to a minor rounding difference.
// --release-fast outputs 9.999960000000001e-40 vs. the expected. // --release-fast outputs 9.999960000000001e-40 vs. the expected.
// TODO fix this, it should be the same in Debug and ReleaseFast
if (builtin.mode == builtin.Mode.Debug) { if (builtin.mode == builtin.Mode.Debug) {
var buf1: [32]u8 = undefined; var buf1: [32]u8 = undefined;
const value: f64 = 9.999960e-40; const value: f64 = 9.999960e-40;
@ -1133,24 +1162,24 @@ test "fmt.format" {
y: f32, y: f32,
pub fn format( pub fn format(
self: *SelfType, self: SelfType,
comptime fmt: []const u8, comptime fmt: []const u8,
context: var, context: var,
comptime Errors: type, comptime Errors: type,
output: fn (@typeOf(context), []const u8) Errors!void, output: fn (@typeOf(context), []const u8) Errors!void,
) Errors!void { ) Errors!void {
if (fmt.len > 0) { switch (fmt.len) {
if (fmt.len > 1) unreachable; 0 => return std.fmt.format(context, Errors, output, "({.3},{.3})", self.x, self.y),
switch (fmt[0]) { 1 => switch (fmt[0]) {
//point format //point format
'p' => return std.fmt.format(context, Errors, output, "({.3},{.3})", self.x, self.y), 'p' => return std.fmt.format(context, Errors, output, "({.3},{.3})", self.x, self.y),
//dimension format //dimension format
'd' => return std.fmt.format(context, Errors, output, "{.3}x{.3}", self.x, self.y), 'd' => return std.fmt.format(context, Errors, output, "{.3}x{.3}", self.x, self.y),
else => unreachable, else => unreachable,
},
else => unreachable,
} }
} }
return std.fmt.format(context, Errors, output, "({.3},{.3})", self.x, self.y);
}
}; };
var buf1: [32]u8 = undefined; var buf1: [32]u8 = undefined;
@ -1160,6 +1189,10 @@ test "fmt.format" {
}; };
try testFmt("point: (10.200,2.220)\n", "point: {}\n", &value); try testFmt("point: (10.200,2.220)\n", "point: {}\n", &value);
try testFmt("dim: 10.200x2.220\n", "dim: {d}\n", &value); try testFmt("dim: 10.200x2.220\n", "dim: {d}\n", &value);
// same thing but not passing a pointer
try testFmt("point: (10.200,2.220)\n", "point: {}\n", value);
try testFmt("dim: 10.200x2.220\n", "dim: {d}\n", value);
} }
} }

View File

@ -9,6 +9,10 @@ const builtin = @import("builtin");
const want_modification_safety = builtin.mode != builtin.Mode.ReleaseFast; const want_modification_safety = builtin.mode != builtin.Mode.ReleaseFast;
const debug_u32 = if (want_modification_safety) u32 else void; const debug_u32 = if (want_modification_safety) u32 else void;
pub fn AutoHashMap(comptime K: type, comptime V: type) type {
return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K));
}
pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u32, comptime eql: fn (a: K, b: K) bool) type { pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u32, comptime eql: fn (a: K, b: K) bool) type {
return struct { return struct {
entries: []Entry, entries: []Entry,
@ -20,13 +24,22 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
const Self = this; const Self = this;
pub const Entry = struct { pub const KV = struct {
used: bool,
distance_from_start_index: usize,
key: K, key: K,
value: V, value: V,
}; };
const Entry = struct {
used: bool,
distance_from_start_index: usize,
kv: KV,
};
pub const GetOrPutResult = struct {
kv: *KV,
found_existing: bool,
};
pub const Iterator = struct { pub const Iterator = struct {
hm: *const Self, hm: *const Self,
// how many items have we returned // how many items have we returned
@ -36,7 +49,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
// used to detect concurrent modification // used to detect concurrent modification
initial_modification_count: debug_u32, initial_modification_count: debug_u32,
pub fn next(it: *Iterator) ?*Entry { pub fn next(it: *Iterator) ?*KV {
if (want_modification_safety) { if (want_modification_safety) {
assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
} }
@ -46,7 +59,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
if (entry.used) { if (entry.used) {
it.index += 1; it.index += 1;
it.count += 1; it.count += 1;
return entry; return &entry.kv;
} }
} }
unreachable; // no next item unreachable; // no next item
@ -71,7 +84,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}; };
} }
pub fn deinit(hm: *const Self) void { pub fn deinit(hm: Self) void {
hm.allocator.free(hm.entries); hm.allocator.free(hm.entries);
} }
@ -84,34 +97,65 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
hm.incrementModificationCount(); hm.incrementModificationCount();
} }
pub fn count(hm: *const Self) usize { pub fn count(self: Self) usize {
return hm.size; return self.size;
} }
/// Returns the value that was already there. /// If key exists this function cannot fail.
pub fn put(hm: *Self, key: K, value: *const V) !?V { /// If there is an existing item with `key`, then the result
if (hm.entries.len == 0) { /// kv pointer points to it, and found_existing is true.
try hm.initCapacity(16); /// Otherwise, puts a new item with undefined value, and
/// the kv pointer points to it. Caller should then initialize
/// the data.
pub fn getOrPut(self: *Self, key: K) !GetOrPutResult {
// TODO this implementation can be improved - we should only
// have to hash once and find the entry once.
if (self.get(key)) |kv| {
return GetOrPutResult{
.kv = kv,
.found_existing = true,
};
}
self.incrementModificationCount();
try self.ensureCapacity();
const put_result = self.internalPut(key);
assert(put_result.old_kv == null);
return GetOrPutResult{
.kv = &put_result.new_entry.kv,
.found_existing = false,
};
}
fn ensureCapacity(self: *Self) !void {
if (self.entries.len == 0) {
return self.initCapacity(16);
} }
hm.incrementModificationCount();
// if we get too full (60%), double the capacity // if we get too full (60%), double the capacity
if (hm.size * 5 >= hm.entries.len * 3) { if (self.size * 5 >= self.entries.len * 3) {
const old_entries = hm.entries; const old_entries = self.entries;
try hm.initCapacity(hm.entries.len * 2); try self.initCapacity(self.entries.len * 2);
// dump all of the old elements into the new table // dump all of the old elements into the new table
for (old_entries) |*old_entry| { for (old_entries) |*old_entry| {
if (old_entry.used) { if (old_entry.used) {
_ = hm.internalPut(old_entry.key, old_entry.value); self.internalPut(old_entry.kv.key).new_entry.kv.value = old_entry.kv.value;
} }
} }
hm.allocator.free(old_entries); self.allocator.free(old_entries);
}
} }
return hm.internalPut(key, value); /// Returns the kv pair that was already there.
pub fn put(self: *Self, key: K, value: V) !?KV {
self.incrementModificationCount();
try self.ensureCapacity();
const put_result = self.internalPut(key);
put_result.new_entry.kv.value = value;
return put_result.old_kv;
} }
pub fn get(hm: *const Self, key: K) ?*Entry { pub fn get(hm: *const Self, key: K) ?*KV {
if (hm.entries.len == 0) { if (hm.entries.len == 0) {
return null; return null;
} }
@ -122,7 +166,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return hm.get(key) != null; return hm.get(key) != null;
} }
pub fn remove(hm: *Self, key: K) ?*Entry { pub fn remove(hm: *Self, key: K) ?*KV {
if (hm.entries.len == 0) return null; if (hm.entries.len == 0) return null;
hm.incrementModificationCount(); hm.incrementModificationCount();
const start_index = hm.keyToIndex(key); const start_index = hm.keyToIndex(key);
@ -134,7 +178,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
if (!entry.used) return null; if (!entry.used) return null;
if (!eql(entry.key, key)) continue; if (!eql(entry.kv.key, key)) continue;
while (roll_over < hm.entries.len) : (roll_over += 1) { while (roll_over < hm.entries.len) : (roll_over += 1) {
const next_index = (start_index + roll_over + 1) % hm.entries.len; const next_index = (start_index + roll_over + 1) % hm.entries.len;
@ -142,7 +186,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
if (!next_entry.used or next_entry.distance_from_start_index == 0) { if (!next_entry.used or next_entry.distance_from_start_index == 0) {
entry.used = false; entry.used = false;
hm.size -= 1; hm.size -= 1;
return entry; return &entry.kv;
} }
entry.* = next_entry.*; entry.* = next_entry.*;
entry.distance_from_start_index -= 1; entry.distance_from_start_index -= 1;
@ -163,6 +207,16 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}; };
} }
pub fn clone(self: Self) !Self {
var other = Self.init(self.allocator);
try other.initCapacity(self.entries.len);
var it = self.iterator();
while (it.next()) |entry| {
assert((try other.put(entry.key, entry.value)) == null);
}
return other;
}
fn initCapacity(hm: *Self, capacity: usize) !void { fn initCapacity(hm: *Self, capacity: usize) !void {
hm.entries = try hm.allocator.alloc(Entry, capacity); hm.entries = try hm.allocator.alloc(Entry, capacity);
hm.size = 0; hm.size = 0;
@ -178,60 +232,81 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
} }
} }
/// Returns the value that was already there. const InternalPutResult = struct {
fn internalPut(hm: *Self, orig_key: K, orig_value: *const V) ?V { new_entry: *Entry,
old_kv: ?KV,
};
/// Returns a pointer to the new entry.
/// Asserts that there is enough space for the new item.
fn internalPut(self: *Self, orig_key: K) InternalPutResult {
var key = orig_key; var key = orig_key;
var value = orig_value.*; var value: V = undefined;
const start_index = hm.keyToIndex(key); const start_index = self.keyToIndex(key);
var roll_over: usize = 0; var roll_over: usize = 0;
var distance_from_start_index: usize = 0; var distance_from_start_index: usize = 0;
while (roll_over < hm.entries.len) : ({ var got_result_entry = false;
var result = InternalPutResult{
.new_entry = undefined,
.old_kv = null,
};
while (roll_over < self.entries.len) : ({
roll_over += 1; roll_over += 1;
distance_from_start_index += 1; distance_from_start_index += 1;
}) { }) {
const index = (start_index + roll_over) % hm.entries.len; const index = (start_index + roll_over) % self.entries.len;
const entry = &hm.entries[index]; const entry = &self.entries[index];
if (entry.used and !eql(entry.key, key)) { if (entry.used and !eql(entry.kv.key, key)) {
if (entry.distance_from_start_index < distance_from_start_index) { if (entry.distance_from_start_index < distance_from_start_index) {
// robin hood to the rescue // robin hood to the rescue
const tmp = entry.*; const tmp = entry.*;
hm.max_distance_from_start_index = math.max(hm.max_distance_from_start_index, distance_from_start_index); self.max_distance_from_start_index = math.max(self.max_distance_from_start_index, distance_from_start_index);
if (!got_result_entry) {
got_result_entry = true;
result.new_entry = entry;
}
entry.* = Entry{ entry.* = Entry{
.used = true, .used = true,
.distance_from_start_index = distance_from_start_index, .distance_from_start_index = distance_from_start_index,
.kv = KV{
.key = key, .key = key,
.value = value, .value = value,
},
}; };
key = tmp.key; key = tmp.kv.key;
value = tmp.value; value = tmp.kv.value;
distance_from_start_index = tmp.distance_from_start_index; distance_from_start_index = tmp.distance_from_start_index;
} }
continue; continue;
} }
var result: ?V = null;
if (entry.used) { if (entry.used) {
result = entry.value; result.old_kv = entry.kv;
} else { } else {
// adding an entry. otherwise overwriting old value with // adding an entry. otherwise overwriting old value with
// same key // same key
hm.size += 1; self.size += 1;
} }
hm.max_distance_from_start_index = math.max(distance_from_start_index, hm.max_distance_from_start_index); self.max_distance_from_start_index = math.max(distance_from_start_index, self.max_distance_from_start_index);
if (!got_result_entry) {
result.new_entry = entry;
}
entry.* = Entry{ entry.* = Entry{
.used = true, .used = true,
.distance_from_start_index = distance_from_start_index, .distance_from_start_index = distance_from_start_index,
.kv = KV{
.key = key, .key = key,
.value = value, .value = value,
},
}; };
return result; return result;
} }
unreachable; // put into a full map unreachable; // put into a full map
} }
fn internalGet(hm: *const Self, key: K) ?*Entry { fn internalGet(hm: Self, key: K) ?*KV {
const start_index = hm.keyToIndex(key); const start_index = hm.keyToIndex(key);
{ {
var roll_over: usize = 0; var roll_over: usize = 0;
@ -240,13 +315,13 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
const entry = &hm.entries[index]; const entry = &hm.entries[index];
if (!entry.used) return null; if (!entry.used) return null;
if (eql(entry.key, key)) return entry; if (eql(entry.kv.key, key)) return &entry.kv;
} }
} }
return null; return null;
} }
fn keyToIndex(hm: *const Self, key: K) usize { fn keyToIndex(hm: Self, key: K) usize {
return usize(hash(key)) % hm.entries.len; return usize(hash(key)) % hm.entries.len;
} }
}; };
@ -256,7 +331,7 @@ test "basic hash map usage" {
var direct_allocator = std.heap.DirectAllocator.init(); var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit(); defer direct_allocator.deinit();
var map = HashMap(i32, i32, hash_i32, eql_i32).init(&direct_allocator.allocator); var map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
defer map.deinit(); defer map.deinit();
assert((try map.put(1, 11)) == null); assert((try map.put(1, 11)) == null);
@ -265,8 +340,19 @@ test "basic hash map usage" {
assert((try map.put(4, 44)) == null); assert((try map.put(4, 44)) == null);
assert((try map.put(5, 55)) == null); assert((try map.put(5, 55)) == null);
assert((try map.put(5, 66)).? == 55); assert((try map.put(5, 66)).?.value == 55);
assert((try map.put(5, 55)).? == 66); assert((try map.put(5, 55)).?.value == 66);
const gop1 = try map.getOrPut(5);
assert(gop1.found_existing == true);
assert(gop1.kv.value == 55);
gop1.kv.value = 77;
assert(map.get(5).?.value == 77);
const gop2 = try map.getOrPut(99);
assert(gop2.found_existing == false);
gop2.kv.value = 42;
assert(map.get(99).?.value == 42);
assert(map.contains(2)); assert(map.contains(2));
assert(map.get(2).?.value == 22); assert(map.get(2).?.value == 22);
@ -279,7 +365,7 @@ test "iterator hash map" {
var direct_allocator = std.heap.DirectAllocator.init(); var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit(); defer direct_allocator.deinit();
var reset_map = HashMap(i32, i32, hash_i32, eql_i32).init(&direct_allocator.allocator); var reset_map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
defer reset_map.deinit(); defer reset_map.deinit();
assert((try reset_map.put(1, 11)) == null); assert((try reset_map.put(1, 11)) == null);
@ -287,14 +373,14 @@ test "iterator hash map" {
assert((try reset_map.put(3, 33)) == null); assert((try reset_map.put(3, 33)) == null);
var keys = []i32{ var keys = []i32{
1,
2,
3, 3,
2,
1,
}; };
var values = []i32{ var values = []i32{
11,
22,
33, 33,
22,
11,
}; };
var it = reset_map.iterator(); var it = reset_map.iterator();
@ -322,10 +408,140 @@ test "iterator hash map" {
assert(entry.value == values[0]); assert(entry.value == values[0]);
} }
fn hash_i32(x: i32) u32 { pub fn getHashPtrAddrFn(comptime K: type) (fn (K) u32) {
return @bitCast(u32, x); return struct {
fn hash(key: K) u32 {
return getAutoHashFn(usize)(@ptrToInt(key));
}
}.hash;
} }
fn eql_i32(a: i32, b: i32) bool { pub fn getTrivialEqlFn(comptime K: type) (fn (K, K) bool) {
return struct {
fn eql(a: K, b: K) bool {
return a == b; return a == b;
} }
}.eql;
}
pub fn getAutoHashFn(comptime K: type) (fn (K) u32) {
return struct {
fn hash(key: K) u32 {
comptime var rng = comptime std.rand.DefaultPrng.init(0);
return autoHash(key, &rng.random, u32);
}
}.hash;
}
pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) {
return struct {
fn eql(a: K, b: K) bool {
return autoEql(a, b);
}
}.eql;
}
// TODO improve these hash functions
pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type) HashInt {
switch (@typeInfo(@typeOf(key))) {
builtin.TypeId.NoReturn,
builtin.TypeId.Opaque,
builtin.TypeId.Undefined,
builtin.TypeId.ArgTuple,
=> @compileError("cannot hash this type"),
builtin.TypeId.Void,
builtin.TypeId.Null,
=> return 0,
builtin.TypeId.Int => |info| {
const unsigned_x = @bitCast(@IntType(false, info.bits), key);
if (info.bits <= HashInt.bit_count) {
return HashInt(unsigned_x) ^ comptime rng.scalar(HashInt);
} else {
return @truncate(HashInt, unsigned_x ^ comptime rng.scalar(@typeOf(unsigned_x)));
}
},
builtin.TypeId.Float => |info| {
return autoHash(@bitCast(@IntType(false, info.bits), key), rng);
},
builtin.TypeId.Bool => return autoHash(@boolToInt(key), rng),
builtin.TypeId.Enum => return autoHash(@enumToInt(key), rng),
builtin.TypeId.ErrorSet => return autoHash(@errorToInt(key), rng),
builtin.TypeId.Promise, builtin.TypeId.Fn => return autoHash(@ptrToInt(key), rng),
builtin.TypeId.Namespace,
builtin.TypeId.Block,
builtin.TypeId.BoundFn,
builtin.TypeId.ComptimeFloat,
builtin.TypeId.ComptimeInt,
builtin.TypeId.Type,
=> return 0,
builtin.TypeId.Pointer => |info| switch (info.size) {
builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto hash for single item pointers"),
builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto hash for many item pointers"),
builtin.TypeInfo.Pointer.Size.Slice => {
const interval = std.math.max(1, key.len / 256);
var i: usize = 0;
var h = comptime rng.scalar(HashInt);
while (i < key.len) : (i += interval) {
h ^= autoHash(key[i], rng, HashInt);
}
return h;
},
},
builtin.TypeId.Optional => @compileError("TODO auto hash for optionals"),
builtin.TypeId.Array => @compileError("TODO auto hash for arrays"),
builtin.TypeId.Struct => @compileError("TODO auto hash for structs"),
builtin.TypeId.Union => @compileError("TODO auto hash for unions"),
builtin.TypeId.ErrorUnion => @compileError("TODO auto hash for unions"),
}
}
pub fn autoEql(a: var, b: @typeOf(a)) bool {
switch (@typeInfo(@typeOf(a))) {
builtin.TypeId.NoReturn,
builtin.TypeId.Opaque,
builtin.TypeId.Undefined,
builtin.TypeId.ArgTuple,
=> @compileError("cannot test equality of this type"),
builtin.TypeId.Void,
builtin.TypeId.Null,
=> return true,
builtin.TypeId.Bool,
builtin.TypeId.Int,
builtin.TypeId.Float,
builtin.TypeId.ComptimeFloat,
builtin.TypeId.ComptimeInt,
builtin.TypeId.Namespace,
builtin.TypeId.Block,
builtin.TypeId.Promise,
builtin.TypeId.Enum,
builtin.TypeId.BoundFn,
builtin.TypeId.Fn,
builtin.TypeId.ErrorSet,
builtin.TypeId.Type,
=> return a == b,
builtin.TypeId.Pointer => |info| switch (info.size) {
builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto eql for single item pointers"),
builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto eql for many item pointers"),
builtin.TypeInfo.Pointer.Size.Slice => {
if (a.len != b.len) return false;
for (a) |a_item, i| {
if (!autoEql(a_item, b[i])) return false;
}
return true;
},
},
builtin.TypeId.Optional => @compileError("TODO auto eql for optionals"),
builtin.TypeId.Array => @compileError("TODO auto eql for arrays"),
builtin.TypeId.Struct => @compileError("TODO auto eql for structs"),
builtin.TypeId.Union => @compileError("TODO auto eql for unions"),
builtin.TypeId.ErrorUnion => @compileError("TODO auto eql for unions"),
}
}

View File

@ -5,10 +5,11 @@ pub const BufSet = @import("buf_set.zig").BufSet;
pub const Buffer = @import("buffer.zig").Buffer; pub const Buffer = @import("buffer.zig").Buffer;
pub const BufferOutStream = @import("buffer.zig").BufferOutStream; pub const BufferOutStream = @import("buffer.zig").BufferOutStream;
pub const HashMap = @import("hash_map.zig").HashMap; pub const HashMap = @import("hash_map.zig").HashMap;
pub const AutoHashMap = @import("hash_map.zig").AutoHashMap;
pub const LinkedList = @import("linked_list.zig").LinkedList; pub const LinkedList = @import("linked_list.zig").LinkedList;
pub const IntrusiveLinkedList = @import("linked_list.zig").IntrusiveLinkedList;
pub const SegmentedList = @import("segmented_list.zig").SegmentedList; pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
pub const DynLib = @import("dynamic_library.zig").DynLib; pub const DynLib = @import("dynamic_library.zig").DynLib;
pub const Mutex = @import("mutex.zig").Mutex;
pub const atomic = @import("atomic/index.zig"); pub const atomic = @import("atomic/index.zig");
pub const base64 = @import("base64.zig"); pub const base64 = @import("base64.zig");
@ -23,6 +24,7 @@ pub const empty_import = @import("empty.zig");
pub const event = @import("event.zig"); pub const event = @import("event.zig");
pub const fmt = @import("fmt/index.zig"); pub const fmt = @import("fmt/index.zig");
pub const hash = @import("hash/index.zig"); pub const hash = @import("hash/index.zig");
pub const hash_map = @import("hash_map.zig");
pub const heap = @import("heap.zig"); pub const heap = @import("heap.zig");
pub const io = @import("io.zig"); pub const io = @import("io.zig");
pub const json = @import("json.zig"); pub const json = @import("json.zig");
@ -32,6 +34,7 @@ pub const mem = @import("mem.zig");
pub const net = @import("net.zig"); pub const net = @import("net.zig");
pub const os = @import("os/index.zig"); pub const os = @import("os/index.zig");
pub const rand = @import("rand/index.zig"); pub const rand = @import("rand/index.zig");
pub const rb = @import("rb.zig");
pub const sort = @import("sort.zig"); pub const sort = @import("sort.zig");
pub const unicode = @import("unicode.zig"); pub const unicode = @import("unicode.zig");
pub const zig = @import("zig/index.zig"); pub const zig = @import("zig/index.zig");
@ -48,6 +51,7 @@ test "std" {
_ = @import("hash_map.zig"); _ = @import("hash_map.zig");
_ = @import("linked_list.zig"); _ = @import("linked_list.zig");
_ = @import("segmented_list.zig"); _ = @import("segmented_list.zig");
_ = @import("mutex.zig");
_ = @import("base64.zig"); _ = @import("base64.zig");
_ = @import("build.zig"); _ = @import("build.zig");

View File

@ -207,6 +207,12 @@ pub fn InStream(comptime ReadError: type) type {
_ = try self.readByte(); _ = try self.readByte();
} }
} }
pub fn readStruct(self: *Self, comptime T: type, ptr: *T) !void {
// Only extern and packed structs have defined in-memory layout.
assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
return self.readNoEof(@sliceToBytes((*[1]T)(ptr)[0..]));
}
}; };
} }
@ -254,9 +260,8 @@ pub fn OutStream(comptime WriteError: type) type {
}; };
} }
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator. pub fn writeFile(path: []const u8, data: []const u8) !void {
pub fn writeFile(allocator: *mem.Allocator, path: []const u8, data: []const u8) !void { var file = try File.openWrite(path);
var file = try File.openWrite(allocator, path);
defer file.close(); defer file.close();
try file.write(data); try file.write(data);
} }
@ -268,7 +273,7 @@ pub fn readFileAlloc(allocator: *mem.Allocator, path: []const u8) ![]u8 {
/// On success, caller owns returned buffer. /// On success, caller owns returned buffer.
pub fn readFileAllocAligned(allocator: *mem.Allocator, path: []const u8, comptime A: u29) ![]align(A) u8 { pub fn readFileAllocAligned(allocator: *mem.Allocator, path: []const u8, comptime A: u29) ![]align(A) u8 {
var file = try File.openRead(allocator, path); var file = try File.openRead(path);
defer file.close(); defer file.close();
const size = try file.getEndPos(); const size = try file.getEndPos();
@ -415,7 +420,6 @@ pub fn PeekStream(comptime buffer_size: usize, comptime InStreamError: type) typ
self.at_end = (read < left); self.at_end = (read < left);
return pos + read; return pos + read;
} }
}; };
} }
@ -481,8 +485,7 @@ pub const SliceOutStream = struct {
assert(self.pos <= self.slice.len); assert(self.pos <= self.slice.len);
const n = const n = if (self.pos + bytes.len <= self.slice.len)
if (self.pos + bytes.len <= self.slice.len)
bytes.len bytes.len
else else
self.slice.len - self.pos; self.slice.len - self.pos;
@ -586,7 +589,7 @@ pub const BufferedAtomicFile = struct {
}); });
errdefer allocator.destroy(self); errdefer allocator.destroy(self);
self.atomic_file = try os.AtomicFile.init(allocator, dest_path, os.default_file_mode); self.atomic_file = try os.AtomicFile.init(allocator, dest_path, os.File.default_mode);
errdefer self.atomic_file.deinit(); errdefer self.atomic_file.deinit();
self.file_stream = FileOutStream.init(&self.atomic_file.file); self.file_stream = FileOutStream.init(&self.atomic_file.file);

View File

@ -16,7 +16,7 @@ test "write a file, read it, then delete it" {
prng.random.bytes(data[0..]); prng.random.bytes(data[0..]);
const tmp_file_name = "temp_test_file.txt"; const tmp_file_name = "temp_test_file.txt";
{ {
var file = try os.File.openWrite(allocator, tmp_file_name); var file = try os.File.openWrite(tmp_file_name);
defer file.close(); defer file.close();
var file_out_stream = io.FileOutStream.init(&file); var file_out_stream = io.FileOutStream.init(&file);
@ -28,7 +28,7 @@ test "write a file, read it, then delete it" {
try buf_stream.flush(); try buf_stream.flush();
} }
{ {
var file = try os.File.openRead(allocator, tmp_file_name); var file = try os.File.openRead(tmp_file_name);
defer file.close(); defer file.close();
const file_size = try file.getEndPos(); const file_size = try file.getEndPos();
@ -45,7 +45,7 @@ test "write a file, read it, then delete it" {
assert(mem.eql(u8, contents["begin".len .. contents.len - "end".len], data)); assert(mem.eql(u8, contents["begin".len .. contents.len - "end".len], data));
assert(mem.eql(u8, contents[contents.len - "end".len ..], "end")); assert(mem.eql(u8, contents[contents.len - "end".len ..], "end"));
} }
try os.deleteFile(allocator, tmp_file_name); try os.deleteFile(tmp_file_name);
} }
test "BufferOutStream" { test "BufferOutStream" {

View File

@ -1318,7 +1318,7 @@ pub const Parser = struct {
_ = p.stack.pop(); _ = p.stack.pop();
var object = &p.stack.items[p.stack.len - 1].Object; var object = &p.stack.items[p.stack.len - 1].Object;
_ = try object.put(key, value); _ = try object.put(key, value.*);
p.state = State.ObjectKey; p.state = State.ObjectKey;
}, },
// Array Parent -> [ ..., <array>, value ] // Array Parent -> [ ..., <array>, value ]

View File

@ -4,18 +4,8 @@ const assert = debug.assert;
const mem = std.mem; const mem = std.mem;
const Allocator = mem.Allocator; const Allocator = mem.Allocator;
/// Generic non-intrusive doubly linked list.
pub fn LinkedList(comptime T: type) type {
return BaseLinkedList(T, void, "");
}
/// Generic intrusive doubly linked list.
pub fn IntrusiveLinkedList(comptime ParentType: type, comptime field_name: []const u8) type {
return BaseLinkedList(void, ParentType, field_name);
}
/// Generic doubly linked list. /// Generic doubly linked list.
fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_name: []const u8) type { pub fn LinkedList(comptime T: type) type {
return struct { return struct {
const Self = this; const Self = this;
@ -25,23 +15,13 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
next: ?*Node, next: ?*Node,
data: T, data: T,
pub fn init(value: *const T) Node { pub fn init(data: T) Node {
return Node{ return Node{
.prev = null, .prev = null,
.next = null, .next = null,
.data = value.*, .data = data,
}; };
} }
pub fn initIntrusive() Node {
// TODO: when #678 is solved this can become `init`.
return Node.init({});
}
pub fn toData(node: *Node) *ParentType {
comptime assert(isIntrusive());
return @fieldParentPtr(ParentType, field_name, node);
}
}; };
first: ?*Node, first: ?*Node,
@ -60,10 +40,6 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
}; };
} }
fn isIntrusive() bool {
return ParentType != void or field_name.len != 0;
}
/// Insert a new node after an existing one. /// Insert a new node after an existing one.
/// ///
/// Arguments: /// Arguments:
@ -192,7 +168,6 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Returns: /// Returns:
/// A pointer to the new node. /// A pointer to the new node.
pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node { pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
return allocator.create(Node(undefined)); return allocator.create(Node(undefined));
} }
@ -202,7 +177,6 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// node: Pointer to the node to deallocate. /// node: Pointer to the node to deallocate.
/// allocator: Dynamic memory allocator. /// allocator: Dynamic memory allocator.
pub fn destroyNode(list: *Self, node: *Node, allocator: *Allocator) void { pub fn destroyNode(list: *Self, node: *Node, allocator: *Allocator) void {
comptime assert(!isIntrusive());
allocator.destroy(node); allocator.destroy(node);
} }
@ -214,8 +188,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// ///
/// Returns: /// Returns:
/// A pointer to the new node. /// A pointer to the new node.
pub fn createNode(list: *Self, data: *const T, allocator: *Allocator) !*Node { pub fn createNode(list: *Self, data: T, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
var node = try list.allocateNode(allocator); var node = try list.allocateNode(allocator);
node.* = Node.init(data); node.* = Node.init(data);
return node; return node;
@ -274,69 +247,3 @@ test "basic linked list test" {
assert(list.last.?.data == 4); assert(list.last.?.data == 4);
assert(list.len == 2); assert(list.len == 2);
} }
const ElementList = IntrusiveLinkedList(Element, "link");
const Element = struct {
value: u32,
link: IntrusiveLinkedList(Element, "link").Node,
};
test "basic intrusive linked list test" {
const allocator = debug.global_allocator;
var list = ElementList.init();
var one = Element{
.value = 1,
.link = ElementList.Node.initIntrusive(),
};
var two = Element{
.value = 2,
.link = ElementList.Node.initIntrusive(),
};
var three = Element{
.value = 3,
.link = ElementList.Node.initIntrusive(),
};
var four = Element{
.value = 4,
.link = ElementList.Node.initIntrusive(),
};
var five = Element{
.value = 5,
.link = ElementList.Node.initIntrusive(),
};
list.append(&two.link); // {2}
list.append(&five.link); // {2, 5}
list.prepend(&one.link); // {1, 2, 5}
list.insertBefore(&five.link, &four.link); // {1, 2, 4, 5}
list.insertAfter(&two.link, &three.link); // {1, 2, 3, 4, 5}
// Traverse forwards.
{
var it = list.first;
var index: u32 = 1;
while (it) |node| : (it = node.next) {
assert(node.toData().value == index);
index += 1;
}
}
// Traverse backwards.
{
var it = list.last;
var index: u32 = 1;
while (it) |node| : (it = node.prev) {
assert(node.toData().value == (6 - index));
index += 1;
}
}
var first = list.popFirst(); // {2, 3, 4, 5}
var last = list.pop(); // {2, 3, 4}
list.remove(&three.link); // {2, 4}
assert(list.first.?.toData().value == 2);
assert(list.last.?.toData().value == 4);
assert(list.len == 2);
}

View File

@ -1,16 +1,18 @@
const builtin = @import("builtin");
const std = @import("index.zig");
const io = std.io;
const mem = std.mem;
const MH_MAGIC_64 = 0xFEEDFACF; pub const mach_header = extern struct {
const MH_PIE = 0x200000;
const LC_SYMTAB = 2;
const MachHeader64 = packed struct {
magic: u32, magic: u32,
cputype: u32, cputype: cpu_type_t,
cpusubtype: u32, cpusubtype: cpu_subtype_t,
filetype: u32,
ncmds: u32,
sizeofcmds: u32,
flags: u32,
};
pub const mach_header_64 = extern struct {
magic: u32,
cputype: cpu_type_t,
cpusubtype: cpu_subtype_t,
filetype: u32, filetype: u32,
ncmds: u32, ncmds: u32,
sizeofcmds: u32, sizeofcmds: u32,
@ -18,19 +20,138 @@ const MachHeader64 = packed struct {
reserved: u32, reserved: u32,
}; };
const LoadCommand = packed struct { pub const load_command = extern struct {
cmd: u32, cmd: u32,
cmdsize: u32, cmdsize: u32,
}; };
const SymtabCommand = packed struct {
symoff: u32, /// The symtab_command contains the offsets and sizes of the link-edit 4.3BSD
nsyms: u32, /// "stab" style symbol table information as described in the header files
stroff: u32, /// <nlist.h> and <stab.h>.
strsize: u32, pub const symtab_command = extern struct {
cmd: u32, /// LC_SYMTAB
cmdsize: u32, /// sizeof(struct symtab_command)
symoff: u32, /// symbol table offset
nsyms: u32, /// number of symbol table entries
stroff: u32, /// string table offset
strsize: u32, /// string table size in bytes
}; };
const Nlist64 = packed struct { /// The linkedit_data_command contains the offsets and sizes of a blob
/// of data in the __LINKEDIT segment.
const linkedit_data_command = extern struct {
cmd: u32,/// LC_CODE_SIGNATURE, LC_SEGMENT_SPLIT_INFO, LC_FUNCTION_STARTS, LC_DATA_IN_CODE, LC_DYLIB_CODE_SIGN_DRS or LC_LINKER_OPTIMIZATION_HINT.
cmdsize: u32, /// sizeof(struct linkedit_data_command)
dataoff: u32 , /// file offset of data in __LINKEDIT segment
datasize: u32 , /// file size of data in __LINKEDIT segment
};
/// The segment load command indicates that a part of this file is to be
/// mapped into the task's address space. The size of this segment in memory,
/// vmsize, maybe equal to or larger than the amount to map from this file,
/// filesize. The file is mapped starting at fileoff to the beginning of
/// the segment in memory, vmaddr. The rest of the memory of the segment,
/// if any, is allocated zero fill on demand. The segment's maximum virtual
/// memory protection and initial virtual memory protection are specified
/// by the maxprot and initprot fields. If the segment has sections then the
/// section structures directly follow the segment command and their size is
/// reflected in cmdsize.
pub const segment_command = extern struct {
cmd: u32,/// LC_SEGMENT
cmdsize: u32,/// includes sizeof section structs
segname: [16]u8,/// segment name
vmaddr: u32,/// memory address of this segment
vmsize: u32,/// memory size of this segment
fileoff: u32,/// file offset of this segment
filesize: u32,/// amount to map from the file
maxprot: vm_prot_t,/// maximum VM protection
initprot: vm_prot_t,/// initial VM protection
nsects: u32,/// number of sections in segment
flags: u32,
};
/// The 64-bit segment load command indicates that a part of this file is to be
/// mapped into a 64-bit task's address space. If the 64-bit segment has
/// sections then section_64 structures directly follow the 64-bit segment
/// command and their size is reflected in cmdsize.
pub const segment_command_64 = extern struct {
cmd: u32, /// LC_SEGMENT_64
cmdsize: u32, /// includes sizeof section_64 structs
segname: [16]u8, /// segment name
vmaddr: u64, /// memory address of this segment
vmsize: u64, /// memory size of this segment
fileoff: u64, /// file offset of this segment
filesize: u64, /// amount to map from the file
maxprot: vm_prot_t, /// maximum VM protection
initprot: vm_prot_t, /// initial VM protection
nsects: u32, /// number of sections in segment
flags: u32,
};
/// A segment is made up of zero or more sections. Non-MH_OBJECT files have
/// all of their segments with the proper sections in each, and padded to the
/// specified segment alignment when produced by the link editor. The first
/// segment of a MH_EXECUTE and MH_FVMLIB format file contains the mach_header
/// and load commands of the object file before its first section. The zero
/// fill sections are always last in their segment (in all formats). This
/// allows the zeroed segment padding to be mapped into memory where zero fill
/// sections might be. The gigabyte zero fill sections, those with the section
/// type S_GB_ZEROFILL, can only be in a segment with sections of this type.
/// These segments are then placed after all other segments.
///
/// The MH_OBJECT format has all of its sections in one segment for
/// compactness. There is no padding to a specified segment boundary and the
/// mach_header and load commands are not part of the segment.
///
/// Sections with the same section name, sectname, going into the same segment,
/// segname, are combined by the link editor. The resulting section is aligned
/// to the maximum alignment of the combined sections and is the new section's
/// alignment. The combined sections are aligned to their original alignment in
/// the combined section. Any padded bytes to get the specified alignment are
/// zeroed.
///
/// The format of the relocation entries referenced by the reloff and nreloc
/// fields of the section structure for mach object files is described in the
/// header file <reloc.h>.
pub const @"section" = extern struct {
sectname: [16]u8, /// name of this section
segname: [16]u8, /// segment this section goes in
addr: u32, /// memory address of this section
size: u32, /// size in bytes of this section
offset: u32, /// file offset of this section
@"align": u32, /// section alignment (power of 2)
reloff: u32, /// file offset of relocation entries
nreloc: u32, /// number of relocation entries
flags: u32, /// flags (section type and attributes
reserved1: u32, /// reserved (for offset or index)
reserved2: u32, /// reserved (for count or sizeof)
};
pub const section_64 = extern struct {
sectname: [16]u8, /// name of this section
segname: [16]u8, /// segment this section goes in
addr: u64, /// memory address of this section
size: u64, /// size in bytes of this section
offset: u32, /// file offset of this section
@"align": u32, /// section alignment (power of 2)
reloff: u32, /// file offset of relocation entries
nreloc: u32, /// number of relocation entries
flags: u32, /// flags (section type and attributes
reserved1: u32, /// reserved (for offset or index)
reserved2: u32, /// reserved (for count or sizeof)
reserved3: u32, /// reserved
};
pub const nlist = extern struct {
n_strx: u32,
n_type: u8,
n_sect: u8,
n_desc: i16,
n_value: u32,
};
pub const nlist_64 = extern struct {
n_strx: u32, n_strx: u32,
n_type: u8, n_type: u8,
n_sect: u8, n_sect: u8,
@ -38,135 +159,190 @@ const Nlist64 = packed struct {
n_value: u64, n_value: u64,
}; };
pub const Symbol = struct { /// After MacOS X 10.1 when a new load command is added that is required to be
name: []const u8, /// understood by the dynamic linker for the image to execute properly the
address: u64, /// LC_REQ_DYLD bit will be or'ed into the load command constant. If the dynamic
/// linker sees such a load command it it does not understand will issue a
/// "unknown load command required for execution" error and refuse to use the
/// image. Other load commands without this bit that are not understood will
/// simply be ignored.
pub const LC_REQ_DYLD = 0x80000000;
fn addressLessThan(lhs: Symbol, rhs: Symbol) bool { pub const LC_SEGMENT = 0x1; /// segment of this file to be mapped
return lhs.address < rhs.address; pub const LC_SYMTAB = 0x2; /// link-edit stab symbol table info
} pub const LC_SYMSEG = 0x3; /// link-edit gdb symbol table info (obsolete)
}; pub const LC_THREAD = 0x4; /// thread
pub const LC_UNIXTHREAD = 0x5; /// unix thread (includes a stack)
pub const LC_LOADFVMLIB = 0x6; /// load a specified fixed VM shared library
pub const LC_IDFVMLIB = 0x7; /// fixed VM shared library identification
pub const LC_IDENT = 0x8; /// object identification info (obsolete)
pub const LC_FVMFILE = 0x9; /// fixed VM file inclusion (internal use)
pub const LC_PREPAGE = 0xa; /// prepage command (internal use)
pub const LC_DYSYMTAB = 0xb; /// dynamic link-edit symbol table info
pub const LC_LOAD_DYLIB = 0xc; /// load a dynamically linked shared library
pub const LC_ID_DYLIB = 0xd; /// dynamically linked shared lib ident
pub const LC_LOAD_DYLINKER = 0xe; /// load a dynamic linker
pub const LC_ID_DYLINKER = 0xf; /// dynamic linker identification
pub const LC_PREBOUND_DYLIB = 0x10; /// modules prebound for a dynamically
pub const LC_ROUTINES = 0x11; /// image routines
pub const LC_SUB_FRAMEWORK = 0x12; /// sub framework
pub const LC_SUB_UMBRELLA = 0x13; /// sub umbrella
pub const LC_SUB_CLIENT = 0x14; /// sub client
pub const LC_SUB_LIBRARY = 0x15; /// sub library
pub const LC_TWOLEVEL_HINTS = 0x16; /// two-level namespace lookup hints
pub const LC_PREBIND_CKSUM = 0x17; /// prebind checksum
pub const SymbolTable = struct { /// load a dynamically linked shared library that is allowed to be missing
allocator: *mem.Allocator, /// (all symbols are weak imported).
symbols: []const Symbol, pub const LC_LOAD_WEAK_DYLIB = (0x18 | LC_REQ_DYLD);
strings: []const u8,
// Doubles as an eyecatcher to calculate the PIE slide, see loadSymbols(). pub const LC_SEGMENT_64 = 0x19; /// 64-bit segment of this file to be mapped
// Ideally we'd use _mh_execute_header because it's always at 0x100000000 pub const LC_ROUTINES_64 = 0x1a; /// 64-bit image routines
// in the image but as it's located in a different section than executable pub const LC_UUID = 0x1b; /// the uuid
// code, its displacement is different. pub const LC_RPATH = (0x1c | LC_REQ_DYLD); /// runpath additions
pub fn deinit(self: *SymbolTable) void { pub const LC_CODE_SIGNATURE = 0x1d; /// local of code signature
self.allocator.free(self.symbols); pub const LC_SEGMENT_SPLIT_INFO = 0x1e; /// local of info to split segments
self.symbols = []const Symbol{}; pub const LC_REEXPORT_DYLIB = (0x1f | LC_REQ_DYLD); /// load and re-export dylib
pub const LC_LAZY_LOAD_DYLIB = 0x20; /// delay load of dylib until first use
pub const LC_ENCRYPTION_INFO = 0x21; /// encrypted segment information
pub const LC_DYLD_INFO = 0x22; /// compressed dyld information
pub const LC_DYLD_INFO_ONLY = (0x22|LC_REQ_DYLD); /// compressed dyld information only
pub const LC_LOAD_UPWARD_DYLIB = (0x23 | LC_REQ_DYLD); /// load upward dylib
pub const LC_VERSION_MIN_MACOSX = 0x24; /// build for MacOSX min OS version
pub const LC_VERSION_MIN_IPHONEOS = 0x25; /// build for iPhoneOS min OS version
pub const LC_FUNCTION_STARTS = 0x26; /// compressed table of function start addresses
pub const LC_DYLD_ENVIRONMENT = 0x27; /// string for dyld to treat like environment variable
pub const LC_MAIN = (0x28|LC_REQ_DYLD); /// replacement for LC_UNIXTHREAD
pub const LC_DATA_IN_CODE = 0x29; /// table of non-instructions in __text
pub const LC_SOURCE_VERSION = 0x2A; /// source version used to build binary
pub const LC_DYLIB_CODE_SIGN_DRS = 0x2B; /// Code signing DRs copied from linked dylibs
pub const LC_ENCRYPTION_INFO_64 = 0x2C; /// 64-bit encrypted segment information
pub const LC_LINKER_OPTION = 0x2D; /// linker options in MH_OBJECT files
pub const LC_LINKER_OPTIMIZATION_HINT = 0x2E; /// optimization hints in MH_OBJECT files
pub const LC_VERSION_MIN_TVOS = 0x2F; /// build for AppleTV min OS version
pub const LC_VERSION_MIN_WATCHOS = 0x30; /// build for Watch min OS version
pub const LC_NOTE = 0x31; /// arbitrary data included within a Mach-O file
pub const LC_BUILD_VERSION = 0x32; /// build for platform min OS version
self.allocator.free(self.strings); pub const MH_MAGIC = 0xfeedface; /// the mach magic number
self.strings = []const u8{}; pub const MH_CIGAM = 0xcefaedfe; /// NXSwapInt(MH_MAGIC)
}
pub fn search(self: *const SymbolTable, address: usize) ?*const Symbol { pub const MH_MAGIC_64 = 0xfeedfacf; /// the 64-bit mach magic number
var min: usize = 0; pub const MH_CIGAM_64 = 0xcffaedfe; /// NXSwapInt(MH_MAGIC_64)
var max: usize = self.symbols.len - 1; // Exclude sentinel.
while (min < max) {
const mid = min + (max - min) / 2;
const curr = &self.symbols[mid];
const next = &self.symbols[mid + 1];
if (address >= next.address) {
min = mid + 1;
} else if (address < curr.address) {
max = mid;
} else {
return curr;
}
}
return null;
}
};
pub fn loadSymbols(allocator: *mem.Allocator, in: *io.FileInStream) !SymbolTable { pub const MH_OBJECT = 0x1; /// relocatable object file
var file = in.file; pub const MH_EXECUTE = 0x2; /// demand paged executable file
try file.seekTo(0); pub const MH_FVMLIB = 0x3; /// fixed VM shared library file
pub const MH_CORE = 0x4; /// core file
pub const MH_PRELOAD = 0x5; /// preloaded executable file
pub const MH_DYLIB = 0x6; /// dynamically bound shared library
pub const MH_DYLINKER = 0x7; /// dynamic link editor
pub const MH_BUNDLE = 0x8; /// dynamically bound bundle file
pub const MH_DYLIB_STUB = 0x9; /// shared library stub for static linking only, no section contents
pub const MH_DSYM = 0xa; /// companion file with only debug sections
pub const MH_KEXT_BUNDLE = 0xb; /// x86_64 kexts
var hdr: MachHeader64 = undefined; // Constants for the flags field of the mach_header
try readOneNoEof(in, MachHeader64, &hdr);
if (hdr.magic != MH_MAGIC_64) return error.MissingDebugInfo;
const is_pie = MH_PIE == (hdr.flags & MH_PIE);
var pos: usize = @sizeOf(@typeOf(hdr)); pub const MH_NOUNDEFS = 0x1; /// the object file has no undefined references
var ncmd: u32 = hdr.ncmds; pub const MH_INCRLINK = 0x2; /// the object file is the output of an incremental link against a base file and can't be link edited again
while (ncmd != 0) : (ncmd -= 1) { pub const MH_DYLDLINK = 0x4; /// the object file is input for the dynamic linker and can't be staticly link edited again
try file.seekTo(pos); pub const MH_BINDATLOAD = 0x8; /// the object file's undefined references are bound by the dynamic linker when loaded.
var lc: LoadCommand = undefined; pub const MH_PREBOUND = 0x10; /// the file has its dynamic undefined references prebound.
try readOneNoEof(in, LoadCommand, &lc); pub const MH_SPLIT_SEGS = 0x20; /// the file has its read-only and read-write segments split
if (lc.cmd == LC_SYMTAB) break; pub const MH_LAZY_INIT = 0x40; /// the shared library init routine is to be run lazily via catching memory faults to its writeable segments (obsolete)
pos += lc.cmdsize; pub const MH_TWOLEVEL = 0x80; /// the image is using two-level name space bindings
} else { pub const MH_FORCE_FLAT = 0x100; /// the executable is forcing all images to use flat name space bindings
return error.MissingDebugInfo; pub const MH_NOMULTIDEFS = 0x200; /// this umbrella guarantees no multiple defintions of symbols in its sub-images so the two-level namespace hints can always be used.
} pub const MH_NOFIXPREBINDING = 0x400; /// do not have dyld notify the prebinding agent about this executable
pub const MH_PREBINDABLE = 0x800; /// the binary is not prebound but can have its prebinding redone. only used when MH_PREBOUND is not set.
pub const MH_ALLMODSBOUND = 0x1000; /// indicates that this binary binds to all two-level namespace modules of its dependent libraries. only used when MH_PREBINDABLE and MH_TWOLEVEL are both set.
pub const MH_SUBSECTIONS_VIA_SYMBOLS = 0x2000;/// safe to divide up the sections into sub-sections via symbols for dead code stripping
pub const MH_CANONICAL = 0x4000; /// the binary has been canonicalized via the unprebind operation
pub const MH_WEAK_DEFINES = 0x8000; /// the final linked image contains external weak symbols
pub const MH_BINDS_TO_WEAK = 0x10000; /// the final linked image uses weak symbols
var cmd: SymtabCommand = undefined; pub const MH_ALLOW_STACK_EXECUTION = 0x20000;/// When this bit is set, all stacks in the task will be given stack execution privilege. Only used in MH_EXECUTE filetypes.
try readOneNoEof(in, SymtabCommand, &cmd); pub const MH_ROOT_SAFE = 0x40000; /// When this bit is set, the binary declares it is safe for use in processes with uid zero
try file.seekTo(cmd.symoff); pub const MH_SETUID_SAFE = 0x80000; /// When this bit is set, the binary declares it is safe for use in processes when issetugid() is true
var syms = try allocator.alloc(Nlist64, cmd.nsyms);
defer allocator.free(syms);
try readNoEof(in, Nlist64, syms);
try file.seekTo(cmd.stroff); pub const MH_NO_REEXPORTED_DYLIBS = 0x100000; /// When this bit is set on a dylib, the static linker does not need to examine dependent dylibs to see if any are re-exported
var strings = try allocator.alloc(u8, cmd.strsize); pub const MH_PIE = 0x200000; /// When this bit is set, the OS will load the main executable at a random address. Only used in MH_EXECUTE filetypes.
errdefer allocator.free(strings); pub const MH_DEAD_STRIPPABLE_DYLIB = 0x400000; /// Only for use on dylibs. When linking against a dylib that has this bit set, the static linker will automatically not create a LC_LOAD_DYLIB load command to the dylib if no symbols are being referenced from the dylib.
try in.stream.readNoEof(strings); pub const MH_HAS_TLV_DESCRIPTORS = 0x800000; /// Contains a section of type S_THREAD_LOCAL_VARIABLES
var nsyms: usize = 0; pub const MH_NO_HEAP_EXECUTION = 0x1000000; /// When this bit is set, the OS will run the main executable with a non-executable heap even on platforms (e.g. i386) that don't require it. Only used in MH_EXECUTE filetypes.
for (syms) |sym|
if (isSymbol(sym)) nsyms += 1;
if (nsyms == 0) return error.MissingDebugInfo;
var symbols = try allocator.alloc(Symbol, nsyms + 1); // Room for sentinel. pub const MH_APP_EXTENSION_SAFE = 0x02000000; /// The code was linked for use in an application extension.
errdefer allocator.free(symbols);
var pie_slide: usize = 0; pub const MH_NLIST_OUTOFSYNC_WITH_DYLDINFO = 0x04000000; /// The external symbols listed in the nlist symbol table do not include all the symbols listed in the dyld info.
var nsym: usize = 0;
for (syms) |sym| {
if (!isSymbol(sym)) continue;
const start = sym.n_strx;
const end = mem.indexOfScalarPos(u8, strings, start, 0).?;
const name = strings[start..end];
const address = sym.n_value;
symbols[nsym] = Symbol{ .name = name, .address = address };
nsym += 1;
if (is_pie and mem.eql(u8, name, "_SymbolTable_deinit")) {
pie_slide = @ptrToInt(SymbolTable.deinit) - address;
}
}
// Effectively a no-op, lld emits symbols in ascending order.
std.sort.sort(Symbol, symbols[0..nsyms], Symbol.addressLessThan);
// Insert the sentinel. Since we don't know where the last function ends, /// The flags field of a section structure is separated into two parts a section
// we arbitrarily limit it to the start address + 4 KB. /// type and section attributes. The section types are mutually exclusive (it
const top = symbols[nsyms - 1].address + 4096; /// can only have one type) but the section attributes are not (it may have more
symbols[nsyms] = Symbol{ .name = "", .address = top }; /// than one attribute).
/// 256 section types
pub const SECTION_TYPE = 0x000000ff;
pub const SECTION_ATTRIBUTES = 0xffffff00; /// 24 section attributes
if (pie_slide != 0) { pub const S_REGULAR = 0x0; /// regular section
for (symbols) |*symbol| pub const S_ZEROFILL = 0x1; /// zero fill on demand section
symbol.address += pie_slide; pub const S_CSTRING_LITERALS = 0x2; /// section with only literal C string
} pub const S_4BYTE_LITERALS = 0x3; /// section with only 4 byte literals
pub const S_8BYTE_LITERALS = 0x4; /// section with only 8 byte literals
pub const S_LITERAL_POINTERS = 0x5; /// section with only pointers to
return SymbolTable{
.allocator = allocator,
.symbols = symbols,
.strings = strings,
};
}
fn readNoEof(in: *io.FileInStream, comptime T: type, result: []T) !void { pub const N_STAB = 0xe0; /// if any of these bits set, a symbolic debugging entry
return in.stream.readNoEof(@sliceToBytes(result)); pub const N_PEXT = 0x10; /// private external symbol bit
} pub const N_TYPE = 0x0e; /// mask for the type bits
fn readOneNoEof(in: *io.FileInStream, comptime T: type, result: *T) !void { pub const N_EXT = 0x01; /// external symbol bit, set for external symbols
return readNoEof(in, T, (*[1]T)(result)[0..]);
}
pub const N_GSYM = 0x20; /// global symbol: name,,NO_SECT,type,0
pub const N_FNAME = 0x22; /// procedure name (f77 kludge): name,,NO_SECT,0,0
pub const N_FUN = 0x24; /// procedure: name,,n_sect,linenumber,address
pub const N_STSYM = 0x26; /// static symbol: name,,n_sect,type,address
pub const N_LCSYM = 0x28; /// .lcomm symbol: name,,n_sect,type,address
pub const N_BNSYM = 0x2e; /// begin nsect sym: 0,,n_sect,0,address
pub const N_AST = 0x32; /// AST file path: name,,NO_SECT,0,0
pub const N_OPT = 0x3c; /// emitted with gcc2_compiled and in gcc source
pub const N_RSYM = 0x40; /// register sym: name,,NO_SECT,type,register
pub const N_SLINE = 0x44; /// src line: 0,,n_sect,linenumber,address
pub const N_ENSYM = 0x4e; /// end nsect sym: 0,,n_sect,0,address
pub const N_SSYM = 0x60; /// structure elt: name,,NO_SECT,type,struct_offset
pub const N_SO = 0x64; /// source file name: name,,n_sect,0,address
pub const N_OSO = 0x66; /// object file name: name,,0,0,st_mtime
pub const N_LSYM = 0x80; /// local sym: name,,NO_SECT,type,offset
pub const N_BINCL = 0x82; /// include file beginning: name,,NO_SECT,0,sum
pub const N_SOL = 0x84; /// #included file name: name,,n_sect,0,address
pub const N_PARAMS = 0x86; /// compiler parameters: name,,NO_SECT,0,0
pub const N_VERSION = 0x88; /// compiler version: name,,NO_SECT,0,0
pub const N_OLEVEL = 0x8A; /// compiler -O level: name,,NO_SECT,0,0
pub const N_PSYM = 0xa0; /// parameter: name,,NO_SECT,type,offset
pub const N_EINCL = 0xa2; /// include file end: name,,NO_SECT,0,0
pub const N_ENTRY = 0xa4; /// alternate entry: name,,n_sect,linenumber,address
pub const N_LBRAC = 0xc0; /// left bracket: 0,,NO_SECT,nesting level,address
pub const N_EXCL = 0xc2; /// deleted include file: name,,NO_SECT,0,sum
pub const N_RBRAC = 0xe0; /// right bracket: 0,,NO_SECT,nesting level,address
pub const N_BCOMM = 0xe2; /// begin common: name,,NO_SECT,0,0
pub const N_ECOMM = 0xe4; /// end common: name,,n_sect,0,0
pub const N_ECOML = 0xe8; /// end common (local name): 0,,n_sect,0,address
pub const N_LENG = 0xfe; /// second stab entry with length information
/// If a segment contains any sections marked with S_ATTR_DEBUG then all
/// sections in that segment must have this attribute. No section other than
/// a section marked with this attribute may reference the contents of this
/// section. A section with this attribute may contain no symbols and must have
/// a section type S_REGULAR. The static linker will not copy section contents
/// from sections with this attribute into its output file. These sections
/// generally contain DWARF debugging info.
pub const S_ATTR_DEBUG = 0x02000000; /// a debug section
pub const cpu_type_t = integer_t;
pub const cpu_subtype_t = integer_t;
pub const integer_t = c_int;
pub const vm_prot_t = c_int;
fn isSymbol(sym: *const Nlist64) bool {
return sym.n_value != 0 and sym.n_desc == 0;
}

View File

@ -61,10 +61,8 @@ fn ceil64(x: f64) f64 {
} }
if (u >> 63 != 0) { if (u >> 63 != 0) {
@setFloatMode(this, builtin.FloatMode.Strict);
y = x - math.f64_toint + math.f64_toint - x; y = x - math.f64_toint + math.f64_toint - x;
} else { } else {
@setFloatMode(this, builtin.FloatMode.Strict);
y = x + math.f64_toint - math.f64_toint - x; y = x + math.f64_toint - math.f64_toint - x;
} }

View File

@ -17,8 +17,6 @@ pub fn exp(z: var) @typeOf(z) {
} }
fn exp32(z: Complex(f32)) Complex(f32) { fn exp32(z: Complex(f32)) Complex(f32) {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const exp_overflow = 0x42b17218; // max_exp * ln2 ~= 88.72283955 const exp_overflow = 0x42b17218; // max_exp * ln2 ~= 88.72283955
const cexp_overflow = 0x43400074; // (max_exp - min_denom_exp) * ln2 const cexp_overflow = 0x43400074; // (max_exp - min_denom_exp) * ln2

View File

@ -37,8 +37,6 @@ const C5 = 4.16666666666665929218E-2;
// //
// This may have slight differences on some edge cases and may need to replaced if so. // This may have slight differences on some edge cases and may need to replaced if so.
fn cos32(x_: f32) f32 { fn cos32(x_: f32) f32 {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const pi4a = 7.85398125648498535156e-1; const pi4a = 7.85398125648498535156e-1;
const pi4b = 3.77489470793079817668E-8; const pi4b = 3.77489470793079817668E-8;
const pi4c = 2.69515142907905952645E-15; const pi4c = 2.69515142907905952645E-15;

View File

@ -18,8 +18,6 @@ pub fn exp(x: var) @typeOf(x) {
} }
fn exp32(x_: f32) f32 { fn exp32(x_: f32) f32 {
@setFloatMode(this, builtin.FloatMode.Strict);
const half = []f32{ 0.5, -0.5 }; const half = []f32{ 0.5, -0.5 };
const ln2hi = 6.9314575195e-1; const ln2hi = 6.9314575195e-1;
const ln2lo = 1.4286067653e-6; const ln2lo = 1.4286067653e-6;
@ -95,8 +93,6 @@ fn exp32(x_: f32) f32 {
} }
fn exp64(x_: f64) f64 { fn exp64(x_: f64) f64 {
@setFloatMode(this, builtin.FloatMode.Strict);
const half = []const f64{ 0.5, -0.5 }; const half = []const f64{ 0.5, -0.5 };
const ln2hi: f64 = 6.93147180369123816490e-01; const ln2hi: f64 = 6.93147180369123816490e-01;
const ln2lo: f64 = 1.90821492927058770002e-10; const ln2lo: f64 = 1.90821492927058770002e-10;

View File

@ -36,8 +36,6 @@ const exp2ft = []const f64{
}; };
fn exp2_32(x: f32) f32 { fn exp2_32(x: f32) f32 {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const tblsiz = @intCast(u32, exp2ft.len); const tblsiz = @intCast(u32, exp2ft.len);
const redux: f32 = 0x1.8p23 / @intToFloat(f32, tblsiz); const redux: f32 = 0x1.8p23 / @intToFloat(f32, tblsiz);
const P1: f32 = 0x1.62e430p-1; const P1: f32 = 0x1.62e430p-1;
@ -353,8 +351,6 @@ const exp2dt = []f64{
}; };
fn exp2_64(x: f64) f64 { fn exp2_64(x: f64) f64 {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const tblsiz = @intCast(u32, exp2dt.len / 2); const tblsiz = @intCast(u32, exp2dt.len / 2);
const redux: f64 = 0x1.8p52 / @intToFloat(f64, tblsiz); const redux: f64 = 0x1.8p52 / @intToFloat(f64, tblsiz);
const P1: f64 = 0x1.62e42fefa39efp-1; const P1: f64 = 0x1.62e42fefa39efp-1;

View File

@ -19,8 +19,6 @@ pub fn expm1(x: var) @typeOf(x) {
} }
fn expm1_32(x_: f32) f32 { fn expm1_32(x_: f32) f32 {
@setFloatMode(this, builtin.FloatMode.Strict);
if (math.isNan(x_)) if (math.isNan(x_))
return math.nan(f32); return math.nan(f32);
@ -149,8 +147,6 @@ fn expm1_32(x_: f32) f32 {
} }
fn expm1_64(x_: f64) f64 { fn expm1_64(x_: f64) f64 {
@setFloatMode(this, builtin.FloatMode.Strict);
if (math.isNan(x_)) if (math.isNan(x_))
return math.nan(f64); return math.nan(f64);

View File

@ -97,10 +97,8 @@ fn floor64(x: f64) f64 {
} }
if (u >> 63 != 0) { if (u >> 63 != 0) {
@setFloatMode(this, builtin.FloatMode.Strict);
y = x - math.f64_toint + math.f64_toint - x; y = x - math.f64_toint + math.f64_toint - x;
} else { } else {
@setFloatMode(this, builtin.FloatMode.Strict);
y = x + math.f64_toint - math.f64_toint - x; y = x + math.f64_toint - math.f64_toint - x;
} }

View File

@ -35,8 +35,6 @@ pub fn ln(x: var) @typeOf(x) {
} }
pub fn ln_32(x_: f32) f32 { pub fn ln_32(x_: f32) f32 {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const ln2_hi: f32 = 6.9313812256e-01; const ln2_hi: f32 = 6.9313812256e-01;
const ln2_lo: f32 = 9.0580006145e-06; const ln2_lo: f32 = 9.0580006145e-06;
const Lg1: f32 = 0xaaaaaa.0p-24; const Lg1: f32 = 0xaaaaaa.0p-24;
@ -89,8 +87,6 @@ pub fn ln_32(x_: f32) f32 {
} }
pub fn ln_64(x_: f64) f64 { pub fn ln_64(x_: f64) f64 {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const ln2_hi: f64 = 6.93147180369123816490e-01; const ln2_hi: f64 = 6.93147180369123816490e-01;
const ln2_lo: f64 = 1.90821492927058770002e-10; const ln2_lo: f64 = 1.90821492927058770002e-10;
const Lg1: f64 = 6.666666666666735130e-01; const Lg1: f64 = 6.666666666666735130e-01;

View File

@ -28,8 +28,6 @@ const assert = std.debug.assert;
// This implementation is taken from the go stlib, musl is a bit more complex. // This implementation is taken from the go stlib, musl is a bit more complex.
pub fn pow(comptime T: type, x: T, y: T) T { pub fn pow(comptime T: type, x: T, y: T) T {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
if (T != f32 and T != f64) { if (T != f32 and T != f64) {
@compileError("pow not implemented for " ++ @typeName(T)); @compileError("pow not implemented for " ++ @typeName(T));
} }

View File

@ -35,11 +35,7 @@ fn round32(x_: f32) f32 {
return 0 * @bitCast(f32, u); return 0 * @bitCast(f32, u);
} }
{
@setFloatMode(this, builtin.FloatMode.Strict);
y = x + math.f32_toint - math.f32_toint - x; y = x + math.f32_toint - math.f32_toint - x;
}
if (y > 0.5) { if (y > 0.5) {
y = y + x - 1; y = y + x - 1;
} else if (y <= -0.5) { } else if (y <= -0.5) {
@ -72,11 +68,7 @@ fn round64(x_: f64) f64 {
return 0 * @bitCast(f64, u); return 0 * @bitCast(f64, u);
} }
{
@setFloatMode(this, builtin.FloatMode.Strict);
y = x + math.f64_toint - math.f64_toint - x; y = x + math.f64_toint - math.f64_toint - x;
}
if (y > 0.5) { if (y > 0.5) {
y = y + x - 1; y = y + x - 1;
} else if (y <= -0.5) { } else if (y <= -0.5) {

View File

@ -38,8 +38,6 @@ const C5 = 4.16666666666665929218E-2;
// //
// This may have slight differences on some edge cases and may need to replaced if so. // This may have slight differences on some edge cases and may need to replaced if so.
fn sin32(x_: f32) f32 { fn sin32(x_: f32) f32 {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const pi4a = 7.85398125648498535156e-1; const pi4a = 7.85398125648498535156e-1;
const pi4b = 3.77489470793079817668E-8; const pi4b = 3.77489470793079817668E-8;
const pi4c = 2.69515142907905952645E-15; const pi4c = 2.69515142907905952645E-15;

View File

@ -54,8 +54,6 @@ fn sinh32(x: f32) f32 {
} }
fn sinh64(x: f64) f64 { fn sinh64(x: f64) f64 {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const u = @bitCast(u64, x); const u = @bitCast(u64, x);
const w = @intCast(u32, u >> 32); const w = @intCast(u32, u >> 32);
const ax = @bitCast(f64, u & (@maxValue(u64) >> 1)); const ax = @bitCast(f64, u & (@maxValue(u64) >> 1));

View File

@ -31,8 +31,6 @@ const Tq4 = -5.38695755929454629881E7;
// //
// This may have slight differences on some edge cases and may need to replaced if so. // This may have slight differences on some edge cases and may need to replaced if so.
fn tan32(x_: f32) f32 { fn tan32(x_: f32) f32 {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const pi4a = 7.85398125648498535156e-1; const pi4a = 7.85398125648498535156e-1;
const pi4b = 3.77489470793079817668E-8; const pi4b = 3.77489470793079817668E-8;
const pi4c = 2.69515142907905952645E-15; const pi4c = 2.69515142907905952645E-15;

View File

@ -135,6 +135,12 @@ pub const Allocator = struct {
} }
}; };
pub const Compare = enum {
LessThan,
Equal,
GreaterThan,
};
/// Copy all of source into dest at position 0. /// Copy all of source into dest at position 0.
/// dest.len must be >= source.len. /// dest.len must be >= source.len.
/// dest.ptr must be <= src.ptr. /// dest.ptr must be <= src.ptr.
@ -169,16 +175,64 @@ pub fn set(comptime T: type, dest: []T, value: T) void {
d.* = value; d.* = value;
} }
/// Returns true if lhs < rhs, false otherwise pub fn secureZero(comptime T: type, s: []T) void {
pub fn lessThan(comptime T: type, lhs: []const T, rhs: []const T) bool { // NOTE: We do not use a volatile slice cast here since LLVM cannot
// see that it can be replaced by a memset.
const ptr = @ptrCast([*]volatile u8, s.ptr);
const length = s.len * @sizeOf(T);
@memset(ptr, 0, length);
}
test "mem.secureZero" {
var a = []u8{0xfe} ** 8;
var b = []u8{0xfe} ** 8;
set(u8, a[0..], 0);
secureZero(u8, b[0..]);
assert(eql(u8, a[0..], b[0..]));
}
pub fn compare(comptime T: type, lhs: []const T, rhs: []const T) Compare {
const n = math.min(lhs.len, rhs.len); const n = math.min(lhs.len, rhs.len);
var i: usize = 0; var i: usize = 0;
while (i < n) : (i += 1) { while (i < n) : (i += 1) {
if (lhs[i] == rhs[i]) continue; if (lhs[i] == rhs[i]) {
return lhs[i] < rhs[i]; continue;
} else if (lhs[i] < rhs[i]) {
return Compare.LessThan;
} else if (lhs[i] > rhs[i]) {
return Compare.GreaterThan;
} else {
unreachable;
}
} }
return lhs.len < rhs.len; if (lhs.len == rhs.len) {
return Compare.Equal;
} else if (lhs.len < rhs.len) {
return Compare.LessThan;
} else if (lhs.len > rhs.len) {
return Compare.GreaterThan;
}
unreachable;
}
test "mem.compare" {
assert(compare(u8, "abcd", "bee") == Compare.LessThan);
assert(compare(u8, "abc", "abc") == Compare.Equal);
assert(compare(u8, "abc", "abc0") == Compare.LessThan);
assert(compare(u8, "", "") == Compare.Equal);
assert(compare(u8, "", "a") == Compare.LessThan);
}
/// Returns true if lhs < rhs, false otherwise
pub fn lessThan(comptime T: type, lhs: []const T, rhs: []const T) bool {
var result = compare(T, lhs, rhs);
if (result == Compare.LessThan) {
return true;
} else
return false;
} }
test "mem.lessThan" { test "mem.lessThan" {
@ -198,6 +252,20 @@ pub fn eql(comptime T: type, a: []const T, b: []const T) bool {
return true; return true;
} }
pub fn len(comptime T: type, ptr: [*]const T) usize {
var count: usize = 0;
while (ptr[count] != 0) : (count += 1) {}
return count;
}
pub fn toSliceConst(comptime T: type, ptr: [*]const T) []const T {
return ptr[0..len(T, ptr)];
}
pub fn toSlice(comptime T: type, ptr: [*]T) []T {
return ptr[0..len(T, ptr)];
}
/// Returns true if all elements in a slice are equal to the scalar value provided /// Returns true if all elements in a slice are equal to the scalar value provided
pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool { pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool {
for (slice) |item| { for (slice) |item| {
@ -541,7 +609,7 @@ pub fn join(allocator: *Allocator, sep: u8, strings: ...) ![]u8 {
} }
} }
return buf[0..buf_index]; return allocator.shrink(u8, buf, buf_index);
} }
test "mem.join" { test "mem.join" {
@ -611,10 +679,38 @@ test "testWriteInt" {
comptime testWriteIntImpl(); comptime testWriteIntImpl();
} }
fn testWriteIntImpl() void { fn testWriteIntImpl() void {
var bytes: [4]u8 = undefined; var bytes: [8]u8 = undefined;
writeInt(bytes[0..], u64(0x12345678CAFEBABE), builtin.Endian.Big);
assert(eql(u8, bytes, []u8{
0x12,
0x34,
0x56,
0x78,
0xCA,
0xFE,
0xBA,
0xBE,
}));
writeInt(bytes[0..], u64(0xBEBAFECA78563412), builtin.Endian.Little);
assert(eql(u8, bytes, []u8{
0x12,
0x34,
0x56,
0x78,
0xCA,
0xFE,
0xBA,
0xBE,
}));
writeInt(bytes[0..], u32(0x12345678), builtin.Endian.Big); writeInt(bytes[0..], u32(0x12345678), builtin.Endian.Big);
assert(eql(u8, bytes, []u8{ assert(eql(u8, bytes, []u8{
0x00,
0x00,
0x00,
0x00,
0x12, 0x12,
0x34, 0x34,
0x56, 0x56,
@ -627,10 +723,18 @@ fn testWriteIntImpl() void {
0x34, 0x34,
0x56, 0x56,
0x78, 0x78,
0x00,
0x00,
0x00,
0x00,
})); }));
writeInt(bytes[0..], u16(0x1234), builtin.Endian.Big); writeInt(bytes[0..], u16(0x1234), builtin.Endian.Big);
assert(eql(u8, bytes, []u8{ assert(eql(u8, bytes, []u8{
0x00,
0x00,
0x00,
0x00,
0x00, 0x00,
0x00, 0x00,
0x12, 0x12,
@ -643,6 +747,10 @@ fn testWriteIntImpl() void {
0x12, 0x12,
0x00, 0x00,
0x00, 0x00,
0x00,
0x00,
0x00,
0x00,
})); }));
} }
@ -755,3 +863,4 @@ pub fn endianSwap(comptime T: type, x: T) T {
test "std.mem.endianSwap" { test "std.mem.endianSwap" {
assert(endianSwap(u32, 0xDEADBEEF) == 0xEFBEADDE); assert(endianSwap(u32, 0xDEADBEEF) == 0xEFBEADDE);
} }

27
std/mutex.zig Normal file
View File

@ -0,0 +1,27 @@
const std = @import("index.zig");
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
const assert = std.debug.assert;
/// TODO use syscalls instead of a spinlock
pub const Mutex = struct {
lock: u8, // TODO use a bool
pub const Held = struct {
mutex: *Mutex,
pub fn release(self: Held) void {
assert(@atomicRmw(u8, &self.mutex.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
}
};
pub fn init() Mutex {
return Mutex{ .lock = 0 };
}
pub fn acquire(self: *Mutex) Held {
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
return Held{ .mutex = self };
}
};

View File

@ -349,14 +349,7 @@ pub const ChildProcess = struct {
}; };
const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore); const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
const dev_null_fd = if (any_ignore) blk: { const dev_null_fd = if (any_ignore) try os.posixOpenC(c"/dev/null", posix.O_RDWR, 0) else undefined;
const dev_null_path = "/dev/null";
var fixed_buffer_mem: [dev_null_path.len + 1]u8 = undefined;
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
break :blk try os.posixOpen(&fixed_allocator.allocator, "/dev/null", posix.O_RDWR, 0);
} else blk: {
break :blk undefined;
};
defer { defer {
if (any_ignore) os.close(dev_null_fd); if (any_ignore) os.close(dev_null_fd);
} }
@ -453,10 +446,7 @@ pub const ChildProcess = struct {
const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore); const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
const nul_handle = if (any_ignore) blk: { const nul_handle = if (any_ignore) blk: {
const nul_file_path = "NUL"; break :blk try os.windowsOpen("NUL", windows.GENERIC_READ, windows.FILE_SHARE_READ, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL);
var fixed_buffer_mem: [nul_file_path.len + 1]u8 = undefined;
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
break :blk try os.windowsOpen(&fixed_allocator.allocator, "NUL", windows.GENERIC_READ, windows.FILE_SHARE_READ, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL);
} else blk: { } else blk: {
break :blk undefined; break :blk undefined;
}; };

View File

@ -482,91 +482,98 @@ pub const NOTE_MACH_CONTINUOUS_TIME = 0x00000080;
/// data is mach absolute time units /// data is mach absolute time units
pub const NOTE_MACHTIME = 0x00000100; pub const NOTE_MACHTIME = 0x00000100;
pub const AF_UNSPEC: c_int = 0; pub const AF_UNSPEC = 0;
pub const AF_LOCAL: c_int = 1; pub const AF_LOCAL = 1;
pub const AF_UNIX: c_int = AF_LOCAL; pub const AF_UNIX = AF_LOCAL;
pub const AF_INET: c_int = 2; pub const AF_INET = 2;
pub const AF_SYS_CONTROL: c_int = 2; pub const AF_SYS_CONTROL = 2;
pub const AF_IMPLINK: c_int = 3; pub const AF_IMPLINK = 3;
pub const AF_PUP: c_int = 4; pub const AF_PUP = 4;
pub const AF_CHAOS: c_int = 5; pub const AF_CHAOS = 5;
pub const AF_NS: c_int = 6; pub const AF_NS = 6;
pub const AF_ISO: c_int = 7; pub const AF_ISO = 7;
pub const AF_OSI: c_int = AF_ISO; pub const AF_OSI = AF_ISO;
pub const AF_ECMA: c_int = 8; pub const AF_ECMA = 8;
pub const AF_DATAKIT: c_int = 9; pub const AF_DATAKIT = 9;
pub const AF_CCITT: c_int = 10; pub const AF_CCITT = 10;
pub const AF_SNA: c_int = 11; pub const AF_SNA = 11;
pub const AF_DECnet: c_int = 12; pub const AF_DECnet = 12;
pub const AF_DLI: c_int = 13; pub const AF_DLI = 13;
pub const AF_LAT: c_int = 14; pub const AF_LAT = 14;
pub const AF_HYLINK: c_int = 15; pub const AF_HYLINK = 15;
pub const AF_APPLETALK: c_int = 16; pub const AF_APPLETALK = 16;
pub const AF_ROUTE: c_int = 17; pub const AF_ROUTE = 17;
pub const AF_LINK: c_int = 18; pub const AF_LINK = 18;
pub const AF_XTP: c_int = 19; pub const AF_XTP = 19;
pub const AF_COIP: c_int = 20; pub const AF_COIP = 20;
pub const AF_CNT: c_int = 21; pub const AF_CNT = 21;
pub const AF_RTIP: c_int = 22; pub const AF_RTIP = 22;
pub const AF_IPX: c_int = 23; pub const AF_IPX = 23;
pub const AF_SIP: c_int = 24; pub const AF_SIP = 24;
pub const AF_PIP: c_int = 25; pub const AF_PIP = 25;
pub const AF_ISDN: c_int = 28; pub const AF_ISDN = 28;
pub const AF_E164: c_int = AF_ISDN; pub const AF_E164 = AF_ISDN;
pub const AF_KEY: c_int = 29; pub const AF_KEY = 29;
pub const AF_INET6: c_int = 30; pub const AF_INET6 = 30;
pub const AF_NATM: c_int = 31; pub const AF_NATM = 31;
pub const AF_SYSTEM: c_int = 32; pub const AF_SYSTEM = 32;
pub const AF_NETBIOS: c_int = 33; pub const AF_NETBIOS = 33;
pub const AF_PPP: c_int = 34; pub const AF_PPP = 34;
pub const AF_MAX: c_int = 40; pub const AF_MAX = 40;
pub const PF_UNSPEC: c_int = AF_UNSPEC; pub const PF_UNSPEC = AF_UNSPEC;
pub const PF_LOCAL: c_int = AF_LOCAL; pub const PF_LOCAL = AF_LOCAL;
pub const PF_UNIX: c_int = PF_LOCAL; pub const PF_UNIX = PF_LOCAL;
pub const PF_INET: c_int = AF_INET; pub const PF_INET = AF_INET;
pub const PF_IMPLINK: c_int = AF_IMPLINK; pub const PF_IMPLINK = AF_IMPLINK;
pub const PF_PUP: c_int = AF_PUP; pub const PF_PUP = AF_PUP;
pub const PF_CHAOS: c_int = AF_CHAOS; pub const PF_CHAOS = AF_CHAOS;
pub const PF_NS: c_int = AF_NS; pub const PF_NS = AF_NS;
pub const PF_ISO: c_int = AF_ISO; pub const PF_ISO = AF_ISO;
pub const PF_OSI: c_int = AF_ISO; pub const PF_OSI = AF_ISO;
pub const PF_ECMA: c_int = AF_ECMA; pub const PF_ECMA = AF_ECMA;
pub const PF_DATAKIT: c_int = AF_DATAKIT; pub const PF_DATAKIT = AF_DATAKIT;
pub const PF_CCITT: c_int = AF_CCITT; pub const PF_CCITT = AF_CCITT;
pub const PF_SNA: c_int = AF_SNA; pub const PF_SNA = AF_SNA;
pub const PF_DECnet: c_int = AF_DECnet; pub const PF_DECnet = AF_DECnet;
pub const PF_DLI: c_int = AF_DLI; pub const PF_DLI = AF_DLI;
pub const PF_LAT: c_int = AF_LAT; pub const PF_LAT = AF_LAT;
pub const PF_HYLINK: c_int = AF_HYLINK; pub const PF_HYLINK = AF_HYLINK;
pub const PF_APPLETALK: c_int = AF_APPLETALK; pub const PF_APPLETALK = AF_APPLETALK;
pub const PF_ROUTE: c_int = AF_ROUTE; pub const PF_ROUTE = AF_ROUTE;
pub const PF_LINK: c_int = AF_LINK; pub const PF_LINK = AF_LINK;
pub const PF_XTP: c_int = AF_XTP; pub const PF_XTP = AF_XTP;
pub const PF_COIP: c_int = AF_COIP; pub const PF_COIP = AF_COIP;
pub const PF_CNT: c_int = AF_CNT; pub const PF_CNT = AF_CNT;
pub const PF_SIP: c_int = AF_SIP; pub const PF_SIP = AF_SIP;
pub const PF_IPX: c_int = AF_IPX; pub const PF_IPX = AF_IPX;
pub const PF_RTIP: c_int = AF_RTIP; pub const PF_RTIP = AF_RTIP;
pub const PF_PIP: c_int = AF_PIP; pub const PF_PIP = AF_PIP;
pub const PF_ISDN: c_int = AF_ISDN; pub const PF_ISDN = AF_ISDN;
pub const PF_KEY: c_int = AF_KEY; pub const PF_KEY = AF_KEY;
pub const PF_INET6: c_int = AF_INET6; pub const PF_INET6 = AF_INET6;
pub const PF_NATM: c_int = AF_NATM; pub const PF_NATM = AF_NATM;
pub const PF_SYSTEM: c_int = AF_SYSTEM; pub const PF_SYSTEM = AF_SYSTEM;
pub const PF_NETBIOS: c_int = AF_NETBIOS; pub const PF_NETBIOS = AF_NETBIOS;
pub const PF_PPP: c_int = AF_PPP; pub const PF_PPP = AF_PPP;
pub const PF_MAX: c_int = AF_MAX; pub const PF_MAX = AF_MAX;
pub const SYSPROTO_EVENT: c_int = 1; pub const SYSPROTO_EVENT = 1;
pub const SYSPROTO_CONTROL: c_int = 2; pub const SYSPROTO_CONTROL = 2;
pub const SOCK_STREAM: c_int = 1; pub const SOCK_STREAM = 1;
pub const SOCK_DGRAM: c_int = 2; pub const SOCK_DGRAM = 2;
pub const SOCK_RAW: c_int = 3; pub const SOCK_RAW = 3;
pub const SOCK_RDM: c_int = 4; pub const SOCK_RDM = 4;
pub const SOCK_SEQPACKET: c_int = 5; pub const SOCK_SEQPACKET = 5;
pub const SOCK_MAXADDRLEN: c_int = 255; pub const SOCK_MAXADDRLEN = 255;
pub const IPPROTO_ICMP = 1;
pub const IPPROTO_ICMPV6 = 58;
pub const IPPROTO_TCP = 6;
pub const IPPROTO_UDP = 17;
pub const IPPROTO_IP = 0;
pub const IPPROTO_IPV6 = 41;
fn wstatus(x: i32) i32 { fn wstatus(x: i32) i32 {
return x & 0o177; return x & 0o177;
@ -605,6 +612,11 @@ pub fn abort() noreturn {
c.abort(); c.abort();
} }
// bind(int socket, const struct sockaddr *address, socklen_t address_len)
pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
return errnoWrap(c.bind(@bitCast(c_int, fd), addr, len));
}
pub fn exit(code: i32) noreturn { pub fn exit(code: i32) noreturn {
c.exit(code); c.exit(code);
} }
@ -634,6 +646,10 @@ pub fn read(fd: i32, buf: [*]u8, nbyte: usize) usize {
return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte)); return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte));
} }
pub fn pread(fd: i32, buf: [*]u8, nbyte: usize, offset: u64) usize {
return errnoWrap(c.pread(fd, @ptrCast(*c_void, buf), nbyte, offset));
}
pub fn stat(noalias path: [*]const u8, noalias buf: *stat) usize { pub fn stat(noalias path: [*]const u8, noalias buf: *stat) usize {
return errnoWrap(c.stat(path, buf)); return errnoWrap(c.stat(path, buf));
} }
@ -642,6 +658,10 @@ pub fn write(fd: i32, buf: [*]const u8, nbyte: usize) usize {
return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte)); return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte));
} }
pub fn pwrite(fd: i32, buf: [*]const u8, nbyte: usize, offset: u64) usize {
return errnoWrap(c.pwrite(fd, @ptrCast(*const c_void, buf), nbyte, offset));
}
pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize { pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
const ptr_result = c.mmap( const ptr_result = c.mmap(
@ptrCast(*c_void, address), @ptrCast(*c_void, address),
@ -805,6 +825,20 @@ pub fn sigaction(sig: u5, noalias act: *const Sigaction, noalias oact: ?*Sigacti
return result; return result;
} }
pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
return errnoWrap(c.socket(@bitCast(c_int, domain), @bitCast(c_int, socket_type), @bitCast(c_int, protocol)));
}
pub const iovec = extern struct {
iov_base: [*]u8,
iov_len: usize,
};
pub const iovec_const = extern struct {
iov_base: [*]const u8,
iov_len: usize,
};
pub const sigset_t = c.sigset_t; pub const sigset_t = c.sigset_t;
pub const empty_sigset = sigset_t(0); pub const empty_sigset = sigset_t(0);
@ -812,8 +846,13 @@ pub const timespec = c.timespec;
pub const Stat = c.Stat; pub const Stat = c.Stat;
pub const dirent = c.dirent; pub const dirent = c.dirent;
pub const in_port_t = c.in_port_t;
pub const sa_family_t = c.sa_family_t; pub const sa_family_t = c.sa_family_t;
pub const socklen_t = c.socklen_t;
pub const sockaddr = c.sockaddr; pub const sockaddr = c.sockaddr;
pub const sockaddr_in = c.sockaddr_in;
pub const sockaddr_in6 = c.sockaddr_in6;
/// Renamed from `kevent` to `Kevent` to avoid conflict with the syscall. /// Renamed from `kevent` to `Kevent` to avoid conflict with the syscall.
pub const Kevent = c.Kevent; pub const Kevent = c.Kevent;

View File

@ -7,6 +7,7 @@ const assert = std.debug.assert;
const posix = os.posix; const posix = os.posix;
const windows = os.windows; const windows = os.windows;
const Os = builtin.Os; const Os = builtin.Os;
const windows_util = @import("windows/util.zig");
const is_posix = builtin.os != builtin.Os.windows; const is_posix = builtin.os != builtin.Os.windows;
const is_windows = builtin.os == builtin.Os.windows; const is_windows = builtin.os == builtin.Os.windows;
@ -15,18 +16,39 @@ pub const File = struct {
/// The OS-specific file descriptor or file handle. /// The OS-specific file descriptor or file handle.
handle: os.FileHandle, handle: os.FileHandle,
pub const Mode = switch (builtin.os) {
Os.windows => void,
else => u32,
};
pub const default_mode = switch (builtin.os) {
Os.windows => {},
else => 0o666,
};
pub const OpenError = os.WindowsOpenError || os.PosixOpenError; pub const OpenError = os.WindowsOpenError || os.PosixOpenError;
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator. /// `openRead` except with a null terminated path
/// Call close to clean up. pub fn openReadC(path: [*]const u8) OpenError!File {
pub fn openRead(allocator: *mem.Allocator, path: []const u8) OpenError!File {
if (is_posix) { if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_RDONLY; const flags = posix.O_LARGEFILE | posix.O_RDONLY;
const fd = try os.posixOpen(allocator, path, flags, 0); const fd = try os.posixOpenC(path, flags, 0);
return openHandle(fd); return openHandle(fd);
} else if (is_windows) { }
if (is_windows) {
return openRead(mem.toSliceConst(u8, path));
}
@compileError("Unsupported OS");
}
/// Call close to clean up.
pub fn openRead(path: []const u8) OpenError!File {
if (is_posix) {
const path_c = try os.toPosixPath(path);
return openReadC(&path_c);
}
if (is_windows) {
const handle = try os.windowsOpen( const handle = try os.windowsOpen(
allocator,
path, path,
windows.GENERIC_READ, windows.GENERIC_READ,
windows.FILE_SHARE_READ, windows.FILE_SHARE_READ,
@ -34,28 +56,25 @@ pub const File = struct {
windows.FILE_ATTRIBUTE_NORMAL, windows.FILE_ATTRIBUTE_NORMAL,
); );
return openHandle(handle); return openHandle(handle);
} else {
@compileError("TODO implement openRead for this OS");
} }
@compileError("Unsupported OS");
} }
/// Calls `openWriteMode` with os.default_file_mode for the mode. /// Calls `openWriteMode` with os.File.default_mode for the mode.
pub fn openWrite(allocator: *mem.Allocator, path: []const u8) OpenError!File { pub fn openWrite(path: []const u8) OpenError!File {
return openWriteMode(allocator, path, os.default_file_mode); return openWriteMode(path, os.File.default_mode);
} }
/// If the path does not exist it will be created. /// If the path does not exist it will be created.
/// If a file already exists in the destination it will be truncated. /// If a file already exists in the destination it will be truncated.
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up. /// Call close to clean up.
pub fn openWriteMode(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File { pub fn openWriteMode(path: []const u8, file_mode: Mode) OpenError!File {
if (is_posix) { if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_TRUNC; const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_TRUNC;
const fd = try os.posixOpen(allocator, path, flags, file_mode); const fd = try os.posixOpen(path, flags, file_mode);
return openHandle(fd); return openHandle(fd);
} else if (is_windows) { } else if (is_windows) {
const handle = try os.windowsOpen( const handle = try os.windowsOpen(
allocator,
path, path,
windows.GENERIC_WRITE, windows.GENERIC_WRITE,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE, windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
@ -70,16 +89,14 @@ pub const File = struct {
/// If the path does not exist it will be created. /// If the path does not exist it will be created.
/// If a file already exists in the destination this returns OpenError.PathAlreadyExists /// If a file already exists in the destination this returns OpenError.PathAlreadyExists
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up. /// Call close to clean up.
pub fn openWriteNoClobber(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File { pub fn openWriteNoClobber(path: []const u8, file_mode: Mode) OpenError!File {
if (is_posix) { if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_EXCL; const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_EXCL;
const fd = try os.posixOpen(allocator, path, flags, file_mode); const fd = try os.posixOpen(path, flags, file_mode);
return openHandle(fd); return openHandle(fd);
} else if (is_windows) { } else if (is_windows) {
const handle = try os.windowsOpen( const handle = try os.windowsOpen(
allocator,
path, path,
windows.GENERIC_WRITE, windows.GENERIC_WRITE,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE, windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
@ -98,23 +115,43 @@ pub const File = struct {
pub const AccessError = error{ pub const AccessError = error{
PermissionDenied, PermissionDenied,
NotFound, FileNotFound,
NameTooLong, NameTooLong,
BadMode, InputOutput,
BadPathName,
Io,
SystemResources, SystemResources,
OutOfMemory, BadPathName,
/// On Windows, file paths must be valid Unicode.
InvalidUtf8,
Unexpected, Unexpected,
}; };
pub fn access(allocator: *mem.Allocator, path: []const u8) AccessError!void { /// Call from Windows-specific code if you already have a UTF-16LE encoded, null terminated string.
const path_with_null = try std.cstr.addNullByte(allocator, path); /// Otherwise use `access` or `accessC`.
defer allocator.free(path_with_null); pub fn accessW(path: [*]const u16) AccessError!void {
if (os.windows.GetFileAttributesW(path) != os.windows.INVALID_FILE_ATTRIBUTES) {
return;
}
const err = windows.GetLastError();
switch (err) {
windows.ERROR.FILE_NOT_FOUND => return error.FileNotFound,
windows.ERROR.PATH_NOT_FOUND => return error.FileNotFound,
windows.ERROR.ACCESS_DENIED => return error.PermissionDenied,
else => return os.unexpectedErrorWindows(err),
}
}
/// Call if you have a UTF-8 encoded, null-terminated string.
/// Otherwise use `access` or `accessW`.
pub fn accessC(path: [*]const u8) AccessError!void {
if (is_windows) {
const path_w = try windows_util.cStrToPrefixedFileW(path);
return accessW(&path_w);
}
if (is_posix) { if (is_posix) {
const result = posix.access(path_with_null.ptr, posix.F_OK); const result = posix.access(path, posix.F_OK);
const err = posix.getErrno(result); const err = posix.getErrno(result);
switch (err) { switch (err) {
0 => return, 0 => return,
@ -122,32 +159,33 @@ pub const File = struct {
posix.EROFS => return error.PermissionDenied, posix.EROFS => return error.PermissionDenied,
posix.ELOOP => return error.PermissionDenied, posix.ELOOP => return error.PermissionDenied,
posix.ETXTBSY => return error.PermissionDenied, posix.ETXTBSY => return error.PermissionDenied,
posix.ENOTDIR => return error.NotFound, posix.ENOTDIR => return error.FileNotFound,
posix.ENOENT => return error.NotFound, posix.ENOENT => return error.FileNotFound,
posix.ENAMETOOLONG => return error.NameTooLong, posix.ENAMETOOLONG => return error.NameTooLong,
posix.EINVAL => unreachable, posix.EINVAL => unreachable,
posix.EFAULT => return error.BadPathName, posix.EFAULT => unreachable,
posix.EIO => return error.Io, posix.EIO => return error.InputOutput,
posix.ENOMEM => return error.SystemResources, posix.ENOMEM => return error.SystemResources,
else => return os.unexpectedErrorPosix(err), else => return os.unexpectedErrorPosix(err),
} }
} else if (is_windows) { }
if (os.windows.GetFileAttributesA(path_with_null.ptr) != os.windows.INVALID_FILE_ATTRIBUTES) { @compileError("Unsupported OS");
return;
} }
const err = windows.GetLastError(); pub fn access(path: []const u8) AccessError!void {
switch (err) { if (is_windows) {
windows.ERROR.FILE_NOT_FOUND, const path_w = try windows_util.sliceToPrefixedFileW(path);
windows.ERROR.PATH_NOT_FOUND, return accessW(&path_w);
=> return error.NotFound,
windows.ERROR.ACCESS_DENIED => return error.PermissionDenied,
else => return os.unexpectedErrorWindows(err),
} }
} else { if (is_posix) {
@compileError("TODO implement access for this OS"); var path_with_null: [posix.PATH_MAX]u8 = undefined;
if (path.len >= posix.PATH_MAX) return error.NameTooLong;
mem.copy(u8, path_with_null[0..], path);
path_with_null[path.len] = 0;
return accessC(&path_with_null);
} }
@compileError("Unsupported OS");
} }
/// Upon success, the stream is in an uninitialized state. To continue using it, /// Upon success, the stream is in an uninitialized state. To continue using it,
@ -169,7 +207,9 @@ pub const File = struct {
const err = posix.getErrno(result); const err = posix.getErrno(result);
if (err > 0) { if (err > 0) {
return switch (err) { return switch (err) {
posix.EBADF => error.BadFd, // We do not make this an error code because if you get EBADF it's always a bug,
// since the fd could have been reused.
posix.EBADF => unreachable,
posix.EINVAL => error.Unseekable, posix.EINVAL => error.Unseekable,
posix.EOVERFLOW => error.Unseekable, posix.EOVERFLOW => error.Unseekable,
posix.ESPIPE => error.Unseekable, posix.ESPIPE => error.Unseekable,
@ -182,7 +222,7 @@ pub const File = struct {
if (windows.SetFilePointerEx(self.handle, amount, null, windows.FILE_CURRENT) == 0) { if (windows.SetFilePointerEx(self.handle, amount, null, windows.FILE_CURRENT) == 0) {
const err = windows.GetLastError(); const err = windows.GetLastError();
return switch (err) { return switch (err) {
windows.ERROR.INVALID_PARAMETER => error.BadFd, windows.ERROR.INVALID_PARAMETER => unreachable,
else => os.unexpectedErrorWindows(err), else => os.unexpectedErrorWindows(err),
}; };
} }
@ -199,7 +239,9 @@ pub const File = struct {
const err = posix.getErrno(result); const err = posix.getErrno(result);
if (err > 0) { if (err > 0) {
return switch (err) { return switch (err) {
posix.EBADF => error.BadFd, // We do not make this an error code because if you get EBADF it's always a bug,
// since the fd could have been reused.
posix.EBADF => unreachable,
posix.EINVAL => error.Unseekable, posix.EINVAL => error.Unseekable,
posix.EOVERFLOW => error.Unseekable, posix.EOVERFLOW => error.Unseekable,
posix.ESPIPE => error.Unseekable, posix.ESPIPE => error.Unseekable,
@ -213,7 +255,7 @@ pub const File = struct {
if (windows.SetFilePointerEx(self.handle, ipos, null, windows.FILE_BEGIN) == 0) { if (windows.SetFilePointerEx(self.handle, ipos, null, windows.FILE_BEGIN) == 0) {
const err = windows.GetLastError(); const err = windows.GetLastError();
return switch (err) { return switch (err) {
windows.ERROR.INVALID_PARAMETER => error.BadFd, windows.ERROR.INVALID_PARAMETER => unreachable,
else => os.unexpectedErrorWindows(err), else => os.unexpectedErrorWindows(err),
}; };
} }
@ -229,7 +271,9 @@ pub const File = struct {
const err = posix.getErrno(result); const err = posix.getErrno(result);
if (err > 0) { if (err > 0) {
return switch (err) { return switch (err) {
posix.EBADF => error.BadFd, // We do not make this an error code because if you get EBADF it's always a bug,
// since the fd could have been reused.
posix.EBADF => unreachable,
posix.EINVAL => error.Unseekable, posix.EINVAL => error.Unseekable,
posix.EOVERFLOW => error.Unseekable, posix.EOVERFLOW => error.Unseekable,
posix.ESPIPE => error.Unseekable, posix.ESPIPE => error.Unseekable,
@ -244,7 +288,7 @@ pub const File = struct {
if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) { if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) {
const err = windows.GetLastError(); const err = windows.GetLastError();
return switch (err) { return switch (err) {
windows.ERROR.INVALID_PARAMETER => error.BadFd, windows.ERROR.INVALID_PARAMETER => unreachable,
else => os.unexpectedErrorWindows(err), else => os.unexpectedErrorWindows(err),
}; };
} }
@ -277,18 +321,19 @@ pub const File = struct {
} }
pub const ModeError = error{ pub const ModeError = error{
BadFd,
SystemResources, SystemResources,
Unexpected, Unexpected,
}; };
pub fn mode(self: *File) ModeError!os.FileMode { pub fn mode(self: *File) ModeError!Mode {
if (is_posix) { if (is_posix) {
var stat: posix.Stat = undefined; var stat: posix.Stat = undefined;
const err = posix.getErrno(posix.fstat(self.handle, &stat)); const err = posix.getErrno(posix.fstat(self.handle, &stat));
if (err > 0) { if (err > 0) {
return switch (err) { return switch (err) {
posix.EBADF => error.BadFd, // We do not make this an error code because if you get EBADF it's always a bug,
// since the fd could have been reused.
posix.EBADF => unreachable,
posix.ENOMEM => error.SystemResources, posix.ENOMEM => error.SystemResources,
else => os.unexpectedErrorPosix(err), else => os.unexpectedErrorPosix(err),
}; };
@ -296,7 +341,7 @@ pub const File = struct {
// TODO: we should be able to cast u16 to ModeError!u32, making this // TODO: we should be able to cast u16 to ModeError!u32, making this
// explicit cast not necessary // explicit cast not necessary
return os.FileMode(stat.mode); return Mode(stat.mode);
} else if (is_windows) { } else if (is_windows) {
return {}; return {};
} else { } else {
@ -305,9 +350,11 @@ pub const File = struct {
} }
pub const ReadError = error{ pub const ReadError = error{
BadFd, FileClosed,
Io, InputOutput,
IsDir, IsDir,
WouldBlock,
SystemResources,
Unexpected, Unexpected,
}; };
@ -323,9 +370,12 @@ pub const File = struct {
posix.EINTR => continue, posix.EINTR => continue,
posix.EINVAL => unreachable, posix.EINVAL => unreachable,
posix.EFAULT => unreachable, posix.EFAULT => unreachable,
posix.EBADF => return error.BadFd, posix.EAGAIN => return error.WouldBlock,
posix.EIO => return error.Io, posix.EBADF => return error.FileClosed,
posix.EIO => return error.InputOutput,
posix.EISDIR => return error.IsDir, posix.EISDIR => return error.IsDir,
posix.ENOBUFS => return error.SystemResources,
posix.ENOMEM => return error.SystemResources,
else => return os.unexpectedErrorPosix(read_err), else => return os.unexpectedErrorPosix(read_err),
} }
} }
@ -338,7 +388,7 @@ pub const File = struct {
while (index < buffer.len) { while (index < buffer.len) {
const want_read_count = @intCast(windows.DWORD, math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index)); const want_read_count = @intCast(windows.DWORD, math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index));
var amt_read: windows.DWORD = undefined; var amt_read: windows.DWORD = undefined;
if (windows.ReadFile(self.handle, @ptrCast(*c_void, buffer.ptr + index), want_read_count, &amt_read, null) == 0) { if (windows.ReadFile(self.handle, buffer.ptr + index, want_read_count, &amt_read, null) == 0) {
const err = windows.GetLastError(); const err = windows.GetLastError();
return switch (err) { return switch (err) {
windows.ERROR.OPERATION_ABORTED => continue, windows.ERROR.OPERATION_ABORTED => continue,

View File

@ -10,6 +10,7 @@ pub const GetAppDataDirError = error{
}; };
/// Caller owns returned memory. /// Caller owns returned memory.
/// TODO determine if we can remove the allocator requirement
pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 { pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
switch (builtin.os) { switch (builtin.os) {
builtin.Os.windows => { builtin.Os.windows => {
@ -22,7 +23,7 @@ pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataD
)) { )) {
os.windows.S_OK => { os.windows.S_OK => {
defer os.windows.CoTaskMemFree(@ptrCast(*c_void, dir_path_ptr)); defer os.windows.CoTaskMemFree(@ptrCast(*c_void, dir_path_ptr));
const global_dir = unicode.utf16leToUtf8(allocator, utf16lePtrSlice(dir_path_ptr)) catch |err| switch (err) { const global_dir = unicode.utf16leToUtf8Alloc(allocator, utf16lePtrSlice(dir_path_ptr)) catch |err| switch (err) {
error.UnexpectedSecondSurrogateHalf => return error.AppDataDirUnavailable, error.UnexpectedSecondSurrogateHalf => return error.AppDataDirUnavailable,
error.ExpectedSecondSurrogateHalf => return error.AppDataDirUnavailable, error.ExpectedSecondSurrogateHalf => return error.AppDataDirUnavailable,
error.DanglingSurrogateHalf => return error.AppDataDirUnavailable, error.DanglingSurrogateHalf => return error.AppDataDirUnavailable,

File diff suppressed because it is too large Load Diff

View File

@ -567,6 +567,37 @@ pub const MNT_DETACH = 2;
pub const MNT_EXPIRE = 4; pub const MNT_EXPIRE = 4;
pub const UMOUNT_NOFOLLOW = 8; pub const UMOUNT_NOFOLLOW = 8;
pub const IN_CLOEXEC = O_CLOEXEC;
pub const IN_NONBLOCK = O_NONBLOCK;
pub const IN_ACCESS = 0x00000001;
pub const IN_MODIFY = 0x00000002;
pub const IN_ATTRIB = 0x00000004;
pub const IN_CLOSE_WRITE = 0x00000008;
pub const IN_CLOSE_NOWRITE = 0x00000010;
pub const IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE;
pub const IN_OPEN = 0x00000020;
pub const IN_MOVED_FROM = 0x00000040;
pub const IN_MOVED_TO = 0x00000080;
pub const IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO;
pub const IN_CREATE = 0x00000100;
pub const IN_DELETE = 0x00000200;
pub const IN_DELETE_SELF = 0x00000400;
pub const IN_MOVE_SELF = 0x00000800;
pub const IN_ALL_EVENTS = 0x00000fff;
pub const IN_UNMOUNT = 0x00002000;
pub const IN_Q_OVERFLOW = 0x00004000;
pub const IN_IGNORED = 0x00008000;
pub const IN_ONLYDIR = 0x01000000;
pub const IN_DONT_FOLLOW = 0x02000000;
pub const IN_EXCL_UNLINK = 0x04000000;
pub const IN_MASK_ADD = 0x20000000;
pub const IN_ISDIR = 0x40000000;
pub const IN_ONESHOT = 0x80000000;
pub const S_IFMT = 0o170000; pub const S_IFMT = 0o170000;
pub const S_IFDIR = 0o040000; pub const S_IFDIR = 0o040000;
@ -692,6 +723,10 @@ pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?*timespec) us
return syscall4(SYS_futex, uaddr, futex_op, @bitCast(u32, val), @ptrToInt(timeout)); return syscall4(SYS_futex, uaddr, futex_op, @bitCast(u32, val), @ptrToInt(timeout));
} }
pub fn futex_wake(uaddr: usize, futex_op: u32, val: i32) usize {
return syscall3(SYS_futex, uaddr, futex_op, @bitCast(u32, val));
}
pub fn getcwd(buf: [*]u8, size: usize) usize { pub fn getcwd(buf: [*]u8, size: usize) usize {
return syscall2(SYS_getcwd, @ptrToInt(buf), size); return syscall2(SYS_getcwd, @ptrToInt(buf), size);
} }
@ -700,6 +735,18 @@ pub fn getdents(fd: i32, dirp: [*]u8, count: usize) usize {
return syscall3(SYS_getdents, @intCast(usize, fd), @ptrToInt(dirp), count); return syscall3(SYS_getdents, @intCast(usize, fd), @ptrToInt(dirp), count);
} }
pub fn inotify_init1(flags: u32) usize {
return syscall1(SYS_inotify_init1, flags);
}
pub fn inotify_add_watch(fd: i32, pathname: [*]const u8, mask: u32) usize {
return syscall3(SYS_inotify_add_watch, @intCast(usize, fd), @ptrToInt(pathname), mask);
}
pub fn inotify_rm_watch(fd: i32, wd: i32) usize {
return syscall2(SYS_inotify_rm_watch, @intCast(usize, fd), @intCast(usize, wd));
}
pub fn isatty(fd: i32) bool { pub fn isatty(fd: i32) bool {
var wsz: winsize = undefined; var wsz: winsize = undefined;
return syscall3(SYS_ioctl, @intCast(usize, fd), TIOCGWINSZ, @ptrToInt(&wsz)) == 0; return syscall3(SYS_ioctl, @intCast(usize, fd), TIOCGWINSZ, @ptrToInt(&wsz)) == 0;
@ -742,6 +789,14 @@ pub fn read(fd: i32, buf: [*]u8, count: usize) usize {
return syscall3(SYS_read, @intCast(usize, fd), @ptrToInt(buf), count); return syscall3(SYS_read, @intCast(usize, fd), @ptrToInt(buf), count);
} }
pub fn preadv(fd: i32, iov: [*]const iovec, count: usize, offset: u64) usize {
return syscall4(SYS_preadv, @intCast(usize, fd), @ptrToInt(iov), count, offset);
}
pub fn pwritev(fd: i32, iov: [*]const iovec_const, count: usize, offset: u64) usize {
return syscall4(SYS_pwritev, @intCast(usize, fd), @ptrToInt(iov), count, offset);
}
// TODO https://github.com/ziglang/zig/issues/265 // TODO https://github.com/ziglang/zig/issues/265
pub fn rmdir(path: [*]const u8) usize { pub fn rmdir(path: [*]const u8) usize {
return syscall1(SYS_rmdir, @ptrToInt(path)); return syscall1(SYS_rmdir, @ptrToInt(path));
@ -947,6 +1002,10 @@ pub fn getpid() i32 {
return @bitCast(i32, @truncate(u32, syscall0(SYS_getpid))); return @bitCast(i32, @truncate(u32, syscall0(SYS_getpid)));
} }
pub fn gettid() i32 {
return @bitCast(i32, @truncate(u32, syscall0(SYS_gettid)));
}
pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize { pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize {
return syscall4(SYS_rt_sigprocmask, flags, @ptrToInt(set), @ptrToInt(oldset), NSIG / 8); return syscall4(SYS_rt_sigprocmask, flags, @ptrToInt(set), @ptrToInt(oldset), NSIG / 8);
} }
@ -1060,6 +1119,11 @@ pub const iovec = extern struct {
iov_len: usize, iov_len: usize,
}; };
pub const iovec_const = extern struct {
iov_base: [*]const u8,
iov_len: usize,
};
pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize { pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return syscall3(SYS_getsockname, @intCast(usize, fd), @ptrToInt(addr), @ptrToInt(len)); return syscall3(SYS_getsockname, @intCast(usize, fd), @ptrToInt(addr), @ptrToInt(len));
} }
@ -1368,6 +1432,14 @@ pub fn capset(hdrp: *cap_user_header_t, datap: *const cap_user_data_t) usize {
return syscall2(SYS_capset, @ptrToInt(hdrp), @ptrToInt(datap)); return syscall2(SYS_capset, @ptrToInt(hdrp), @ptrToInt(datap));
} }
pub const inotify_event = extern struct {
wd: i32,
mask: u32,
cookie: u32,
len: u32,
//name: [?]u8,
};
test "import" { test "import" {
if (builtin.os == builtin.Os.linux) { if (builtin.os == builtin.Os.linux) {
_ = @import("test.zig"); _ = @import("test.zig");

View File

@ -11,11 +11,14 @@ const math = std.math;
const posix = os.posix; const posix = os.posix;
const windows = os.windows; const windows = os.windows;
const cstr = std.cstr; const cstr = std.cstr;
const windows_util = @import("windows/util.zig");
pub const sep_windows = '\\'; pub const sep_windows = '\\';
pub const sep_posix = '/'; pub const sep_posix = '/';
pub const sep = if (is_windows) sep_windows else sep_posix; pub const sep = if (is_windows) sep_windows else sep_posix;
pub const sep_str = [1]u8{sep};
pub const delimiter_windows = ';'; pub const delimiter_windows = ';';
pub const delimiter_posix = ':'; pub const delimiter_posix = ':';
pub const delimiter = if (is_windows) delimiter_windows else delimiter_posix; pub const delimiter = if (is_windows) delimiter_windows else delimiter_posix;
@ -337,7 +340,7 @@ pub fn resolveSlice(allocator: *Allocator, paths: []const []const u8) ![]u8 {
pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 { pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) { if (paths.len == 0) {
assert(is_windows); // resolveWindows called on non windows can't use getCwd assert(is_windows); // resolveWindows called on non windows can't use getCwd
return os.getCwd(allocator); return os.getCwdAlloc(allocator);
} }
// determine which disk designator we will result with, if any // determine which disk designator we will result with, if any
@ -432,7 +435,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
}, },
WindowsPath.Kind.None => { WindowsPath.Kind.None => {
assert(is_windows); // resolveWindows called on non windows can't use getCwd assert(is_windows); // resolveWindows called on non windows can't use getCwd
const cwd = try os.getCwd(allocator); const cwd = try os.getCwdAlloc(allocator);
defer allocator.free(cwd); defer allocator.free(cwd);
const parsed_cwd = windowsParsePath(cwd); const parsed_cwd = windowsParsePath(cwd);
result = try allocator.alloc(u8, max_size + parsed_cwd.disk_designator.len + 1); result = try allocator.alloc(u8, max_size + parsed_cwd.disk_designator.len + 1);
@ -448,7 +451,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
} else { } else {
assert(is_windows); // resolveWindows called on non windows can't use getCwd assert(is_windows); // resolveWindows called on non windows can't use getCwd
// TODO call get cwd for the result_disk_designator instead of the global one // TODO call get cwd for the result_disk_designator instead of the global one
const cwd = try os.getCwd(allocator); const cwd = try os.getCwdAlloc(allocator);
defer allocator.free(cwd); defer allocator.free(cwd);
result = try allocator.alloc(u8, max_size + cwd.len + 1); result = try allocator.alloc(u8, max_size + cwd.len + 1);
@ -506,7 +509,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
result_index += 1; result_index += 1;
} }
return result[0..result_index]; return allocator.shrink(u8, result, result_index);
} }
/// This function is like a series of `cd` statements executed one after another. /// This function is like a series of `cd` statements executed one after another.
@ -516,7 +519,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 { pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) { if (paths.len == 0) {
assert(!is_windows); // resolvePosix called on windows can't use getCwd assert(!is_windows); // resolvePosix called on windows can't use getCwd
return os.getCwd(allocator); return os.getCwdAlloc(allocator);
} }
var first_index: usize = 0; var first_index: usize = 0;
@ -538,7 +541,7 @@ pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
result = try allocator.alloc(u8, max_size); result = try allocator.alloc(u8, max_size);
} else { } else {
assert(!is_windows); // resolvePosix called on windows can't use getCwd assert(!is_windows); // resolvePosix called on windows can't use getCwd
const cwd = try os.getCwd(allocator); const cwd = try os.getCwdAlloc(allocator);
defer allocator.free(cwd); defer allocator.free(cwd);
result = try allocator.alloc(u8, max_size + cwd.len + 1); result = try allocator.alloc(u8, max_size + cwd.len + 1);
mem.copy(u8, result, cwd); mem.copy(u8, result, cwd);
@ -573,11 +576,11 @@ pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
result_index += 1; result_index += 1;
} }
return result[0..result_index]; return allocator.shrink(u8, result, result_index);
} }
test "os.path.resolve" { test "os.path.resolve" {
const cwd = try os.getCwd(debug.global_allocator); const cwd = try os.getCwdAlloc(debug.global_allocator);
if (is_windows) { if (is_windows) {
if (windowsParsePath(cwd).kind == WindowsPath.Kind.Drive) { if (windowsParsePath(cwd).kind == WindowsPath.Kind.Drive) {
cwd[0] = asciiUpper(cwd[0]); cwd[0] = asciiUpper(cwd[0]);
@ -591,7 +594,7 @@ test "os.path.resolve" {
test "os.path.resolveWindows" { test "os.path.resolveWindows" {
if (is_windows) { if (is_windows) {
const cwd = try os.getCwd(debug.global_allocator); const cwd = try os.getCwdAlloc(debug.global_allocator);
const parsed_cwd = windowsParsePath(cwd); const parsed_cwd = windowsParsePath(cwd);
{ {
const result = testResolveWindows([][]const u8{ "/usr/local", "lib\\zig\\std\\array_list.zig" }); const result = testResolveWindows([][]const u8{ "/usr/local", "lib\\zig\\std\\array_list.zig" });
@ -1073,112 +1076,148 @@ fn testRelativeWindows(from: []const u8, to: []const u8, expected_output: []cons
assert(mem.eql(u8, result, expected_output)); assert(mem.eql(u8, result, expected_output));
} }
/// Return the canonicalized absolute pathname. pub const RealError = error{
/// Expands all symbolic links and resolves references to `.`, `..`, and FileNotFound,
/// extra `/` characters in ::pathname. AccessDenied,
/// Caller must deallocate result. NameTooLong,
pub fn real(allocator: *Allocator, pathname: []const u8) ![]u8 { NotSupported,
switch (builtin.os) { NotDir,
Os.windows => { SymLinkLoop,
const pathname_buf = try allocator.alloc(u8, pathname.len + 1); InputOutput,
defer allocator.free(pathname_buf); FileTooBig,
IsDir,
ProcessFdQuotaExceeded,
SystemFdQuotaExceeded,
NoDevice,
SystemResources,
NoSpaceLeft,
FileSystem,
BadPathName,
mem.copy(u8, pathname_buf, pathname); /// On Windows, file paths must be valid Unicode.
pathname_buf[pathname.len] = 0; InvalidUtf8,
const h_file = windows.CreateFileA(pathname_buf.ptr, windows.GENERIC_READ, windows.FILE_SHARE_READ, null, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, null); /// TODO remove this possibility
PathAlreadyExists,
/// TODO remove this possibility
Unexpected,
};
/// Call from Windows-specific code if you already have a UTF-16LE encoded, null terminated string.
/// Otherwise use `real` or `realC`.
pub fn realW(out_buffer: *[os.MAX_PATH_BYTES]u8, pathname: [*]const u16) RealError![]u8 {
const h_file = windows.CreateFileW(
pathname,
windows.GENERIC_READ,
windows.FILE_SHARE_READ,
null,
windows.OPEN_EXISTING,
windows.FILE_ATTRIBUTE_NORMAL,
null,
);
if (h_file == windows.INVALID_HANDLE_VALUE) { if (h_file == windows.INVALID_HANDLE_VALUE) {
const err = windows.GetLastError(); const err = windows.GetLastError();
return switch (err) { switch (err) {
windows.ERROR.FILE_NOT_FOUND => error.FileNotFound, windows.ERROR.FILE_NOT_FOUND => return error.FileNotFound,
windows.ERROR.ACCESS_DENIED => error.AccessDenied, windows.ERROR.ACCESS_DENIED => return error.AccessDenied,
windows.ERROR.FILENAME_EXCED_RANGE => error.NameTooLong, windows.ERROR.FILENAME_EXCED_RANGE => return error.NameTooLong,
else => os.unexpectedErrorWindows(err), else => return os.unexpectedErrorWindows(err),
}; }
} }
defer os.close(h_file); defer os.close(h_file);
var buf = try allocator.alloc(u8, 256); var utf16le_buf: [windows_util.PATH_MAX_WIDE]u16 = undefined;
errdefer allocator.free(buf); const casted_len = @intCast(windows.DWORD, utf16le_buf.len); // TODO shouldn't need this cast
while (true) { const result = windows.GetFinalPathNameByHandleW(h_file, &utf16le_buf, casted_len, windows.VOLUME_NAME_DOS);
const buf_len = math.cast(windows.DWORD, buf.len) catch return error.NameTooLong; assert(result <= utf16le_buf.len);
const result = windows.GetFinalPathNameByHandleA(h_file, buf.ptr, buf_len, windows.VOLUME_NAME_DOS);
if (result == 0) { if (result == 0) {
const err = windows.GetLastError(); const err = windows.GetLastError();
return switch (err) { switch (err) {
windows.ERROR.PATH_NOT_FOUND => error.FileNotFound, windows.ERROR.FILE_NOT_FOUND => return error.FileNotFound,
windows.ERROR.NOT_ENOUGH_MEMORY => error.OutOfMemory, windows.ERROR.PATH_NOT_FOUND => return error.FileNotFound,
windows.ERROR.NOT_ENOUGH_MEMORY => return error.SystemResources,
windows.ERROR.FILENAME_EXCED_RANGE => return error.NameTooLong,
windows.ERROR.INVALID_PARAMETER => unreachable, windows.ERROR.INVALID_PARAMETER => unreachable,
else => os.unexpectedErrorWindows(err), else => return os.unexpectedErrorWindows(err),
};
} }
if (result > buf.len) {
buf = try allocator.realloc(u8, buf, result);
continue;
} }
const utf16le_slice = utf16le_buf[0..result];
// windows returns \\?\ prepended to the path // windows returns \\?\ prepended to the path
// we strip it because nobody wants \\?\ prepended to their path // we strip it because nobody wants \\?\ prepended to their path
const final_len = x: { const prefix = []u16{ '\\', '\\', '?', '\\' };
if (result > 4 and mem.startsWith(u8, buf, "\\\\?\\")) { const start_index = if (mem.startsWith(u16, utf16le_slice, prefix)) prefix.len else 0;
var i: usize = 4;
while (i < result) : (i += 1) {
buf[i - 4] = buf[i];
}
break :x result - 4;
} else {
break :x result;
}
};
return allocator.shrink(u8, buf, final_len); // Trust that Windows gives us valid UTF-16LE.
const end_index = std.unicode.utf16leToUtf8(out_buffer, utf16le_slice[start_index..]) catch unreachable;
return out_buffer[0..end_index];
} }
/// See `real`
/// Use this when you have a null terminated pointer path.
pub fn realC(out_buffer: *[os.MAX_PATH_BYTES]u8, pathname: [*]const u8) RealError![]u8 {
switch (builtin.os) {
Os.windows => {
const pathname_w = try windows_util.cStrToPrefixedFileW(pathname);
return realW(out_buffer, pathname_w);
}, },
Os.macosx, Os.ios => { Os.macosx, Os.ios => {
// TODO instead of calling the libc function here, port the implementation // TODO instead of calling the libc function here, port the implementation to Zig
// to Zig, and then remove the NameTooLong error possibility. const err = posix.getErrno(posix.realpath(pathname, out_buffer));
const pathname_buf = try allocator.alloc(u8, pathname.len + 1); switch (err) {
defer allocator.free(pathname_buf); 0 => return mem.toSlice(u8, out_buffer),
const result_buf = try allocator.alloc(u8, posix.PATH_MAX);
errdefer allocator.free(result_buf);
mem.copy(u8, pathname_buf, pathname);
pathname_buf[pathname.len] = 0;
const err = posix.getErrno(posix.realpath(pathname_buf.ptr, result_buf.ptr));
if (err > 0) {
return switch (err) {
posix.EINVAL => unreachable, posix.EINVAL => unreachable,
posix.EBADF => unreachable, posix.EBADF => unreachable,
posix.EFAULT => unreachable, posix.EFAULT => unreachable,
posix.EACCES => error.AccessDenied, posix.EACCES => return error.AccessDenied,
posix.ENOENT => error.FileNotFound, posix.ENOENT => return error.FileNotFound,
posix.ENOTSUP => error.NotSupported, posix.ENOTSUP => return error.NotSupported,
posix.ENOTDIR => error.NotDir, posix.ENOTDIR => return error.NotDir,
posix.ENAMETOOLONG => error.NameTooLong, posix.ENAMETOOLONG => return error.NameTooLong,
posix.ELOOP => error.SymLinkLoop, posix.ELOOP => return error.SymLinkLoop,
posix.EIO => error.InputOutput, posix.EIO => return error.InputOutput,
else => os.unexpectedErrorPosix(err), else => return os.unexpectedErrorPosix(err),
};
} }
return allocator.shrink(u8, result_buf, cstr.len(result_buf.ptr));
}, },
Os.linux => { Os.linux => {
const fd = try os.posixOpen(allocator, pathname, posix.O_PATH | posix.O_NONBLOCK | posix.O_CLOEXEC, 0); const fd = try os.posixOpenC(pathname, posix.O_PATH | posix.O_NONBLOCK | posix.O_CLOEXEC, 0);
defer os.close(fd); defer os.close(fd);
var buf: ["/proc/self/fd/-2147483648".len]u8 = undefined; var buf: ["/proc/self/fd/-2147483648".len]u8 = undefined;
const proc_path = fmt.bufPrint(buf[0..], "/proc/self/fd/{}", fd) catch unreachable; const proc_path = fmt.bufPrint(buf[0..], "/proc/self/fd/{}\x00", fd) catch unreachable;
return os.readLink(allocator, proc_path); return os.readLinkC(out_buffer, proc_path.ptr);
}, },
else => @compileError("TODO implement os.path.real for " ++ @tagName(builtin.os)), else => @compileError("TODO implement os.path.real for " ++ @tagName(builtin.os)),
} }
} }
/// Return the canonicalized absolute pathname.
/// Expands all symbolic links and resolves references to `.`, `..`, and
/// extra `/` characters in ::pathname.
/// The return value is a slice of out_buffer, and not necessarily from the beginning.
pub fn real(out_buffer: *[os.MAX_PATH_BYTES]u8, pathname: []const u8) RealError![]u8 {
switch (builtin.os) {
Os.windows => {
const pathname_w = try windows_util.sliceToPrefixedFileW(pathname);
return realW(out_buffer, &pathname_w);
},
Os.macosx, Os.ios, Os.linux => {
const pathname_c = try os.toPosixPath(pathname);
return realC(out_buffer, &pathname_c);
},
else => @compileError("Unsupported OS"),
}
}
/// `real`, except caller must free the returned memory.
pub fn realAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 {
var buf: [os.MAX_PATH_BYTES]u8 = undefined;
return mem.dupe(allocator, u8, try real(&buf, pathname));
}
test "os.path.real" { test "os.path.real" {
// at least call it so it gets compiled // at least call it so it gets compiled
_ = real(debug.global_allocator, "some_path"); var buf: [os.MAX_PATH_BYTES]u8 = undefined;
std.debug.assertError(real(&buf, "definitely_bogus_does_not_exist1234"), error.FileNotFound);
} }

View File

@ -10,30 +10,47 @@ const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder; const AtomicOrder = builtin.AtomicOrder;
test "makePath, put some files in it, deleteTree" { test "makePath, put some files in it, deleteTree" {
try os.makePath(a, "os_test_tmp/b/c"); try os.makePath(a, "os_test_tmp" ++ os.path.sep_str ++ "b" ++ os.path.sep_str ++ "c");
try io.writeFile(a, "os_test_tmp/b/c/file.txt", "nonsense"); try io.writeFile("os_test_tmp" ++ os.path.sep_str ++ "b" ++ os.path.sep_str ++ "c" ++ os.path.sep_str ++ "file.txt", "nonsense");
try io.writeFile(a, "os_test_tmp/b/file2.txt", "blah"); try io.writeFile("os_test_tmp" ++ os.path.sep_str ++ "b" ++ os.path.sep_str ++ "file2.txt", "blah");
try os.deleteTree(a, "os_test_tmp"); try os.deleteTree(a, "os_test_tmp");
if (os.Dir.open(a, "os_test_tmp")) |dir| { if (os.Dir.open(a, "os_test_tmp")) |dir| {
@panic("expected error"); @panic("expected error");
} else |err| { } else |err| {
assert(err == error.PathNotFound); assert(err == error.FileNotFound);
} }
} }
test "access file" { test "access file" {
try os.makePath(a, "os_test_tmp"); try os.makePath(a, "os_test_tmp");
if (os.File.access(a, "os_test_tmp/file.txt")) |ok| { if (os.File.access("os_test_tmp" ++ os.path.sep_str ++ "file.txt")) |ok| {
@panic("expected error"); @panic("expected error");
} else |err| { } else |err| {
assert(err == error.NotFound); assert(err == error.FileNotFound);
} }
try io.writeFile(a, "os_test_tmp/file.txt", ""); try io.writeFile("os_test_tmp" ++ os.path.sep_str ++ "file.txt", "");
try os.File.access(a, "os_test_tmp/file.txt"); try os.File.access("os_test_tmp" ++ os.path.sep_str ++ "file.txt");
try os.deleteTree(a, "os_test_tmp"); try os.deleteTree(a, "os_test_tmp");
} }
fn testThreadIdFn(thread_id: *os.Thread.Id) void {
thread_id.* = os.Thread.getCurrentId();
}
test "std.os.Thread.getCurrentId" {
var thread_current_id: os.Thread.Id = undefined;
const thread = try os.spawnThread(&thread_current_id, testThreadIdFn);
const thread_id = thread.handle();
thread.wait();
switch (builtin.os) {
builtin.Os.windows => assert(os.Thread.getCurrentId() != thread_current_id),
else => {
assert(thread_current_id == thread_id);
},
}
}
test "spawn threads" { test "spawn threads" {
var shared_ctx: i32 = 1; var shared_ctx: i32 = 1;

View File

@ -67,8 +67,9 @@ pub const INVALID_FILE_ATTRIBUTES = DWORD(@maxValue(DWORD));
pub const OVERLAPPED = extern struct { pub const OVERLAPPED = extern struct {
Internal: ULONG_PTR, Internal: ULONG_PTR,
InternalHigh: ULONG_PTR, InternalHigh: ULONG_PTR,
Pointer: PVOID, Offset: DWORD,
hEvent: HANDLE, OffsetHigh: DWORD,
hEvent: ?HANDLE,
}; };
pub const LPOVERLAPPED = *OVERLAPPED; pub const LPOVERLAPPED = *OVERLAPPED;
@ -350,3 +351,15 @@ pub const E_ACCESSDENIED = @bitCast(c_long, c_ulong(0x80070005));
pub const E_HANDLE = @bitCast(c_long, c_ulong(0x80070006)); pub const E_HANDLE = @bitCast(c_long, c_ulong(0x80070006));
pub const E_OUTOFMEMORY = @bitCast(c_long, c_ulong(0x8007000E)); pub const E_OUTOFMEMORY = @bitCast(c_long, c_ulong(0x8007000E));
pub const E_INVALIDARG = @bitCast(c_long, c_ulong(0x80070057)); pub const E_INVALIDARG = @bitCast(c_long, c_ulong(0x80070057));
pub const FILE_FLAG_BACKUP_SEMANTICS = 0x02000000;
pub const FILE_FLAG_DELETE_ON_CLOSE = 0x04000000;
pub const FILE_FLAG_NO_BUFFERING = 0x20000000;
pub const FILE_FLAG_OPEN_NO_RECALL = 0x00100000;
pub const FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000;
pub const FILE_FLAG_OVERLAPPED = 0x40000000;
pub const FILE_FLAG_POSIX_SEMANTICS = 0x0100000;
pub const FILE_FLAG_RANDOM_ACCESS = 0x10000000;
pub const FILE_FLAG_SESSION_AWARE = 0x00800000;
pub const FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000;
pub const FILE_FLAG_WRITE_THROUGH = 0x80000000;

View File

@ -1,14 +1,24 @@
use @import("index.zig"); use @import("index.zig");
pub extern "kernel32" stdcallcc fn CancelIoEx(hFile: HANDLE, lpOverlapped: LPOVERLAPPED) BOOL;
pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL; pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL;
pub extern "kernel32" stdcallcc fn CreateDirectoryA( pub extern "kernel32" stdcallcc fn CreateDirectoryA(lpPathName: [*]const u8, lpSecurityAttributes: ?*SECURITY_ATTRIBUTES) BOOL;
lpPathName: LPCSTR, pub extern "kernel32" stdcallcc fn CreateDirectoryW(lpPathName: [*]const u16, lpSecurityAttributes: ?*SECURITY_ATTRIBUTES) BOOL;
lpSecurityAttributes: ?*SECURITY_ATTRIBUTES,
) BOOL;
pub extern "kernel32" stdcallcc fn CreateFileA( pub extern "kernel32" stdcallcc fn CreateFileA(
lpFileName: LPCSTR, lpFileName: [*]const u8, // TODO null terminated pointer type
dwDesiredAccess: DWORD,
dwShareMode: DWORD,
lpSecurityAttributes: ?LPSECURITY_ATTRIBUTES,
dwCreationDisposition: DWORD,
dwFlagsAndAttributes: DWORD,
hTemplateFile: ?HANDLE,
) HANDLE;
pub extern "kernel32" stdcallcc fn CreateFileW(
lpFileName: [*]const u16, // TODO null terminated pointer type
dwDesiredAccess: DWORD, dwDesiredAccess: DWORD,
dwShareMode: DWORD, dwShareMode: DWORD,
lpSecurityAttributes: ?LPSECURITY_ATTRIBUTES, lpSecurityAttributes: ?LPSECURITY_ATTRIBUTES,
@ -47,7 +57,8 @@ pub extern "kernel32" stdcallcc fn CreateIoCompletionPort(FileHandle: HANDLE, Ex
pub extern "kernel32" stdcallcc fn CreateThread(lpThreadAttributes: ?LPSECURITY_ATTRIBUTES, dwStackSize: SIZE_T, lpStartAddress: LPTHREAD_START_ROUTINE, lpParameter: ?LPVOID, dwCreationFlags: DWORD, lpThreadId: ?LPDWORD) ?HANDLE; pub extern "kernel32" stdcallcc fn CreateThread(lpThreadAttributes: ?LPSECURITY_ATTRIBUTES, dwStackSize: SIZE_T, lpStartAddress: LPTHREAD_START_ROUTINE, lpParameter: ?LPVOID, dwCreationFlags: DWORD, lpThreadId: ?LPDWORD) ?HANDLE;
pub extern "kernel32" stdcallcc fn DeleteFileA(lpFileName: LPCSTR) BOOL; pub extern "kernel32" stdcallcc fn DeleteFileA(lpFileName: [*]const u8) BOOL;
pub extern "kernel32" stdcallcc fn DeleteFileW(lpFileName: [*]const u16) BOOL;
pub extern "kernel32" stdcallcc fn ExitProcess(exit_code: UINT) noreturn; pub extern "kernel32" stdcallcc fn ExitProcess(exit_code: UINT) noreturn;
@ -61,7 +72,11 @@ pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR;
pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: *DWORD) BOOL; pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: *DWORD) BOOL;
pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: WORD, lpBuffer: ?LPSTR) DWORD; pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: DWORD, lpBuffer: ?[*]CHAR) DWORD;
pub extern "kernel32" stdcallcc fn GetCurrentDirectoryW(nBufferLength: DWORD, lpBuffer: ?[*]WCHAR) DWORD;
pub extern "kernel32" stdcallcc fn GetCurrentThread() HANDLE;
pub extern "kernel32" stdcallcc fn GetCurrentThreadId() DWORD;
pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?[*]u8; pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?[*]u8;
@ -71,9 +86,11 @@ pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCo
pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) BOOL; pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn GetFileAttributesA(lpFileName: LPCSTR) DWORD; pub extern "kernel32" stdcallcc fn GetFileAttributesA(lpFileName: [*]const CHAR) DWORD;
pub extern "kernel32" stdcallcc fn GetFileAttributesW(lpFileName: [*]const WCHAR) DWORD;
pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) DWORD; pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: [*]u8, nSize: DWORD) DWORD;
pub extern "kernel32" stdcallcc fn GetModuleFileNameW(hModule: ?HMODULE, lpFilename: [*]u16, nSize: DWORD) DWORD;
pub extern "kernel32" stdcallcc fn GetLastError() DWORD; pub extern "kernel32" stdcallcc fn GetLastError() DWORD;
@ -91,6 +108,15 @@ pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA(
dwFlags: DWORD, dwFlags: DWORD,
) DWORD; ) DWORD;
pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleW(
hFile: HANDLE,
lpszFilePath: [*]u16,
cchFilePath: DWORD,
dwFlags: DWORD,
) DWORD;
pub extern "kernel32" stdcallcc fn GetOverlappedResult(hFile: HANDLE, lpOverlapped: *OVERLAPPED, lpNumberOfBytesTransferred: *DWORD, bWait: BOOL) BOOL;
pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE; pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE;
pub extern "kernel32" stdcallcc fn GetQueuedCompletionStatus(CompletionPort: HANDLE, lpNumberOfBytesTransferred: LPDWORD, lpCompletionKey: *ULONG_PTR, lpOverlapped: *?*OVERLAPPED, dwMilliseconds: DWORD) BOOL; pub extern "kernel32" stdcallcc fn GetQueuedCompletionStatus(CompletionPort: HANDLE, lpNumberOfBytesTransferred: LPDWORD, lpCompletionKey: *ULONG_PTR, lpOverlapped: *?*OVERLAPPED, dwMilliseconds: DWORD) BOOL;
@ -101,7 +127,6 @@ pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: S
pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL; pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL;
pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void; pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void;
pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T; pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T; pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL; pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL;
@ -111,9 +136,17 @@ pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBy
pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL; pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: ?*const c_void) BOOL;
pub extern "kernel32" stdcallcc fn MoveFileExA( pub extern "kernel32" stdcallcc fn MoveFileExA(
lpExistingFileName: LPCSTR, lpExistingFileName: [*]const u8,
lpNewFileName: LPCSTR, lpNewFileName: [*]const u8,
dwFlags: DWORD,
) BOOL;
pub extern "kernel32" stdcallcc fn MoveFileExW(
lpExistingFileName: [*]const u16,
lpNewFileName: [*]const u16,
dwFlags: DWORD, dwFlags: DWORD,
) BOOL; ) BOOL;
@ -123,11 +156,22 @@ pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *
pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL; pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn ReadDirectoryChangesW(
hDirectory: HANDLE,
lpBuffer: [*]align(@alignOf(FILE_NOTIFY_INFORMATION)) u8,
nBufferLength: DWORD,
bWatchSubtree: BOOL,
dwNotifyFilter: DWORD,
lpBytesReturned: ?*DWORD,
lpOverlapped: ?*OVERLAPPED,
lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE,
) BOOL;
pub extern "kernel32" stdcallcc fn ReadFile( pub extern "kernel32" stdcallcc fn ReadFile(
in_hFile: HANDLE, in_hFile: HANDLE,
out_lpBuffer: *c_void, out_lpBuffer: [*]u8,
in_nNumberOfBytesToRead: DWORD, in_nNumberOfBytesToRead: DWORD,
out_lpNumberOfBytesRead: *DWORD, out_lpNumberOfBytesRead: ?*DWORD,
in_out_lpOverlapped: ?*OVERLAPPED, in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL; ) BOOL;
@ -150,13 +194,41 @@ pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMillis
pub extern "kernel32" stdcallcc fn WriteFile( pub extern "kernel32" stdcallcc fn WriteFile(
in_hFile: HANDLE, in_hFile: HANDLE,
in_lpBuffer: *const c_void, in_lpBuffer: [*]const u8,
in_nNumberOfBytesToWrite: DWORD, in_nNumberOfBytesToWrite: DWORD,
out_lpNumberOfBytesWritten: ?*DWORD, out_lpNumberOfBytesWritten: ?*DWORD,
in_out_lpOverlapped: ?*OVERLAPPED, in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL; ) BOOL;
pub extern "kernel32" stdcallcc fn WriteFileEx(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpOverlapped: LPOVERLAPPED, lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE) BOOL;
//TODO: call unicode versions instead of relying on ANSI code page //TODO: call unicode versions instead of relying on ANSI code page
pub extern "kernel32" stdcallcc fn LoadLibraryA(lpLibFileName: LPCSTR) ?HMODULE; pub extern "kernel32" stdcallcc fn LoadLibraryA(lpLibFileName: LPCSTR) ?HMODULE;
pub extern "kernel32" stdcallcc fn FreeLibrary(hModule: HMODULE) BOOL; pub extern "kernel32" stdcallcc fn FreeLibrary(hModule: HMODULE) BOOL;
pub const FILE_NOTIFY_INFORMATION = extern struct {
NextEntryOffset: DWORD,
Action: DWORD,
FileNameLength: DWORD,
FileName: [1]WCHAR,
};
pub const FILE_ACTION_ADDED = 0x00000001;
pub const FILE_ACTION_REMOVED = 0x00000002;
pub const FILE_ACTION_MODIFIED = 0x00000003;
pub const FILE_ACTION_RENAMED_OLD_NAME = 0x00000004;
pub const FILE_ACTION_RENAMED_NEW_NAME = 0x00000005;
pub const LPOVERLAPPED_COMPLETION_ROUTINE = ?extern fn (DWORD, DWORD, *OVERLAPPED) void;
pub const FILE_LIST_DIRECTORY = 1;
pub const FILE_NOTIFY_CHANGE_CREATION = 64;
pub const FILE_NOTIFY_CHANGE_SIZE = 8;
pub const FILE_NOTIFY_CHANGE_SECURITY = 256;
pub const FILE_NOTIFY_CHANGE_LAST_ACCESS = 32;
pub const FILE_NOTIFY_CHANGE_LAST_WRITE = 16;
pub const FILE_NOTIFY_CHANGE_DIR_NAME = 2;
pub const FILE_NOTIFY_CHANGE_FILE_NAME = 1;
pub const FILE_NOTIFY_CHANGE_ATTRIBUTES = 4;

View File

@ -7,9 +7,17 @@ const mem = std.mem;
const BufMap = std.BufMap; const BufMap = std.BufMap;
const cstr = std.cstr; const cstr = std.cstr;
// > The maximum path of 32,767 characters is approximate, because the "\\?\"
// > prefix may be expanded to a longer string by the system at run time, and
// > this expansion applies to the total length.
// from https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file#maximum-path-length-limitation
pub const PATH_MAX_WIDE = 32767;
pub const WaitError = error{ pub const WaitError = error{
WaitAbandoned, WaitAbandoned,
WaitTimeOut, WaitTimeOut,
/// See https://github.com/ziglang/zig/issues/1396
Unexpected, Unexpected,
}; };
@ -36,20 +44,21 @@ pub fn windowsClose(handle: windows.HANDLE) void {
pub const WriteError = error{ pub const WriteError = error{
SystemResources, SystemResources,
OperationAborted, OperationAborted,
IoPending,
BrokenPipe, BrokenPipe,
/// See https://github.com/ziglang/zig/issues/1396
Unexpected, Unexpected,
}; };
pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void { pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void {
if (windows.WriteFile(handle, @ptrCast(*const c_void, bytes.ptr), @intCast(u32, bytes.len), null, null) == 0) { if (windows.WriteFile(handle, bytes.ptr, @intCast(u32, bytes.len), null, null) == 0) {
const err = windows.GetLastError(); const err = windows.GetLastError();
return switch (err) { return switch (err) {
windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources, windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources,
windows.ERROR.NOT_ENOUGH_MEMORY => WriteError.SystemResources, windows.ERROR.NOT_ENOUGH_MEMORY => WriteError.SystemResources,
windows.ERROR.OPERATION_ABORTED => WriteError.OperationAborted, windows.ERROR.OPERATION_ABORTED => WriteError.OperationAborted,
windows.ERROR.NOT_ENOUGH_QUOTA => WriteError.SystemResources, windows.ERROR.NOT_ENOUGH_QUOTA => WriteError.SystemResources,
windows.ERROR.IO_PENDING => WriteError.IoPending, windows.ERROR.IO_PENDING => unreachable,
windows.ERROR.BROKEN_PIPE => WriteError.BrokenPipe, windows.ERROR.BROKEN_PIPE => WriteError.BrokenPipe,
else => os.unexpectedErrorWindows(err), else => os.unexpectedErrorWindows(err),
}; };
@ -87,37 +96,51 @@ pub fn windowsIsCygwinPty(handle: windows.HANDLE) bool {
pub const OpenError = error{ pub const OpenError = error{
SharingViolation, SharingViolation,
PathAlreadyExists, PathAlreadyExists,
/// When any of the path components can not be found or the file component can not
/// be found. Some operating systems distinguish between path components not found and
/// file components not found, but they are collapsed into FileNotFound to gain
/// consistency across operating systems.
FileNotFound, FileNotFound,
AccessDenied, AccessDenied,
PipeBusy, PipeBusy,
NameTooLong,
/// On Windows, file paths must be valid Unicode.
InvalidUtf8,
/// On Windows, file paths cannot contain these characters:
/// '/', '*', '?', '"', '<', '>', '|'
BadPathName,
/// See https://github.com/ziglang/zig/issues/1396
Unexpected, Unexpected,
OutOfMemory,
}; };
/// `file_path` needs to be copied in memory to add a null terminating byte, hence the allocator.
pub fn windowsOpen( pub fn windowsOpen(
allocator: *mem.Allocator,
file_path: []const u8, file_path: []const u8,
desired_access: windows.DWORD, desired_access: windows.DWORD,
share_mode: windows.DWORD, share_mode: windows.DWORD,
creation_disposition: windows.DWORD, creation_disposition: windows.DWORD,
flags_and_attrs: windows.DWORD, flags_and_attrs: windows.DWORD,
) OpenError!windows.HANDLE { ) OpenError!windows.HANDLE {
const path_with_null = try cstr.addNullByte(allocator, file_path); const file_path_w = try sliceToPrefixedFileW(file_path);
defer allocator.free(path_with_null);
const result = windows.CreateFileA(path_with_null.ptr, desired_access, share_mode, null, creation_disposition, flags_and_attrs, null); const result = windows.CreateFileW(&file_path_w, desired_access, share_mode, null, creation_disposition, flags_and_attrs, null);
if (result == windows.INVALID_HANDLE_VALUE) { if (result == windows.INVALID_HANDLE_VALUE) {
const err = windows.GetLastError(); const err = windows.GetLastError();
return switch (err) { switch (err) {
windows.ERROR.SHARING_VIOLATION => OpenError.SharingViolation, windows.ERROR.SHARING_VIOLATION => return OpenError.SharingViolation,
windows.ERROR.ALREADY_EXISTS, windows.ERROR.FILE_EXISTS => OpenError.PathAlreadyExists, windows.ERROR.ALREADY_EXISTS => return OpenError.PathAlreadyExists,
windows.ERROR.FILE_NOT_FOUND => OpenError.FileNotFound, windows.ERROR.FILE_EXISTS => return OpenError.PathAlreadyExists,
windows.ERROR.ACCESS_DENIED => OpenError.AccessDenied, windows.ERROR.FILE_NOT_FOUND => return OpenError.FileNotFound,
windows.ERROR.PIPE_BUSY => OpenError.PipeBusy, windows.ERROR.PATH_NOT_FOUND => return OpenError.FileNotFound,
else => os.unexpectedErrorWindows(err), windows.ERROR.ACCESS_DENIED => return OpenError.AccessDenied,
}; windows.ERROR.PIPE_BUSY => return OpenError.PipeBusy,
else => return os.unexpectedErrorWindows(err),
}
} }
return result; return result;
@ -193,9 +216,8 @@ pub fn windowsFindFirstFile(
if (handle == windows.INVALID_HANDLE_VALUE) { if (handle == windows.INVALID_HANDLE_VALUE) {
const err = windows.GetLastError(); const err = windows.GetLastError();
switch (err) { switch (err) {
windows.ERROR.FILE_NOT_FOUND, windows.ERROR.FILE_NOT_FOUND => return error.FileNotFound,
windows.ERROR.PATH_NOT_FOUND, windows.ERROR.PATH_NOT_FOUND => return error.FileNotFound,
=> return error.PathNotFound,
else => return os.unexpectedErrorWindows(err), else => return os.unexpectedErrorWindows(err),
} }
} }
@ -221,6 +243,7 @@ pub fn windowsCreateIoCompletionPort(file_handle: windows.HANDLE, existing_compl
const handle = windows.CreateIoCompletionPort(file_handle, existing_completion_port, completion_key, concurrent_thread_count) orelse { const handle = windows.CreateIoCompletionPort(file_handle, existing_completion_port, completion_key, concurrent_thread_count) orelse {
const err = windows.GetLastError(); const err = windows.GetLastError();
switch (err) { switch (err) {
windows.ERROR.INVALID_PARAMETER => unreachable,
else => return os.unexpectedErrorWindows(err), else => return os.unexpectedErrorWindows(err),
} }
}; };
@ -238,21 +261,55 @@ pub fn windowsPostQueuedCompletionStatus(completion_port: windows.HANDLE, bytes_
} }
} }
pub const WindowsWaitResult = error{ pub const WindowsWaitResult = enum {
Normal, Normal,
Aborted, Aborted,
Cancelled,
}; };
pub fn windowsGetQueuedCompletionStatus(completion_port: windows.HANDLE, bytes_transferred_count: *windows.DWORD, lpCompletionKey: *usize, lpOverlapped: *?*windows.OVERLAPPED, dwMilliseconds: windows.DWORD) WindowsWaitResult { pub fn windowsGetQueuedCompletionStatus(completion_port: windows.HANDLE, bytes_transferred_count: *windows.DWORD, lpCompletionKey: *usize, lpOverlapped: *?*windows.OVERLAPPED, dwMilliseconds: windows.DWORD) WindowsWaitResult {
if (windows.GetQueuedCompletionStatus(completion_port, bytes_transferred_count, lpCompletionKey, lpOverlapped, dwMilliseconds) == windows.FALSE) { if (windows.GetQueuedCompletionStatus(completion_port, bytes_transferred_count, lpCompletionKey, lpOverlapped, dwMilliseconds) == windows.FALSE) {
if (std.debug.runtime_safety) {
const err = windows.GetLastError(); const err = windows.GetLastError();
if (err != windows.ERROR.ABANDONED_WAIT_0) { switch (err) {
std.debug.warn("err: {}\n", err); windows.ERROR.ABANDONED_WAIT_0 => return WindowsWaitResult.Aborted,
windows.ERROR.OPERATION_ABORTED => return WindowsWaitResult.Cancelled,
else => {
if (std.debug.runtime_safety) {
std.debug.panic("unexpected error: {}\n", err);
} }
assert(err == windows.ERROR.ABANDONED_WAIT_0); },
} }
return WindowsWaitResult.Aborted;
} }
return WindowsWaitResult.Normal; return WindowsWaitResult.Normal;
} }
pub fn cStrToPrefixedFileW(s: [*]const u8) ![PATH_MAX_WIDE + 1]u16 {
return sliceToPrefixedFileW(mem.toSliceConst(u8, s));
}
pub fn sliceToPrefixedFileW(s: []const u8) ![PATH_MAX_WIDE + 1]u16 {
// TODO well defined copy elision
var result: [PATH_MAX_WIDE + 1]u16 = undefined;
// > File I/O functions in the Windows API convert "/" to "\" as part of
// > converting the name to an NT-style name, except when using the "\\?\"
// > prefix as detailed in the following sections.
// from https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file#maximum-path-length-limitation
// Because we want the larger maximum path length for absolute paths, we
// disallow forward slashes in zig std lib file functions on Windows.
for (s) |byte|
switch (byte) {
'/', '*', '?', '"', '<', '>', '|' => return error.BadPathName,
else => {},
};
const start_index = if (mem.startsWith(u8, s, "\\\\") or !os.path.isAbsolute(s)) 0 else blk: {
const prefix = []u16{ '\\', '\\', '?', '\\' };
mem.copy(u16, result[0..], prefix);
break :blk prefix.len;
};
const end_index = start_index + try std.unicode.utf8ToUtf16Le(result[start_index..], s);
assert(end_index <= result.len);
if (end_index == result.len) return error.NameTooLong;
result[end_index] = 0;
return result;
}

View File

@ -1,3 +1,6 @@
const std = @import("../index.zig");
const assert = std.debug.assert;
////////////////////////// //////////////////////////
//// IPC structures //// //// IPC structures ////
////////////////////////// //////////////////////////
@ -5,34 +8,48 @@
pub const Message = struct { pub const Message = struct {
sender: MailboxId, sender: MailboxId,
receiver: MailboxId, receiver: MailboxId,
type: usize, code: usize,
payload: usize, args: [5]usize,
payload: ?[]const u8,
pub fn from(mailbox_id: *const MailboxId) Message { pub fn from(mailbox_id: *const MailboxId) Message {
return Message { return Message {
.sender = MailboxId.Undefined, .sender = MailboxId.Undefined,
.receiver = *mailbox_id, .receiver = mailbox_id.*,
.type = 0, .code = undefined,
.payload = 0, .args = undefined,
.payload = null,
}; };
} }
pub fn to(mailbox_id: *const MailboxId, msg_type: usize) Message { pub fn to(mailbox_id: *const MailboxId, msg_code: usize, args: ...) Message {
return Message{ var message = Message {
.sender = MailboxId.This, .sender = MailboxId.This,
.receiver = *mailbox_id, .receiver = mailbox_id.*,
.type = msg_type, .code = msg_code,
.payload = 0, .args = undefined,
.payload = null,
}; };
assert (args.len <= message.args.len);
comptime var i = 0;
inline while (i < args.len) : (i += 1) {
message.args[i] = args[i];
} }
pub fn withData(mailbox_id: *const MailboxId, msg_type: usize, payload: usize) Message { return message;
return Message{ }
.sender = MailboxId.This,
.receiver = *mailbox_id, pub fn as(self: *const Message, sender: *const MailboxId) Message {
.type = msg_type, var message = self.*;
.payload = payload, message.sender = sender.*;
}; return message;
}
pub fn withPayload(self: *const Message, payload: []const u8) Message {
var message = self.*;
message.payload = payload;
return message;
} }
}; };
@ -63,21 +80,26 @@ pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2; pub const STDERR_FILENO = 2;
// FIXME: let's borrow Linux's error numbers for now. // FIXME: let's borrow Linux's error numbers for now.
pub const getErrno = @import("linux/index.zig").getErrno;
use @import("linux/errno.zig"); use @import("linux/errno.zig");
// Get the errno from a syscall return value, or 0 for no error.
pub fn getErrno(r: usize) usize {
const signed_r = @bitCast(isize, r);
return if (signed_r > -4096 and signed_r < 0) @intCast(usize, -signed_r) else 0;
}
// TODO: implement this correctly. // TODO: implement this correctly.
pub fn read(fd: i32, buf: *u8, count: usize) usize { pub fn read(fd: i32, buf: [*]u8, count: usize) usize {
switch (fd) { switch (fd) {
STDIN_FILENO => { STDIN_FILENO => {
var i: usize = 0; var i: usize = 0;
while (i < count) : (i += 1) { while (i < count) : (i += 1) {
send(Message.to(Server.Keyboard, 0)); send(Message.to(Server.Keyboard, 0));
// FIXME: we should be certain that we are receiving from Keyboard.
var message = Message.from(MailboxId.This); var message = Message.from(MailboxId.This);
receive(*message); receive(&message);
buf[i] = u8(message.payload); buf[i] = @intCast(u8, message.args[0]);
} }
}, },
else => unreachable, else => unreachable,
@ -86,13 +108,11 @@ pub fn read(fd: i32, buf: *u8, count: usize) usize {
} }
// TODO: implement this correctly. // TODO: implement this correctly.
pub fn write(fd: i32, buf: *const u8, count: usize) usize { pub fn write(fd: i32, buf: [*]const u8, count: usize) usize {
switch (fd) { switch (fd) {
STDOUT_FILENO, STDERR_FILENO => { STDOUT_FILENO, STDERR_FILENO => {
var i: usize = 0; send(Message.to(Server.Terminal, 1)
while (i < count) : (i += 1) { .withPayload(buf[0..count]));
send(Message.withData(Server.Terminal, 1, buf[i]));
}
}, },
else => unreachable, else => unreachable,
} }
@ -105,16 +125,13 @@ pub fn write(fd: i32, buf: *const u8, count: usize) usize {
pub const Syscall = enum(usize) { pub const Syscall = enum(usize) {
exit = 0, exit = 0,
createPort = 1, send = 1,
send = 2, receive = 2,
receive = 3, subscribeIRQ = 3,
subscribeIRQ = 4, inb = 4,
inb = 5, outb = 5,
map = 6, map = 6,
createThread = 7, createThread = 7,
createProcess = 8,
wait = 9,
portReady = 10,
}; };
//////////////////// ////////////////////
@ -126,13 +143,6 @@ pub fn exit(status: i32) noreturn {
unreachable; unreachable;
} }
pub fn createPort(mailbox_id: *const MailboxId) void {
_ = switch (*mailbox_id) {
MailboxId.Port => |id| syscall1(Syscall.createPort, id),
else => unreachable,
};
}
pub fn send(message: *const Message) void { pub fn send(message: *const Message) void {
_ = syscall1(Syscall.send, @ptrToInt(message)); _ = syscall1(Syscall.send, @ptrToInt(message));
} }
@ -146,29 +156,21 @@ pub fn subscribeIRQ(irq: u8, mailbox_id: *const MailboxId) void {
} }
pub fn inb(port: u16) u8 { pub fn inb(port: u16) u8 {
return u8(syscall1(Syscall.inb, port)); return @intCast(u8, syscall1(Syscall.inb, port));
}
pub fn outb(port: u16, value: u8) void {
_ = syscall2(Syscall.outb, port, value);
} }
pub fn map(v_addr: usize, p_addr: usize, size: usize, writable: bool) bool { pub fn map(v_addr: usize, p_addr: usize, size: usize, writable: bool) bool {
return syscall4(Syscall.map, v_addr, p_addr, size, usize(writable)) != 0; return syscall4(Syscall.map, v_addr, p_addr, size, @boolToInt(writable)) != 0;
} }
pub fn createThread(function: fn () void) u16 { pub fn createThread(function: fn () void) u16 {
return u16(syscall1(Syscall.createThread, @ptrToInt(function))); return u16(syscall1(Syscall.createThread, @ptrToInt(function)));
} }
pub fn createProcess(elf_addr: usize) u16 {
return u16(syscall1(Syscall.createProcess, elf_addr));
}
pub fn wait(tid: u16) void {
_ = syscall1(Syscall.wait, tid);
}
pub fn portReady(port: u16) bool {
return syscall1(Syscall.portReady, port) != 0;
}
///////////////////////// /////////////////////////
//// Syscall stubs //// //// Syscall stubs ////
///////////////////////// /////////////////////////

543
std/rb.zig Normal file
View File

@ -0,0 +1,543 @@
const std = @import("index.zig");
const assert = std.debug.assert;
const mem = std.mem; // For mem.Compare
// Node color for red-black balancing. Backed by u1 because the color is
// packed into the low bit of Node.parent_and_color.
const Color = enum(u1) {
    Black,
    Red,
};
// Local shorthands used throughout the balancing code.
const Red = Color.Red;
const Black = Color.Black;

// Returned by Tree.replace when the replacement node does not compare
// Equal to the node being replaced.
const ReplaceError = error {
    NotEqual,
};
/// Insert this into your struct that you want to add to a red-black tree.
/// Do not use a pointer. Turn the *rb.Node results of the functions in rb
/// (after resolving optionals) to your structure using @fieldParentPtr(). Example:
///
/// const Number = struct {
///     node: rb.Node,
///     value: i32,
/// };
/// fn number(node: *Node) Number {
///     return @fieldParentPtr(Number, "node", node);
/// }
pub const Node = struct {
    left: ?*Node,
    right: ?*Node,

    // Parent pointer and node color packed into one word: *Node is at least
    // 2-byte aligned (asserted in get_parent), so the low bit is free to
    // store the Color.
    parent_and_color: usize,

    /// In-order successor of `constnode`, or null if it is the last node.
    pub fn next(constnode: *Node) ?*Node {
        var node = constnode;

        // If there is a right subtree, the successor is its leftmost node.
        if (node.right) |right| {
            var n = right;
            while (n.left) |left|
                n = left;
            return n;
        }

        // Otherwise climb until we arrive from a left child; that parent is
        // the successor. Reaching the root from the right means no successor.
        while (true) {
            var parent = node.get_parent();
            if (parent) |p| {
                if (node != p.right)
                    return p;
                node = p;
            } else
                return null;
        }
    }

    /// In-order predecessor of `constnode`, or null if it is the first node.
    pub fn prev(constnode: *Node) ?*Node {
        var node = constnode;

        // If there is a left subtree, the predecessor is its rightmost node.
        if (node.left) |left| {
            var n = left;
            while (n.right) |right|
                n = right;
            return n;
        }

        // Otherwise climb until we arrive from a right child.
        while (true) {
            var parent = node.get_parent();
            if (parent) |p| {
                if (node != p.left)
                    return p;
                node = p;
            } else
                return null;
        }
    }

    pub fn is_root(node: *Node) bool {
        return node.get_parent() == null;
    }

    fn is_red(node: *Node) bool {
        return node.get_color() == Red;
    }

    fn is_black(node: *Node) bool {
        return node.get_color() == Black;
    }

    // Store `parent` in the pointer bits, preserving the color bit.
    fn set_parent(node: *Node, parent: ?*Node) void {
        node.parent_and_color = @ptrToInt(parent) | (node.parent_and_color & 1);
    }

    // Parent pointer with the color bit masked off; null for the root.
    fn get_parent(node: *Node) ?*Node {
        const mask: usize = 1;
        comptime {
            assert(@alignOf(*Node) >= 2);
        }
        const addr = node.parent_and_color & ~mask;
        // The root stores 0 here; avoid constructing a non-optional pointer
        // from address 0 (the original @intToPtr(*Node, 0) on that path was
        // illegal behavior).
        if (addr == 0) return null;
        return @intToPtr(*Node, addr);
    }

    fn set_color(node: *Node, color: Color) void {
        const mask: usize = 1;
        node.parent_and_color = (node.parent_and_color & ~mask) | @enumToInt(color);
    }

    fn get_color(node: *Node) Color {
        return @intToEnum(Color, @intCast(u1, node.parent_and_color & 1));
    }

    fn set_child(node: *Node, child: ?*Node, is_left: bool) void {
        if (is_left) {
            node.left = child;
        } else {
            node.right = child;
        }
    }

    // Leftmost node of the subtree rooted at `nodeconst`.
    fn get_first(nodeconst: *Node) *Node {
        var node = nodeconst;
        while (node.left) |left| {
            node = left;
        }
        return node;
    }

    // Rightmost node of the subtree rooted at `nodeconst`.
    // Fixed: the original reassigned its function parameter (parameters are
    // immutable); copy into a local first, matching get_first.
    fn get_last(nodeconst: *Node) *Node {
        var node = nodeconst;
        while (node.right) |right| {
            node = right;
        }
        return node;
    }
};
/// An intrusive red-black tree. Embed a Node in your struct, supply a
/// compareFn via init(), and recover your struct from *Node results with
/// @fieldParentPtr (see the Node doc comment).
pub const Tree = struct {
    // Root of the tree, or null when empty.
    root: ?*Node,
    // Total order over nodes; must return mem.Compare.Equal only for equal keys.
    compareFn: fn(*Node, *Node) mem.Compare,

    /// If you have a need for a version that caches this, please file a bug.
    pub fn first(tree: *Tree) ?*Node {
        // Leftmost node of the whole tree, or null when empty.
        var node: *Node = tree.root orelse return null;

        while (node.left) |left| {
            node = left;
        }

        return node;
    }

    // Rightmost node of the whole tree, or null when empty.
    pub fn last(tree: *Tree) ?*Node {
        var node: *Node = tree.root orelse return null;

        while (node.right) |right| {
            node = right;
        }

        return node;
    }

    /// Duplicate keys are not allowed. The item with the same key already in the
    /// tree will be returned, and the item will not be inserted.
    pub fn insert(tree: *Tree, node_const: *Node) ?*Node {
        var node = node_const;
        var maybe_key: ?*Node = undefined;
        var maybe_parent: ?*Node = undefined;
        var is_left: bool = undefined;

        // do_lookup both detects duplicates and, on a miss, reports the
        // attachment point (maybe_parent / is_left) for the new node.
        maybe_key = do_lookup(node, tree, &maybe_parent, &is_left);
        if (maybe_key) |key| {
            return key;
        }

        // Link the new node in as a red leaf.
        node.left = null;
        node.right = null;
        node.set_color(Red);
        node.set_parent(maybe_parent);

        if (maybe_parent) |parent| {
            parent.set_child(node, is_left);
        } else {
            tree.root = node;
        }

        // Fixup loop: restore the no-red-red invariant walking upward.
        while (node.get_parent()) |*parent| {
            if (parent.*.is_black())
                break;
            // the root is always black
            var grandpa = parent.*.get_parent() orelse unreachable;

            if (parent.* == grandpa.left) {
                var maybe_uncle = grandpa.right;

                if (maybe_uncle) |uncle| {
                    // NOTE(review): breaking when a non-null uncle is black
                    // skips the rotation case of classic red-black insertion
                    // (that case normally rotates, same as the null-uncle
                    // branch below) — verify this preserves the invariants.
                    if (uncle.is_black())
                        break;

                    // Red uncle: recolor and continue the fixup from grandpa.
                    parent.*.set_color(Black);
                    uncle.set_color(Black);
                    grandpa.set_color(Red);
                    node = grandpa;
                } else {
                    // No uncle: rotate (inner child first, then outer).
                    if (node == parent.*.right) {
                        rotate_left(parent.*, tree);
                        node = parent.*;
                        parent.* = node.get_parent().?; // Just rotated
                    }
                    parent.*.set_color(Black);
                    grandpa.set_color(Red);
                    rotate_right(grandpa, tree);
                }
            } else {
                // Mirror image of the branch above.
                var maybe_uncle = grandpa.left;
                if (maybe_uncle) |uncle| {
                    if (uncle.is_black())
                        break;
                    parent.*.set_color(Black);
                    uncle.set_color(Black);
                    grandpa.set_color(Red);
                    node = grandpa;
                } else {
                    if (node == parent.*.left) {
                        rotate_right(parent.*, tree);
                        node = parent.*;
                        parent.* = node.get_parent().?; // Just rotated
                    }
                    parent.*.set_color(Black);
                    grandpa.set_color(Red);
                    rotate_left(grandpa, tree);
                }
            }
        }
        // This was an insert, there is at least one node.
        tree.root.?.set_color(Black);
        return null;
    }

    // Find the node comparing Equal to `key`, or null.
    pub fn lookup(tree: *Tree, key: *Node) ?*Node {
        // NOTE(review): do_lookup takes `pparent: *?*Node`, but this local is
        // `*Node`, so `&parent` is `**Node` — confirm this coerces/compiles
        // as intended (`?*Node` would match the callee's signature).
        var parent: *Node = undefined;
        var is_left: bool = undefined;
        return do_lookup(key, tree, &parent, &is_left);
    }

    // Unlink `nodeconst` from the tree, then rebalance.
    pub fn remove(tree: *Tree, nodeconst: *Node) void {
        var node = nodeconst;
        // as this has the same value as node, it is unsafe to access node after newnode
        var newnode: ?*Node = nodeconst;
        var maybe_parent: ?*Node = node.get_parent();
        var color: Color = undefined;
        var next: *Node = undefined;

        // This clause is to avoid optionals
        if (node.left == null and node.right == null) {
            // Leaf: detach directly.
            if (maybe_parent) |parent| {
                parent.set_child(null, parent.left == node);
            } else
                tree.root = null;
            color = node.get_color();
            newnode = null;
        } else {
            // `next` is the node that takes the removed node's place: the sole
            // child, or the in-order successor when both children exist.
            if (node.left == null) {
                next = node.right.?; // Not both null as per above
            } else if (node.right == null) {
                next = node.left.?; // Not both null as per above
            } else
                next = node.right.?.get_first(); // Just checked for null above

            if (maybe_parent) |parent| {
                parent.set_child(next, parent.left == node);
            } else
                tree.root = next;

            if (node.left != null and node.right != null) {
                // Two children: splice the successor into node's position,
                // inheriting node's color; rebalancing acts on the successor's
                // old position (newnode = next.right).
                const left = node.left.?;
                const right = node.right.?;

                color = next.get_color();
                next.set_color(node.get_color());
                next.left = left;
                left.set_parent(next);

                if (next != right) {
                    var parent = next.get_parent().?; // Was traversed via child node (right/left)
                    next.set_parent(node.get_parent());
                    newnode = next.right;
                    // NOTE(review): this re-links the parent's left child to
                    // the node being removed — `parent.left = newnode`
                    // (the successor's old right child) looks intended; verify.
                    parent.left = node;
                    next.right = right;
                    right.set_parent(next);
                } else {
                    next.set_parent(maybe_parent);
                    maybe_parent = next;
                    newnode = next.right;
                }
            } else {
                color = node.get_color();
                newnode = next;
            }
        }

        if (newnode) |n|
            n.set_parent(maybe_parent);

        // Removing a red node never changes black heights — done.
        if (color == Red)
            return;
        // A red replacement absorbs the lost black by turning black — done.
        if (newnode) |n| {
            n.set_color(Black);
            return;
        }

        // Double-black fixup.
        // NOTE(review): the body immediately does `maybe_parent.?` with the
        // comment "If not root, there must be parent", which implies the loop
        // should run while node is NOT the root — confirm this `==` is not a
        // transcription of `!=`.
        while (node == tree.root) {
            // If not root, there must be parent
            var parent = maybe_parent.?;
            if (node == parent.left) {
                var sibling = parent.right.?; // Same number of black nodes.

                // Case: red sibling — rotate to get a black sibling.
                if (sibling.is_red()) {
                    sibling.set_color(Black);
                    parent.set_color(Red);
                    rotate_left(parent, tree);
                    sibling = parent.right.?; // Just rotated
                }
                // Case: black sibling with two black children — recolor and
                // push the double-black up to the parent.
                if ((if (sibling.left) |n| n.is_black() else true) and
                    (if (sibling.right) |n| n.is_black() else true)) {
                    sibling.set_color(Red);
                    node = parent;
                    maybe_parent = parent.get_parent();
                    continue;
                }
                // Case: sibling's far (right) child black — rotate the near
                // red child into the far position.
                if (if (sibling.right) |n| n.is_black() else true) {
                    sibling.left.?.set_color(Black); // Same number of black nodes.
                    sibling.set_color(Red);
                    rotate_right(sibling, tree);
                    sibling = parent.right.?; // Just rotated
                }
                // Case: sibling's far child red — final rotation resolves it.
                sibling.set_color(parent.get_color());
                parent.set_color(Black);
                sibling.right.?.set_color(Black); // Same number of black nodes.
                rotate_left(parent, tree);
                newnode = tree.root;
                break;
            } else {
                // Mirror image of the branch above.
                var sibling = parent.left.?; // Same number of black nodes.
                if (sibling.is_red()) {
                    sibling.set_color(Black);
                    parent.set_color(Red);
                    rotate_right(parent, tree);
                    sibling = parent.left.?; // Just rotated
                }
                if ((if (sibling.left) |n| n.is_black() else true) and
                    (if (sibling.right) |n| n.is_black() else true)) {
                    sibling.set_color(Red);
                    node = parent;
                    maybe_parent = parent.get_parent();
                    continue;
                }
                if (if (sibling.left) |n| n.is_black() else true) {
                    sibling.right.?.set_color(Black); // Same number of black nodes
                    sibling.set_color(Red);
                    rotate_left(sibling, tree);
                    sibling = parent.left.?; // Just rotated
                }
                sibling.set_color(parent.get_color());
                parent.set_color(Black);
                sibling.left.?.set_color(Black); // Same number of black nodes
                rotate_right(parent, tree);
                newnode = tree.root;
                break;
            }
            // NOTE(review): unreachable — both branches above end in
            // `continue` or `break` before falling through to this check.
            if (node.is_red())
                break;
        }

        if (newnode) |n|
            n.set_color(Black);
    }

    /// This is a shortcut to avoid removing and re-inserting an item with the same key.
    pub fn replace(tree: *Tree, old: *Node, newconst: *Node) !void {
        var new = newconst;

        // I assume this can get optimized out if the caller already knows.
        // Only nodes comparing Equal may be swapped in place.
        if (tree.compareFn(old, new) != mem.Compare.Equal) return ReplaceError.NotEqual;

        // Re-point the parent's (or root's) link at the replacement.
        if (old.get_parent()) |parent| {
            parent.set_child(new, parent.left == old);
        } else
            tree.root = new;

        // Re-point both children's parent links at the replacement.
        if (old.left) |left|
            left.set_parent(new);
        if (old.right) |right|
            right.set_parent(new);

        // The replacement inherits old's links and color wholesale.
        new.* = old.*;
    }

    // Must be called before any other operation on the tree.
    pub fn init(tree: *Tree, f: fn(*Node, *Node) mem.Compare) void {
        tree.root = null;
        tree.compareFn = f;
    }
};
// Left-rotate around `node`: its right child (the pivot) takes its place in
// the tree, and `node` becomes the pivot's left child. The pivot's old left
// subtree moves onto `node`'s right. Requires node.right != null.
fn rotate_left(node: *Node, tree: *Tree) void {
    const pivot = node.right orelse unreachable;

    if (node.is_root()) {
        // Rotating at the root: the pivot becomes the new tree root.
        tree.root = pivot;
        pivot.set_parent(null);
    } else {
        // Re-point the parent's child link at the pivot.
        const parent = node.get_parent().?;
        if (parent.left == node) {
            parent.left = pivot;
        } else {
            parent.right = pivot;
        }
        pivot.set_parent(parent);
    }

    // Transfer the pivot's left subtree, then hang node under the pivot.
    node.set_parent(pivot);
    node.right = pivot.left;
    if (node.right) |moved| {
        moved.set_parent(node);
    }
    pivot.left = node;
}
// Right-rotate around `node`: its left child (the pivot) takes its place in
// the tree, and `node` becomes the pivot's right child. The pivot's old right
// subtree moves onto `node`'s left. Requires node.left != null.
fn rotate_right(node: *Node, tree: *Tree) void {
    const pivot = node.left orelse unreachable;

    if (node.is_root()) {
        // Rotating at the root: the pivot becomes the new tree root.
        tree.root = pivot;
        pivot.set_parent(null);
    } else {
        // Re-point the parent's child link at the pivot.
        const parent = node.get_parent().?;
        if (parent.left == node) {
            parent.left = pivot;
        } else {
            parent.right = pivot;
        }
        pivot.set_parent(parent);
    }

    // Transfer the pivot's right subtree, then hang node under the pivot.
    node.set_parent(pivot);
    node.left = pivot.right;
    if (node.left) |moved| {
        moved.set_parent(node);
    }
    pivot.right = node;
}
// Binary search for `key` from the root. Returns the matching node, or null
// on a miss — in which case `pparent`/`is_left` describe the attachment point
// where a node with this key would be inserted (null parent means empty tree).
fn do_lookup(key: *Node, tree: *Tree, pparent: *?*Node, is_left: *bool) ?*Node {
    pparent.* = null;
    is_left.* = false;

    var current: ?*Node = tree.root;
    while (current) |n| {
        const order = tree.compareFn(n, key);
        if (order == mem.Compare.Equal) return n;

        // Record the prospective parent before descending.
        pparent.* = n;
        if (order == mem.Compare.GreaterThan) {
            is_left.* = true;
            current = n.left;
        } else if (order == mem.Compare.LessThan) {
            is_left.* = false;
            current = n.right;
        } else {
            unreachable;
        }
    }
    return null;
}
// Test fixture: an integer value with an intrusive rb.Node embedded in it.
const testNumber = struct {
    node: Node,
    value: usize,
};
// Recovers the enclosing testNumber from its embedded node field.
fn testGetNumber(node: *Node) *testNumber {
    return @fieldParentPtr(testNumber, "node", node);
}
// Orders test nodes by their wrapped integer value.
fn testCompare(l: *Node, r: *Node) mem.Compare {
    const left_value = testGetNumber(l).value;
    const right_value = testGetNumber(r).value;

    if (left_value < right_value) return mem.Compare.LessThan;
    if (left_value > right_value) return mem.Compare.GreaterThan;
    return mem.Compare.Equal;
}
// Smoke test: insert nine nodes (with one duplicate key), remove one, exercise
// duplicate-insert and replace, then verify an in-order walk is strictly ascending.
test "rb" {
    var tree: Tree = undefined;
    var ns: [10]testNumber = undefined;
    ns[0].value = 42; // initialized but never inserted below
    ns[1].value = 41;
    ns[2].value = 40;
    ns[3].value = 39;
    ns[4].value = 38;
    ns[5].value = 39; // duplicate of ns[3].value — its insert is a no-op
    ns[6].value = 3453;
    ns[7].value = 32345;
    ns[8].value = 392345;
    ns[9].value = 4;

    var dup: testNumber = undefined;
    dup.value = 32345; // same key as ns[7]; used for duplicate-insert and replace

    tree.init(testCompare);
    _ = tree.insert(&ns[1].node);
    _ = tree.insert(&ns[2].node);
    _ = tree.insert(&ns[3].node);
    _ = tree.insert(&ns[4].node);
    _ = tree.insert(&ns[5].node);
    _ = tree.insert(&ns[6].node);
    _ = tree.insert(&ns[7].node);
    _ = tree.insert(&ns[8].node);
    _ = tree.insert(&ns[9].node);
    tree.remove(&ns[3].node);
    // Inserting an equal key must return the already-present node, not insert.
    assert(tree.insert(&dup.node) == &ns[7].node);
    // replace swaps dup in for ns[7] without a remove/insert cycle.
    try tree.replace(&ns[7].node, &dup.node);

    // Walk in order and check values are strictly increasing.
    var num: *testNumber = undefined;
    num = testGetNumber(tree.first().?);
    while (num.node.next() != null) {
        assert(testGetNumber(num.node.next().?).value > num.value);
        num = testGetNumber(num.node.next().?);
    }
}

View File

@ -2,7 +2,7 @@ const std = @import("index.zig");
const assert = std.debug.assert; const assert = std.debug.assert;
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
// Imagine that `fn at(self: &Self, index: usize) &T` is a customer asking for a box // Imagine that `fn at(self: *Self, index: usize) &T` is a customer asking for a box
// from a warehouse, based on a flat array, boxes ordered from 0 to N - 1. // from a warehouse, based on a flat array, boxes ordered from 0 to N - 1.
// But the warehouse actually stores boxes in shelves of increasing powers of 2 sizes. // But the warehouse actually stores boxes in shelves of increasing powers of 2 sizes.
// So when the customer requests a box index, we have to translate it to shelf index // So when the customer requests a box index, we have to translate it to shelf index
@ -93,6 +93,14 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
pub const prealloc_count = prealloc_item_count; pub const prealloc_count = prealloc_item_count;
fn AtType(comptime SelfType: type) type {
if (@typeInfo(SelfType).Pointer.is_const) {
return *const T;
} else {
return *T;
}
}
/// Deinitialize with `deinit` /// Deinitialize with `deinit`
pub fn init(allocator: *Allocator) Self { pub fn init(allocator: *Allocator) Self {
return Self{ return Self{
@ -109,7 +117,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
self.* = undefined; self.* = undefined;
} }
pub fn at(self: *Self, i: usize) *T { pub fn at(self: var, i: usize) AtType(@typeOf(self)) {
assert(i < self.len); assert(i < self.len);
return self.uncheckedAt(i); return self.uncheckedAt(i);
} }
@ -133,7 +141,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
if (self.len == 0) return null; if (self.len == 0) return null;
const index = self.len - 1; const index = self.len - 1;
const result = self.uncheckedAt(index).*; const result = uncheckedAt(self, index).*;
self.len = index; self.len = index;
return result; return result;
} }
@ -141,7 +149,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
pub fn addOne(self: *Self) !*T { pub fn addOne(self: *Self) !*T {
const new_length = self.len + 1; const new_length = self.len + 1;
try self.growCapacity(new_length); try self.growCapacity(new_length);
const result = self.uncheckedAt(self.len); const result = uncheckedAt(self, self.len);
self.len = new_length; self.len = new_length;
return result; return result;
} }
@ -193,7 +201,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
self.dynamic_segments = self.allocator.shrink([*]T, self.dynamic_segments, new_cap_shelf_count); self.dynamic_segments = self.allocator.shrink([*]T, self.dynamic_segments, new_cap_shelf_count);
} }
pub fn uncheckedAt(self: *Self, index: usize) *T { pub fn uncheckedAt(self: var, index: usize) AtType(@typeOf(self)) {
if (index < prealloc_item_count) { if (index < prealloc_item_count) {
return &self.prealloc_segment[index]; return &self.prealloc_segment[index];
} }

View File

@ -13,17 +13,11 @@ comptime {
@export("main", main, strong_linkage); @export("main", main, strong_linkage);
} else if (builtin.os == builtin.Os.windows) { } else if (builtin.os == builtin.Os.windows) {
@export("WinMainCRTStartup", WinMainCRTStartup, strong_linkage); @export("WinMainCRTStartup", WinMainCRTStartup, strong_linkage);
} else if (builtin.os == builtin.Os.zen) {
@export("_start", zen_start, strong_linkage);
} else { } else {
@export("_start", _start, strong_linkage); @export("_start", _start, strong_linkage);
} }
} }
extern fn zen_start() noreturn {
std.os.posix.exit(@inlineCall(callMain));
}
nakedcc fn _start() noreturn { nakedcc fn _start() noreturn {
switch (builtin.arch) { switch (builtin.arch) {
builtin.Arch.x86_64 => { builtin.Arch.x86_64 => {

View File

@ -72,10 +72,10 @@ pub fn main() !void {
if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| { if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| {
const option_name = option_contents[0..name_end]; const option_name = option_contents[0..name_end];
const option_value = option_contents[name_end + 1 ..]; const option_value = option_contents[name_end + 1 ..];
if (builder.addUserInputOption(option_name, option_value)) if (try builder.addUserInputOption(option_name, option_value))
return usageAndErr(&builder, false, try stderr_stream); return usageAndErr(&builder, false, try stderr_stream);
} else { } else {
if (builder.addUserInputFlag(option_contents)) if (try builder.addUserInputFlag(option_contents))
return usageAndErr(&builder, false, try stderr_stream); return usageAndErr(&builder, false, try stderr_stream);
} }
} else if (mem.startsWith(u8, arg, "-")) { } else if (mem.startsWith(u8, arg, "-")) {

View File

@ -188,6 +188,7 @@ pub const Utf8View = struct {
return Utf8View{ .bytes = s }; return Utf8View{ .bytes = s };
} }
/// TODO: https://github.com/ziglang/zig/issues/425
pub fn initComptime(comptime s: []const u8) Utf8View { pub fn initComptime(comptime s: []const u8) Utf8View {
if (comptime init(s)) |r| { if (comptime init(s)) |r| {
return r; return r;
@ -199,7 +200,7 @@ pub const Utf8View = struct {
} }
} }
pub fn iterator(s: *const Utf8View) Utf8Iterator { pub fn iterator(s: Utf8View) Utf8Iterator {
return Utf8Iterator{ return Utf8Iterator{
.bytes = s.bytes, .bytes = s.bytes,
.i = 0, .i = 0,
@ -217,7 +218,6 @@ const Utf8Iterator = struct {
} }
const cp_len = utf8ByteSequenceLength(it.bytes[it.i]) catch unreachable; const cp_len = utf8ByteSequenceLength(it.bytes[it.i]) catch unreachable;
it.i += cp_len; it.i += cp_len;
return it.bytes[it.i - cp_len .. it.i]; return it.bytes[it.i - cp_len .. it.i];
} }
@ -235,6 +235,38 @@ const Utf8Iterator = struct {
} }
}; };
pub const Utf16LeIterator = struct {
bytes: []const u8,
i: usize,
pub fn init(s: []const u16) Utf16LeIterator {
return Utf16LeIterator{
.bytes = @sliceToBytes(s),
.i = 0,
};
}
pub fn nextCodepoint(it: *Utf16LeIterator) !?u32 {
assert(it.i <= it.bytes.len);
if (it.i == it.bytes.len) return null;
const c0: u32 = mem.readIntLE(u16, it.bytes[it.i .. it.i + 2]);
if (c0 & ~u32(0x03ff) == 0xd800) {
// surrogate pair
it.i += 2;
if (it.i >= it.bytes.len) return error.DanglingSurrogateHalf;
const c1: u32 = mem.readIntLE(u16, it.bytes[it.i .. it.i + 2]);
if (c1 & ~u32(0x03ff) != 0xdc00) return error.ExpectedSecondSurrogateHalf;
it.i += 2;
return 0x10000 + (((c0 & 0x03ff) << 10) | (c1 & 0x03ff));
} else if (c0 & ~u32(0x03ff) == 0xdc00) {
return error.UnexpectedSecondSurrogateHalf;
} else {
it.i += 2;
return c0;
}
}
};
test "utf8 encode" { test "utf8 encode" {
comptime testUtf8Encode() catch unreachable; comptime testUtf8Encode() catch unreachable;
try testUtf8Encode(); try testUtf8Encode();
@ -445,42 +477,34 @@ fn testDecode(bytes: []const u8) !u32 {
return utf8Decode(bytes); return utf8Decode(bytes);
} }
// TODO: make this API on top of a non-allocating Utf16LeView /// Caller must free returned memory.
pub fn utf16leToUtf8(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 { pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 {
var result = std.ArrayList(u8).init(allocator); var result = std.ArrayList(u8).init(allocator);
// optimistically guess that it will all be ascii. // optimistically guess that it will all be ascii.
try result.ensureCapacity(utf16le.len); try result.ensureCapacity(utf16le.len);
const utf16le_as_bytes = @sliceToBytes(utf16le);
var i: usize = 0;
var out_index: usize = 0; var out_index: usize = 0;
while (i < utf16le_as_bytes.len) : (i += 2) { var it = Utf16LeIterator.init(utf16le);
// decode while (try it.nextCodepoint()) |codepoint| {
const c0: u32 = mem.readIntLE(u16, utf16le_as_bytes[i..i + 2]);
var codepoint: u32 = undefined;
if (c0 & ~u32(0x03ff) == 0xd800) {
// surrogate pair
i += 2;
if (i >= utf16le_as_bytes.len) return error.DanglingSurrogateHalf;
const c1: u32 = mem.readIntLE(u16, utf16le_as_bytes[i..i + 2]);
if (c1 & ~u32(0x03ff) != 0xdc00) return error.ExpectedSecondSurrogateHalf;
codepoint = 0x10000 + (((c0 & 0x03ff) << 10) | (c1 & 0x03ff));
} else if (c0 & ~u32(0x03ff) == 0xdc00) {
return error.UnexpectedSecondSurrogateHalf;
} else {
codepoint = c0;
}
// encode
const utf8_len = utf8CodepointSequenceLength(codepoint) catch unreachable; const utf8_len = utf8CodepointSequenceLength(codepoint) catch unreachable;
try result.resize(result.len + utf8_len); try result.resize(result.len + utf8_len);
_ = utf8Encode(codepoint, result.items[out_index..]) catch unreachable; assert((utf8Encode(codepoint, result.items[out_index..]) catch unreachable) == utf8_len);
out_index += utf8_len; out_index += utf8_len;
} }
return result.toOwnedSlice(); return result.toOwnedSlice();
} }
/// Asserts that the output buffer is big enough.
/// Returns end byte index into utf8.
pub fn utf16leToUtf8(utf8: []u8, utf16le: []const u16) !usize {
var end_index: usize = 0;
var it = Utf16LeIterator.init(utf16le);
while (try it.nextCodepoint()) |codepoint| {
end_index += try utf8Encode(codepoint, utf8[end_index..]);
}
return end_index;
}
test "utf16leToUtf8" { test "utf16leToUtf8" {
var utf16le: [2]u16 = undefined; var utf16le: [2]u16 = undefined;
const utf16le_as_bytes = @sliceToBytes(utf16le[0..]); const utf16le_as_bytes = @sliceToBytes(utf16le[0..]);
@ -488,14 +512,14 @@ test "utf16leToUtf8" {
{ {
mem.writeInt(utf16le_as_bytes[0..], u16('A'), builtin.Endian.Little); mem.writeInt(utf16le_as_bytes[0..], u16('A'), builtin.Endian.Little);
mem.writeInt(utf16le_as_bytes[2..], u16('a'), builtin.Endian.Little); mem.writeInt(utf16le_as_bytes[2..], u16('a'), builtin.Endian.Little);
const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le); const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, utf16le);
assert(mem.eql(u8, utf8, "Aa")); assert(mem.eql(u8, utf8, "Aa"));
} }
{ {
mem.writeInt(utf16le_as_bytes[0..], u16(0x80), builtin.Endian.Little); mem.writeInt(utf16le_as_bytes[0..], u16(0x80), builtin.Endian.Little);
mem.writeInt(utf16le_as_bytes[2..], u16(0xffff), builtin.Endian.Little); mem.writeInt(utf16le_as_bytes[2..], u16(0xffff), builtin.Endian.Little);
const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le); const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, utf16le);
assert(mem.eql(u8, utf8, "\xc2\x80" ++ "\xef\xbf\xbf")); assert(mem.eql(u8, utf8, "\xc2\x80" ++ "\xef\xbf\xbf"));
} }
@ -503,7 +527,7 @@ test "utf16leToUtf8" {
// the values just outside the surrogate half range // the values just outside the surrogate half range
mem.writeInt(utf16le_as_bytes[0..], u16(0xd7ff), builtin.Endian.Little); mem.writeInt(utf16le_as_bytes[0..], u16(0xd7ff), builtin.Endian.Little);
mem.writeInt(utf16le_as_bytes[2..], u16(0xe000), builtin.Endian.Little); mem.writeInt(utf16le_as_bytes[2..], u16(0xe000), builtin.Endian.Little);
const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le); const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, utf16le);
assert(mem.eql(u8, utf8, "\xed\x9f\xbf" ++ "\xee\x80\x80")); assert(mem.eql(u8, utf8, "\xed\x9f\xbf" ++ "\xee\x80\x80"));
} }
@ -511,7 +535,7 @@ test "utf16leToUtf8" {
// smallest surrogate pair // smallest surrogate pair
mem.writeInt(utf16le_as_bytes[0..], u16(0xd800), builtin.Endian.Little); mem.writeInt(utf16le_as_bytes[0..], u16(0xd800), builtin.Endian.Little);
mem.writeInt(utf16le_as_bytes[2..], u16(0xdc00), builtin.Endian.Little); mem.writeInt(utf16le_as_bytes[2..], u16(0xdc00), builtin.Endian.Little);
const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le); const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, utf16le);
assert(mem.eql(u8, utf8, "\xf0\x90\x80\x80")); assert(mem.eql(u8, utf8, "\xf0\x90\x80\x80"));
} }
@ -519,14 +543,48 @@ test "utf16leToUtf8" {
// largest surrogate pair // largest surrogate pair
mem.writeInt(utf16le_as_bytes[0..], u16(0xdbff), builtin.Endian.Little); mem.writeInt(utf16le_as_bytes[0..], u16(0xdbff), builtin.Endian.Little);
mem.writeInt(utf16le_as_bytes[2..], u16(0xdfff), builtin.Endian.Little); mem.writeInt(utf16le_as_bytes[2..], u16(0xdfff), builtin.Endian.Little);
const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le); const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, utf16le);
assert(mem.eql(u8, utf8, "\xf4\x8f\xbf\xbf")); assert(mem.eql(u8, utf8, "\xf4\x8f\xbf\xbf"));
} }
{ {
mem.writeInt(utf16le_as_bytes[0..], u16(0xdbff), builtin.Endian.Little); mem.writeInt(utf16le_as_bytes[0..], u16(0xdbff), builtin.Endian.Little);
mem.writeInt(utf16le_as_bytes[2..], u16(0xdc00), builtin.Endian.Little); mem.writeInt(utf16le_as_bytes[2..], u16(0xdc00), builtin.Endian.Little);
const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le); const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, utf16le);
assert(mem.eql(u8, utf8, "\xf4\x8f\xb0\x80")); assert(mem.eql(u8, utf8, "\xf4\x8f\xb0\x80"));
} }
} }
/// TODO support codepoints bigger than 16 bits
/// TODO type for null terminated pointer
pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![]u16 {
var result = std.ArrayList(u16).init(allocator);
// optimistically guess that it will not require surrogate pairs
try result.ensureCapacity(utf8.len + 1);
const view = try Utf8View.init(utf8);
var it = view.iterator();
while (it.nextCodepoint()) |codepoint| {
try result.append(@intCast(u16, codepoint)); // TODO surrogate pairs
}
try result.append(0);
return result.toOwnedSlice();
}
/// Returns index of next character. If exact fit, returned index equals output slice length.
/// If ran out of room, returned index equals output slice length + 1.
/// TODO support codepoints bigger than 16 bits
pub fn utf8ToUtf16Le(utf16le: []u16, utf8: []const u8) !usize {
const utf16le_as_bytes = @sliceToBytes(utf16le[0..]);
var end_index: usize = 0;
var it = (try Utf8View.init(utf8)).iterator();
while (it.nextCodepoint()) |codepoint| {
if (end_index == utf16le_as_bytes.len) return (end_index / 2) + 1;
// TODO surrogate pairs
mem.writeInt(utf16le_as_bytes[end_index..], @intCast(u16, codepoint), builtin.Endian.Little);
end_index += 2;
}
return end_index / 2;
}

View File

@ -32,6 +32,12 @@ pub const Tree = struct {
return self.source[token.start..token.end]; return self.source[token.start..token.end];
} }
pub fn getNodeSource(self: *const Tree, node: *const Node) []const u8 {
const first_token = self.tokens.at(node.firstToken());
const last_token = self.tokens.at(node.lastToken());
return self.source[first_token.start..last_token.end];
}
pub const Location = struct { pub const Location = struct {
line: usize, line: usize,
column: usize, column: usize,
@ -338,7 +344,7 @@ pub const Node = struct {
unreachable; unreachable;
} }
pub fn firstToken(base: *Node) TokenIndex { pub fn firstToken(base: *const Node) TokenIndex {
comptime var i = 0; comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) { inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) { if (base.id == @field(Id, @memberName(Id, i))) {
@ -349,7 +355,7 @@ pub const Node = struct {
unreachable; unreachable;
} }
pub fn lastToken(base: *Node) TokenIndex { pub fn lastToken(base: *const Node) TokenIndex {
comptime var i = 0; comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) { inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) { if (base.id == @field(Id, @memberName(Id, i))) {
@ -473,11 +479,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Root) TokenIndex { pub fn firstToken(self: *const Root) TokenIndex {
return if (self.decls.len == 0) self.eof_token else (self.decls.at(0).*).firstToken(); return if (self.decls.len == 0) self.eof_token else (self.decls.at(0).*).firstToken();
} }
pub fn lastToken(self: *Root) TokenIndex { pub fn lastToken(self: *const Root) TokenIndex {
return if (self.decls.len == 0) self.eof_token else (self.decls.at(self.decls.len - 1).*).lastToken(); return if (self.decls.len == 0) self.eof_token else (self.decls.at(self.decls.len - 1).*).lastToken();
} }
}; };
@ -518,7 +524,7 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *VarDecl) TokenIndex { pub fn firstToken(self: *const VarDecl) TokenIndex {
if (self.visib_token) |visib_token| return visib_token; if (self.visib_token) |visib_token| return visib_token;
if (self.comptime_token) |comptime_token| return comptime_token; if (self.comptime_token) |comptime_token| return comptime_token;
if (self.extern_export_token) |extern_export_token| return extern_export_token; if (self.extern_export_token) |extern_export_token| return extern_export_token;
@ -526,7 +532,7 @@ pub const Node = struct {
return self.mut_token; return self.mut_token;
} }
pub fn lastToken(self: *VarDecl) TokenIndex { pub fn lastToken(self: *const VarDecl) TokenIndex {
return self.semicolon_token; return self.semicolon_token;
} }
}; };
@ -548,12 +554,12 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Use) TokenIndex { pub fn firstToken(self: *const Use) TokenIndex {
if (self.visib_token) |visib_token| return visib_token; if (self.visib_token) |visib_token| return visib_token;
return self.use_token; return self.use_token;
} }
pub fn lastToken(self: *Use) TokenIndex { pub fn lastToken(self: *const Use) TokenIndex {
return self.semicolon_token; return self.semicolon_token;
} }
}; };
@ -575,11 +581,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ErrorSetDecl) TokenIndex { pub fn firstToken(self: *const ErrorSetDecl) TokenIndex {
return self.error_token; return self.error_token;
} }
pub fn lastToken(self: *ErrorSetDecl) TokenIndex { pub fn lastToken(self: *const ErrorSetDecl) TokenIndex {
return self.rbrace_token; return self.rbrace_token;
} }
}; };
@ -618,14 +624,14 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ContainerDecl) TokenIndex { pub fn firstToken(self: *const ContainerDecl) TokenIndex {
if (self.layout_token) |layout_token| { if (self.layout_token) |layout_token| {
return layout_token; return layout_token;
} }
return self.kind_token; return self.kind_token;
} }
pub fn lastToken(self: *ContainerDecl) TokenIndex { pub fn lastToken(self: *const ContainerDecl) TokenIndex {
return self.rbrace_token; return self.rbrace_token;
} }
}; };
@ -646,12 +652,12 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *StructField) TokenIndex { pub fn firstToken(self: *const StructField) TokenIndex {
if (self.visib_token) |visib_token| return visib_token; if (self.visib_token) |visib_token| return visib_token;
return self.name_token; return self.name_token;
} }
pub fn lastToken(self: *StructField) TokenIndex { pub fn lastToken(self: *const StructField) TokenIndex {
return self.type_expr.lastToken(); return self.type_expr.lastToken();
} }
}; };
@ -679,11 +685,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *UnionTag) TokenIndex { pub fn firstToken(self: *const UnionTag) TokenIndex {
return self.name_token; return self.name_token;
} }
pub fn lastToken(self: *UnionTag) TokenIndex { pub fn lastToken(self: *const UnionTag) TokenIndex {
if (self.value_expr) |value_expr| { if (self.value_expr) |value_expr| {
return value_expr.lastToken(); return value_expr.lastToken();
} }
@ -712,11 +718,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *EnumTag) TokenIndex { pub fn firstToken(self: *const EnumTag) TokenIndex {
return self.name_token; return self.name_token;
} }
pub fn lastToken(self: *EnumTag) TokenIndex { pub fn lastToken(self: *const EnumTag) TokenIndex {
if (self.value) |value| { if (self.value) |value| {
return value.lastToken(); return value.lastToken();
} }
@ -741,11 +747,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ErrorTag) TokenIndex { pub fn firstToken(self: *const ErrorTag) TokenIndex {
return self.name_token; return self.name_token;
} }
pub fn lastToken(self: *ErrorTag) TokenIndex { pub fn lastToken(self: *const ErrorTag) TokenIndex {
return self.name_token; return self.name_token;
} }
}; };
@ -758,11 +764,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Identifier) TokenIndex { pub fn firstToken(self: *const Identifier) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *Identifier) TokenIndex { pub fn lastToken(self: *const Identifier) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -784,11 +790,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *AsyncAttribute) TokenIndex { pub fn firstToken(self: *const AsyncAttribute) TokenIndex {
return self.async_token; return self.async_token;
} }
pub fn lastToken(self: *AsyncAttribute) TokenIndex { pub fn lastToken(self: *const AsyncAttribute) TokenIndex {
if (self.rangle_bracket) |rangle_bracket| { if (self.rangle_bracket) |rangle_bracket| {
return rangle_bracket; return rangle_bracket;
} }
@ -856,7 +862,7 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *FnProto) TokenIndex { pub fn firstToken(self: *const FnProto) TokenIndex {
if (self.visib_token) |visib_token| return visib_token; if (self.visib_token) |visib_token| return visib_token;
if (self.async_attr) |async_attr| return async_attr.firstToken(); if (self.async_attr) |async_attr| return async_attr.firstToken();
if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token; if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token;
@ -865,7 +871,7 @@ pub const Node = struct {
return self.fn_token; return self.fn_token;
} }
pub fn lastToken(self: *FnProto) TokenIndex { pub fn lastToken(self: *const FnProto) TokenIndex {
if (self.body_node) |body_node| return body_node.lastToken(); if (self.body_node) |body_node| return body_node.lastToken();
switch (self.return_type) { switch (self.return_type) {
// TODO allow this and next prong to share bodies since the types are the same // TODO allow this and next prong to share bodies since the types are the same
@ -896,11 +902,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *PromiseType) TokenIndex { pub fn firstToken(self: *const PromiseType) TokenIndex {
return self.promise_token; return self.promise_token;
} }
pub fn lastToken(self: *PromiseType) TokenIndex { pub fn lastToken(self: *const PromiseType) TokenIndex {
if (self.result) |result| return result.return_type.lastToken(); if (self.result) |result| return result.return_type.lastToken();
return self.promise_token; return self.promise_token;
} }
@ -923,14 +929,14 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ParamDecl) TokenIndex { pub fn firstToken(self: *const ParamDecl) TokenIndex {
if (self.comptime_token) |comptime_token| return comptime_token; if (self.comptime_token) |comptime_token| return comptime_token;
if (self.noalias_token) |noalias_token| return noalias_token; if (self.noalias_token) |noalias_token| return noalias_token;
if (self.name_token) |name_token| return name_token; if (self.name_token) |name_token| return name_token;
return self.type_node.firstToken(); return self.type_node.firstToken();
} }
pub fn lastToken(self: *ParamDecl) TokenIndex { pub fn lastToken(self: *const ParamDecl) TokenIndex {
if (self.var_args_token) |var_args_token| return var_args_token; if (self.var_args_token) |var_args_token| return var_args_token;
return self.type_node.lastToken(); return self.type_node.lastToken();
} }
@ -954,7 +960,7 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Block) TokenIndex { pub fn firstToken(self: *const Block) TokenIndex {
if (self.label) |label| { if (self.label) |label| {
return label; return label;
} }
@ -962,7 +968,7 @@ pub const Node = struct {
return self.lbrace; return self.lbrace;
} }
pub fn lastToken(self: *Block) TokenIndex { pub fn lastToken(self: *const Block) TokenIndex {
return self.rbrace; return self.rbrace;
} }
}; };
@ -981,11 +987,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Defer) TokenIndex { pub fn firstToken(self: *const Defer) TokenIndex {
return self.defer_token; return self.defer_token;
} }
pub fn lastToken(self: *Defer) TokenIndex { pub fn lastToken(self: *const Defer) TokenIndex {
return self.expr.lastToken(); return self.expr.lastToken();
} }
}; };
@ -1005,11 +1011,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Comptime) TokenIndex { pub fn firstToken(self: *const Comptime) TokenIndex {
return self.comptime_token; return self.comptime_token;
} }
pub fn lastToken(self: *Comptime) TokenIndex { pub fn lastToken(self: *const Comptime) TokenIndex {
return self.expr.lastToken(); return self.expr.lastToken();
} }
}; };
@ -1029,11 +1035,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Payload) TokenIndex { pub fn firstToken(self: *const Payload) TokenIndex {
return self.lpipe; return self.lpipe;
} }
pub fn lastToken(self: *Payload) TokenIndex { pub fn lastToken(self: *const Payload) TokenIndex {
return self.rpipe; return self.rpipe;
} }
}; };
@ -1054,11 +1060,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *PointerPayload) TokenIndex { pub fn firstToken(self: *const PointerPayload) TokenIndex {
return self.lpipe; return self.lpipe;
} }
pub fn lastToken(self: *PointerPayload) TokenIndex { pub fn lastToken(self: *const PointerPayload) TokenIndex {
return self.rpipe; return self.rpipe;
} }
}; };
@ -1085,11 +1091,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *PointerIndexPayload) TokenIndex { pub fn firstToken(self: *const PointerIndexPayload) TokenIndex {
return self.lpipe; return self.lpipe;
} }
pub fn lastToken(self: *PointerIndexPayload) TokenIndex { pub fn lastToken(self: *const PointerIndexPayload) TokenIndex {
return self.rpipe; return self.rpipe;
} }
}; };
@ -1114,11 +1120,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Else) TokenIndex { pub fn firstToken(self: *const Else) TokenIndex {
return self.else_token; return self.else_token;
} }
pub fn lastToken(self: *Else) TokenIndex { pub fn lastToken(self: *const Else) TokenIndex {
return self.body.lastToken(); return self.body.lastToken();
} }
}; };
@ -1146,11 +1152,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Switch) TokenIndex { pub fn firstToken(self: *const Switch) TokenIndex {
return self.switch_token; return self.switch_token;
} }
pub fn lastToken(self: *Switch) TokenIndex { pub fn lastToken(self: *const Switch) TokenIndex {
return self.rbrace; return self.rbrace;
} }
}; };
@ -1181,11 +1187,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *SwitchCase) TokenIndex { pub fn firstToken(self: *const SwitchCase) TokenIndex {
return (self.items.at(0).*).firstToken(); return (self.items.at(0).*).firstToken();
} }
pub fn lastToken(self: *SwitchCase) TokenIndex { pub fn lastToken(self: *const SwitchCase) TokenIndex {
return self.expr.lastToken(); return self.expr.lastToken();
} }
}; };
@ -1198,11 +1204,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *SwitchElse) TokenIndex { pub fn firstToken(self: *const SwitchElse) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *SwitchElse) TokenIndex { pub fn lastToken(self: *const SwitchElse) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1245,7 +1251,7 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *While) TokenIndex { pub fn firstToken(self: *const While) TokenIndex {
if (self.label) |label| { if (self.label) |label| {
return label; return label;
} }
@ -1257,7 +1263,7 @@ pub const Node = struct {
return self.while_token; return self.while_token;
} }
pub fn lastToken(self: *While) TokenIndex { pub fn lastToken(self: *const While) TokenIndex {
if (self.@"else") |@"else"| { if (self.@"else") |@"else"| {
return @"else".body.lastToken(); return @"else".body.lastToken();
} }
@ -1298,7 +1304,7 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *For) TokenIndex { pub fn firstToken(self: *const For) TokenIndex {
if (self.label) |label| { if (self.label) |label| {
return label; return label;
} }
@ -1310,7 +1316,7 @@ pub const Node = struct {
return self.for_token; return self.for_token;
} }
pub fn lastToken(self: *For) TokenIndex { pub fn lastToken(self: *const For) TokenIndex {
if (self.@"else") |@"else"| { if (self.@"else") |@"else"| {
return @"else".body.lastToken(); return @"else".body.lastToken();
} }
@ -1349,11 +1355,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *If) TokenIndex { pub fn firstToken(self: *const If) TokenIndex {
return self.if_token; return self.if_token;
} }
pub fn lastToken(self: *If) TokenIndex { pub fn lastToken(self: *const If) TokenIndex {
if (self.@"else") |@"else"| { if (self.@"else") |@"else"| {
return @"else".body.lastToken(); return @"else".body.lastToken();
} }
@ -1480,11 +1486,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *InfixOp) TokenIndex { pub fn firstToken(self: *const InfixOp) TokenIndex {
return self.lhs.firstToken(); return self.lhs.firstToken();
} }
pub fn lastToken(self: *InfixOp) TokenIndex { pub fn lastToken(self: *const InfixOp) TokenIndex {
return self.rhs.lastToken(); return self.rhs.lastToken();
} }
}; };
@ -1570,11 +1576,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *PrefixOp) TokenIndex { pub fn firstToken(self: *const PrefixOp) TokenIndex {
return self.op_token; return self.op_token;
} }
pub fn lastToken(self: *PrefixOp) TokenIndex { pub fn lastToken(self: *const PrefixOp) TokenIndex {
return self.rhs.lastToken(); return self.rhs.lastToken();
} }
}; };
@ -1594,11 +1600,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *FieldInitializer) TokenIndex { pub fn firstToken(self: *const FieldInitializer) TokenIndex {
return self.period_token; return self.period_token;
} }
pub fn lastToken(self: *FieldInitializer) TokenIndex { pub fn lastToken(self: *const FieldInitializer) TokenIndex {
return self.expr.lastToken(); return self.expr.lastToken();
} }
}; };
@ -1673,7 +1679,7 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *SuffixOp) TokenIndex { pub fn firstToken(self: *const SuffixOp) TokenIndex {
switch (self.op) { switch (self.op) {
@TagType(Op).Call => |*call_info| if (call_info.async_attr) |async_attr| return async_attr.firstToken(), @TagType(Op).Call => |*call_info| if (call_info.async_attr) |async_attr| return async_attr.firstToken(),
else => {}, else => {},
@ -1681,7 +1687,7 @@ pub const Node = struct {
return self.lhs.firstToken(); return self.lhs.firstToken();
} }
pub fn lastToken(self: *SuffixOp) TokenIndex { pub fn lastToken(self: *const SuffixOp) TokenIndex {
return self.rtoken; return self.rtoken;
} }
}; };
@ -1701,11 +1707,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *GroupedExpression) TokenIndex { pub fn firstToken(self: *const GroupedExpression) TokenIndex {
return self.lparen; return self.lparen;
} }
pub fn lastToken(self: *GroupedExpression) TokenIndex { pub fn lastToken(self: *const GroupedExpression) TokenIndex {
return self.rparen; return self.rparen;
} }
}; };
@ -1749,11 +1755,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ControlFlowExpression) TokenIndex { pub fn firstToken(self: *const ControlFlowExpression) TokenIndex {
return self.ltoken; return self.ltoken;
} }
pub fn lastToken(self: *ControlFlowExpression) TokenIndex { pub fn lastToken(self: *const ControlFlowExpression) TokenIndex {
if (self.rhs) |rhs| { if (self.rhs) |rhs| {
return rhs.lastToken(); return rhs.lastToken();
} }
@ -1792,11 +1798,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Suspend) TokenIndex { pub fn firstToken(self: *const Suspend) TokenIndex {
return self.suspend_token; return self.suspend_token;
} }
pub fn lastToken(self: *Suspend) TokenIndex { pub fn lastToken(self: *const Suspend) TokenIndex {
if (self.body) |body| { if (self.body) |body| {
return body.lastToken(); return body.lastToken();
} }
@ -1813,11 +1819,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *IntegerLiteral) TokenIndex { pub fn firstToken(self: *const IntegerLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *IntegerLiteral) TokenIndex { pub fn lastToken(self: *const IntegerLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1830,11 +1836,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *FloatLiteral) TokenIndex { pub fn firstToken(self: *const FloatLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *FloatLiteral) TokenIndex { pub fn lastToken(self: *const FloatLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1856,11 +1862,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *BuiltinCall) TokenIndex { pub fn firstToken(self: *const BuiltinCall) TokenIndex {
return self.builtin_token; return self.builtin_token;
} }
pub fn lastToken(self: *BuiltinCall) TokenIndex { pub fn lastToken(self: *const BuiltinCall) TokenIndex {
return self.rparen_token; return self.rparen_token;
} }
}; };
@ -1873,11 +1879,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *StringLiteral) TokenIndex { pub fn firstToken(self: *const StringLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *StringLiteral) TokenIndex { pub fn lastToken(self: *const StringLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1892,11 +1898,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *MultilineStringLiteral) TokenIndex { pub fn firstToken(self: *const MultilineStringLiteral) TokenIndex {
return self.lines.at(0).*; return self.lines.at(0).*;
} }
pub fn lastToken(self: *MultilineStringLiteral) TokenIndex { pub fn lastToken(self: *const MultilineStringLiteral) TokenIndex {
return self.lines.at(self.lines.len - 1).*; return self.lines.at(self.lines.len - 1).*;
} }
}; };
@ -1909,11 +1915,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *CharLiteral) TokenIndex { pub fn firstToken(self: *const CharLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *CharLiteral) TokenIndex { pub fn lastToken(self: *const CharLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1926,11 +1932,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *BoolLiteral) TokenIndex { pub fn firstToken(self: *const BoolLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *BoolLiteral) TokenIndex { pub fn lastToken(self: *const BoolLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1943,11 +1949,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *NullLiteral) TokenIndex { pub fn firstToken(self: *const NullLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *NullLiteral) TokenIndex { pub fn lastToken(self: *const NullLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1960,11 +1966,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *UndefinedLiteral) TokenIndex { pub fn firstToken(self: *const UndefinedLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *UndefinedLiteral) TokenIndex { pub fn lastToken(self: *const UndefinedLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1977,11 +1983,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ThisLiteral) TokenIndex { pub fn firstToken(self: *const ThisLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *ThisLiteral) TokenIndex { pub fn lastToken(self: *const ThisLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -2022,11 +2028,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *AsmOutput) TokenIndex { pub fn firstToken(self: *const AsmOutput) TokenIndex {
return self.lbracket; return self.lbracket;
} }
pub fn lastToken(self: *AsmOutput) TokenIndex { pub fn lastToken(self: *const AsmOutput) TokenIndex {
return self.rparen; return self.rparen;
} }
}; };
@ -2054,11 +2060,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *AsmInput) TokenIndex { pub fn firstToken(self: *const AsmInput) TokenIndex {
return self.lbracket; return self.lbracket;
} }
pub fn lastToken(self: *AsmInput) TokenIndex { pub fn lastToken(self: *const AsmInput) TokenIndex {
return self.rparen; return self.rparen;
} }
}; };
@ -2089,11 +2095,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Asm) TokenIndex { pub fn firstToken(self: *const Asm) TokenIndex {
return self.asm_token; return self.asm_token;
} }
pub fn lastToken(self: *Asm) TokenIndex { pub fn lastToken(self: *const Asm) TokenIndex {
return self.rparen; return self.rparen;
} }
}; };
@ -2106,11 +2112,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Unreachable) TokenIndex { pub fn firstToken(self: *const Unreachable) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *Unreachable) TokenIndex { pub fn lastToken(self: *const Unreachable) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -2123,11 +2129,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ErrorType) TokenIndex { pub fn firstToken(self: *const ErrorType) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *ErrorType) TokenIndex { pub fn lastToken(self: *const ErrorType) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -2140,11 +2146,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *VarType) TokenIndex { pub fn firstToken(self: *const VarType) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *VarType) TokenIndex { pub fn lastToken(self: *const VarType) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -2159,11 +2165,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *DocComment) TokenIndex { pub fn firstToken(self: *const DocComment) TokenIndex {
return self.lines.at(0).*; return self.lines.at(0).*;
} }
pub fn lastToken(self: *DocComment) TokenIndex { pub fn lastToken(self: *const DocComment) TokenIndex {
return self.lines.at(self.lines.len - 1).*; return self.lines.at(self.lines.len - 1).*;
} }
}; };
@ -2184,11 +2190,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *TestDecl) TokenIndex { pub fn firstToken(self: *const TestDecl) TokenIndex {
return self.test_token; return self.test_token;
} }
pub fn lastToken(self: *TestDecl) TokenIndex { pub fn lastToken(self: *const TestDecl) TokenIndex {
return self.body_node.lastToken(); return self.body_node.lastToken();
} }
}; };

View File

@ -10,6 +10,7 @@ comptime {
_ = @import("cases/bool.zig"); _ = @import("cases/bool.zig");
_ = @import("cases/bugs/1111.zig"); _ = @import("cases/bugs/1111.zig");
_ = @import("cases/bugs/1230.zig"); _ = @import("cases/bugs/1230.zig");
_ = @import("cases/bugs/1277.zig");
_ = @import("cases/bugs/394.zig"); _ = @import("cases/bugs/394.zig");
_ = @import("cases/bugs/655.zig"); _ = @import("cases/bugs/655.zig");
_ = @import("cases/bugs/656.zig"); _ = @import("cases/bugs/656.zig");

15
test/cases/bugs/1277.zig Normal file
View File

@ -0,0 +1,15 @@
const std = @import("std");
const S = struct {
f: ?fn () i32,
};
const s = S{ .f = f };
fn f() i32 {
return 1234;
}
test "don't emit an LLVM global for a const function when it's in an optional in a struct" {
std.debug.assertOrPanic(s.f.?() == 1234);
}

View File

@ -485,3 +485,14 @@ fn MakeType(comptime T: type) type {
} }
}; };
} }
test "implicit cast from *[N]T to ?[*]T" {
var x: ?[*]u16 = null;
var y: [4]u16 = [4]u16 {0, 1, 2, 3};
x = &y;
assert(std.mem.eql(u16, x.?[0..4], y[0..4]));
x.?[0] = 8;
y[3] = 6;
assert(std.mem.eql(u16, x.?[0..4], y[0..4]));
}

View File

@ -1,5 +1,5 @@
const A = error{ const A = error{
PathNotFound, FileNotFound,
NotDir, NotDir,
}; };
const B = error{OutOfMemory}; const B = error{OutOfMemory};
@ -15,7 +15,7 @@ test "merge error sets" {
@panic("unexpected"); @panic("unexpected");
} else |err| switch (err) { } else |err| switch (err) {
error.OutOfMemory => @panic("unexpected"), error.OutOfMemory => @panic("unexpected"),
error.PathNotFound => @panic("unexpected"), error.FileNotFound => @panic("unexpected"),
error.NotDir => {}, error.NotDir => {},
} }
} }

View File

@ -1,6 +1,51 @@
const tests = @import("tests.zig"); const tests = @import("tests.zig");
pub fn addCases(cases: *tests.TranslateCContext) void { pub fn addCases(cases: *tests.TranslateCContext) void {
cases.add("for loop with var init but empty body",
\\void foo(void) {
\\ for (int x = 0; x < 10; x++);
\\}
,
\\pub fn foo() void {
\\ {
\\ var x: c_int = 0;
\\ while (x < 10) : (x += 1) {}
\\ }
\\}
);
cases.add("do while with empty body",
\\void foo(void) {
\\ do ; while (1);
\\}
, // TODO this should be if (1 != 0) break
\\pub fn foo() void {
\\ while (true) {
\\ if (!1) break;
\\ }
\\}
);
cases.add("for with empty body",
\\void foo(void) {
\\ for (;;);
\\}
,
\\pub fn foo() void {
\\ while (true) {}
\\}
);
cases.add("while with empty body",
\\void foo(void) {
\\ while (1);
\\}
,
\\pub fn foo() void {
\\ while (1 != 0) {}
\\}
);
cases.add("double define struct", cases.add("double define struct",
\\typedef struct Bar Bar; \\typedef struct Bar Bar;
\\typedef struct Foo Foo; \\typedef struct Foo Foo;