diff --git a/CMakeLists.txt b/CMakeLists.txt
index f7c990794c..12b7efb06a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -463,11 +463,14 @@ set(ZIG_STD_FILES
"empty.zig"
"event.zig"
"event/channel.zig"
+ "event/fs.zig"
"event/future.zig"
"event/group.zig"
"event/lock.zig"
"event/locked.zig"
"event/loop.zig"
+ "event/rwlock.zig"
+ "event/rwlocked.zig"
"event/tcp.zig"
"fmt/errol/enum3.zig"
"fmt/errol/index.zig"
@@ -556,6 +559,7 @@ set(ZIG_STD_FILES
"math/tanh.zig"
"math/trunc.zig"
"mem.zig"
+ "mutex.zig"
"net.zig"
"os/child_process.zig"
"os/darwin.zig"
diff --git a/build.zig b/build.zig
index dd939365a2..6584e5ab1f 100644
--- a/build.zig
+++ b/build.zig
@@ -19,7 +19,7 @@ pub fn build(b: *Builder) !void {
var docgen_cmd = b.addCommand(null, b.env_map, [][]const u8{
docgen_exe.getOutputPath(),
rel_zig_exe,
- "doc/langref.html.in",
+ "doc" ++ os.path.sep_str ++ "langref.html.in",
os.path.join(b.allocator, b.cache_root, "langref.html") catch unreachable,
});
docgen_cmd.step.dependOn(&docgen_exe.step);
diff --git a/cmake/Findllvm.cmake b/cmake/Findllvm.cmake
index 788e57a644..a554fc21d6 100644
--- a/cmake/Findllvm.cmake
+++ b/cmake/Findllvm.cmake
@@ -8,7 +8,7 @@
# LLVM_LIBDIRS
find_program(LLVM_CONFIG_EXE
- NAMES llvm-config-7.0 llvm-config
+ NAMES llvm-config llvm-config-7.0
PATHS
"/mingw64/bin"
"/c/msys64/mingw64/bin"
diff --git a/doc/docgen.zig b/doc/docgen.zig
index e2da1fe6cc..3145c4483e 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -34,10 +34,10 @@ pub fn main() !void {
const out_file_name = try (args_it.next(allocator) orelse @panic("expected output arg"));
defer allocator.free(out_file_name);
- var in_file = try os.File.openRead(allocator, in_file_name);
+ var in_file = try os.File.openRead(in_file_name);
defer in_file.close();
- var out_file = try os.File.openWrite(allocator, out_file_name);
+ var out_file = try os.File.openWrite(out_file_name);
defer out_file.close();
var file_in_stream = io.FileInStream.init(&in_file);
@@ -370,9 +370,9 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
.n = header_stack_size,
},
});
- if (try urls.put(urlized, tag_token)) |other_tag_token| {
+ if (try urls.put(urlized, tag_token)) |entry| {
parseError(tokenizer, tag_token, "duplicate header url: #{}", urlized) catch {};
- parseError(tokenizer, other_tag_token, "other tag here") catch {};
+ parseError(tokenizer, entry.value, "other tag here") catch {};
return error.ParseError;
}
if (last_action == Action.Open) {
@@ -738,7 +738,7 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: var
try out.print("
{}
", escaped_source);
const name_plus_ext = try std.fmt.allocPrint(allocator, "{}.zig", code.name);
const tmp_source_file_name = try os.path.join(allocator, tmp_dir_name, name_plus_ext);
- try io.writeFile(allocator, tmp_source_file_name, trimmed_raw_source);
+ try io.writeFile(tmp_source_file_name, trimmed_raw_source);
switch (code.id) {
Code.Id.Exe => |expected_outcome| {
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 54677bc5b5..465f3c56a7 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -247,66 +247,6 @@ pub fn main() void {
Description
-
- i2 |
- (none) |
- signed 2-bit integer |
-
-
- u2 |
- (none) |
- unsigned 2-bit integer |
-
-
- i3 |
- (none) |
- signed 3-bit integer |
-
-
- u3 |
- (none) |
- unsigned 3-bit integer |
-
-
- i4 |
- (none) |
- signed 4-bit integer |
-
-
- u4 |
- (none) |
- unsigned 4-bit integer |
-
-
- i5 |
- (none) |
- signed 5-bit integer |
-
-
- u5 |
- (none) |
- unsigned 5-bit integer |
-
-
- i6 |
- (none) |
- signed 6-bit integer |
-
-
- u6 |
- (none) |
- unsigned 6-bit integer |
-
-
- i7 |
- (none) |
- signed 7-bit integer |
-
-
- u7 |
- (none) |
- unsigned 7-bit integer |
-
i8 |
int8_t |
@@ -476,6 +416,11 @@ pub fn main() void {
+
+ In addition to the integer types above, arbitrary bit-width integers can be referenced by using
+ an identifier of i or u followed by digits. For example, the identifier
+ i7 refers to a signed 7-bit integer.
+
{#see_also|Integers|Floats|void|Errors#}
{#header_close#}
{#header_open|Primitive Values#}
@@ -744,19 +689,19 @@ const yet_another_hex_float = 0x103.70P-5;
{#code_end#}
{#header_close#}
{#header_open|Floating Point Operations#}
- By default floating point operations use Optimized mode,
- but you can switch to Strict mode on a per-block basis:
+ By default floating point operations use Strict mode,
+ but you can switch to Optimized mode on a per-block basis:
{#code_begin|obj|foo#}
{#code_release_fast#}
const builtin = @import("builtin");
const big = f64(1 << 40);
export fn foo_strict(x: f64) f64 {
- @setFloatMode(this, builtin.FloatMode.Strict);
return x + big - big;
}
export fn foo_optimized(x: f64) f64 {
+ @setFloatMode(this, builtin.FloatMode.Optimized);
return x + big - big;
}
{#code_end#}
@@ -809,6 +754,8 @@ a += b
Addition.
- Can cause {#link|overflow|Default Operations#} for integers.
+ - Invokes {#link|Peer Type Resolution#} for the operands.
+ - See also {#link|@addWithOverflow#}.
|
@@ -826,6 +773,8 @@ a +%= b |
Wrapping Addition.
- Guaranteed to have twos-complement wrapping behavior.
+ - Invokes {#link|Peer Type Resolution#} for the operands.
+ - See also {#link|@addWithOverflow#}.
|
@@ -844,6 +793,8 @@ a -= b |
Subtraction.
- Can cause {#link|overflow|Default Operations#} for integers.
+ - Invokes {#link|Peer Type Resolution#} for the operands.
+ - See also {#link|@subWithOverflow#}.
|
@@ -861,6 +812,8 @@ a -%= b |
Wrapping Subtraction.
- Guaranteed to have twos-complement wrapping behavior.
+ - Invokes {#link|Peer Type Resolution#} for the operands.
+ - See also {#link|@subWithOverflow#}.
|
@@ -914,6 +867,8 @@ a *= b |
Multiplication.
- Can cause {#link|overflow|Default Operations#} for integers.
+ - Invokes {#link|Peer Type Resolution#} for the operands.
+ - See also {#link|@mulWithOverflow#}.
|
@@ -931,6 +886,8 @@ a *%= b |
Wrapping Multiplication.
- Guaranteed to have twos-complement wrapping behavior.
+ - Invokes {#link|Peer Type Resolution#} for the operands.
+ - See also {#link|@mulWithOverflow#}.
|
@@ -956,6 +913,7 @@ a /= b |
{#link|@divFloor#}, or
{#link|@divExact#} instead of /.
+ Invokes {#link|Peer Type Resolution#} for the operands.
@@ -979,6 +937,7 @@ a %= b |
{#link|@rem#} or
{#link|@mod#} instead of %.
+ Invokes {#link|Peer Type Resolution#} for the operands.
@@ -995,6 +954,7 @@ a <<= b |
Bit Shift Left.
+ b must be {#link|comptime-known|comptime#} or have a type with log2 number of bits as a.
- See also {#link|@shlExact#}.
- See also {#link|@shlWithOverflow#}.
@@ -1013,6 +973,7 @@ a >>= b |
Bit Shift Right.
+ b must be {#link|comptime-known|comptime#} or have a type with log2 number of bits as a.
- See also {#link|@shrExact#}.
|
@@ -1029,6 +990,9 @@ a &= b
Bitwise AND.
+
+ - Invokes {#link|Peer Type Resolution#} for the operands.
+
|
0b011 & 0b101 == 0b001
@@ -1043,6 +1007,9 @@ a |= b |
Bitwise OR.
+
+ - Invokes {#link|Peer Type Resolution#} for the operands.
+
|
0b010 | 0b100 == 0b110
@@ -1057,6 +1024,9 @@ a ^= b |
Bitwise XOR.
+
+ - Invokes {#link|Peer Type Resolution#} for the operands.
+
|
0b011 ^ 0b101 == 0b110
@@ -1186,6 +1156,7 @@ unwrapped == 1234
|
Returns true if a and b are equal, otherwise returns false.
+ Invokes {#link|Peer Type Resolution#} for the operands.
|
(1 == 1) == true
@@ -1218,6 +1189,7 @@ value == null
|
Returns false if a and b are equal, otherwise returns true.
+ Invokes {#link|Peer Type Resolution#} for the operands.
|
(1 != 1) == false
@@ -1233,6 +1205,7 @@ value == null
|
Returns true if a is greater than b, otherwise returns false.
+ Invokes {#link|Peer Type Resolution#} for the operands.
|
(2 > 1) == true
@@ -1248,6 +1221,7 @@ value == null
|
Returns true if a is greater than or equal to b, otherwise returns false.
+ Invokes {#link|Peer Type Resolution#} for the operands.
|
(2 >= 1) == true
@@ -1263,6 +1237,7 @@ value == null
|
Returns true if a is less than b, otherwise returns false.
+ Invokes {#link|Peer Type Resolution#} for the operands.
|
(1 < 2) == true
@@ -1278,6 +1253,7 @@ value == null
|
Returns true if a is less than or equal to b, otherwise returns false.
+ Invokes {#link|Peer Type Resolution#} for the operands.
|
(1 <= 2) == true
@@ -3807,6 +3783,7 @@ test "float widening" {
TODO: [N]T to ?[]const T
TODO: *[N]T to []T
TODO: *[N]T to [*]T
+ TODO: *[N]T to ?[*]T
TODO: *T to *[1]T
TODO: [N]T to E![]const T
{#header_close#}
@@ -3877,7 +3854,106 @@ test "float widening" {
{#header_close#}
{#header_open|Peer Type Resolution#}
- TODO
+ Peer Type Resolution occurs in these places:
+
+ - {#link|switch#} expressions
+ - {#link|if#} expressions
+ - {#link|while#} expressions
+ - {#link|for#} expressions
+ - Multiple break statements in a block
+ - Some {#link|binary operations|Table of Operators#}
+
+
+ This kind of type resolution chooses a type that all peer types can implicitly cast into. Here are
+ some examples:
+
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+const mem = std.mem;
+
+test "peer resolve int widening" {
+ var a: i8 = 12;
+ var b: i16 = 34;
+ var c = a + b;
+ assert(c == 46);
+ assert(@typeOf(c) == i16);
+}
+
+test "peer resolve arrays of different size to const slice" {
+ assert(mem.eql(u8, boolToStr(true), "true"));
+ assert(mem.eql(u8, boolToStr(false), "false"));
+ comptime assert(mem.eql(u8, boolToStr(true), "true"));
+ comptime assert(mem.eql(u8, boolToStr(false), "false"));
+}
+fn boolToStr(b: bool) []const u8 {
+ return if (b) "true" else "false";
+}
+
+test "peer resolve array and const slice" {
+ testPeerResolveArrayConstSlice(true);
+ comptime testPeerResolveArrayConstSlice(true);
+}
+fn testPeerResolveArrayConstSlice(b: bool) void {
+ const value1 = if (b) "aoeu" else ([]const u8)("zz");
+ const value2 = if (b) ([]const u8)("zz") else "aoeu";
+ assert(mem.eql(u8, value1, "aoeu"));
+ assert(mem.eql(u8, value2, "zz"));
+}
+
+test "peer type resolution: ?T and T" {
+ assert(peerTypeTAndOptionalT(true, false).? == 0);
+ assert(peerTypeTAndOptionalT(false, false).? == 3);
+ comptime {
+ assert(peerTypeTAndOptionalT(true, false).? == 0);
+ assert(peerTypeTAndOptionalT(false, false).? == 3);
+ }
+}
+fn peerTypeTAndOptionalT(c: bool, b: bool) ?usize {
+ if (c) {
+ return if (b) null else usize(0);
+ }
+
+ return usize(3);
+}
+
+test "peer type resolution: [0]u8 and []const u8" {
+ assert(peerTypeEmptyArrayAndSlice(true, "hi").len == 0);
+ assert(peerTypeEmptyArrayAndSlice(false, "hi").len == 1);
+ comptime {
+ assert(peerTypeEmptyArrayAndSlice(true, "hi").len == 0);
+ assert(peerTypeEmptyArrayAndSlice(false, "hi").len == 1);
+ }
+}
+fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
+ if (a) {
+ return []const u8{};
+ }
+
+ return slice[0..1];
+}
+test "peer type resolution: [0]u8, []const u8, and error![]u8" {
+ {
+ var data = "hi";
+ const slice = data[0..];
+ assert((try peerTypeEmptyArrayAndSliceAndError(true, slice)).len == 0);
+ assert((try peerTypeEmptyArrayAndSliceAndError(false, slice)).len == 1);
+ }
+ comptime {
+ var data = "hi";
+ const slice = data[0..];
+ assert((try peerTypeEmptyArrayAndSliceAndError(true, slice)).len == 0);
+ assert((try peerTypeEmptyArrayAndSliceAndError(false, slice)).len == 1);
+ }
+}
+fn peerTypeEmptyArrayAndSliceAndError(a: bool, slice: []u8) error![]u8 {
+ if (a) {
+ return []u8{};
+ }
+
+ return slice[0..1];
+}
+ {#code_end#}
{#header_close#}
{#header_close#}
@@ -4705,10 +4781,7 @@ async fn testSuspendBlock() void {
{#link|Await#} counts as a suspend point.
- {#header_open|Breaking from Suspend Blocks#}
-
- Suspend blocks support labeled break, just like {#link|while#} and {#link|for#}.
-
+ {#header_open|Resuming from Suspend Blocks#}
Upon entering a suspend block, the coroutine is already considered
suspended, and can be resumed. For example, if you started another kernel thread,
@@ -4741,6 +4814,9 @@ async fn testResumeFromSuspend(my_result: *i32) void {
my_result.* += 1;
}
{#code_end#}
+
+ This is guaranteed to be a tail call, and therefore will not cause a new stack frame.
+
{#header_close#}
{#header_close#}
{#header_open|Await#}
@@ -5527,7 +5603,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
Returns the field type of a struct or union.
{#header_close#}
{#header_open|@memcpy#}
- @memcpy(noalias dest: *u8, noalias source: *const u8, byte_count: usize)
+ @memcpy(noalias dest: [*]u8, noalias source: [*]const u8, byte_count: usize)
This function copies bytes from one region of memory to another. dest and
source are both pointers and must not overlap.
@@ -5545,7 +5621,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
mem.copy(u8, dest[0...byte_count], source[0...byte_count]);
{#header_close#}
{#header_open|@memset#}
- @memset(dest: *u8, c: u8, byte_count: usize)
+ @memset(dest: [*]u8, c: u8, byte_count: usize)
This function sets a region of memory to c. dest is a pointer.
@@ -5817,7 +5893,7 @@ pub const FloatMode = enum {
{#code_end#}
-
-
Optimized (default) - Floating point operations may do all of the following:
+ Optimized - Floating point operations may do all of the following:
- Assume the arguments and result are not NaN. Optimizations are required to retain defined behavior over NaNs, but the value of the result is undefined.
- Assume the arguments and result are not +/-Inf. Optimizations are required to retain defined behavior over +/-Inf, but the value of the result is undefined.
@@ -5829,7 +5905,7 @@ pub const FloatMode = enum {
This is equivalent to -ffast-math in GCC.
-
-
Strict - Floating point operations follow strict IEEE compliance.
+ Strict (default) - Floating point operations follow strict IEEE compliance.
{#see_also|Floating Point Operations#}
@@ -6035,7 +6111,7 @@ pub const TypeInfo = union(TypeId) {
size: Size,
is_const: bool,
is_volatile: bool,
- alignment: u32,
+ alignment: u29,
child: type,
pub const Size = enum {
@@ -7543,8 +7619,8 @@ hljs.registerLanguage("zig", function(t) {
},
a = t.IR + "\\s*\\(",
c = {
- keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong resume cancel await async orelse",
- built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast intCast floatCast intToFloat floatToInt boolToInt bytesToSlice sliceToBytes errSetCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz popCount import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall errorToInt intToError enumToInt intToEnum",
+ keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong resume suspend cancel await async orelse",
+ built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast intCast floatCast intToFloat floatToInt boolToInt bytesToSlice sliceToBytes errSetCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz popCount import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall errorToInt intToError enumToInt intToEnum handle",
literal: "true false null undefined"
},
n = [e, t.CLCM, t.CBCM, s, r];
diff --git a/example/cat/main.zig b/example/cat/main.zig
index 27690d2695..120ba1da39 100644
--- a/example/cat/main.zig
+++ b/example/cat/main.zig
@@ -20,7 +20,7 @@ pub fn main() !void {
} else if (arg[0] == '-') {
return usage(exe);
} else {
- var file = os.File.openRead(allocator, arg) catch |err| {
+ var file = os.File.openRead(arg) catch |err| {
warn("Unable to open file: {}\n", @errorName(err));
return err;
};
diff --git a/example/shared_library/mathtest.zig b/example/shared_library/mathtest.zig
index a04ec1544d..96e41f847c 100644
--- a/example/shared_library/mathtest.zig
+++ b/example/shared_library/mathtest.zig
@@ -1,3 +1,12 @@
+// TODO Remove this workaround
+comptime {
+ const builtin = @import("builtin");
+ if (builtin.os == builtin.Os.macosx) {
+ @export("__mh_execute_header", _mh_execute_header, builtin.GlobalLinkage.Weak);
+ }
+}
+var _mh_execute_header = extern struct {x: usize}{.x = 0};
+
export fn add(a: i32, b: i32) i32 {
return a + b;
}
diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig
index 5ca01ca7e7..15e714fc9d 100644
--- a/src-self-hosted/codegen.zig
+++ b/src-self-hosted/codegen.zig
@@ -19,8 +19,8 @@ pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code)
var output_path = try await (async comp.createRandomOutputPath(comp.target.objFileExt()) catch unreachable);
errdefer output_path.deinit();
- const llvm_handle = try comp.event_loop_local.getAnyLlvmContext();
- defer llvm_handle.release(comp.event_loop_local);
+ const llvm_handle = try comp.zig_compiler.getAnyLlvmContext();
+ defer llvm_handle.release(comp.zig_compiler);
const context = llvm_handle.node.data;
diff --git a/src-self-hosted/compilation.zig b/src-self-hosted/compilation.zig
index 5ff8b1a858..a8c3e13e33 100644
--- a/src-self-hosted/compilation.zig
+++ b/src-self-hosted/compilation.zig
@@ -30,9 +30,12 @@ const Package = @import("package.zig").Package;
const link = @import("link.zig").link;
const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
const CInt = @import("c_int.zig").CInt;
+const fs = event.fs;
+
+const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB
/// Data that is local to the event loop.
-pub const EventLoopLocal = struct {
+pub const ZigCompiler = struct {
loop: *event.Loop,
llvm_handle_pool: std.atomic.Stack(llvm.ContextRef),
lld_lock: event.Lock,
@@ -44,7 +47,7 @@ pub const EventLoopLocal = struct {
var lazy_init_targets = std.lazyInit(void);
- fn init(loop: *event.Loop) !EventLoopLocal {
+ fn init(loop: *event.Loop) !ZigCompiler {
lazy_init_targets.get() orelse {
Target.initializeAll();
lazy_init_targets.resolve();
@@ -54,7 +57,7 @@ pub const EventLoopLocal = struct {
try std.os.getRandomBytes(seed_bytes[0..]);
const seed = std.mem.readInt(seed_bytes, u64, builtin.Endian.Big);
- return EventLoopLocal{
+ return ZigCompiler{
.loop = loop,
.lld_lock = event.Lock.init(loop),
.llvm_handle_pool = std.atomic.Stack(llvm.ContextRef).init(),
@@ -64,7 +67,7 @@ pub const EventLoopLocal = struct {
}
/// Must be called only after EventLoop.run completes.
- fn deinit(self: *EventLoopLocal) void {
+ fn deinit(self: *ZigCompiler) void {
self.lld_lock.deinit();
while (self.llvm_handle_pool.pop()) |node| {
c.LLVMContextDispose(node.data);
@@ -74,7 +77,7 @@ pub const EventLoopLocal = struct {
/// Gets an exclusive handle on any LlvmContext.
/// Caller must release the handle when done.
- pub fn getAnyLlvmContext(self: *EventLoopLocal) !LlvmHandle {
+ pub fn getAnyLlvmContext(self: *ZigCompiler) !LlvmHandle {
if (self.llvm_handle_pool.pop()) |node| return LlvmHandle{ .node = node };
const context_ref = c.LLVMContextCreate() orelse return error.OutOfMemory;
@@ -89,24 +92,36 @@ pub const EventLoopLocal = struct {
return LlvmHandle{ .node = node };
}
- pub async fn getNativeLibC(self: *EventLoopLocal) !*LibCInstallation {
+ pub async fn getNativeLibC(self: *ZigCompiler) !*LibCInstallation {
if (await (async self.native_libc.start() catch unreachable)) |ptr| return ptr;
try await (async self.native_libc.data.findNative(self.loop) catch unreachable);
self.native_libc.resolve();
return &self.native_libc.data;
}
+
+ /// Must be called only once, ever. Sets global state.
+ pub fn setLlvmArgv(allocator: *Allocator, llvm_argv: []const []const u8) !void {
+ if (llvm_argv.len != 0) {
+ var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(allocator, [][]const []const u8{
+ [][]const u8{"zig (LLVM option parsing)"},
+ llvm_argv,
+ });
+ defer c_compatible_args.deinit();
+ c.ZigLLVMParseCommandLineOptions(llvm_argv.len + 1, c_compatible_args.ptr);
+ }
+ }
};
pub const LlvmHandle = struct {
node: *std.atomic.Stack(llvm.ContextRef).Node,
- pub fn release(self: LlvmHandle, event_loop_local: *EventLoopLocal) void {
- event_loop_local.llvm_handle_pool.push(self.node);
+ pub fn release(self: LlvmHandle, zig_compiler: *ZigCompiler) void {
+ zig_compiler.llvm_handle_pool.push(self.node);
}
};
pub const Compilation = struct {
- event_loop_local: *EventLoopLocal,
+ zig_compiler: *ZigCompiler,
loop: *event.Loop,
name: Buffer,
llvm_triple: Buffer,
@@ -134,7 +149,6 @@ pub const Compilation = struct {
linker_rdynamic: bool,
clang_argv: []const []const u8,
- llvm_argv: []const []const u8,
lib_dirs: []const []const u8,
rpath_list: []const []const u8,
assembly_files: []const []const u8,
@@ -214,6 +228,8 @@ pub const Compilation = struct {
deinit_group: event.Group(void),
destroy_handle: promise,
+ main_loop_handle: promise,
+ main_loop_future: event.Future(void),
have_err_ret_tracing: bool,
@@ -227,6 +243,8 @@ pub const Compilation = struct {
c_int_types: [CInt.list.len]*Type.Int,
+ fs_watch: *fs.Watch(*Scope.Root),
+
const IntTypeTable = std.HashMap(*const Type.Int.Key, *Type.Int, Type.Int.Key.hash, Type.Int.Key.eql);
const ArrayTypeTable = std.HashMap(*const Type.Array.Key, *Type.Array, Type.Array.Key.hash, Type.Array.Key.eql);
const PtrTypeTable = std.HashMap(*const Type.Pointer.Key, *Type.Pointer, Type.Pointer.Key.hash, Type.Pointer.Key.eql);
@@ -239,8 +257,6 @@ pub const Compilation = struct {
pub const BuildError = error{
OutOfMemory,
EndOfStream,
- BadFd,
- Io,
IsDir,
Unexpected,
SystemResources,
@@ -255,7 +271,6 @@ pub const Compilation = struct {
NameTooLong,
SystemFdQuotaExceeded,
NoDevice,
- PathNotFound,
NoSpaceLeft,
NotDir,
FileSystem,
@@ -282,6 +297,9 @@ pub const Compilation = struct {
LibCMissingDynamicLinker,
InvalidDarwinVersionString,
UnsupportedLinkArchitecture,
+ UserResourceLimitReached,
+ InvalidUtf8,
+ BadPathName,
};
pub const Event = union(enum) {
@@ -318,7 +336,7 @@ pub const Compilation = struct {
};
pub fn create(
- event_loop_local: *EventLoopLocal,
+ zig_compiler: *ZigCompiler,
name: []const u8,
root_src_path: ?[]const u8,
target: Target,
@@ -327,11 +345,45 @@ pub const Compilation = struct {
is_static: bool,
zig_lib_dir: []const u8,
) !*Compilation {
- const loop = event_loop_local.loop;
- const comp = try event_loop_local.loop.allocator.create(Compilation{
+ var optional_comp: ?*Compilation = null;
+ const handle = try async createAsync(
+ &optional_comp,
+ zig_compiler,
+ name,
+ root_src_path,
+ target,
+ kind,
+ build_mode,
+ is_static,
+ zig_lib_dir,
+ );
+ return optional_comp orelse if (getAwaitResult(
+ zig_compiler.loop.allocator,
+ handle,
+ )) |_| unreachable else |err| err;
+ }
+
+ async fn createAsync(
+ out_comp: *?*Compilation,
+ zig_compiler: *ZigCompiler,
+ name: []const u8,
+ root_src_path: ?[]const u8,
+ target: Target,
+ kind: Kind,
+ build_mode: builtin.Mode,
+ is_static: bool,
+ zig_lib_dir: []const u8,
+ ) !void {
+ // workaround for https://github.com/ziglang/zig/issues/1194
+ suspend {
+ resume @handle();
+ }
+
+ const loop = zig_compiler.loop;
+ var comp = Compilation{
.loop = loop,
.arena_allocator = std.heap.ArenaAllocator.init(loop.allocator),
- .event_loop_local = event_loop_local,
+ .zig_compiler = zig_compiler,
.events = undefined,
.root_src_path = root_src_path,
.target = target,
@@ -341,6 +393,9 @@ pub const Compilation = struct {
.zig_lib_dir = zig_lib_dir,
.zig_std_dir = undefined,
.tmp_dir = event.Future(BuildError![]u8).init(loop),
+ .destroy_handle = @handle(),
+ .main_loop_handle = undefined,
+ .main_loop_future = event.Future(void).init(loop),
.name = undefined,
.llvm_triple = undefined,
@@ -365,7 +420,6 @@ pub const Compilation = struct {
.is_static = is_static,
.linker_rdynamic = false,
.clang_argv = [][]const u8{},
- .llvm_argv = [][]const u8{},
.lib_dirs = [][]const u8{},
.rpath_list = [][]const u8{},
.assembly_files = [][]const u8{},
@@ -412,25 +466,26 @@ pub const Compilation = struct {
.std_package = undefined,
.override_libc = null,
- .destroy_handle = undefined,
.have_err_ret_tracing = false,
.primitive_type_table = undefined,
- });
- errdefer {
+
+ .fs_watch = undefined,
+ };
+ comp.link_libs_list = ArrayList(*LinkLib).init(comp.arena());
+ comp.primitive_type_table = TypeTable.init(comp.arena());
+
+ defer {
comp.int_type_table.private_data.deinit();
comp.array_type_table.private_data.deinit();
comp.ptr_type_table.private_data.deinit();
comp.fn_type_table.private_data.deinit();
comp.arena_allocator.deinit();
- comp.loop.allocator.destroy(comp);
}
comp.name = try Buffer.init(comp.arena(), name);
comp.llvm_triple = try target.getTriple(comp.arena());
comp.llvm_target = try Target.llvmTargetFromTriple(comp.llvm_triple);
- comp.link_libs_list = ArrayList(*LinkLib).init(comp.arena());
comp.zig_std_dir = try std.os.path.join(comp.arena(), zig_lib_dir, "std");
- comp.primitive_type_table = TypeTable.init(comp.arena());
const opt_level = switch (build_mode) {
builtin.Mode.Debug => llvm.CodeGenLevelNone,
@@ -444,8 +499,8 @@ pub const Compilation = struct {
// As a workaround we do not use target native features on Windows.
var target_specific_cpu_args: ?[*]u8 = null;
var target_specific_cpu_features: ?[*]u8 = null;
- errdefer llvm.DisposeMessage(target_specific_cpu_args);
- errdefer llvm.DisposeMessage(target_specific_cpu_features);
+ defer llvm.DisposeMessage(target_specific_cpu_args);
+ defer llvm.DisposeMessage(target_specific_cpu_features);
if (target == Target.Native and !target.isWindows()) {
target_specific_cpu_args = llvm.GetHostCPUName() orelse return error.OutOfMemory;
target_specific_cpu_features = llvm.GetNativeFeatures() orelse return error.OutOfMemory;
@@ -460,16 +515,16 @@ pub const Compilation = struct {
reloc_mode,
llvm.CodeModelDefault,
) orelse return error.OutOfMemory;
- errdefer llvm.DisposeTargetMachine(comp.target_machine);
+ defer llvm.DisposeTargetMachine(comp.target_machine);
comp.target_data_ref = llvm.CreateTargetDataLayout(comp.target_machine) orelse return error.OutOfMemory;
- errdefer llvm.DisposeTargetData(comp.target_data_ref);
+ defer llvm.DisposeTargetData(comp.target_data_ref);
comp.target_layout_str = llvm.CopyStringRepOfTargetData(comp.target_data_ref) orelse return error.OutOfMemory;
- errdefer llvm.DisposeMessage(comp.target_layout_str);
+ defer llvm.DisposeMessage(comp.target_layout_str);
comp.events = try event.Channel(Event).create(comp.loop, 0);
- errdefer comp.events.destroy();
+ defer comp.events.destroy();
if (root_src_path) |root_src| {
const dirname = std.os.path.dirname(root_src) orelse ".";
@@ -482,11 +537,27 @@ pub const Compilation = struct {
comp.root_package = try Package.create(comp.arena(), ".", "");
}
+ comp.fs_watch = try fs.Watch(*Scope.Root).create(loop, 16);
+ defer comp.fs_watch.destroy();
+
try comp.initTypes();
+ defer comp.primitive_type_table.deinit();
- comp.destroy_handle = try async comp.internalDeinit();
+ comp.main_loop_handle = async comp.mainLoop() catch unreachable;
+ // Set this to indicate that initialization completed successfully.
+ // from here on out we must not return an error.
+ // This must occur before the first suspend/await.
+ out_comp.* = ∁
+ // This suspend is resumed by destroy()
+ suspend;
+ // From here on is cleanup.
- return comp;
+ await (async comp.deinit_group.wait() catch unreachable);
+
+ if (comp.tmp_dir.getOrNull()) |tmp_dir_result| if (tmp_dir_result.*) |tmp_dir| {
+ // TODO evented I/O?
+ os.deleteTree(comp.arena(), tmp_dir) catch {};
+ } else |_| {};
}
/// it does ref the result because it could be an arbitrary integer size
@@ -672,55 +743,28 @@ pub const Compilation = struct {
assert((try comp.primitive_type_table.put(comp.u8_type.base.name, &comp.u8_type.base)) == null);
}
- /// This function can safely use async/await, because it manages Compilation's lifetime,
- /// and EventLoopLocal.deinit will not be called until the event.Loop.run() completes.
- async fn internalDeinit(self: *Compilation) void {
- suspend;
-
- await (async self.deinit_group.wait() catch unreachable);
- if (self.tmp_dir.getOrNull()) |tmp_dir_result| if (tmp_dir_result.*) |tmp_dir| {
- // TODO evented I/O?
- os.deleteTree(self.arena(), tmp_dir) catch {};
- } else |_| {};
-
- self.events.destroy();
-
- llvm.DisposeMessage(self.target_layout_str);
- llvm.DisposeTargetData(self.target_data_ref);
- llvm.DisposeTargetMachine(self.target_machine);
-
- self.primitive_type_table.deinit();
-
- self.arena_allocator.deinit();
- self.gpa().destroy(self);
- }
-
pub fn destroy(self: *Compilation) void {
+ cancel self.main_loop_handle;
resume self.destroy_handle;
}
- pub fn build(self: *Compilation) !void {
- if (self.llvm_argv.len != 0) {
- var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.arena(), [][]const []const u8{
- [][]const u8{"zig (LLVM option parsing)"},
- self.llvm_argv,
- });
- defer c_compatible_args.deinit();
- // TODO this sets global state
- c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
- }
-
- _ = try async self.buildAsync();
+ fn start(self: *Compilation) void {
+ self.main_loop_future.resolve();
}
- async fn buildAsync(self: *Compilation) void {
- while (true) {
- // TODO directly awaiting async should guarantee memory allocation elision
- const build_result = await (async self.compileAndLink() catch unreachable);
+ async fn mainLoop(self: *Compilation) void {
+ // wait until start() is called
+ _ = await (async self.main_loop_future.get() catch unreachable);
+ var build_result = await (async self.initialCompile() catch unreachable);
+
+ while (true) {
+ const link_result = if (build_result) blk: {
+ break :blk await (async self.maybeLink() catch unreachable);
+ } else |err| err;
// this makes a handy error return trace and stack trace in debug mode
if (std.debug.runtime_safety) {
- build_result catch unreachable;
+ link_result catch unreachable;
}
const compile_errors = blk: {
@@ -729,7 +773,7 @@ pub const Compilation = struct {
break :blk held.value.toOwnedSlice();
};
- if (build_result) |_| {
+ if (link_result) |_| {
if (compile_errors.len == 0) {
await (async self.events.put(Event.Ok) catch unreachable);
} else {
@@ -742,105 +786,195 @@ pub const Compilation = struct {
await (async self.events.put(Event{ .Error = err }) catch unreachable);
}
- // for now we stop after 1
- return;
+ // First, get an item from the watch channel, waiting on the channel.
+ var group = event.Group(BuildError!void).init(self.loop);
+ {
+ const ev = (await (async self.fs_watch.channel.get() catch unreachable)) catch |err| {
+ build_result = err;
+ continue;
+ };
+ const root_scope = ev.data;
+ group.call(rebuildFile, self, root_scope) catch |err| {
+ build_result = err;
+ continue;
+ };
+ }
+ // Next, get all the items from the channel that are buffered up.
+ while (await (async self.fs_watch.channel.getOrNull() catch unreachable)) |ev_or_err| {
+ if (ev_or_err) |ev| {
+ const root_scope = ev.data;
+ group.call(rebuildFile, self, root_scope) catch |err| {
+ build_result = err;
+ continue;
+ };
+ } else |err| {
+ build_result = err;
+ continue;
+ }
+ }
+ build_result = await (async group.wait() catch unreachable);
}
}
- async fn compileAndLink(self: *Compilation) !void {
- if (self.root_src_path) |root_src_path| {
- // TODO async/await os.path.real
- const root_src_real_path = os.path.real(self.gpa(), root_src_path) catch |err| {
- try printError("unable to get real path '{}': {}", root_src_path, err);
- return err;
- };
- const root_scope = blk: {
- errdefer self.gpa().free(root_src_real_path);
-
- // TODO async/await readFileAlloc()
- const source_code = io.readFileAlloc(self.gpa(), root_src_real_path) catch |err| {
- try printError("unable to open '{}': {}", root_src_real_path, err);
- return err;
- };
- errdefer self.gpa().free(source_code);
-
- const tree = try self.gpa().createOne(ast.Tree);
- tree.* = try std.zig.parse(self.gpa(), source_code);
- errdefer {
- tree.deinit();
- self.gpa().destroy(tree);
- }
-
- break :blk try Scope.Root.create(self, tree, root_src_real_path);
- };
- defer root_scope.base.deref(self);
- const tree = root_scope.tree;
-
- var error_it = tree.errors.iterator(0);
- while (error_it.next()) |parse_error| {
- const msg = try Msg.createFromParseErrorAndScope(self, root_scope, parse_error);
- errdefer msg.destroy();
-
- try await (async self.addCompileErrorAsync(msg) catch unreachable);
- }
- if (tree.errors.len != 0) {
+ async fn rebuildFile(self: *Compilation, root_scope: *Scope.Root) !void {
+ const tree_scope = blk: {
+ const source_code = (await (async fs.readFile(
+ self.loop,
+ root_scope.realpath,
+ max_src_size,
+ ) catch unreachable)) catch |err| {
+ try self.addCompileErrorCli(root_scope.realpath, "unable to open: {}", @errorName(err));
return;
+ };
+ errdefer self.gpa().free(source_code);
+
+ const tree = try self.gpa().createOne(ast.Tree);
+ tree.* = try std.zig.parse(self.gpa(), source_code);
+ errdefer {
+ tree.deinit();
+ self.gpa().destroy(tree);
}
- const decls = try Scope.Decls.create(self, &root_scope.base);
- defer decls.base.deref(self);
+ break :blk try Scope.AstTree.create(self, tree, root_scope);
+ };
+ defer tree_scope.base.deref(self);
- var decl_group = event.Group(BuildError!void).init(self.loop);
- var decl_group_consumed = false;
- errdefer if (!decl_group_consumed) decl_group.cancelAll();
+ var error_it = tree_scope.tree.errors.iterator(0);
+ while (error_it.next()) |parse_error| {
+ const msg = try Msg.createFromParseErrorAndScope(self, tree_scope, parse_error);
+ errdefer msg.destroy();
- var it = tree.root_node.decls.iterator(0);
- while (it.next()) |decl_ptr| {
- const decl = decl_ptr.*;
- switch (decl.id) {
- ast.Node.Id.Comptime => {
- const comptime_node = @fieldParentPtr(ast.Node.Comptime, "base", decl);
+ try await (async self.addCompileErrorAsync(msg) catch unreachable);
+ }
+ if (tree_scope.tree.errors.len != 0) {
+ return;
+ }
- try self.prelink_group.call(addCompTimeBlock, self, &decls.base, comptime_node);
- },
- ast.Node.Id.VarDecl => @panic("TODO"),
- ast.Node.Id.FnProto => {
- const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
+ const locked_table = await (async root_scope.decls.table.acquireWrite() catch unreachable);
+ defer locked_table.release();
- const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else {
- try self.addCompileError(root_scope, Span{
- .first = fn_proto.fn_token,
- .last = fn_proto.fn_token + 1,
- }, "missing function name");
- continue;
- };
+ var decl_group = event.Group(BuildError!void).init(self.loop);
+ defer decl_group.deinit();
+ try await try async self.rebuildChangedDecls(
+ &decl_group,
+ locked_table.value,
+ root_scope.decls,
+ &tree_scope.tree.root_node.decls,
+ tree_scope,
+ );
+
+ try await (async decl_group.wait() catch unreachable);
+ }
+
+ async fn rebuildChangedDecls(
+ self: *Compilation,
+ group: *event.Group(BuildError!void),
+ locked_table: *Decl.Table,
+ decl_scope: *Scope.Decls,
+ ast_decls: *ast.Node.Root.DeclList,
+ tree_scope: *Scope.AstTree,
+ ) !void {
+ var existing_decls = try locked_table.clone();
+ defer existing_decls.deinit();
+
+ var ast_it = ast_decls.iterator(0);
+ while (ast_it.next()) |decl_ptr| {
+ const decl = decl_ptr.*;
+ switch (decl.id) {
+ ast.Node.Id.Comptime => {
+ const comptime_node = @fieldParentPtr(ast.Node.Comptime, "base", decl);
+
+ // TODO connect existing comptime decls to updated source files
+
+ try self.prelink_group.call(addCompTimeBlock, self, tree_scope, &decl_scope.base, comptime_node);
+ },
+ ast.Node.Id.VarDecl => @panic("TODO"),
+ ast.Node.Id.FnProto => {
+ const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
+
+ const name = if (fn_proto.name_token) |name_token| tree_scope.tree.tokenSlice(name_token) else {
+ try self.addCompileError(tree_scope, Span{
+ .first = fn_proto.fn_token,
+ .last = fn_proto.fn_token + 1,
+ }, "missing function name");
+ continue;
+ };
+
+ if (existing_decls.remove(name)) |entry| {
+ // compare new code to existing
+ if (entry.value.cast(Decl.Fn)) |existing_fn_decl| {
+ // Just compare the old bytes to the new bytes of the top level decl.
+ // Even if the AST is technically the same, we want error messages to display
+ // from the most recent source.
+ const old_decl_src = existing_fn_decl.base.tree_scope.tree.getNodeSource(
+ &existing_fn_decl.fn_proto.base,
+ );
+ const new_decl_src = tree_scope.tree.getNodeSource(&fn_proto.base);
+ if (mem.eql(u8, old_decl_src, new_decl_src)) {
+ // it's the same, we can skip this decl
+ continue;
+ } else {
+ @panic("TODO decl changed implementation");
+ // Add the new thing before dereferencing the old thing. This way we don't end
+ // up pointlessly re-creating things we end up using in the new thing.
+ }
+ } else {
+ @panic("TODO decl changed kind");
+ }
+ } else {
+ // add new decl
const fn_decl = try self.gpa().create(Decl.Fn{
.base = Decl{
.id = Decl.Id.Fn,
.name = name,
- .visib = parseVisibToken(tree, fn_proto.visib_token),
+ .visib = parseVisibToken(tree_scope.tree, fn_proto.visib_token),
.resolution = event.Future(BuildError!void).init(self.loop),
- .parent_scope = &decls.base,
+ .parent_scope = &decl_scope.base,
+ .tree_scope = tree_scope,
},
.value = Decl.Fn.Val{ .Unresolved = {} },
.fn_proto = fn_proto,
});
+ tree_scope.base.ref();
errdefer self.gpa().destroy(fn_decl);
- try decl_group.call(addTopLevelDecl, self, decls, &fn_decl.base);
- },
- ast.Node.Id.TestDecl => @panic("TODO"),
- else => unreachable,
- }
+ try group.call(addTopLevelDecl, self, &fn_decl.base, locked_table);
+ }
+ },
+ ast.Node.Id.TestDecl => @panic("TODO"),
+ else => unreachable,
}
- decl_group_consumed = true;
- try await (async decl_group.wait() catch unreachable);
-
- // Now other code can rely on the decls scope having a complete list of names.
- decls.name_future.resolve();
}
+ var existing_decl_it = existing_decls.iterator();
+ while (existing_decl_it.next()) |entry| {
+ // this decl was deleted
+ const existing_decl = entry.value;
+ @panic("TODO handle decl deletion");
+ }
+ }
+
+ async fn initialCompile(self: *Compilation) !void {
+ if (self.root_src_path) |root_src_path| {
+ const root_scope = blk: {
+ // TODO async/await os.path.real
+ const root_src_real_path = os.path.realAlloc(self.gpa(), root_src_path) catch |err| {
+ try self.addCompileErrorCli(root_src_path, "unable to open: {}", @errorName(err));
+ return;
+ };
+ errdefer self.gpa().free(root_src_real_path);
+
+ break :blk try Scope.Root.create(self, root_src_real_path);
+ };
+ defer root_scope.base.deref(self);
+
+ assert((try await try async self.fs_watch.addFile(root_scope.realpath, root_scope)) == null);
+ try await try async self.rebuildFile(root_scope);
+ }
+ }
+
+ async fn maybeLink(self: *Compilation) !void {
(await (async self.prelink_group.wait() catch unreachable)) catch |err| switch (err) {
error.SemanticAnalysisFailed => {},
else => return err,
@@ -861,6 +995,7 @@ pub const Compilation = struct {
/// caller takes ownership of resulting Code
async fn genAndAnalyzeCode(
comp: *Compilation,
+ tree_scope: *Scope.AstTree,
scope: *Scope,
node: *ast.Node,
expected_type: ?*Type,
@@ -868,6 +1003,7 @@ pub const Compilation = struct {
const unanalyzed_code = try await (async ir.gen(
comp,
node,
+ tree_scope,
scope,
) catch unreachable);
defer unanalyzed_code.destroy(comp.gpa());
@@ -894,6 +1030,7 @@ pub const Compilation = struct {
async fn addCompTimeBlock(
comp: *Compilation,
+ tree_scope: *Scope.AstTree,
scope: *Scope,
comptime_node: *ast.Node.Comptime,
) !void {
@@ -902,6 +1039,7 @@ pub const Compilation = struct {
const analyzed_code = (await (async genAndAnalyzeCode(
comp,
+ tree_scope,
scope,
comptime_node.expr,
&void_type.base,
@@ -914,38 +1052,42 @@ pub const Compilation = struct {
analyzed_code.destroy(comp.gpa());
}
- async fn addTopLevelDecl(self: *Compilation, decls: *Scope.Decls, decl: *Decl) !void {
- const tree = decl.findRootScope().tree;
- const is_export = decl.isExported(tree);
-
- var add_to_table_resolved = false;
- const add_to_table = async self.addDeclToTable(decls, decl) catch unreachable;
- errdefer if (!add_to_table_resolved) cancel add_to_table; // TODO https://github.com/ziglang/zig/issues/1261
+ async fn addTopLevelDecl(
+ self: *Compilation,
+ decl: *Decl,
+ locked_table: *Decl.Table,
+ ) !void {
+ const is_export = decl.isExported(decl.tree_scope.tree);
if (is_export) {
try self.prelink_group.call(verifyUniqueSymbol, self, decl);
try self.prelink_group.call(resolveDecl, self, decl);
}
- add_to_table_resolved = true;
- try await add_to_table;
- }
-
- async fn addDeclToTable(self: *Compilation, decls: *Scope.Decls, decl: *Decl) !void {
- const held = await (async decls.table.acquire() catch unreachable);
- defer held.release();
-
- if (try held.value.put(decl.name, decl)) |other_decl| {
- try self.addCompileError(decls.base.findRoot(), decl.getSpan(), "redefinition of '{}'", decl.name);
+ const gop = try locked_table.getOrPut(decl.name);
+ if (gop.found_existing) {
+ try self.addCompileError(decl.tree_scope, decl.getSpan(), "redefinition of '{}'", decl.name);
// TODO note: other definition here
+ } else {
+ gop.kv.value = decl;
}
}
- fn addCompileError(self: *Compilation, root: *Scope.Root, span: Span, comptime fmt: []const u8, args: ...) !void {
+ fn addCompileError(self: *Compilation, tree_scope: *Scope.AstTree, span: Span, comptime fmt: []const u8, args: ...) !void {
const text = try std.fmt.allocPrint(self.gpa(), fmt, args);
errdefer self.gpa().free(text);
- const msg = try Msg.createFromScope(self, root, span, text);
+ const msg = try Msg.createFromScope(self, tree_scope, span, text);
+ errdefer msg.destroy();
+
+ try self.prelink_group.call(addCompileErrorAsync, self, msg);
+ }
+
+ fn addCompileErrorCli(self: *Compilation, realpath: []const u8, comptime fmt: []const u8, args: ...) !void {
+ const text = try std.fmt.allocPrint(self.gpa(), fmt, args);
+ errdefer self.gpa().free(text);
+
+ const msg = try Msg.createFromCli(self, realpath, text);
errdefer msg.destroy();
try self.prelink_group.call(addCompileErrorAsync, self, msg);
@@ -969,7 +1111,7 @@ pub const Compilation = struct {
if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
try self.addCompileError(
- decl.findRootScope(),
+ decl.tree_scope,
decl.getSpan(),
"exported symbol collision: '{}'",
decl.name,
@@ -1019,7 +1161,7 @@ pub const Compilation = struct {
async fn startFindingNativeLibC(self: *Compilation) void {
await (async self.loop.yield() catch unreachable);
// we don't care if it fails, we're just trying to kick off the future resolution
- _ = (await (async self.event_loop_local.getNativeLibC() catch unreachable)) catch return;
+ _ = (await (async self.zig_compiler.getNativeLibC() catch unreachable)) catch return;
}
/// General Purpose Allocator. Must free when done.
@@ -1077,7 +1219,7 @@ pub const Compilation = struct {
var rand_bytes: [9]u8 = undefined;
{
- const held = await (async self.event_loop_local.prng.acquire() catch unreachable);
+ const held = await (async self.zig_compiler.prng.acquire() catch unreachable);
defer held.release();
held.value.random.bytes(rand_bytes[0..]);
@@ -1093,18 +1235,24 @@ pub const Compilation = struct {
}
/// Returns a value which has been ref()'d once
- async fn analyzeConstValue(comp: *Compilation, scope: *Scope, node: *ast.Node, expected_type: *Type) !*Value {
- const analyzed_code = try await (async comp.genAndAnalyzeCode(scope, node, expected_type) catch unreachable);
+ async fn analyzeConstValue(
+ comp: *Compilation,
+ tree_scope: *Scope.AstTree,
+ scope: *Scope,
+ node: *ast.Node,
+ expected_type: *Type,
+ ) !*Value {
+ const analyzed_code = try await (async comp.genAndAnalyzeCode(tree_scope, scope, node, expected_type) catch unreachable);
defer analyzed_code.destroy(comp.gpa());
return analyzed_code.getCompTimeResult(comp);
}
- async fn analyzeTypeExpr(comp: *Compilation, scope: *Scope, node: *ast.Node) !*Type {
+ async fn analyzeTypeExpr(comp: *Compilation, tree_scope: *Scope.AstTree, scope: *Scope, node: *ast.Node) !*Type {
const meta_type = &Type.MetaType.get(comp).base;
defer meta_type.base.deref(comp);
- const result_val = try await (async comp.analyzeConstValue(scope, node, meta_type) catch unreachable);
+ const result_val = try await (async comp.analyzeConstValue(tree_scope, scope, node, meta_type) catch unreachable);
errdefer result_val.base.deref(comp);
return result_val.cast(Type).?;
@@ -1120,13 +1268,6 @@ pub const Compilation = struct {
}
};
-fn printError(comptime format: []const u8, args: ...) !void {
- var stderr_file = try std.io.getStdErr();
- var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
- const out_stream = &stderr_file_out_stream.stream;
- try out_stream.print(format, args);
-}
-
fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib {
if (optional_token_index) |token_index| {
const token = tree.tokens.at(token_index);
@@ -1150,12 +1291,14 @@ async fn generateDecl(comp: *Compilation, decl: *Decl) !void {
}
async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
+ const tree_scope = fn_decl.base.tree_scope;
+
const body_node = fn_decl.fn_proto.body_node orelse return await (async generateDeclFnProto(comp, fn_decl) catch unreachable);
const fndef_scope = try Scope.FnDef.create(comp, fn_decl.base.parent_scope);
defer fndef_scope.base.deref(comp);
- const fn_type = try await (async analyzeFnType(comp, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
+ const fn_type = try await (async analyzeFnType(comp, tree_scope, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
defer fn_type.base.base.deref(comp);
var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name);
@@ -1168,18 +1311,17 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
symbol_name_consumed = true;
// Define local parameter variables
- const root_scope = fn_decl.base.findRootScope();
for (fn_type.key.data.Normal.params) |param, i| {
//AstNode *param_decl_node = get_param_decl_node(fn_table_entry, i);
const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", fn_decl.fn_proto.params.at(i).*);
const name_token = param_decl.name_token orelse {
- try comp.addCompileError(root_scope, Span{
+ try comp.addCompileError(tree_scope, Span{
.first = param_decl.firstToken(),
.last = param_decl.type_node.firstToken(),
}, "missing parameter name");
return error.SemanticAnalysisFailed;
};
- const param_name = root_scope.tree.tokenSlice(name_token);
+ const param_name = tree_scope.tree.tokenSlice(name_token);
// if (is_noalias && get_codegen_ptr_type(param_type) == nullptr) {
// add_node_error(g, param_decl_node, buf_sprintf("noalias on non-pointer parameter"));
@@ -1201,6 +1343,7 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
}
const analyzed_code = try await (async comp.genAndAnalyzeCode(
+ tree_scope,
fn_val.child_scope,
body_node,
fn_type.key.data.Normal.return_type,
@@ -1231,12 +1374,17 @@ fn getZigDir(allocator: *mem.Allocator) ![]u8 {
return os.getAppDataDir(allocator, "zig");
}
-async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.FnProto) !*Type.Fn {
+async fn analyzeFnType(
+ comp: *Compilation,
+ tree_scope: *Scope.AstTree,
+ scope: *Scope,
+ fn_proto: *ast.Node.FnProto,
+) !*Type.Fn {
const return_type_node = switch (fn_proto.return_type) {
ast.Node.FnProto.ReturnType.Explicit => |n| n,
ast.Node.FnProto.ReturnType.InferErrorSet => |n| n,
};
- const return_type = try await (async comp.analyzeTypeExpr(scope, return_type_node) catch unreachable);
+ const return_type = try await (async comp.analyzeTypeExpr(tree_scope, scope, return_type_node) catch unreachable);
return_type.base.deref(comp);
var params = ArrayList(Type.Fn.Param).init(comp.gpa());
@@ -1252,7 +1400,7 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn
var it = fn_proto.params.iterator(0);
while (it.next()) |param_node_ptr| {
const param_node = param_node_ptr.*.cast(ast.Node.ParamDecl).?;
- const param_type = try await (async comp.analyzeTypeExpr(scope, param_node.type_node) catch unreachable);
+ const param_type = try await (async comp.analyzeTypeExpr(tree_scope, scope, param_node.type_node) catch unreachable);
errdefer param_type.base.deref(comp);
try params.append(Type.Fn.Param{
.typ = param_type,
@@ -1289,7 +1437,12 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn
}
async fn generateDeclFnProto(comp: *Compilation, fn_decl: *Decl.Fn) !void {
- const fn_type = try await (async analyzeFnType(comp, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
+ const fn_type = try await (async analyzeFnType(
+ comp,
+ fn_decl.base.tree_scope,
+ fn_decl.base.parent_scope,
+ fn_decl.fn_proto,
+ ) catch unreachable);
defer fn_type.base.base.deref(comp);
var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name);
@@ -1301,3 +1454,14 @@ async fn generateDeclFnProto(comp: *Compilation, fn_decl: *Decl.Fn) !void {
fn_decl.value = Decl.Fn.Val{ .FnProto = fn_proto_val };
symbol_name_consumed = true;
}
+
+// TODO these are hacks which should probably be solved by the language
+fn getAwaitResult(allocator: *Allocator, handle: var) @typeInfo(@typeOf(handle)).Promise.child.? {
+ var result: ?@typeInfo(@typeOf(handle)).Promise.child.? = null;
+ cancel (async getAwaitResultAsync(handle, &result) catch unreachable);
+ return result.?;
+}
+
+async fn getAwaitResultAsync(handle: var, out: *?@typeInfo(@typeOf(handle)).Promise.child.?) void {
+ out.* = await handle;
+}
diff --git a/src-self-hosted/decl.zig b/src-self-hosted/decl.zig
index 6e80243038..25fcf195d1 100644
--- a/src-self-hosted/decl.zig
+++ b/src-self-hosted/decl.zig
@@ -17,8 +17,16 @@ pub const Decl = struct {
resolution: event.Future(Compilation.BuildError!void),
parent_scope: *Scope,
+ // TODO when we destroy the decl, deref the tree scope
+ tree_scope: *Scope.AstTree,
+
pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8);
+ pub fn cast(base: *Decl, comptime T: type) ?*T {
+ if (base.id != @field(Id, @typeName(T))) return null;
+ return @fieldParentPtr(T, "base", base);
+ }
+
pub fn isExported(base: *const Decl, tree: *ast.Tree) bool {
switch (base.id) {
Id.Fn => {
@@ -95,4 +103,3 @@ pub const Decl = struct {
base: Decl,
};
};
-
diff --git a/src-self-hosted/errmsg.zig b/src-self-hosted/errmsg.zig
index 51e135686a..028c2e2174 100644
--- a/src-self-hosted/errmsg.zig
+++ b/src-self-hosted/errmsg.zig
@@ -33,35 +33,48 @@ pub const Span = struct {
};
pub const Msg = struct {
- span: Span,
text: []u8,
+ realpath: []u8,
data: Data,
const Data = union(enum) {
+ Cli: Cli,
PathAndTree: PathAndTree,
ScopeAndComp: ScopeAndComp,
};
const PathAndTree = struct {
- realpath: []const u8,
+ span: Span,
tree: *ast.Tree,
allocator: *mem.Allocator,
};
const ScopeAndComp = struct {
- root_scope: *Scope.Root,
+ span: Span,
+ tree_scope: *Scope.AstTree,
compilation: *Compilation,
};
+ const Cli = struct {
+ allocator: *mem.Allocator,
+ };
+
pub fn destroy(self: *Msg) void {
switch (self.data) {
+ Data.Cli => |cli| {
+ cli.allocator.free(self.text);
+ cli.allocator.free(self.realpath);
+ cli.allocator.destroy(self);
+ },
Data.PathAndTree => |path_and_tree| {
path_and_tree.allocator.free(self.text);
+ path_and_tree.allocator.free(self.realpath);
path_and_tree.allocator.destroy(self);
},
Data.ScopeAndComp => |scope_and_comp| {
- scope_and_comp.root_scope.base.deref(scope_and_comp.compilation);
+ scope_and_comp.tree_scope.base.deref(scope_and_comp.compilation);
scope_and_comp.compilation.gpa().free(self.text);
+ scope_and_comp.compilation.gpa().free(self.realpath);
scope_and_comp.compilation.gpa().destroy(self);
},
}
@@ -69,6 +82,7 @@ pub const Msg = struct {
fn getAllocator(self: *const Msg) *mem.Allocator {
switch (self.data) {
+ Data.Cli => |cli| return cli.allocator,
Data.PathAndTree => |path_and_tree| {
return path_and_tree.allocator;
},
@@ -78,71 +92,93 @@ pub const Msg = struct {
}
}
- pub fn getRealPath(self: *const Msg) []const u8 {
- switch (self.data) {
- Data.PathAndTree => |path_and_tree| {
- return path_and_tree.realpath;
- },
- Data.ScopeAndComp => |scope_and_comp| {
- return scope_and_comp.root_scope.realpath;
- },
- }
- }
-
pub fn getTree(self: *const Msg) *ast.Tree {
switch (self.data) {
+ Data.Cli => unreachable,
Data.PathAndTree => |path_and_tree| {
return path_and_tree.tree;
},
Data.ScopeAndComp => |scope_and_comp| {
- return scope_and_comp.root_scope.tree;
+ return scope_and_comp.tree_scope.tree;
},
}
}
+ pub fn getSpan(self: *const Msg) Span {
+ return switch (self.data) {
+ Data.Cli => unreachable,
+ Data.PathAndTree => |path_and_tree| path_and_tree.span,
+ Data.ScopeAndComp => |scope_and_comp| scope_and_comp.span,
+ };
+ }
+
/// Takes ownership of text
- /// References root_scope, and derefs when the msg is freed
- pub fn createFromScope(comp: *Compilation, root_scope: *Scope.Root, span: Span, text: []u8) !*Msg {
+ /// References tree_scope, and derefs when the msg is freed
+ pub fn createFromScope(comp: *Compilation, tree_scope: *Scope.AstTree, span: Span, text: []u8) !*Msg {
+ const realpath = try mem.dupe(comp.gpa(), u8, tree_scope.root().realpath);
+ errdefer comp.gpa().free(realpath);
+
const msg = try comp.gpa().create(Msg{
.text = text,
- .span = span,
+ .realpath = realpath,
.data = Data{
.ScopeAndComp = ScopeAndComp{
- .root_scope = root_scope,
+ .tree_scope = tree_scope,
.compilation = comp,
+ .span = span,
},
},
});
- root_scope.base.ref();
+ tree_scope.base.ref();
+ return msg;
+ }
+
+ /// Caller owns returned Msg and must free with `allocator`
+ /// allocator will additionally be used for printing messages later.
+ pub fn createFromCli(comp: *Compilation, realpath: []const u8, text: []u8) !*Msg {
+ const realpath_copy = try mem.dupe(comp.gpa(), u8, realpath);
+ errdefer comp.gpa().free(realpath_copy);
+
+ const msg = try comp.gpa().create(Msg{
+ .text = text,
+ .realpath = realpath_copy,
+ .data = Data{
+ .Cli = Cli{ .allocator = comp.gpa() },
+ },
+ });
return msg;
}
pub fn createFromParseErrorAndScope(
comp: *Compilation,
- root_scope: *Scope.Root,
+ tree_scope: *Scope.AstTree,
parse_error: *const ast.Error,
) !*Msg {
const loc_token = parse_error.loc();
var text_buf = try std.Buffer.initSize(comp.gpa(), 0);
defer text_buf.deinit();
+ const realpath_copy = try mem.dupe(comp.gpa(), u8, tree_scope.root().realpath);
+ errdefer comp.gpa().free(realpath_copy);
+
var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
- try parse_error.render(&root_scope.tree.tokens, out_stream);
+ try parse_error.render(&tree_scope.tree.tokens, out_stream);
const msg = try comp.gpa().create(Msg{
.text = undefined,
- .span = Span{
- .first = loc_token,
- .last = loc_token,
- },
+ .realpath = realpath_copy,
.data = Data{
.ScopeAndComp = ScopeAndComp{
- .root_scope = root_scope,
+ .tree_scope = tree_scope,
.compilation = comp,
+ .span = Span{
+ .first = loc_token,
+ .last = loc_token,
+ },
},
},
});
- root_scope.base.ref();
+ tree_scope.base.ref();
msg.text = text_buf.toOwnedSlice();
return msg;
}
@@ -161,22 +197,25 @@ pub const Msg = struct {
var text_buf = try std.Buffer.initSize(allocator, 0);
defer text_buf.deinit();
+ const realpath_copy = try mem.dupe(allocator, u8, realpath);
+ errdefer allocator.free(realpath_copy);
+
var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
try parse_error.render(&tree.tokens, out_stream);
const msg = try allocator.create(Msg{
.text = undefined,
+ .realpath = realpath_copy,
.data = Data{
.PathAndTree = PathAndTree{
.allocator = allocator,
- .realpath = realpath,
.tree = tree,
+ .span = Span{
+ .first = loc_token,
+ .last = loc_token,
+ },
},
},
- .span = Span{
- .first = loc_token,
- .last = loc_token,
- },
});
msg.text = text_buf.toOwnedSlice();
errdefer allocator.destroy(msg);
@@ -185,20 +224,28 @@ pub const Msg = struct {
}
pub fn printToStream(msg: *const Msg, stream: var, color_on: bool) !void {
+ switch (msg.data) {
+ Data.Cli => {
+ try stream.print("{}:-:-: error: {}\n", msg.realpath, msg.text);
+ return;
+ },
+ else => {},
+ }
+
const allocator = msg.getAllocator();
- const realpath = msg.getRealPath();
const tree = msg.getTree();
- const cwd = try os.getCwd(allocator);
+ const cwd = try os.getCwdAlloc(allocator);
defer allocator.free(cwd);
- const relpath = try os.path.relative(allocator, cwd, realpath);
+ const relpath = try os.path.relative(allocator, cwd, msg.realpath);
defer allocator.free(relpath);
- const path = if (relpath.len < realpath.len) relpath else realpath;
+ const path = if (relpath.len < msg.realpath.len) relpath else msg.realpath;
+ const span = msg.getSpan();
- const first_token = tree.tokens.at(msg.span.first);
- const last_token = tree.tokens.at(msg.span.last);
+ const first_token = tree.tokens.at(span.first);
+ const last_token = tree.tokens.at(span.last);
const start_loc = tree.tokenLocationPtr(0, first_token);
const end_loc = tree.tokenLocationPtr(first_token.end, last_token);
if (!color_on) {
diff --git a/src-self-hosted/introspect.zig b/src-self-hosted/introspect.zig
index ecd04c4467..d41f82f755 100644
--- a/src-self-hosted/introspect.zig
+++ b/src-self-hosted/introspect.zig
@@ -14,7 +14,7 @@ pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![
const test_index_file = try os.path.join(allocator, test_zig_dir, "std", "index.zig");
defer allocator.free(test_index_file);
- var file = try os.File.openRead(allocator, test_index_file);
+ var file = try os.File.openRead(test_index_file);
file.close();
return test_zig_dir;
@@ -22,7 +22,7 @@ pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![
/// Caller must free result
pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
- const self_exe_path = try os.selfExeDirPath(allocator);
+ const self_exe_path = try os.selfExeDirPathAlloc(allocator);
defer allocator.free(self_exe_path);
var cur_path: []const u8 = self_exe_path;
diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig
index 619cd4f330..562765b354 100644
--- a/src-self-hosted/ir.zig
+++ b/src-self-hosted/ir.zig
@@ -961,6 +961,7 @@ pub const Code = struct {
basic_block_list: std.ArrayList(*BasicBlock),
arena: std.heap.ArenaAllocator,
return_type: ?*Type,
+ tree_scope: *Scope.AstTree,
/// allocator is comp.gpa()
pub fn destroy(self: *Code, allocator: *Allocator) void {
@@ -990,14 +991,14 @@ pub const Code = struct {
return ret_value.val.KnownValue.getRef();
}
try comp.addCompileError(
- ret_value.scope.findRoot(),
+ self.tree_scope,
ret_value.span,
"unable to evaluate constant expression",
);
return error.SemanticAnalysisFailed;
} else if (inst.hasSideEffects()) {
try comp.addCompileError(
- inst.scope.findRoot(),
+ self.tree_scope,
inst.span,
"unable to evaluate constant expression",
);
@@ -1013,25 +1014,24 @@ pub const Builder = struct {
code: *Code,
current_basic_block: *BasicBlock,
next_debug_id: usize,
- root_scope: *Scope.Root,
is_comptime: bool,
is_async: bool,
begin_scope: ?*Scope,
pub const Error = Analyze.Error;
- pub fn init(comp: *Compilation, root_scope: *Scope.Root, begin_scope: ?*Scope) !Builder {
+ pub fn init(comp: *Compilation, tree_scope: *Scope.AstTree, begin_scope: ?*Scope) !Builder {
const code = try comp.gpa().create(Code{
.basic_block_list = undefined,
.arena = std.heap.ArenaAllocator.init(comp.gpa()),
.return_type = null,
+ .tree_scope = tree_scope,
});
code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator);
errdefer code.destroy(comp.gpa());
return Builder{
.comp = comp,
- .root_scope = root_scope,
.current_basic_block = undefined,
.code = code,
.next_debug_id = 0,
@@ -1292,6 +1292,7 @@ pub const Builder = struct {
Scope.Id.FnDef => return false,
Scope.Id.Decls => unreachable,
Scope.Id.Root => unreachable,
+ Scope.Id.AstTree => unreachable,
Scope.Id.Block,
Scope.Id.Defer,
Scope.Id.DeferExpr,
@@ -1302,7 +1303,7 @@ pub const Builder = struct {
}
pub fn genIntLit(irb: *Builder, int_lit: *ast.Node.IntegerLiteral, scope: *Scope) !*Inst {
- const int_token = irb.root_scope.tree.tokenSlice(int_lit.token);
+ const int_token = irb.code.tree_scope.tree.tokenSlice(int_lit.token);
var base: u8 = undefined;
var rest: []const u8 = undefined;
@@ -1341,7 +1342,7 @@ pub const Builder = struct {
}
pub async fn genStrLit(irb: *Builder, str_lit: *ast.Node.StringLiteral, scope: *Scope) !*Inst {
- const str_token = irb.root_scope.tree.tokenSlice(str_lit.token);
+ const str_token = irb.code.tree_scope.tree.tokenSlice(str_lit.token);
const src_span = Span.token(str_lit.token);
var bad_index: usize = undefined;
@@ -1349,7 +1350,7 @@ pub const Builder = struct {
error.OutOfMemory => return error.OutOfMemory,
error.InvalidCharacter => {
try irb.comp.addCompileError(
- irb.root_scope,
+ irb.code.tree_scope,
src_span,
"invalid character in string literal: '{c}'",
str_token[bad_index],
@@ -1427,7 +1428,7 @@ pub const Builder = struct {
if (statement_node.cast(ast.Node.Defer)) |defer_node| {
// defer starts a new scope
- const defer_token = irb.root_scope.tree.tokens.at(defer_node.defer_token);
+ const defer_token = irb.code.tree_scope.tree.tokens.at(defer_node.defer_token);
const kind = switch (defer_token.id) {
Token.Id.Keyword_defer => Scope.Defer.Kind.ScopeExit,
Token.Id.Keyword_errdefer => Scope.Defer.Kind.ErrorExit,
@@ -1513,7 +1514,7 @@ pub const Builder = struct {
const src_span = Span.token(control_flow_expr.ltoken);
if (scope.findFnDef() == null) {
try irb.comp.addCompileError(
- irb.root_scope,
+ irb.code.tree_scope,
src_span,
"return expression outside function definition",
);
@@ -1523,7 +1524,7 @@ pub const Builder = struct {
if (scope.findDeferExpr()) |scope_defer_expr| {
if (!scope_defer_expr.reported_err) {
try irb.comp.addCompileError(
- irb.root_scope,
+ irb.code.tree_scope,
src_span,
"cannot return from defer expression",
);
@@ -1599,7 +1600,7 @@ pub const Builder = struct {
pub async fn genIdentifier(irb: *Builder, identifier: *ast.Node.Identifier, scope: *Scope, lval: LVal) !*Inst {
const src_span = Span.token(identifier.token);
- const name = irb.root_scope.tree.tokenSlice(identifier.token);
+ const name = irb.code.tree_scope.tree.tokenSlice(identifier.token);
//if (buf_eql_str(variable_name, "_") && lval == LValPtr) {
// IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, node);
@@ -1622,7 +1623,7 @@ pub const Builder = struct {
}
} else |err| switch (err) {
error.Overflow => {
- try irb.comp.addCompileError(irb.root_scope, src_span, "integer too large");
+ try irb.comp.addCompileError(irb.code.tree_scope, src_span, "integer too large");
return error.SemanticAnalysisFailed;
},
error.OutOfMemory => return error.OutOfMemory,
@@ -1656,7 +1657,7 @@ pub const Builder = struct {
// TODO put a variable of same name with invalid type in global scope
// so that future references to this same name will find a variable with an invalid type
- try irb.comp.addCompileError(irb.root_scope, src_span, "unknown identifier '{}'", name);
+ try irb.comp.addCompileError(irb.code.tree_scope, src_span, "unknown identifier '{}'", name);
return error.SemanticAnalysisFailed;
}
@@ -1689,6 +1690,7 @@ pub const Builder = struct {
=> scope = scope.parent orelse break,
Scope.Id.DeferExpr => unreachable,
+ Scope.Id.AstTree => unreachable,
}
}
return result;
@@ -1740,6 +1742,7 @@ pub const Builder = struct {
=> scope = scope.parent orelse return is_noreturn,
Scope.Id.DeferExpr => unreachable,
+ Scope.Id.AstTree => unreachable,
}
}
}
@@ -1929,8 +1932,9 @@ pub const Builder = struct {
Scope.Id.Root => return Ident.NotFound,
Scope.Id.Decls => {
const decls = @fieldParentPtr(Scope.Decls, "base", s);
- const table = await (async decls.getTableReadOnly() catch unreachable);
- if (table.get(name)) |entry| {
+ const locked_table = await (async decls.table.acquireRead() catch unreachable);
+ defer locked_table.release();
+ if (locked_table.value.get(name)) |entry| {
return Ident{ .Decl = entry.value };
}
},
@@ -1967,8 +1971,8 @@ const Analyze = struct {
OutOfMemory,
};
- pub fn init(comp: *Compilation, root_scope: *Scope.Root, explicit_return_type: ?*Type) !Analyze {
- var irb = try Builder.init(comp, root_scope, null);
+ pub fn init(comp: *Compilation, tree_scope: *Scope.AstTree, explicit_return_type: ?*Type) !Analyze {
+ var irb = try Builder.init(comp, tree_scope, null);
errdefer irb.abort();
return Analyze{
@@ -2046,7 +2050,7 @@ const Analyze = struct {
}
fn addCompileError(self: *Analyze, span: Span, comptime fmt: []const u8, args: ...) !void {
- return self.irb.comp.addCompileError(self.irb.root_scope, span, fmt, args);
+ return self.irb.comp.addCompileError(self.irb.code.tree_scope, span, fmt, args);
}
fn resolvePeerTypes(self: *Analyze, expected_type: ?*Type, peers: []const *Inst) Analyze.Error!*Type {
@@ -2534,9 +2538,10 @@ const Analyze = struct {
pub async fn gen(
comp: *Compilation,
body_node: *ast.Node,
+ tree_scope: *Scope.AstTree,
scope: *Scope,
) !*Code {
- var irb = try Builder.init(comp, scope.findRoot(), scope);
+ var irb = try Builder.init(comp, tree_scope, scope);
errdefer irb.abort();
const entry_block = try irb.createBasicBlock(scope, c"Entry");
@@ -2554,9 +2559,8 @@ pub async fn gen(
pub async fn analyze(comp: *Compilation, old_code: *Code, expected_type: ?*Type) !*Code {
const old_entry_bb = old_code.basic_block_list.at(0);
- const root_scope = old_entry_bb.scope.findRoot();
- var ira = try Analyze.init(comp, root_scope, expected_type);
+ var ira = try Analyze.init(comp, old_code.tree_scope, expected_type);
errdefer ira.abort();
const new_entry_bb = try ira.getNewBasicBlock(old_entry_bb, null);
diff --git a/src-self-hosted/libc_installation.zig b/src-self-hosted/libc_installation.zig
index 3938c0d90c..5e292ff8b2 100644
--- a/src-self-hosted/libc_installation.zig
+++ b/src-self-hosted/libc_installation.zig
@@ -143,7 +143,7 @@ pub const LibCInstallation = struct {
pub async fn findNative(self: *LibCInstallation, loop: *event.Loop) !void {
self.initEmpty();
var group = event.Group(FindError!void).init(loop);
- errdefer group.cancelAll();
+ errdefer group.deinit();
var windows_sdk: ?*c.ZigWindowsSDK = null;
errdefer if (windows_sdk) |sdk| c.zig_free_windows_sdk(@ptrCast(?[*]c.ZigWindowsSDK, sdk));
@@ -233,7 +233,7 @@ pub const LibCInstallation = struct {
const stdlib_path = try std.os.path.join(loop.allocator, search_path, "stdlib.h");
defer loop.allocator.free(stdlib_path);
- if (try fileExists(loop.allocator, stdlib_path)) {
+ if (try fileExists(stdlib_path)) {
self.include_dir = try std.mem.dupe(loop.allocator, u8, search_path);
return;
}
@@ -257,7 +257,7 @@ pub const LibCInstallation = struct {
const stdlib_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "stdlib.h");
defer loop.allocator.free(stdlib_path);
- if (try fileExists(loop.allocator, stdlib_path)) {
+ if (try fileExists(stdlib_path)) {
self.include_dir = result_buf.toOwnedSlice();
return;
}
@@ -285,7 +285,7 @@ pub const LibCInstallation = struct {
}
const ucrt_lib_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "ucrt.lib");
defer loop.allocator.free(ucrt_lib_path);
- if (try fileExists(loop.allocator, ucrt_lib_path)) {
+ if (try fileExists(ucrt_lib_path)) {
self.lib_dir = result_buf.toOwnedSlice();
return;
}
@@ -313,7 +313,7 @@ pub const LibCInstallation = struct {
},
};
var group = event.Group(FindError!void).init(loop);
- errdefer group.cancelAll();
+ errdefer group.deinit();
for (dyn_tests) |*dyn_test| {
try group.call(testNativeDynamicLinker, self, loop, dyn_test);
}
@@ -341,7 +341,6 @@ pub const LibCInstallation = struct {
}
}
-
async fn findNativeKernel32LibDir(self: *LibCInstallation, loop: *event.Loop, sdk: *c.ZigWindowsSDK) FindError!void {
var search_buf: [2]Search = undefined;
const searches = fillSearch(&search_buf, sdk);
@@ -361,7 +360,7 @@ pub const LibCInstallation = struct {
}
const kernel32_path = try std.os.path.join(loop.allocator, result_buf.toSliceConst(), "kernel32.lib");
defer loop.allocator.free(kernel32_path);
- if (try fileExists(loop.allocator, kernel32_path)) {
+ if (try fileExists(kernel32_path)) {
self.kernel32_lib_dir = result_buf.toOwnedSlice();
return;
}
@@ -450,13 +449,11 @@ fn fillSearch(search_buf: *[2]Search, sdk: *c.ZigWindowsSDK) []Search {
return search_buf[0..search_end];
}
-
-fn fileExists(allocator: *std.mem.Allocator, path: []const u8) !bool {
- if (std.os.File.access(allocator, path)) |_| {
+fn fileExists(path: []const u8) !bool {
+ if (std.os.File.access(path)) |_| {
return true;
} else |err| switch (err) {
- error.NotFound, error.PermissionDenied => return false,
- error.OutOfMemory => return error.OutOfMemory,
+ error.FileNotFound, error.PermissionDenied => return false,
else => return error.FileSystem,
}
}
diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig
index 3b79c5b891..90f08b7305 100644
--- a/src-self-hosted/link.zig
+++ b/src-self-hosted/link.zig
@@ -61,7 +61,7 @@ pub async fn link(comp: *Compilation) !void {
ctx.libc = ctx.comp.override_libc orelse blk: {
switch (comp.target) {
Target.Native => {
- break :blk (await (async comp.event_loop_local.getNativeLibC() catch unreachable)) catch return error.LibCRequiredButNotProvidedOrFound;
+ break :blk (await (async comp.zig_compiler.getNativeLibC() catch unreachable)) catch return error.LibCRequiredButNotProvidedOrFound;
},
else => return error.LibCRequiredButNotProvidedOrFound,
}
@@ -83,7 +83,7 @@ pub async fn link(comp: *Compilation) !void {
{
// LLD is not thread-safe, so we grab a global lock.
- const held = await (async comp.event_loop_local.lld_lock.acquire() catch unreachable);
+ const held = await (async comp.zig_compiler.lld_lock.acquire() catch unreachable);
defer held.release();
// Not evented I/O. LLD does its own multithreading internally.
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 37bb435c1b..64c55a24e8 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -14,7 +14,7 @@ const c = @import("c.zig");
const introspect = @import("introspect.zig");
const Args = arg.Args;
const Flag = arg.Flag;
-const EventLoopLocal = @import("compilation.zig").EventLoopLocal;
+const ZigCompiler = @import("compilation.zig").ZigCompiler;
const Compilation = @import("compilation.zig").Compilation;
const Target = @import("target.zig").Target;
const errmsg = @import("errmsg.zig");
@@ -24,6 +24,8 @@ var stderr_file: os.File = undefined;
var stderr: *io.OutStream(io.FileOutStream.Error) = undefined;
var stdout: *io.OutStream(io.FileOutStream.Error) = undefined;
+const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB
+
const usage =
\\usage: zig [command] [options]
\\
@@ -371,6 +373,16 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
os.exit(1);
}
+ var clang_argv_buf = ArrayList([]const u8).init(allocator);
+ defer clang_argv_buf.deinit();
+
+ const mllvm_flags = flags.many("mllvm");
+ for (mllvm_flags) |mllvm| {
+ try clang_argv_buf.append("-mllvm");
+ try clang_argv_buf.append(mllvm);
+ }
+ try ZigCompiler.setLlvmArgv(allocator, mllvm_flags);
+
const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch os.exit(1);
defer allocator.free(zig_lib_dir);
@@ -380,11 +392,11 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
try loop.initMultiThreaded(allocator);
defer loop.deinit();
- var event_loop_local = try EventLoopLocal.init(&loop);
- defer event_loop_local.deinit();
+ var zig_compiler = try ZigCompiler.init(&loop);
+ defer zig_compiler.deinit();
var comp = try Compilation.create(
- &event_loop_local,
+ &zig_compiler,
root_name,
root_source_file,
Target.Native,
@@ -413,16 +425,6 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
comp.linker_script = flags.single("linker-script");
comp.each_lib_rpath = flags.present("each-lib-rpath");
- var clang_argv_buf = ArrayList([]const u8).init(allocator);
- defer clang_argv_buf.deinit();
-
- const mllvm_flags = flags.many("mllvm");
- for (mllvm_flags) |mllvm| {
- try clang_argv_buf.append("-mllvm");
- try clang_argv_buf.append(mllvm);
- }
-
- comp.llvm_argv = mllvm_flags;
comp.clang_argv = clang_argv_buf.toSliceConst();
comp.strip = flags.present("strip");
@@ -465,30 +467,34 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
comp.link_out_file = flags.single("output");
comp.link_objects = link_objects;
- try comp.build();
+ comp.start();
const process_build_events_handle = try async processBuildEvents(comp, color);
defer cancel process_build_events_handle;
loop.run();
}
async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void {
- // TODO directly awaiting async should guarantee memory allocation elision
- const build_event = await (async comp.events.get() catch unreachable);
+ var count: usize = 0;
+ while (true) {
+ // TODO directly awaiting async should guarantee memory allocation elision
+ const build_event = await (async comp.events.get() catch unreachable);
+ count += 1;
- switch (build_event) {
- Compilation.Event.Ok => {
- return;
- },
- Compilation.Event.Error => |err| {
- std.debug.warn("build failed: {}\n", @errorName(err));
- os.exit(1);
- },
- Compilation.Event.Fail => |msgs| {
- for (msgs) |msg| {
- defer msg.destroy();
- msg.printToFile(&stderr_file, color) catch os.exit(1);
- }
- },
+ switch (build_event) {
+ Compilation.Event.Ok => {
+ stderr.print("Build {} succeeded\n", count) catch os.exit(1);
+ },
+ Compilation.Event.Error => |err| {
+ stderr.print("Build {} failed: {}\n", count, @errorName(err)) catch os.exit(1);
+ },
+ Compilation.Event.Fail => |msgs| {
+ stderr.print("Build {} compile errors:\n", count) catch os.exit(1);
+ for (msgs) |msg| {
+ defer msg.destroy();
+ msg.printToFile(&stderr_file, color) catch os.exit(1);
+ }
+ },
+ }
}
}
@@ -528,33 +534,12 @@ const args_fmt_spec = []Flag{
};
const Fmt = struct {
- seen: std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8),
- queue: std.LinkedList([]const u8),
+ seen: event.Locked(SeenMap),
any_error: bool,
+ color: errmsg.Color,
+ loop: *event.Loop,
- // file_path must outlive Fmt
- fn addToQueue(self: *Fmt, file_path: []const u8) !void {
- const new_node = try self.seen.allocator.create(std.LinkedList([]const u8).Node{
- .prev = undefined,
- .next = undefined,
- .data = file_path,
- });
-
- if (try self.seen.put(file_path, {})) |_| return;
-
- self.queue.append(new_node);
- }
-
- fn addDirToQueue(self: *Fmt, file_path: []const u8) !void {
- var dir = try std.os.Dir.open(self.seen.allocator, file_path);
- defer dir.close();
- while (try dir.next()) |entry| {
- if (entry.kind == std.os.Dir.Entry.Kind.Directory or mem.endsWith(u8, entry.name, ".zig")) {
- const full_path = try os.path.join(self.seen.allocator, file_path, entry.name);
- try self.addToQueue(full_path);
- }
- }
- }
+ const SeenMap = std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8);
};
fn parseLibcPaths(allocator: *Allocator, libc: *LibCInstallation, libc_paths_file: []const u8) void {
@@ -587,17 +572,17 @@ fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void {
try loop.initMultiThreaded(allocator);
defer loop.deinit();
- var event_loop_local = try EventLoopLocal.init(&loop);
- defer event_loop_local.deinit();
+ var zig_compiler = try ZigCompiler.init(&loop);
+ defer zig_compiler.deinit();
- const handle = try async findLibCAsync(&event_loop_local);
+ const handle = try async findLibCAsync(&zig_compiler);
defer cancel handle;
loop.run();
}
-async fn findLibCAsync(event_loop_local: *EventLoopLocal) void {
- const libc = (await (async event_loop_local.getNativeLibC() catch unreachable)) catch |err| {
+async fn findLibCAsync(zig_compiler: *ZigCompiler) void {
+ const libc = (await (async zig_compiler.getNativeLibC() catch unreachable)) catch |err| {
stderr.print("unable to find libc: {}\n", @errorName(err)) catch os.exit(1);
os.exit(1);
};
@@ -636,7 +621,7 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
var stdin_file = try io.getStdIn();
var stdin = io.FileInStream.init(&stdin_file);
- const source_code = try stdin.stream.readAllAlloc(allocator, @maxValue(usize));
+ const source_code = try stdin.stream.readAllAlloc(allocator, max_src_size);
defer allocator.free(source_code);
var tree = std.zig.parse(allocator, source_code) catch |err| {
@@ -665,69 +650,146 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
os.exit(1);
}
+ var loop: event.Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ var result: FmtError!void = undefined;
+ const main_handle = try async asyncFmtMainChecked(
+ &result,
+ &loop,
+ flags,
+ color,
+ );
+ defer cancel main_handle;
+ loop.run();
+ return result;
+}
+
+async fn asyncFmtMainChecked(
+ result: *(FmtError!void),
+ loop: *event.Loop,
+ flags: *const Args,
+ color: errmsg.Color,
+) void {
+ result.* = await (async asyncFmtMain(loop, flags, color) catch unreachable);
+}
+
+const FmtError = error{
+ SystemResources,
+ OperationAborted,
+ IoPending,
+ BrokenPipe,
+ Unexpected,
+ WouldBlock,
+ FileClosed,
+ DestinationAddressRequired,
+ DiskQuota,
+ FileTooBig,
+ InputOutput,
+ NoSpaceLeft,
+ AccessDenied,
+ OutOfMemory,
+ RenameAcrossMountPoints,
+ ReadOnlyFileSystem,
+ LinkQuotaExceeded,
+ FileBusy,
+} || os.File.OpenError;
+
+async fn asyncFmtMain(
+ loop: *event.Loop,
+ flags: *const Args,
+ color: errmsg.Color,
+) FmtError!void {
+ suspend {
+ resume @handle();
+ }
var fmt = Fmt{
- .seen = std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8).init(allocator),
- .queue = std.LinkedList([]const u8).init(),
+ .seen = event.Locked(Fmt.SeenMap).init(loop, Fmt.SeenMap.init(loop.allocator)),
.any_error = false,
+ .color = color,
+ .loop = loop,
};
+ var group = event.Group(FmtError!void).init(loop);
for (flags.positionals.toSliceConst()) |file_path| {
- try fmt.addToQueue(file_path);
+ try group.call(fmtPath, &fmt, file_path);
}
-
- while (fmt.queue.popFirst()) |node| {
- const file_path = node.data;
-
- var file = try os.File.openRead(allocator, file_path);
- defer file.close();
-
- const source_code = io.readFileAlloc(allocator, file_path) catch |err| switch (err) {
- error.IsDir => {
- try fmt.addDirToQueue(file_path);
- continue;
- },
- else => {
- try stderr.print("unable to open '{}': {}\n", file_path, err);
- fmt.any_error = true;
- continue;
- },
- };
- defer allocator.free(source_code);
-
- var tree = std.zig.parse(allocator, source_code) catch |err| {
- try stderr.print("error parsing file '{}': {}\n", file_path, err);
- fmt.any_error = true;
- continue;
- };
- defer tree.deinit();
-
- var error_it = tree.errors.iterator(0);
- while (error_it.next()) |parse_error| {
- const msg = try errmsg.Msg.createFromParseError(allocator, parse_error, &tree, file_path);
- defer msg.destroy();
-
- try msg.printToFile(&stderr_file, color);
- }
- if (tree.errors.len != 0) {
- fmt.any_error = true;
- continue;
- }
-
- const baf = try io.BufferedAtomicFile.create(allocator, file_path);
- defer baf.destroy();
-
- const anything_changed = try std.zig.render(allocator, baf.stream(), &tree);
- if (anything_changed) {
- try stderr.print("{}\n", file_path);
- try baf.finish();
- }
- }
-
+ try await (async group.wait() catch unreachable);
if (fmt.any_error) {
os.exit(1);
}
}
+async fn fmtPath(fmt: *Fmt, file_path_ref: []const u8) FmtError!void {
+ const file_path = try std.mem.dupe(fmt.loop.allocator, u8, file_path_ref);
+ defer fmt.loop.allocator.free(file_path);
+
+ {
+ const held = await (async fmt.seen.acquire() catch unreachable);
+ defer held.release();
+
+ if (try held.value.put(file_path, {})) |_| return;
+ }
+
+ const source_code = (await try async event.fs.readFile(
+ fmt.loop,
+ file_path,
+ max_src_size,
+ )) catch |err| switch (err) {
+ error.IsDir => {
+ // TODO make event based (and dir.next())
+ var dir = try std.os.Dir.open(fmt.loop.allocator, file_path);
+ defer dir.close();
+
+ var group = event.Group(FmtError!void).init(fmt.loop);
+ while (try dir.next()) |entry| {
+ if (entry.kind == std.os.Dir.Entry.Kind.Directory or mem.endsWith(u8, entry.name, ".zig")) {
+ const full_path = try os.path.join(fmt.loop.allocator, file_path, entry.name);
+ try group.call(fmtPath, fmt, full_path);
+ }
+ }
+ return await (async group.wait() catch unreachable);
+ },
+ else => {
+ // TODO lock stderr printing
+ try stderr.print("unable to open '{}': {}\n", file_path, err);
+ fmt.any_error = true;
+ return;
+ },
+ };
+ defer fmt.loop.allocator.free(source_code);
+
+ var tree = std.zig.parse(fmt.loop.allocator, source_code) catch |err| {
+ try stderr.print("error parsing file '{}': {}\n", file_path, err);
+ fmt.any_error = true;
+ return;
+ };
+ defer tree.deinit();
+
+ var error_it = tree.errors.iterator(0);
+ while (error_it.next()) |parse_error| {
+ const msg = try errmsg.Msg.createFromParseError(fmt.loop.allocator, parse_error, &tree, file_path);
+ defer fmt.loop.allocator.destroy(msg);
+
+ try msg.printToFile(&stderr_file, fmt.color);
+ }
+ if (tree.errors.len != 0) {
+ fmt.any_error = true;
+ return;
+ }
+
+ // TODO make this evented
+ const baf = try io.BufferedAtomicFile.create(fmt.loop.allocator, file_path);
+ defer baf.destroy();
+
+ const anything_changed = try std.zig.render(fmt.loop.allocator, baf.stream(), &tree);
+ if (anything_changed) {
+ try stderr.print("{}\n", file_path);
+ try baf.finish();
+ }
+}
+
// cmd:targets /////////////////////////////////////////////////////////////////////////////////////
fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {
diff --git a/src-self-hosted/scope.zig b/src-self-hosted/scope.zig
index a38e765c6e..43d3b5a784 100644
--- a/src-self-hosted/scope.zig
+++ b/src-self-hosted/scope.zig
@@ -36,6 +36,7 @@ pub const Scope = struct {
Id.Defer => @fieldParentPtr(Defer, "base", base).destroy(comp),
Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(comp),
Id.Var => @fieldParentPtr(Var, "base", base).destroy(comp),
+ Id.AstTree => @fieldParentPtr(AstTree, "base", base).destroy(comp),
}
}
}
@@ -62,6 +63,8 @@ pub const Scope = struct {
Id.CompTime,
Id.Var,
=> scope = scope.parent.?,
+
+ Id.AstTree => unreachable,
}
}
}
@@ -82,6 +85,8 @@ pub const Scope = struct {
Id.Root,
Id.Var,
=> scope = scope.parent orelse return null,
+
+ Id.AstTree => unreachable,
}
}
}
@@ -97,6 +102,7 @@ pub const Scope = struct {
pub const Id = enum {
Root,
+ AstTree,
Decls,
Block,
FnDef,
@@ -108,13 +114,12 @@ pub const Scope = struct {
pub const Root = struct {
base: Scope,
- tree: *ast.Tree,
realpath: []const u8,
+ decls: *Decls,
/// Creates a Root scope with 1 reference
/// Takes ownership of realpath
- /// Takes ownership of tree, will deinit and destroy when done.
- pub fn create(comp: *Compilation, tree: *ast.Tree, realpath: []u8) !*Root {
+ pub fn create(comp: *Compilation, realpath: []u8) !*Root {
const self = try comp.gpa().createOne(Root);
self.* = Root{
.base = Scope{
@@ -122,41 +127,65 @@ pub const Scope = struct {
.parent = null,
.ref_count = std.atomic.Int(usize).init(1),
},
- .tree = tree,
.realpath = realpath,
+ .decls = undefined,
};
-
+ errdefer comp.gpa().destroy(self);
+ self.decls = try Decls.create(comp, &self.base);
return self;
}
pub fn destroy(self: *Root, comp: *Compilation) void {
+ // TODO comp.fs_watch.removeFile(self.realpath);
+ self.decls.base.deref(comp);
+ comp.gpa().free(self.realpath);
+ comp.gpa().destroy(self);
+ }
+ };
+
+ pub const AstTree = struct {
+ base: Scope,
+ tree: *ast.Tree,
+
+ /// Creates a scope with 1 reference
+ /// Takes ownership of tree, will deinit and destroy when done.
+ pub fn create(comp: *Compilation, tree: *ast.Tree, root_scope: *Root) !*AstTree {
+ const self = try comp.gpa().createOne(AstTree);
+ self.* = AstTree{
+ .base = undefined,
+ .tree = tree,
+ };
+ self.base.init(Id.AstTree, &root_scope.base);
+
+ return self;
+ }
+
+ pub fn destroy(self: *AstTree, comp: *Compilation) void {
comp.gpa().free(self.tree.source);
self.tree.deinit();
comp.gpa().destroy(self.tree);
- comp.gpa().free(self.realpath);
comp.gpa().destroy(self);
}
+
+ pub fn root(self: *AstTree) *Root {
+ return self.base.findRoot();
+ }
};
pub const Decls = struct {
base: Scope,
- /// The lock must be respected for writing. However once name_future resolves,
- /// readers can freely access it.
- table: event.Locked(Decl.Table),
-
- /// Once this future is resolved, the table is complete and available for unlocked
- /// read-only access. It does not mean all the decls are resolved; it means only that
- /// the table has all the names. Each decl in the table has its own resolution state.
- name_future: event.Future(void),
+ /// This table remains Write Locked when the names are incomplete or possibly outdated.
+ /// So if a reader manages to grab a lock, it can be sure that the set of names is complete
+ /// and correct.
+ table: event.RwLocked(Decl.Table),
/// Creates a Decls scope with 1 reference
pub fn create(comp: *Compilation, parent: *Scope) !*Decls {
const self = try comp.gpa().createOne(Decls);
self.* = Decls{
.base = undefined,
- .table = event.Locked(Decl.Table).init(comp.loop, Decl.Table.init(comp.gpa())),
- .name_future = event.Future(void).init(comp.loop),
+ .table = event.RwLocked(Decl.Table).init(comp.loop, Decl.Table.init(comp.gpa())),
};
self.base.init(Id.Decls, parent);
return self;
@@ -166,11 +195,6 @@ pub const Scope = struct {
self.table.deinit();
comp.gpa().destroy(self);
}
-
- pub async fn getTableReadOnly(self: *Decls) *Decl.Table {
- _ = await (async self.name_future.get() catch unreachable);
- return &self.table.private_data;
- }
};
pub const Block = struct {
diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig
index 47e45d1bb0..d4a45e7a04 100644
--- a/src-self-hosted/test.zig
+++ b/src-self-hosted/test.zig
@@ -6,7 +6,7 @@ const Compilation = @import("compilation.zig").Compilation;
const introspect = @import("introspect.zig");
const assertOrPanic = std.debug.assertOrPanic;
const errmsg = @import("errmsg.zig");
-const EventLoopLocal = @import("compilation.zig").EventLoopLocal;
+const ZigCompiler = @import("compilation.zig").ZigCompiler;
var ctx: TestContext = undefined;
@@ -25,7 +25,7 @@ const allocator = std.heap.c_allocator;
pub const TestContext = struct {
loop: std.event.Loop,
- event_loop_local: EventLoopLocal,
+ zig_compiler: ZigCompiler,
zig_lib_dir: []u8,
file_index: std.atomic.Int(usize),
group: std.event.Group(error!void),
@@ -37,20 +37,20 @@ pub const TestContext = struct {
self.* = TestContext{
.any_err = {},
.loop = undefined,
- .event_loop_local = undefined,
+ .zig_compiler = undefined,
.zig_lib_dir = undefined,
.group = undefined,
.file_index = std.atomic.Int(usize).init(0),
};
- try self.loop.initMultiThreaded(allocator);
+ try self.loop.initSingleThreaded(allocator);
errdefer self.loop.deinit();
- self.event_loop_local = try EventLoopLocal.init(&self.loop);
- errdefer self.event_loop_local.deinit();
+ self.zig_compiler = try ZigCompiler.init(&self.loop);
+ errdefer self.zig_compiler.deinit();
self.group = std.event.Group(error!void).init(&self.loop);
- errdefer self.group.cancelAll();
+ errdefer self.group.deinit();
self.zig_lib_dir = try introspect.resolveZigLibDir(allocator);
errdefer allocator.free(self.zig_lib_dir);
@@ -62,7 +62,7 @@ pub const TestContext = struct {
fn deinit(self: *TestContext) void {
std.os.deleteTree(allocator, tmp_dir_name) catch {};
allocator.free(self.zig_lib_dir);
- self.event_loop_local.deinit();
+ self.zig_compiler.deinit();
self.loop.deinit();
}
@@ -94,10 +94,10 @@ pub const TestContext = struct {
}
// TODO async I/O
- try std.io.writeFile(allocator, file1_path, source);
+ try std.io.writeFile(file1_path, source);
var comp = try Compilation.create(
- &self.event_loop_local,
+ &self.zig_compiler,
"test",
file1_path,
Target.Native,
@@ -108,7 +108,7 @@ pub const TestContext = struct {
);
errdefer comp.destroy();
- try comp.build();
+ comp.start();
try self.group.call(getModuleEvent, comp, source, path, line, column, msg);
}
@@ -128,10 +128,10 @@ pub const TestContext = struct {
}
// TODO async I/O
- try std.io.writeFile(allocator, file1_path, source);
+ try std.io.writeFile(file1_path, source);
var comp = try Compilation.create(
- &self.event_loop_local,
+ &self.zig_compiler,
"test",
file1_path,
Target.Native,
@@ -144,7 +144,7 @@ pub const TestContext = struct {
_ = try comp.addLinkLib("c", true);
comp.link_out_file = output_file;
- try comp.build();
+ comp.start();
try self.group.call(getModuleEventSuccess, comp, output_file, expected_output);
}
@@ -212,9 +212,10 @@ pub const TestContext = struct {
Compilation.Event.Fail => |msgs| {
assertOrPanic(msgs.len != 0);
for (msgs) |msg| {
- if (mem.endsWith(u8, msg.getRealPath(), path) and mem.eql(u8, msg.text, text)) {
- const first_token = msg.getTree().tokens.at(msg.span.first);
- const last_token = msg.getTree().tokens.at(msg.span.first);
+ if (mem.endsWith(u8, msg.realpath, path) and mem.eql(u8, msg.text, text)) {
+ const span = msg.getSpan();
+ const first_token = msg.getTree().tokens.at(span.first);
+ const last_token = msg.getTree().tokens.at(span.first);
const start_loc = msg.getTree().tokenLocationPtr(0, first_token);
if (start_loc.line + 1 == line and start_loc.column + 1 == column) {
return;
diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig
index 6783130fc7..47dd3772e5 100644
--- a/src-self-hosted/type.zig
+++ b/src-self-hosted/type.zig
@@ -184,8 +184,8 @@ pub const Type = struct {
if (await (async base.abi_alignment.start() catch unreachable)) |ptr| return ptr.*;
{
- const held = try comp.event_loop_local.getAnyLlvmContext();
- defer held.release(comp.event_loop_local);
+ const held = try comp.zig_compiler.getAnyLlvmContext();
+ defer held.release(comp.zig_compiler);
const llvm_context = held.node.data;
diff --git a/src/all_types.hpp b/src/all_types.hpp
index e930b84e6d..496ab9b70d 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1850,7 +1850,7 @@ struct ScopeDecls {
HashMap decl_table;
bool safety_off;
AstNode *safety_set_node;
- bool fast_math_off;
+ bool fast_math_on;
AstNode *fast_math_set_node;
ImportTableEntry *import;
// If this is a scope from a container, this is the type entry, otherwise null
@@ -1870,7 +1870,7 @@ struct ScopeBlock {
bool safety_off;
AstNode *safety_set_node;
- bool fast_math_off;
+ bool fast_math_on;
AstNode *fast_math_set_node;
};
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 03cfa5b67b..a8b3ea7132 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -19,12 +19,12 @@
static const size_t default_backward_branch_quota = 1000;
-static void resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type);
-static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type);
+static Error resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type);
+static Error resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type);
-static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type);
-static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type);
-static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type);
+static Error ATTRIBUTE_MUST_USE resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type);
+static Error ATTRIBUTE_MUST_USE resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type);
+static Error ATTRIBUTE_MUST_USE resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type);
static void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry);
ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg) {
@@ -370,15 +370,20 @@ uint64_t type_size_bits(CodeGen *g, TypeTableEntry *type_entry) {
return LLVMSizeOfTypeInBits(g->target_data_ref, type_entry->type_ref);
}
-bool type_is_copyable(CodeGen *g, TypeTableEntry *type_entry) {
- type_ensure_zero_bits_known(g, type_entry);
+Result type_is_copyable(CodeGen *g, TypeTableEntry *type_entry) {
+ Error err;
+ if ((err = type_ensure_zero_bits_known(g, type_entry)))
+ return err;
+
if (!type_has_bits(type_entry))
return true;
if (!handle_is_ptr(type_entry))
return true;
- ensure_complete_type(g, type_entry);
+ if ((err = ensure_complete_type(g, type_entry)))
+ return err;
+
return type_entry->is_copyable;
}
@@ -447,7 +452,7 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
}
}
- type_ensure_zero_bits_known(g, child_type);
+ assertNoError(type_ensure_zero_bits_known(g, child_type));
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdPointer);
entry->is_copyable = true;
@@ -554,11 +559,11 @@ TypeTableEntry *get_optional_type(CodeGen *g, TypeTableEntry *child_type) {
TypeTableEntry *entry = child_type->optional_parent;
return entry;
} else {
- ensure_complete_type(g, child_type);
+ assertNoError(ensure_complete_type(g, child_type));
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdOptional);
assert(child_type->type_ref || child_type->zero_bits);
- entry->is_copyable = type_is_copyable(g, child_type);
+ entry->is_copyable = type_is_copyable(g, child_type).unwrap();
buf_resize(&entry->name, 0);
buf_appendf(&entry->name, "?%s", buf_ptr(&child_type->name));
@@ -650,7 +655,7 @@ TypeTableEntry *get_error_union_type(CodeGen *g, TypeTableEntry *err_set_type, T
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdErrorUnion);
entry->is_copyable = true;
assert(payload_type->di_type);
- ensure_complete_type(g, payload_type);
+ assertNoError(ensure_complete_type(g, payload_type));
buf_resize(&entry->name, 0);
buf_appendf(&entry->name, "%s!%s", buf_ptr(&err_set_type->name), buf_ptr(&payload_type->name));
@@ -739,7 +744,7 @@ TypeTableEntry *get_array_type(CodeGen *g, TypeTableEntry *child_type, uint64_t
return entry;
}
- ensure_complete_type(g, child_type);
+ assertNoError(ensure_complete_type(g, child_type));
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdArray);
entry->zero_bits = (array_size == 0) || child_type->zero_bits;
@@ -1050,13 +1055,13 @@ TypeTableEntry *get_ptr_to_stack_trace_type(CodeGen *g) {
}
TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
+ Error err;
auto table_entry = g->fn_type_table.maybe_get(fn_type_id);
if (table_entry) {
return table_entry->value;
}
if (fn_type_id->return_type != nullptr) {
- ensure_complete_type(g, fn_type_id->return_type);
- if (type_is_invalid(fn_type_id->return_type))
+ if ((err = ensure_complete_type(g, fn_type_id->return_type)))
return g->builtin_types.entry_invalid;
assert(fn_type_id->return_type->id != TypeTableEntryIdOpaque);
} else {
@@ -1172,8 +1177,7 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
gen_param_info->src_index = i;
gen_param_info->gen_index = SIZE_MAX;
- ensure_complete_type(g, type_entry);
- if (type_is_invalid(type_entry))
+ if ((err = ensure_complete_type(g, type_entry)))
return g->builtin_types.entry_invalid;
if (type_has_bits(type_entry)) {
@@ -1493,6 +1497,7 @@ TypeTableEntry *get_auto_err_set_type(CodeGen *g, FnTableEntry *fn_entry) {
static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *child_scope, FnTableEntry *fn_entry) {
assert(proto_node->type == NodeTypeFnProto);
AstNodeFnProto *fn_proto = &proto_node->data.fn_proto;
+ Error err;
FnTypeId fn_type_id = {0};
init_fn_type_id(&fn_type_id, proto_node, proto_node->data.fn_proto.params.length);
@@ -1550,7 +1555,8 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
return g->builtin_types.entry_invalid;
}
if (!calling_convention_allows_zig_types(fn_type_id.cc)) {
- type_ensure_zero_bits_known(g, type_entry);
+ if ((err = type_ensure_zero_bits_known(g, type_entry)))
+ return g->builtin_types.entry_invalid;
if (!type_has_bits(type_entry)) {
add_node_error(g, param_node->data.param_decl.type,
buf_sprintf("parameter of type '%s' has 0 bits; not allowed in function with calling convention '%s'",
@@ -1598,7 +1604,8 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
case TypeTableEntryIdUnion:
case TypeTableEntryIdFn:
case TypeTableEntryIdPromise:
- type_ensure_zero_bits_known(g, type_entry);
+ if ((err = type_ensure_zero_bits_known(g, type_entry)))
+ return g->builtin_types.entry_invalid;
if (type_requires_comptime(type_entry)) {
add_node_error(g, param_node->data.param_decl.type,
buf_sprintf("parameter of type '%s' must be declared comptime",
@@ -1729,24 +1736,28 @@ bool type_is_invalid(TypeTableEntry *type_entry) {
}
-static void resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type) {
+static Error resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type) {
assert(enum_type->id == TypeTableEntryIdEnum);
- if (enum_type->data.enumeration.complete)
- return;
+ if (enum_type->data.enumeration.is_invalid)
+ return ErrorSemanticAnalyzeFail;
- resolve_enum_zero_bits(g, enum_type);
- if (type_is_invalid(enum_type))
- return;
+ if (enum_type->data.enumeration.complete)
+ return ErrorNone;
+
+ Error err;
+ if ((err = resolve_enum_zero_bits(g, enum_type)))
+ return err;
AstNode *decl_node = enum_type->data.enumeration.decl_node;
if (enum_type->data.enumeration.embedded_in_current) {
if (!enum_type->data.enumeration.reported_infinite_err) {
+ enum_type->data.enumeration.is_invalid = true;
enum_type->data.enumeration.reported_infinite_err = true;
add_node_error(g, decl_node, buf_sprintf("enum '%s' contains itself", buf_ptr(&enum_type->name)));
}
- return;
+ return ErrorSemanticAnalyzeFail;
}
assert(!enum_type->data.enumeration.zero_bits_loop_flag);
@@ -1778,7 +1789,7 @@ static void resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type) {
enum_type->data.enumeration.complete = true;
if (enum_type->data.enumeration.is_invalid)
- return;
+ return ErrorSemanticAnalyzeFail;
if (enum_type->zero_bits) {
enum_type->type_ref = LLVMVoidType();
@@ -1797,7 +1808,7 @@ static void resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type) {
ZigLLVMReplaceTemporary(g->dbuilder, enum_type->di_type, replacement_di_type);
enum_type->di_type = replacement_di_type;
- return;
+ return ErrorNone;
}
TypeTableEntry *tag_int_type = enum_type->data.enumeration.tag_int_type;
@@ -1815,6 +1826,7 @@ static void resolve_enum_type(CodeGen *g, TypeTableEntry *enum_type) {
ZigLLVMReplaceTemporary(g->dbuilder, enum_type->di_type, tag_di_type);
enum_type->di_type = tag_di_type;
+ return ErrorNone;
}
@@ -1897,15 +1909,15 @@ TypeTableEntry *get_struct_type(CodeGen *g, const char *type_name, const char *f
return struct_type;
}
-static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
+static Error resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
assert(struct_type->id == TypeTableEntryIdStruct);
if (struct_type->data.structure.complete)
- return;
+ return ErrorNone;
- resolve_struct_zero_bits(g, struct_type);
- if (struct_type->data.structure.is_invalid)
- return;
+ Error err;
+ if ((err = resolve_struct_zero_bits(g, struct_type)))
+ return err;
AstNode *decl_node = struct_type->data.structure.decl_node;
@@ -1916,7 +1928,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
add_node_error(g, decl_node,
buf_sprintf("struct '%s' contains itself", buf_ptr(&struct_type->name)));
}
- return;
+ return ErrorSemanticAnalyzeFail;
}
assert(!struct_type->data.structure.zero_bits_loop_flag);
@@ -1943,8 +1955,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
TypeStructField *type_struct_field = &struct_type->data.structure.fields[i];
TypeTableEntry *field_type = type_struct_field->type_entry;
- ensure_complete_type(g, field_type);
- if (type_is_invalid(field_type)) {
+ if ((err = ensure_complete_type(g, field_type))) {
struct_type->data.structure.is_invalid = true;
break;
}
@@ -2026,7 +2037,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
struct_type->data.structure.complete = true;
if (struct_type->data.structure.is_invalid)
- return;
+ return ErrorSemanticAnalyzeFail;
if (struct_type->zero_bits) {
struct_type->type_ref = LLVMVoidType();
@@ -2045,7 +2056,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
0, nullptr, di_element_types, (int)debug_field_count, 0, nullptr, "");
ZigLLVMReplaceTemporary(g->dbuilder, struct_type->di_type, replacement_di_type);
struct_type->di_type = replacement_di_type;
- return;
+ return ErrorNone;
}
assert(struct_type->di_type);
@@ -2128,17 +2139,19 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) {
ZigLLVMReplaceTemporary(g->dbuilder, struct_type->di_type, replacement_di_type);
struct_type->di_type = replacement_di_type;
+
+ return ErrorNone;
}
-static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
+static Error resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
assert(union_type->id == TypeTableEntryIdUnion);
if (union_type->data.unionation.complete)
- return;
+ return ErrorNone;
- resolve_union_zero_bits(g, union_type);
- if (type_is_invalid(union_type))
- return;
+ Error err;
+ if ((err = resolve_union_zero_bits(g, union_type)))
+ return err;
AstNode *decl_node = union_type->data.unionation.decl_node;
@@ -2148,7 +2161,7 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
union_type->data.unionation.is_invalid = true;
add_node_error(g, decl_node, buf_sprintf("union '%s' contains itself", buf_ptr(&union_type->name)));
}
- return;
+ return ErrorSemanticAnalyzeFail;
}
assert(!union_type->data.unionation.zero_bits_loop_flag);
@@ -2179,8 +2192,7 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
TypeUnionField *union_field = &union_type->data.unionation.fields[i];
TypeTableEntry *field_type = union_field->type_entry;
- ensure_complete_type(g, field_type);
- if (type_is_invalid(field_type)) {
+ if ((err = ensure_complete_type(g, field_type))) {
union_type->data.unionation.is_invalid = true;
continue;
}
@@ -2219,7 +2231,7 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
union_type->data.unionation.most_aligned_union_member = most_aligned_union_member;
if (union_type->data.unionation.is_invalid)
- return;
+ return ErrorSemanticAnalyzeFail;
if (union_type->zero_bits) {
union_type->type_ref = LLVMVoidType();
@@ -2238,7 +2250,7 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
ZigLLVMReplaceTemporary(g->dbuilder, union_type->di_type, replacement_di_type);
union_type->di_type = replacement_di_type;
- return;
+ return ErrorNone;
}
uint64_t padding_in_bits = biggest_size_in_bits - size_of_most_aligned_member_in_bits;
@@ -2274,7 +2286,7 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
ZigLLVMReplaceTemporary(g->dbuilder, union_type->di_type, replacement_di_type);
union_type->di_type = replacement_di_type;
- return;
+ return ErrorNone;
}
LLVMTypeRef union_type_ref;
@@ -2293,7 +2305,7 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
ZigLLVMReplaceTemporary(g->dbuilder, union_type->di_type, tag_type->di_type);
union_type->di_type = tag_type->di_type;
- return;
+ return ErrorNone;
} else {
union_type_ref = most_aligned_union_member->type_ref;
}
@@ -2367,19 +2379,21 @@ static void resolve_union_type(CodeGen *g, TypeTableEntry *union_type) {
ZigLLVMReplaceTemporary(g->dbuilder, union_type->di_type, replacement_di_type);
union_type->di_type = replacement_di_type;
+
+ return ErrorNone;
}
-static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) {
+static Error resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) {
assert(enum_type->id == TypeTableEntryIdEnum);
if (enum_type->data.enumeration.zero_bits_known)
- return;
+ return ErrorNone;
if (enum_type->data.enumeration.zero_bits_loop_flag) {
add_node_error(g, enum_type->data.enumeration.decl_node,
buf_sprintf("'%s' depends on itself", buf_ptr(&enum_type->name)));
enum_type->data.enumeration.is_invalid = true;
- return;
+ return ErrorSemanticAnalyzeFail;
}
enum_type->data.enumeration.zero_bits_loop_flag = true;
@@ -2398,7 +2412,7 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) {
enum_type->data.enumeration.is_invalid = true;
enum_type->data.enumeration.zero_bits_loop_flag = false;
enum_type->data.enumeration.zero_bits_known = true;
- return;
+ return ErrorSemanticAnalyzeFail;
}
enum_type->data.enumeration.src_field_count = field_count;
@@ -2525,13 +2539,23 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) {
enum_type->data.enumeration.zero_bits_loop_flag = false;
enum_type->zero_bits = !type_has_bits(tag_int_type);
enum_type->data.enumeration.zero_bits_known = true;
+
+ if (enum_type->data.enumeration.is_invalid)
+ return ErrorSemanticAnalyzeFail;
+
+ return ErrorNone;
}
-static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) {
+static Error resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) {
assert(struct_type->id == TypeTableEntryIdStruct);
+ Error err;
+
+ if (struct_type->data.structure.is_invalid)
+ return ErrorSemanticAnalyzeFail;
+
if (struct_type->data.structure.zero_bits_known)
- return;
+ return ErrorNone;
if (struct_type->data.structure.zero_bits_loop_flag) {
// If we get here it's due to recursion. This is a design flaw in the compiler,
@@ -2547,7 +2571,7 @@ static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) {
struct_type->data.structure.abi_alignment = LLVMABIAlignmentOfType(g->target_data_ref, LLVMPointerType(LLVMInt8Type(), 0));
}
}
- return;
+ return ErrorNone;
}
struct_type->data.structure.zero_bits_loop_flag = true;
@@ -2596,8 +2620,7 @@ static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) {
buf_sprintf("enums, not structs, support field assignment"));
}
- type_ensure_zero_bits_known(g, field_type);
- if (type_is_invalid(field_type)) {
+ if ((err = type_ensure_zero_bits_known(g, field_type))) {
struct_type->data.structure.is_invalid = true;
continue;
}
@@ -2634,16 +2657,27 @@ static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) {
struct_type->data.structure.gen_field_count = (uint32_t)gen_field_index;
struct_type->zero_bits = (gen_field_index == 0);
struct_type->data.structure.zero_bits_known = true;
+
+ if (struct_type->data.structure.is_invalid) {
+ return ErrorSemanticAnalyzeFail;
+ }
+
+ return ErrorNone;
}
-static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
+static Error resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
assert(union_type->id == TypeTableEntryIdUnion);
+ Error err;
+
+ if (union_type->data.unionation.is_invalid)
+ return ErrorSemanticAnalyzeFail;
+
if (union_type->data.unionation.zero_bits_known)
- return;
+ return ErrorNone;
if (type_is_invalid(union_type))
- return;
+ return ErrorSemanticAnalyzeFail;
if (union_type->data.unionation.zero_bits_loop_flag) {
// If we get here it's due to recursion. From this we conclude that the struct is
@@ -2660,7 +2694,7 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
LLVMPointerType(LLVMInt8Type(), 0));
}
}
- return;
+ return ErrorNone;
}
union_type->data.unionation.zero_bits_loop_flag = true;
@@ -2679,7 +2713,7 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
union_type->data.unionation.is_invalid = true;
union_type->data.unionation.zero_bits_loop_flag = false;
union_type->data.unionation.zero_bits_known = true;
- return;
+ return ErrorSemanticAnalyzeFail;
}
union_type->data.unionation.src_field_count = field_count;
union_type->data.unionation.fields = allocate(field_count);
@@ -2711,13 +2745,13 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
tag_int_type = analyze_type_expr(g, scope, enum_type_node);
if (type_is_invalid(tag_int_type)) {
union_type->data.unionation.is_invalid = true;
- return;
+ return ErrorSemanticAnalyzeFail;
}
if (tag_int_type->id != TypeTableEntryIdInt) {
add_node_error(g, enum_type_node,
buf_sprintf("expected integer tag type, found '%s'", buf_ptr(&tag_int_type->name)));
union_type->data.unionation.is_invalid = true;
- return;
+ return ErrorSemanticAnalyzeFail;
}
} else {
tag_int_type = get_smallest_unsigned_int_type(g, field_count - 1);
@@ -2744,13 +2778,13 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
TypeTableEntry *enum_type = analyze_type_expr(g, scope, enum_type_node);
if (type_is_invalid(enum_type)) {
union_type->data.unionation.is_invalid = true;
- return;
+ return ErrorSemanticAnalyzeFail;
}
if (enum_type->id != TypeTableEntryIdEnum) {
union_type->data.unionation.is_invalid = true;
add_node_error(g, enum_type_node,
buf_sprintf("expected enum tag type, found '%s'", buf_ptr(&enum_type->name)));
- return;
+ return ErrorSemanticAnalyzeFail;
}
tag_type = enum_type;
abi_alignment_so_far = get_abi_alignment(g, enum_type); // this populates src_field_count
@@ -2789,8 +2823,7 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
}
} else {
field_type = analyze_type_expr(g, scope, field_node->data.struct_field.type);
- type_ensure_zero_bits_known(g, field_type);
- if (type_is_invalid(field_type)) {
+ if ((err = type_ensure_zero_bits_known(g, field_type))) {
union_type->data.unionation.is_invalid = true;
continue;
}
@@ -2883,7 +2916,7 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
union_type->data.unionation.abi_alignment = abi_alignment_so_far;
if (union_type->data.unionation.is_invalid)
- return;
+ return ErrorSemanticAnalyzeFail;
bool src_have_tag = decl_node->data.container_decl.auto_enum ||
decl_node->data.container_decl.init_arg_expr != nullptr;
@@ -2905,7 +2938,7 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
add_node_error(g, source_node,
buf_sprintf("%s union does not support enum tag type", qual_str));
union_type->data.unionation.is_invalid = true;
- return;
+ return ErrorSemanticAnalyzeFail;
}
if (create_enum_type) {
@@ -2970,6 +3003,11 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
union_type->data.unionation.gen_field_count = gen_field_index;
union_type->zero_bits = (gen_field_index == 0 && (field_count < 2 || !src_have_tag));
union_type->data.unionation.zero_bits_known = true;
+
+ if (union_type->data.unionation.is_invalid)
+ return ErrorSemanticAnalyzeFail;
+
+ return ErrorNone;
}
static void get_fully_qualified_decl_name_internal(Buf *buf, Scope *scope, uint8_t sep) {
@@ -3035,7 +3073,7 @@ static bool scope_is_root_decls(Scope *scope) {
static void wrong_panic_prototype(CodeGen *g, AstNode *proto_node, TypeTableEntry *fn_type) {
add_node_error(g, proto_node,
- buf_sprintf("expected 'fn([]const u8, ?&builtin.StackTrace) unreachable', found '%s'",
+ buf_sprintf("expected 'fn([]const u8, ?*builtin.StackTrace) noreturn', found '%s'",
buf_ptr(&fn_type->name)));
}
@@ -3463,13 +3501,13 @@ VariableTableEntry *add_variable(CodeGen *g, AstNode *source_node, Scope *parent
variable_entry->shadowable = false;
variable_entry->mem_slot_index = SIZE_MAX;
variable_entry->src_arg_index = SIZE_MAX;
- variable_entry->align_bytes = get_abi_alignment(g, value->type);
assert(name);
-
buf_init_from_buf(&variable_entry->name, name);
- if (value->type->id != TypeTableEntryIdInvalid) {
+ if (!type_is_invalid(value->type)) {
+ variable_entry->align_bytes = get_abi_alignment(g, value->type);
+
VariableTableEntry *existing_var = find_variable(g, parent_scope, name);
if (existing_var && !existing_var->shadowable) {
ErrorMsg *msg = add_node_error(g, source_node,
@@ -5311,13 +5349,13 @@ ConstExprValue *create_const_arg_tuple(CodeGen *g, size_t arg_index_start, size_
void init_const_undefined(CodeGen *g, ConstExprValue *const_val) {
+ Error err;
TypeTableEntry *wanted_type = const_val->type;
if (wanted_type->id == TypeTableEntryIdArray) {
const_val->special = ConstValSpecialStatic;
const_val->data.x_array.special = ConstArraySpecialUndef;
} else if (wanted_type->id == TypeTableEntryIdStruct) {
- ensure_complete_type(g, wanted_type);
- if (type_is_invalid(wanted_type)) {
+ if ((err = ensure_complete_type(g, wanted_type))) {
return;
}
@@ -5350,27 +5388,33 @@ ConstExprValue *create_const_vals(size_t count) {
return vals;
}
-void ensure_complete_type(CodeGen *g, TypeTableEntry *type_entry) {
+Error ensure_complete_type(CodeGen *g, TypeTableEntry *type_entry) {
+ if (type_is_invalid(type_entry))
+ return ErrorSemanticAnalyzeFail;
if (type_entry->id == TypeTableEntryIdStruct) {
if (!type_entry->data.structure.complete)
- resolve_struct_type(g, type_entry);
+ return resolve_struct_type(g, type_entry);
} else if (type_entry->id == TypeTableEntryIdEnum) {
if (!type_entry->data.enumeration.complete)
- resolve_enum_type(g, type_entry);
+ return resolve_enum_type(g, type_entry);
} else if (type_entry->id == TypeTableEntryIdUnion) {
if (!type_entry->data.unionation.complete)
- resolve_union_type(g, type_entry);
+ return resolve_union_type(g, type_entry);
}
+ return ErrorNone;
}
-void type_ensure_zero_bits_known(CodeGen *g, TypeTableEntry *type_entry) {
+Error type_ensure_zero_bits_known(CodeGen *g, TypeTableEntry *type_entry) {
+ if (type_is_invalid(type_entry))
+ return ErrorSemanticAnalyzeFail;
if (type_entry->id == TypeTableEntryIdStruct) {
- resolve_struct_zero_bits(g, type_entry);
+ return resolve_struct_zero_bits(g, type_entry);
} else if (type_entry->id == TypeTableEntryIdEnum) {
- resolve_enum_zero_bits(g, type_entry);
+ return resolve_enum_zero_bits(g, type_entry);
} else if (type_entry->id == TypeTableEntryIdUnion) {
- resolve_union_zero_bits(g, type_entry);
+ return resolve_union_zero_bits(g, type_entry);
}
+ return ErrorNone;
}
bool ir_get_var_is_comptime(VariableTableEntry *var) {
@@ -6213,7 +6257,7 @@ LinkLib *add_link_lib(CodeGen *g, Buf *name) {
}
uint32_t get_abi_alignment(CodeGen *g, TypeTableEntry *type_entry) {
- type_ensure_zero_bits_known(g, type_entry);
+ assertNoError(type_ensure_zero_bits_known(g, type_entry));
if (type_entry->zero_bits) return 0;
// We need to make this function work without requiring ensure_complete_type
diff --git a/src/analyze.hpp b/src/analyze.hpp
index e4dfae4ecb..0b52e9a5e6 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -9,6 +9,7 @@
#define ZIG_ANALYZE_HPP
#include "all_types.hpp"
+#include "result.hpp"
void semantic_analyze(CodeGen *g);
ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg);
@@ -88,8 +89,8 @@ void init_fn_type_id(FnTypeId *fn_type_id, AstNode *proto_node, size_t param_cou
AstNode *get_param_decl_node(FnTableEntry *fn_entry, size_t index);
FnTableEntry *scope_get_fn_if_root(Scope *scope);
bool type_requires_comptime(TypeTableEntry *type_entry);
-void ensure_complete_type(CodeGen *g, TypeTableEntry *type_entry);
-void type_ensure_zero_bits_known(CodeGen *g, TypeTableEntry *type_entry);
+Error ATTRIBUTE_MUST_USE ensure_complete_type(CodeGen *g, TypeTableEntry *type_entry);
+Error ATTRIBUTE_MUST_USE type_ensure_zero_bits_known(CodeGen *g, TypeTableEntry *type_entry);
void complete_enum(CodeGen *g, TypeTableEntry *enum_type);
bool ir_get_var_is_comptime(VariableTableEntry *var);
bool const_values_equal(ConstExprValue *a, ConstExprValue *b);
@@ -178,7 +179,7 @@ TypeTableEntryId type_id_at_index(size_t index);
size_t type_id_len();
size_t type_id_index(TypeTableEntry *entry);
TypeTableEntry *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id);
-bool type_is_copyable(CodeGen *g, TypeTableEntry *type_entry);
+Result type_is_copyable(CodeGen *g, TypeTableEntry *type_entry);
LinkLib *create_link_lib(Buf *name);
bool calling_convention_does_first_arg_return(CallingConvention cc);
LinkLib *add_link_lib(CodeGen *codegen, Buf *lib);
diff --git a/src/codegen.cpp b/src/codegen.cpp
index c9c80ede6d..d5c07828cd 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -829,15 +829,15 @@ static bool ir_want_fast_math(CodeGen *g, IrInstruction *instruction) {
if (scope->id == ScopeIdBlock) {
ScopeBlock *block_scope = (ScopeBlock *)scope;
if (block_scope->fast_math_set_node)
- return !block_scope->fast_math_off;
+ return block_scope->fast_math_on;
} else if (scope->id == ScopeIdDecls) {
ScopeDecls *decls_scope = (ScopeDecls *)scope;
if (decls_scope->fast_math_set_node)
- return !decls_scope->fast_math_off;
+ return decls_scope->fast_math_on;
}
scope = scope->parent;
}
- return true;
+ return false;
}
static bool ir_want_runtime_safety(CodeGen *g, IrInstruction *instruction) {
@@ -5131,13 +5131,13 @@ static bool is_llvm_value_unnamed_type(TypeTableEntry *type_entry, LLVMValueRef
}
static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, const char *name) {
- render_const_val_global(g, const_val, name);
switch (const_val->data.x_ptr.special) {
case ConstPtrSpecialInvalid:
case ConstPtrSpecialDiscard:
zig_unreachable();
case ConstPtrSpecialRef:
{
+ render_const_val_global(g, const_val, name);
ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee;
render_const_val(g, pointee, "");
render_const_val_global(g, pointee, "");
@@ -5148,6 +5148,7 @@ static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, con
}
case ConstPtrSpecialBaseArray:
{
+ render_const_val_global(g, const_val, name);
ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val;
size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;
assert(array_const_val->type->id == TypeTableEntryIdArray);
@@ -5168,6 +5169,7 @@ static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, con
}
case ConstPtrSpecialBaseStruct:
{
+ render_const_val_global(g, const_val, name);
ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val;
assert(struct_const_val->type->id == TypeTableEntryIdStruct);
if (struct_const_val->type->zero_bits) {
@@ -5190,6 +5192,7 @@ static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, con
}
case ConstPtrSpecialHardCodedAddr:
{
+ render_const_val_global(g, const_val, name);
uint64_t addr_value = const_val->data.x_ptr.data.hard_coded_addr.addr;
TypeTableEntry *usize = g->builtin_types.entry_usize;
const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false),
@@ -5720,12 +5723,16 @@ static void do_code_gen(CodeGen *g) {
LLVMValueRef global_value;
if (var->linkage == VarLinkageExternal) {
- global_value = LLVMAddGlobal(g->module, var->value->type->type_ref, buf_ptr(&var->name));
+ LLVMValueRef existing_llvm_var = LLVMGetNamedGlobal(g->module, buf_ptr(&var->name));
+ if (existing_llvm_var) {
+ global_value = LLVMConstBitCast(existing_llvm_var, LLVMPointerType(var->value->type->type_ref, 0));
+ } else {
+ global_value = LLVMAddGlobal(g->module, var->value->type->type_ref, buf_ptr(&var->name));
+ // TODO debug info for the extern variable
- // TODO debug info for the extern variable
-
- LLVMSetLinkage(global_value, LLVMExternalLinkage);
- LLVMSetAlignment(global_value, var->align_bytes);
+ LLVMSetLinkage(global_value, LLVMExternalLinkage);
+ LLVMSetAlignment(global_value, var->align_bytes);
+ }
} else {
bool exported = (var->linkage == VarLinkageExport);
const char *mangled_name = buf_ptr(get_mangled_name(g, &var->name, exported));
diff --git a/src/ir.cpp b/src/ir.cpp
index 3e423487aa..5bf39ee691 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -8711,6 +8711,7 @@ static void update_errors_helper(CodeGen *g, ErrorTableEntry ***errors, size_t *
}
static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, TypeTableEntry *expected_type, IrInstruction **instructions, size_t instruction_count) {
+ Error err;
assert(instruction_count >= 1);
IrInstruction *prev_inst = instructions[0];
if (type_is_invalid(prev_inst->value.type)) {
@@ -9172,8 +9173,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
if (prev_type->id == TypeTableEntryIdEnum && cur_type->id == TypeTableEntryIdUnion &&
(cur_type->data.unionation.decl_node->data.container_decl.auto_enum || cur_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
{
- type_ensure_zero_bits_known(ira->codegen, cur_type);
- if (type_is_invalid(cur_type))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, cur_type)))
return ira->codegen->builtin_types.entry_invalid;
if (cur_type->data.unionation.tag_type == prev_type) {
continue;
@@ -9183,8 +9183,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
if (cur_type->id == TypeTableEntryIdEnum && prev_type->id == TypeTableEntryIdUnion &&
(prev_type->data.unionation.decl_node->data.container_decl.auto_enum || prev_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
{
- type_ensure_zero_bits_known(ira->codegen, prev_type);
- if (type_is_invalid(prev_type))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, prev_type)))
return ira->codegen->builtin_types.entry_invalid;
if (prev_type->data.unionation.tag_type == cur_type) {
prev_inst = cur_inst;
@@ -9999,11 +9998,11 @@ static IrInstruction *ir_analyze_array_to_slice(IrAnalyze *ira, IrInstruction *s
static IrInstruction *ir_analyze_enum_to_int(IrAnalyze *ira, IrInstruction *source_instr,
IrInstruction *target, TypeTableEntry *wanted_type)
{
+ Error err;
assert(wanted_type->id == TypeTableEntryIdInt);
TypeTableEntry *actual_type = target->value.type;
- ensure_complete_type(ira->codegen, actual_type);
- if (type_is_invalid(actual_type))
+ if ((err = ensure_complete_type(ira->codegen, actual_type)))
return ira->codegen->invalid_instruction;
if (wanted_type != actual_type->data.enumeration.tag_int_type) {
@@ -10069,6 +10068,7 @@ static IrInstruction *ir_analyze_undefined_to_anything(IrAnalyze *ira, IrInstruc
static IrInstruction *ir_analyze_enum_to_union(IrAnalyze *ira, IrInstruction *source_instr,
IrInstruction *target, TypeTableEntry *wanted_type)
{
+ Error err;
assert(wanted_type->id == TypeTableEntryIdUnion);
assert(target->value.type->id == TypeTableEntryIdEnum);
@@ -10078,8 +10078,7 @@ static IrInstruction *ir_analyze_enum_to_union(IrAnalyze *ira, IrInstruction *so
return ira->codegen->invalid_instruction;
TypeUnionField *union_field = find_union_field_by_tag(wanted_type, &val->data.x_enum_tag);
assert(union_field != nullptr);
- type_ensure_zero_bits_known(ira->codegen, union_field->type_entry);
- if (type_is_invalid(union_field->type_entry))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, union_field->type_entry)))
return ira->codegen->invalid_instruction;
if (!union_field->type_entry->zero_bits) {
AstNode *field_node = wanted_type->data.unionation.decl_node->data.container_decl.fields.at(
@@ -10169,12 +10168,12 @@ static IrInstruction *ir_analyze_widen_or_shorten(IrAnalyze *ira, IrInstruction
static IrInstruction *ir_analyze_int_to_enum(IrAnalyze *ira, IrInstruction *source_instr,
IrInstruction *target, TypeTableEntry *wanted_type)
{
+ Error err;
assert(wanted_type->id == TypeTableEntryIdEnum);
TypeTableEntry *actual_type = target->value.type;
- ensure_complete_type(ira->codegen, wanted_type);
- if (type_is_invalid(wanted_type))
+ if ((err = ensure_complete_type(ira->codegen, wanted_type)))
return ira->codegen->invalid_instruction;
if (actual_type != wanted_type->data.enumeration.tag_int_type) {
@@ -10517,6 +10516,7 @@ static void report_recursive_error(IrAnalyze *ira, AstNode *source_node, ConstCa
static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_instr,
TypeTableEntry *wanted_type, IrInstruction *value)
{
+ Error err;
TypeTableEntry *actual_type = value->value.type;
AstNode *source_node = source_instr->source_node;
@@ -10697,6 +10697,19 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
return ira->codegen->invalid_instruction;
return cast2;
+ } else if (
+ wanted_child_type->id == TypeTableEntryIdPointer &&
+ wanted_child_type->data.pointer.ptr_len == PtrLenUnknown &&
+ actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == TypeTableEntryIdArray &&
+ actual_type->data.pointer.alignment >= wanted_child_type->data.pointer.alignment &&
+ types_match_const_cast_only(ira, wanted_child_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ !wanted_child_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ {
+ IrInstruction *cast1 = ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value, wanted_child_type);
+ return ir_analyze_maybe_wrap(ira, source_instr, cast1, wanted_type);
}
}
@@ -10783,8 +10796,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
if (actual_type->id == TypeTableEntryIdComptimeFloat ||
actual_type->id == TypeTableEntryIdComptimeInt)
{
- ensure_complete_type(ira->codegen, wanted_type);
- if (type_is_invalid(wanted_type))
+ if ((err = ensure_complete_type(ira->codegen, wanted_type)))
return ira->codegen->invalid_instruction;
if (wanted_type->id == TypeTableEntryIdEnum) {
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.enumeration.tag_int_type, value);
@@ -10840,8 +10852,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
// cast from union to the enum type of the union
if (actual_type->id == TypeTableEntryIdUnion && wanted_type->id == TypeTableEntryIdEnum) {
- type_ensure_zero_bits_known(ira->codegen, actual_type);
- if (type_is_invalid(actual_type))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, actual_type)))
return ira->codegen->invalid_instruction;
if (actual_type->data.unionation.tag_type == wanted_type) {
@@ -10854,7 +10865,9 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
(wanted_type->data.unionation.decl_node->data.container_decl.auto_enum ||
wanted_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
{
- type_ensure_zero_bits_known(ira->codegen, wanted_type);
+ if ((err = type_ensure_zero_bits_known(ira->codegen, wanted_type)))
+ return ira->codegen->invalid_instruction;
+
if (wanted_type->data.unionation.tag_type == actual_type) {
return ir_analyze_enum_to_union(ira, source_instr, value, wanted_type);
}
@@ -10866,7 +10879,9 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
if (union_type->data.unionation.decl_node->data.container_decl.auto_enum ||
union_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr)
{
- type_ensure_zero_bits_known(ira->codegen, union_type);
+ if ((err = type_ensure_zero_bits_known(ira->codegen, union_type)))
+ return ira->codegen->invalid_instruction;
+
if (union_type->data.unionation.tag_type == actual_type) {
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, union_type, value);
if (type_is_invalid(cast1->value.type))
@@ -10910,8 +10925,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
actual_type, source_node, !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
{
- type_ensure_zero_bits_known(ira->codegen, actual_type);
- if (type_is_invalid(actual_type)) {
+ if ((err = type_ensure_zero_bits_known(ira->codegen, actual_type))) {
return ira->codegen->invalid_instruction;
}
if (!type_has_bits(actual_type)) {
@@ -11310,6 +11324,7 @@ static bool optional_value_is_null(ConstExprValue *val) {
}
static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) {
+ Error err;
IrInstruction *op1 = bin_op_instruction->op1->other;
IrInstruction *op2 = bin_op_instruction->op2->other;
AstNode *source_node = bin_op_instruction->base.source_node;
@@ -11445,8 +11460,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, source_node, nullptr, instructions, 2);
if (type_is_invalid(resolved_type))
return resolved_type;
- type_ensure_zero_bits_known(ira->codegen, resolved_type);
- if (type_is_invalid(resolved_type))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, resolved_type)))
return resolved_type;
bool operator_allowed;
@@ -12393,6 +12407,7 @@ static TypeTableEntry *ir_analyze_instruction_bin_op(IrAnalyze *ira, IrInstructi
}
static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstructionDeclVar *decl_var_instruction) {
+ Error err;
VariableTableEntry *var = decl_var_instruction->var;
IrInstruction *init_value = decl_var_instruction->init_value->other;
@@ -12426,8 +12441,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstruc
if (type_is_invalid(result_type)) {
result_type = ira->codegen->builtin_types.entry_invalid;
} else {
- type_ensure_zero_bits_known(ira->codegen, result_type);
- if (type_is_invalid(result_type)) {
+ if ((err = type_ensure_zero_bits_known(ira->codegen, result_type))) {
result_type = ira->codegen->builtin_types.entry_invalid;
}
}
@@ -12945,6 +12959,7 @@ static VariableTableEntry *get_fn_var_by_index(FnTableEntry *fn_entry, size_t in
static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
VariableTableEntry *var)
{
+ Error err;
if (var->mem_slot_index != SIZE_MAX && var->owner_exec->analysis == nullptr) {
assert(ira->codegen->errors.length != 0);
return ira->codegen->invalid_instruction;
@@ -12999,7 +13014,8 @@ no_mem_slot:
instruction->scope, instruction->source_node, var);
var_ptr_instruction->value.type = get_pointer_to_type_extra(ira->codegen, var->value->type,
var->src_is_const, is_volatile, PtrLenSingle, var->align_bytes, 0, 0);
- type_ensure_zero_bits_known(ira->codegen, var->value->type);
+ if ((err = type_ensure_zero_bits_known(ira->codegen, var->value->type)))
+ return ira->codegen->invalid_instruction;
bool in_fn_scope = (scope_fn_entry(var->parent_scope) != nullptr);
var_ptr_instruction->value.data.rh_ptr = in_fn_scope ? RuntimeHintPtrStack : RuntimeHintPtrNonStack;
@@ -13011,6 +13027,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
FnTableEntry *fn_entry, TypeTableEntry *fn_type, IrInstruction *fn_ref,
IrInstruction *first_arg_ptr, bool comptime_fn_call, FnInline fn_inline)
{
+ Error err;
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
size_t first_arg_1_or_0 = first_arg_ptr ? 1 : 0;
@@ -13375,8 +13392,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
inst_fn_type_id.return_type = specified_return_type;
}
- type_ensure_zero_bits_known(ira->codegen, specified_return_type);
- if (type_is_invalid(specified_return_type))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, specified_return_type)))
return ira->codegen->builtin_types.entry_invalid;
if (type_requires_comptime(specified_return_type)) {
@@ -13651,12 +13667,12 @@ static TypeTableEntry *ir_analyze_dereference(IrAnalyze *ira, IrInstructionUnOp
}
static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op_instruction) {
+ Error err;
IrInstruction *value = un_op_instruction->value->other;
TypeTableEntry *type_entry = ir_resolve_type(ira, value);
if (type_is_invalid(type_entry))
return ira->codegen->builtin_types.entry_invalid;
- ensure_complete_type(ira->codegen, type_entry);
- if (type_is_invalid(type_entry))
+ if ((err = ensure_complete_type(ira->codegen, type_entry)))
return ira->codegen->builtin_types.entry_invalid;
switch (type_entry->id) {
@@ -14010,6 +14026,7 @@ static TypeTableEntry *adjust_ptr_len(CodeGen *g, TypeTableEntry *ptr_type, PtrL
}
static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstructionElemPtr *elem_ptr_instruction) {
+ Error err;
IrInstruction *array_ptr = elem_ptr_instruction->array_ptr->other;
if (type_is_invalid(array_ptr->value.type))
return ira->codegen->builtin_types.entry_invalid;
@@ -14118,8 +14135,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
return ira->codegen->builtin_types.entry_invalid;
bool safety_check_on = elem_ptr_instruction->safety_check_on;
- ensure_complete_type(ira->codegen, return_type->data.pointer.child_type);
- if (type_is_invalid(return_type->data.pointer.child_type))
+ if ((err = ensure_complete_type(ira->codegen, return_type->data.pointer.child_type)))
return ira->codegen->builtin_types.entry_invalid;
uint64_t elem_size = type_size(ira->codegen, return_type->data.pointer.child_type);
@@ -14339,9 +14355,10 @@ static IrInstruction *ir_analyze_container_member_access_inner(IrAnalyze *ira,
static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name,
IrInstruction *source_instr, IrInstruction *container_ptr, TypeTableEntry *container_type)
{
+ Error err;
+
TypeTableEntry *bare_type = container_ref_type(container_type);
- ensure_complete_type(ira->codegen, bare_type);
- if (type_is_invalid(bare_type))
+ if ((err = ensure_complete_type(ira->codegen, bare_type)))
return ira->codegen->invalid_instruction;
assert(container_ptr->value.type->id == TypeTableEntryIdPointer);
@@ -14540,6 +14557,7 @@ static ErrorTableEntry *find_err_table_entry(TypeTableEntry *err_set_type, Buf *
}
static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstructionFieldPtr *field_ptr_instruction) {
+ Error err;
IrInstruction *container_ptr = field_ptr_instruction->container_ptr->other;
if (type_is_invalid(container_ptr->value.type))
return ira->codegen->builtin_types.entry_invalid;
@@ -14641,8 +14659,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile);
}
if (child_type->id == TypeTableEntryIdEnum) {
- ensure_complete_type(ira->codegen, child_type);
- if (type_is_invalid(child_type))
+ if ((err = ensure_complete_type(ira->codegen, child_type)))
return ira->codegen->builtin_types.entry_invalid;
TypeEnumField *field = find_enum_type_field(child_type, field_name);
@@ -14666,8 +14683,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
(child_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr ||
child_type->data.unionation.decl_node->data.container_decl.auto_enum))
{
- ensure_complete_type(ira->codegen, child_type);
- if (type_is_invalid(child_type))
+ if ((err = ensure_complete_type(ira->codegen, child_type)))
return ira->codegen->builtin_types.entry_invalid;
TypeUnionField *field = find_union_type_field(child_type, field_name);
if (field) {
@@ -15187,17 +15203,17 @@ static TypeTableEntry *ir_analyze_instruction_set_float_mode(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_void;
}
- bool *fast_math_off_ptr;
+ bool *fast_math_on_ptr;
AstNode **fast_math_set_node_ptr;
if (target_type->id == TypeTableEntryIdBlock) {
ScopeBlock *block_scope = (ScopeBlock *)target_val->data.x_block;
- fast_math_off_ptr = &block_scope->fast_math_off;
+ fast_math_on_ptr = &block_scope->fast_math_on;
fast_math_set_node_ptr = &block_scope->fast_math_set_node;
} else if (target_type->id == TypeTableEntryIdFn) {
assert(target_val->data.x_ptr.special == ConstPtrSpecialFunction);
FnTableEntry *target_fn = target_val->data.x_ptr.data.fn.fn_entry;
assert(target_fn->def_scope);
- fast_math_off_ptr = &target_fn->def_scope->fast_math_off;
+ fast_math_on_ptr = &target_fn->def_scope->fast_math_on;
fast_math_set_node_ptr = &target_fn->def_scope->fast_math_set_node;
} else if (target_type->id == TypeTableEntryIdMetaType) {
ScopeDecls *decls_scope;
@@ -15213,7 +15229,7 @@ static TypeTableEntry *ir_analyze_instruction_set_float_mode(IrAnalyze *ira,
buf_sprintf("expected scope reference, found type '%s'", buf_ptr(&type_arg->name)));
return ira->codegen->builtin_types.entry_invalid;
}
- fast_math_off_ptr = &decls_scope->fast_math_off;
+ fast_math_on_ptr = &decls_scope->fast_math_on;
fast_math_set_node_ptr = &decls_scope->fast_math_set_node;
} else {
ir_add_error_node(ira, target_instruction->source_node,
@@ -15235,7 +15251,7 @@ static TypeTableEntry *ir_analyze_instruction_set_float_mode(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_invalid;
}
*fast_math_set_node_ptr = source_node;
- *fast_math_off_ptr = (float_mode_scalar == FloatModeStrict);
+ *fast_math_on_ptr = (float_mode_scalar == FloatModeOptimized);
ir_build_const_from(ira, &instruction->base);
return ira->codegen->builtin_types.entry_void;
@@ -15244,6 +15260,7 @@ static TypeTableEntry *ir_analyze_instruction_set_float_mode(IrAnalyze *ira,
static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira,
IrInstructionSliceType *slice_type_instruction)
{
+ Error err;
uint32_t align_bytes;
if (slice_type_instruction->align_value != nullptr) {
if (!ir_resolve_align(ira, slice_type_instruction->align_value->other, &align_bytes))
@@ -15255,6 +15272,8 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_invalid;
if (slice_type_instruction->align_value == nullptr) {
+ if ((err = type_ensure_zero_bits_known(ira->codegen, child_type)))
+ return ira->codegen->builtin_types.entry_invalid;
align_bytes = get_abi_alignment(ira->codegen, child_type);
}
@@ -15293,7 +15312,8 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira,
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdPromise:
{
- type_ensure_zero_bits_known(ira->codegen, child_type);
+ if ((err = type_ensure_zero_bits_known(ira->codegen, child_type)))
+ return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, child_type,
is_const, is_volatile, PtrLenUnknown, align_bytes, 0, 0);
TypeTableEntry *result_type = get_slice_type(ira->codegen, slice_ptr_type);
@@ -15431,11 +15451,11 @@ static TypeTableEntry *ir_analyze_instruction_promise_type(IrAnalyze *ira, IrIns
static TypeTableEntry *ir_analyze_instruction_size_of(IrAnalyze *ira,
IrInstructionSizeOf *size_of_instruction)
{
+ Error err;
IrInstruction *type_value = size_of_instruction->type_value->other;
TypeTableEntry *type_entry = ir_resolve_type(ira, type_value);
- ensure_complete_type(ira->codegen, type_entry);
- if (type_is_invalid(type_entry))
+ if ((err = ensure_complete_type(ira->codegen, type_entry)))
return ira->codegen->builtin_types.entry_invalid;
switch (type_entry->id) {
@@ -15806,6 +15826,7 @@ static TypeTableEntry *ir_analyze_instruction_switch_br(IrAnalyze *ira,
static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira,
IrInstructionSwitchTarget *switch_target_instruction)
{
+ Error err;
IrInstruction *target_value_ptr = switch_target_instruction->target_value_ptr->other;
if (type_is_invalid(target_value_ptr->value.type))
return ira->codegen->builtin_types.entry_invalid;
@@ -15832,8 +15853,7 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira,
if (pointee_val->special == ConstValSpecialRuntime)
pointee_val = nullptr;
}
- ensure_complete_type(ira->codegen, target_type);
- if (type_is_invalid(target_type))
+ if ((err = ensure_complete_type(ira->codegen, target_type)))
return ira->codegen->builtin_types.entry_invalid;
switch (target_type->id) {
@@ -15897,8 +15917,7 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira,
return tag_type;
}
case TypeTableEntryIdEnum: {
- type_ensure_zero_bits_known(ira->codegen, target_type);
- if (type_is_invalid(target_type))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, target_type)))
return ira->codegen->builtin_types.entry_invalid;
if (target_type->data.enumeration.src_field_count < 2) {
TypeEnumField *only_field = &target_type->data.enumeration.fields[0];
@@ -16100,10 +16119,10 @@ static TypeTableEntry *ir_analyze_instruction_ref(IrAnalyze *ira, IrInstructionR
static TypeTableEntry *ir_analyze_container_init_fields_union(IrAnalyze *ira, IrInstruction *instruction,
TypeTableEntry *container_type, size_t instr_field_count, IrInstructionContainerInitFieldsField *fields)
{
+ Error err;
assert(container_type->id == TypeTableEntryIdUnion);
- ensure_complete_type(ira->codegen, container_type);
- if (type_is_invalid(container_type))
+ if ((err = ensure_complete_type(ira->codegen, container_type)))
return ira->codegen->builtin_types.entry_invalid;
if (instr_field_count != 1) {
@@ -16132,8 +16151,7 @@ static TypeTableEntry *ir_analyze_container_init_fields_union(IrAnalyze *ira, Ir
if (casted_field_value == ira->codegen->invalid_instruction)
return ira->codegen->builtin_types.entry_invalid;
- type_ensure_zero_bits_known(ira->codegen, casted_field_value->value.type);
- if (type_is_invalid(casted_field_value->value.type))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, casted_field_value->value.type)))
return ira->codegen->builtin_types.entry_invalid;
bool is_comptime = ir_should_inline(ira->new_irb.exec, instruction->scope);
@@ -16167,6 +16185,7 @@ static TypeTableEntry *ir_analyze_container_init_fields_union(IrAnalyze *ira, Ir
static TypeTableEntry *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruction *instruction,
TypeTableEntry *container_type, size_t instr_field_count, IrInstructionContainerInitFieldsField *fields)
{
+ Error err;
if (container_type->id == TypeTableEntryIdUnion) {
return ir_analyze_container_init_fields_union(ira, instruction, container_type, instr_field_count, fields);
}
@@ -16177,8 +16196,7 @@ static TypeTableEntry *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstru
return ira->codegen->builtin_types.entry_invalid;
}
- ensure_complete_type(ira->codegen, container_type);
- if (type_is_invalid(container_type))
+ if ((err = ensure_complete_type(ira->codegen, container_type)))
return ira->codegen->builtin_types.entry_invalid;
size_t actual_field_count = container_type->data.structure.src_field_count;
@@ -16559,6 +16577,7 @@ static TypeTableEntry *ir_analyze_instruction_err_name(IrAnalyze *ira, IrInstruc
}
static TypeTableEntry *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrInstructionTagName *instruction) {
+ Error err;
IrInstruction *target = instruction->target->other;
if (type_is_invalid(target->value.type))
return ira->codegen->builtin_types.entry_invalid;
@@ -16566,8 +16585,7 @@ static TypeTableEntry *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrIn
assert(target->value.type->id == TypeTableEntryIdEnum);
if (instr_is_comptime(target)) {
- type_ensure_zero_bits_known(ira->codegen, target->value.type);
- if (type_is_invalid(target->value.type))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, target->value.type)))
return ira->codegen->builtin_types.entry_invalid;
TypeEnumField *field = find_enum_field_by_tag(target->value.type, &target->value.data.x_bigint);
ConstExprValue *array_val = create_const_str_lit(ira->codegen, field->name);
@@ -16591,6 +16609,7 @@ static TypeTableEntry *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrIn
static TypeTableEntry *ir_analyze_instruction_field_parent_ptr(IrAnalyze *ira,
IrInstructionFieldParentPtr *instruction)
{
+ Error err;
IrInstruction *type_value = instruction->type_value->other;
TypeTableEntry *container_type = ir_resolve_type(ira, type_value);
if (type_is_invalid(container_type))
@@ -16611,8 +16630,7 @@ static TypeTableEntry *ir_analyze_instruction_field_parent_ptr(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_invalid;
}
- ensure_complete_type(ira->codegen, container_type);
- if (type_is_invalid(container_type))
+ if ((err = ensure_complete_type(ira->codegen, container_type)))
return ira->codegen->builtin_types.entry_invalid;
TypeStructField *field = find_struct_type_field(container_type, field_name);
@@ -16684,13 +16702,13 @@ static TypeTableEntry *ir_analyze_instruction_field_parent_ptr(IrAnalyze *ira,
static TypeTableEntry *ir_analyze_instruction_offset_of(IrAnalyze *ira,
IrInstructionOffsetOf *instruction)
{
+ Error err;
IrInstruction *type_value = instruction->type_value->other;
TypeTableEntry *container_type = ir_resolve_type(ira, type_value);
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
- ensure_complete_type(ira->codegen, container_type);
- if (type_is_invalid(container_type))
+ if ((err = ensure_complete_type(ira->codegen, container_type)))
return ira->codegen->builtin_types.entry_invalid;
IrInstruction *field_name_value = instruction->field_name->other;
@@ -16735,19 +16753,15 @@ static void ensure_field_index(TypeTableEntry *type, const char *field_name, siz
(buf_deinit(field_name_buf), true));
}
-static TypeTableEntry *ir_type_info_get_type(IrAnalyze *ira, const char *type_name, TypeTableEntry *root = nullptr)
-{
+static TypeTableEntry *ir_type_info_get_type(IrAnalyze *ira, const char *type_name, TypeTableEntry *root) {
+ Error err;
static ConstExprValue *type_info_var = nullptr;
static TypeTableEntry *type_info_type = nullptr;
- if (type_info_var == nullptr)
- {
+ if (type_info_var == nullptr) {
type_info_var = get_builtin_value(ira->codegen, "TypeInfo");
assert(type_info_var->type->id == TypeTableEntryIdMetaType);
- ensure_complete_type(ira->codegen, type_info_var->data.x_type);
- if (type_is_invalid(type_info_var->data.x_type))
- return ira->codegen->builtin_types.entry_invalid;
-
+ assertNoError(ensure_complete_type(ira->codegen, type_info_var->data.x_type));
type_info_type = type_info_var->data.x_type;
assert(type_info_type->id == TypeTableEntryIdUnion);
}
@@ -16772,8 +16786,7 @@ static TypeTableEntry *ir_type_info_get_type(IrAnalyze *ira, const char *type_na
VariableTableEntry *var = tld->var;
- ensure_complete_type(ira->codegen, var->value->type);
- if (type_is_invalid(var->value->type))
+ if ((err = ensure_complete_type(ira->codegen, var->value->type)))
return ira->codegen->builtin_types.entry_invalid;
assert(var->value->type->id == TypeTableEntryIdMetaType);
return var->value->data.x_type;
@@ -16781,9 +16794,9 @@ static TypeTableEntry *ir_type_info_get_type(IrAnalyze *ira, const char *type_na
static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, ScopeDecls *decls_scope)
{
- TypeTableEntry *type_info_definition_type = ir_type_info_get_type(ira, "Definition");
- ensure_complete_type(ira->codegen, type_info_definition_type);
- if (type_is_invalid(type_info_definition_type))
+ Error err;
+ TypeTableEntry *type_info_definition_type = ir_type_info_get_type(ira, "Definition", nullptr);
+ if ((err = ensure_complete_type(ira->codegen, type_info_definition_type)))
return false;
ensure_field_index(type_info_definition_type, "name", 0);
@@ -16791,18 +16804,15 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
ensure_field_index(type_info_definition_type, "data", 2);
TypeTableEntry *type_info_definition_data_type = ir_type_info_get_type(ira, "Data", type_info_definition_type);
- ensure_complete_type(ira->codegen, type_info_definition_data_type);
- if (type_is_invalid(type_info_definition_data_type))
+ if ((err = ensure_complete_type(ira->codegen, type_info_definition_data_type)))
return false;
TypeTableEntry *type_info_fn_def_type = ir_type_info_get_type(ira, "FnDef", type_info_definition_data_type);
- ensure_complete_type(ira->codegen, type_info_fn_def_type);
- if (type_is_invalid(type_info_fn_def_type))
+ if ((err = ensure_complete_type(ira->codegen, type_info_fn_def_type)))
return false;
TypeTableEntry *type_info_fn_def_inline_type = ir_type_info_get_type(ira, "Inline", type_info_fn_def_type);
- ensure_complete_type(ira->codegen, type_info_fn_def_inline_type);
- if (type_is_invalid(type_info_fn_def_inline_type))
+ if ((err = ensure_complete_type(ira->codegen, type_info_fn_def_inline_type)))
return false;
// Loop through our definitions once to figure out how many definitions we will generate info for.
@@ -16882,8 +16892,7 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
case TldIdVar:
{
VariableTableEntry *var = ((TldVar *)curr_entry->value)->var;
- ensure_complete_type(ira->codegen, var->value->type);
- if (type_is_invalid(var->value->type))
+ if ((err = ensure_complete_type(ira->codegen, var->value->type)))
return false;
if (var->value->type->id == TypeTableEntryIdMetaType)
@@ -16940,7 +16949,7 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
// calling_convention: TypeInfo.CallingConvention
ensure_field_index(fn_def_val->type, "calling_convention", 2);
fn_def_fields[2].special = ConstValSpecialStatic;
- fn_def_fields[2].type = ir_type_info_get_type(ira, "CallingConvention");
+ fn_def_fields[2].type = ir_type_info_get_type(ira, "CallingConvention", nullptr);
bigint_init_unsigned(&fn_def_fields[2].data.x_enum_tag, fn_node->cc);
// is_var_args: bool
ensure_field_index(fn_def_val->type, "is_var_args", 3);
@@ -17014,8 +17023,7 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
case TldIdContainer:
{
TypeTableEntry *type_entry = ((TldContainer *)curr_entry->value)->type_entry;
- ensure_complete_type(ira->codegen, type_entry);
- if (type_is_invalid(type_entry))
+ if ((err = ensure_complete_type(ira->codegen, type_entry)))
return false;
// This is a type.
@@ -17041,12 +17049,67 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
return true;
}
+static ConstExprValue *create_ptr_like_type_info(IrAnalyze *ira, TypeTableEntry *ptr_type_entry) {
+ TypeTableEntry *attrs_type;
+ uint32_t size_enum_index;
+ if (is_slice(ptr_type_entry)) {
+ attrs_type = ptr_type_entry->data.structure.fields[slice_ptr_index].type_entry;
+ size_enum_index = 2;
+ } else if (ptr_type_entry->id == TypeTableEntryIdPointer) {
+ attrs_type = ptr_type_entry;
+ size_enum_index = (ptr_type_entry->data.pointer.ptr_len == PtrLenSingle) ? 0 : 1;
+ } else {
+ zig_unreachable();
+ }
+
+ TypeTableEntry *type_info_pointer_type = ir_type_info_get_type(ira, "Pointer", nullptr);
+ assertNoError(ensure_complete_type(ira->codegen, type_info_pointer_type));
+
+ ConstExprValue *result = create_const_vals(1);
+ result->special = ConstValSpecialStatic;
+ result->type = type_info_pointer_type;
+
+ ConstExprValue *fields = create_const_vals(5);
+ result->data.x_struct.fields = fields;
+
+ // size: Size
+ ensure_field_index(result->type, "size", 0);
+ TypeTableEntry *type_info_pointer_size_type = ir_type_info_get_type(ira, "Size", type_info_pointer_type);
+ assertNoError(ensure_complete_type(ira->codegen, type_info_pointer_size_type));
+ fields[0].special = ConstValSpecialStatic;
+ fields[0].type = type_info_pointer_size_type;
+ bigint_init_unsigned(&fields[0].data.x_enum_tag, size_enum_index);
+
+ // is_const: bool
+ ensure_field_index(result->type, "is_const", 1);
+ fields[1].special = ConstValSpecialStatic;
+ fields[1].type = ira->codegen->builtin_types.entry_bool;
+ fields[1].data.x_bool = attrs_type->data.pointer.is_const;
+ // is_volatile: bool
+ ensure_field_index(result->type, "is_volatile", 2);
+ fields[2].special = ConstValSpecialStatic;
+ fields[2].type = ira->codegen->builtin_types.entry_bool;
+ fields[2].data.x_bool = attrs_type->data.pointer.is_volatile;
+ // alignment: u32
+ ensure_field_index(result->type, "alignment", 3);
+ fields[3].special = ConstValSpecialStatic;
+ fields[3].type = get_int_type(ira->codegen, false, 29);
+ bigint_init_unsigned(&fields[3].data.x_bigint, attrs_type->data.pointer.alignment);
+ // child: type
+ ensure_field_index(result->type, "child", 4);
+ fields[4].special = ConstValSpecialStatic;
+ fields[4].type = ira->codegen->builtin_types.entry_type;
+ fields[4].data.x_type = attrs_type->data.pointer.child_type;
+
+ return result;
+}
+
static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *type_entry) {
+ Error err;
assert(type_entry != nullptr);
assert(!type_is_invalid(type_entry));
- ensure_complete_type(ira->codegen, type_entry);
- if (type_is_invalid(type_entry))
+ if ((err = ensure_complete_type(ira->codegen, type_entry)))
return nullptr;
const auto make_enum_field_val = [ira](ConstExprValue *enum_field_val, TypeEnumField *enum_field,
@@ -17066,63 +17129,6 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
enum_field_val->data.x_struct.fields = inner_fields;
};
- const auto create_ptr_like_type_info = [ira](TypeTableEntry *ptr_type_entry) {
- TypeTableEntry *attrs_type;
- uint32_t size_enum_index;
- if (is_slice(ptr_type_entry)) {
- attrs_type = ptr_type_entry->data.structure.fields[slice_ptr_index].type_entry;
- size_enum_index = 2;
- } else if (ptr_type_entry->id == TypeTableEntryIdPointer) {
- attrs_type = ptr_type_entry;
- size_enum_index = (ptr_type_entry->data.pointer.ptr_len == PtrLenSingle) ? 0 : 1;
- } else {
- zig_unreachable();
- }
-
- TypeTableEntry *type_info_pointer_type = ir_type_info_get_type(ira, "Pointer");
- ensure_complete_type(ira->codegen, type_info_pointer_type);
- assert(!type_is_invalid(type_info_pointer_type));
-
- ConstExprValue *result = create_const_vals(1);
- result->special = ConstValSpecialStatic;
- result->type = type_info_pointer_type;
-
- ConstExprValue *fields = create_const_vals(5);
- result->data.x_struct.fields = fields;
-
- // size: Size
- ensure_field_index(result->type, "size", 0);
- TypeTableEntry *type_info_pointer_size_type = ir_type_info_get_type(ira, "Size", type_info_pointer_type);
- ensure_complete_type(ira->codegen, type_info_pointer_size_type);
- assert(!type_is_invalid(type_info_pointer_size_type));
- fields[0].special = ConstValSpecialStatic;
- fields[0].type = type_info_pointer_size_type;
- bigint_init_unsigned(&fields[0].data.x_enum_tag, size_enum_index);
-
- // is_const: bool
- ensure_field_index(result->type, "is_const", 1);
- fields[1].special = ConstValSpecialStatic;
- fields[1].type = ira->codegen->builtin_types.entry_bool;
- fields[1].data.x_bool = attrs_type->data.pointer.is_const;
- // is_volatile: bool
- ensure_field_index(result->type, "is_volatile", 2);
- fields[2].special = ConstValSpecialStatic;
- fields[2].type = ira->codegen->builtin_types.entry_bool;
- fields[2].data.x_bool = attrs_type->data.pointer.is_volatile;
- // alignment: u32
- ensure_field_index(result->type, "alignment", 3);
- fields[3].special = ConstValSpecialStatic;
- fields[3].type = ira->codegen->builtin_types.entry_u32;
- bigint_init_unsigned(&fields[3].data.x_bigint, attrs_type->data.pointer.alignment);
- // child: type
- ensure_field_index(result->type, "child", 4);
- fields[4].special = ConstValSpecialStatic;
- fields[4].type = ira->codegen->builtin_types.entry_type;
- fields[4].data.x_type = attrs_type->data.pointer.child_type;
-
- return result;
- };
-
if (type_entry == ira->codegen->builtin_types.entry_global_error_set) {
zig_panic("TODO implement @typeInfo for global error set");
}
@@ -17158,7 +17164,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
{
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Int");
+ result->type = ir_type_info_get_type(ira, "Int", nullptr);
ConstExprValue *fields = create_const_vals(2);
result->data.x_struct.fields = fields;
@@ -17180,7 +17186,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
{
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Float");
+ result->type = ir_type_info_get_type(ira, "Float", nullptr);
ConstExprValue *fields = create_const_vals(1);
result->data.x_struct.fields = fields;
@@ -17195,14 +17201,14 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
}
case TypeTableEntryIdPointer:
{
- result = create_ptr_like_type_info(type_entry);
+ result = create_ptr_like_type_info(ira, type_entry);
break;
}
case TypeTableEntryIdArray:
{
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Array");
+ result->type = ir_type_info_get_type(ira, "Array", nullptr);
ConstExprValue *fields = create_const_vals(2);
result->data.x_struct.fields = fields;
@@ -17224,7 +17230,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
{
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Optional");
+ result->type = ir_type_info_get_type(ira, "Optional", nullptr);
ConstExprValue *fields = create_const_vals(1);
result->data.x_struct.fields = fields;
@@ -17241,7 +17247,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
{
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Promise");
+ result->type = ir_type_info_get_type(ira, "Promise", nullptr);
ConstExprValue *fields = create_const_vals(1);
result->data.x_struct.fields = fields;
@@ -17267,7 +17273,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
{
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Enum");
+ result->type = ir_type_info_get_type(ira, "Enum", nullptr);
ConstExprValue *fields = create_const_vals(4);
result->data.x_struct.fields = fields;
@@ -17275,7 +17281,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
// layout: ContainerLayout
ensure_field_index(result->type, "layout", 0);
fields[0].special = ConstValSpecialStatic;
- fields[0].type = ir_type_info_get_type(ira, "ContainerLayout");
+ fields[0].type = ir_type_info_get_type(ira, "ContainerLayout", nullptr);
bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.enumeration.layout);
// tag_type: type
ensure_field_index(result->type, "tag_type", 1);
@@ -17285,7 +17291,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
// fields: []TypeInfo.EnumField
ensure_field_index(result->type, "fields", 2);
- TypeTableEntry *type_info_enum_field_type = ir_type_info_get_type(ira, "EnumField");
+ TypeTableEntry *type_info_enum_field_type = ir_type_info_get_type(ira, "EnumField", nullptr);
uint32_t enum_field_count = type_entry->data.enumeration.src_field_count;
ConstExprValue *enum_field_array = create_const_vals(1);
@@ -17317,7 +17323,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
{
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "ErrorSet");
+ result->type = ir_type_info_get_type(ira, "ErrorSet", nullptr);
ConstExprValue *fields = create_const_vals(1);
result->data.x_struct.fields = fields;
@@ -17325,7 +17331,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
// errors: []TypeInfo.Error
ensure_field_index(result->type, "errors", 0);
- TypeTableEntry *type_info_error_type = ir_type_info_get_type(ira, "Error");
+ TypeTableEntry *type_info_error_type = ir_type_info_get_type(ira, "Error", nullptr);
uint32_t error_count = type_entry->data.error_set.err_count;
ConstExprValue *error_array = create_const_vals(1);
error_array->special = ConstValSpecialStatic;
@@ -17367,7 +17373,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
{
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "ErrorUnion");
+ result->type = ir_type_info_get_type(ira, "ErrorUnion", nullptr);
ConstExprValue *fields = create_const_vals(2);
result->data.x_struct.fields = fields;
@@ -17390,7 +17396,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
{
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Union");
+ result->type = ir_type_info_get_type(ira, "Union", nullptr);
ConstExprValue *fields = create_const_vals(4);
result->data.x_struct.fields = fields;
@@ -17398,7 +17404,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
// layout: ContainerLayout
ensure_field_index(result->type, "layout", 0);
fields[0].special = ConstValSpecialStatic;
- fields[0].type = ir_type_info_get_type(ira, "ContainerLayout");
+ fields[0].type = ir_type_info_get_type(ira, "ContainerLayout", nullptr);
bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.unionation.layout);
// tag_type: ?type
ensure_field_index(result->type, "tag_type", 1);
@@ -17420,7 +17426,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
// fields: []TypeInfo.UnionField
ensure_field_index(result->type, "fields", 2);
- TypeTableEntry *type_info_union_field_type = ir_type_info_get_type(ira, "UnionField");
+ TypeTableEntry *type_info_union_field_type = ir_type_info_get_type(ira, "UnionField", nullptr);
uint32_t union_field_count = type_entry->data.unionation.src_field_count;
ConstExprValue *union_field_array = create_const_vals(1);
@@ -17432,7 +17438,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
init_const_slice(ira->codegen, &fields[2], union_field_array, 0, union_field_count, false);
- TypeTableEntry *type_info_enum_field_type = ir_type_info_get_type(ira, "EnumField");
+ TypeTableEntry *type_info_enum_field_type = ir_type_info_get_type(ira, "EnumField", nullptr);
for (uint32_t union_field_index = 0; union_field_index < union_field_count; union_field_index++) {
TypeUnionField *union_field = &type_entry->data.unionation.fields[union_field_index];
@@ -17474,13 +17480,13 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
case TypeTableEntryIdStruct:
{
if (type_entry->data.structure.is_slice) {
- result = create_ptr_like_type_info(type_entry);
+ result = create_ptr_like_type_info(ira, type_entry);
break;
}
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Struct");
+ result->type = ir_type_info_get_type(ira, "Struct", nullptr);
ConstExprValue *fields = create_const_vals(3);
result->data.x_struct.fields = fields;
@@ -17488,12 +17494,12 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
// layout: ContainerLayout
ensure_field_index(result->type, "layout", 0);
fields[0].special = ConstValSpecialStatic;
- fields[0].type = ir_type_info_get_type(ira, "ContainerLayout");
+ fields[0].type = ir_type_info_get_type(ira, "ContainerLayout", nullptr);
bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.structure.layout);
// fields: []TypeInfo.StructField
ensure_field_index(result->type, "fields", 1);
- TypeTableEntry *type_info_struct_field_type = ir_type_info_get_type(ira, "StructField");
+ TypeTableEntry *type_info_struct_field_type = ir_type_info_get_type(ira, "StructField", nullptr);
uint32_t struct_field_count = type_entry->data.structure.src_field_count;
ConstExprValue *struct_field_array = create_const_vals(1);
@@ -17549,7 +17555,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
{
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Fn");
+ result->type = ir_type_info_get_type(ira, "Fn", nullptr);
ConstExprValue *fields = create_const_vals(6);
result->data.x_struct.fields = fields;
@@ -17557,7 +17563,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
// calling_convention: TypeInfo.CallingConvention
ensure_field_index(result->type, "calling_convention", 0);
fields[0].special = ConstValSpecialStatic;
- fields[0].type = ir_type_info_get_type(ira, "CallingConvention");
+ fields[0].type = ir_type_info_get_type(ira, "CallingConvention", nullptr);
bigint_init_unsigned(&fields[0].data.x_enum_tag, type_entry->data.fn.fn_type_id.cc);
// is_generic: bool
ensure_field_index(result->type, "is_generic", 1);
@@ -17598,7 +17604,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
fields[4].data.x_optional = async_alloc_type;
}
// args: []TypeInfo.FnArg
- TypeTableEntry *type_info_fn_arg_type = ir_type_info_get_type(ira, "FnArg");
+ TypeTableEntry *type_info_fn_arg_type = ir_type_info_get_type(ira, "FnArg", nullptr);
size_t fn_arg_count = type_entry->data.fn.fn_type_id.param_count -
(is_varargs && type_entry->data.fn.fn_type_id.cc != CallingConventionC);
@@ -17673,7 +17679,7 @@ static TypeTableEntry *ir_analyze_instruction_type_info(IrAnalyze *ira,
if (type_is_invalid(type_entry))
return ira->codegen->builtin_types.entry_invalid;
- TypeTableEntry *result_type = ir_type_info_get_type(ira, nullptr);
+ TypeTableEntry *result_type = ir_type_info_get_type(ira, nullptr, nullptr);
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
out_val->type = result_type;
@@ -18883,13 +18889,13 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio
}
static TypeTableEntry *ir_analyze_instruction_member_count(IrAnalyze *ira, IrInstructionMemberCount *instruction) {
+ Error err;
IrInstruction *container = instruction->container->other;
if (type_is_invalid(container->value.type))
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *container_type = ir_resolve_type(ira, container);
- ensure_complete_type(ira->codegen, container_type);
- if (type_is_invalid(container_type))
+ if ((err = ensure_complete_type(ira->codegen, container_type)))
return ira->codegen->builtin_types.entry_invalid;
uint64_t result;
@@ -18921,13 +18927,13 @@ static TypeTableEntry *ir_analyze_instruction_member_count(IrAnalyze *ira, IrIns
}
static TypeTableEntry *ir_analyze_instruction_member_type(IrAnalyze *ira, IrInstructionMemberType *instruction) {
+ Error err;
IrInstruction *container_type_value = instruction->container_type->other;
TypeTableEntry *container_type = ir_resolve_type(ira, container_type_value);
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
- ensure_complete_type(ira->codegen, container_type);
- if (type_is_invalid(container_type))
+ if ((err = ensure_complete_type(ira->codegen, container_type)))
return ira->codegen->builtin_types.entry_invalid;
@@ -18968,13 +18974,13 @@ static TypeTableEntry *ir_analyze_instruction_member_type(IrAnalyze *ira, IrInst
}
static TypeTableEntry *ir_analyze_instruction_member_name(IrAnalyze *ira, IrInstructionMemberName *instruction) {
+ Error err;
IrInstruction *container_type_value = instruction->container_type->other;
TypeTableEntry *container_type = ir_resolve_type(ira, container_type_value);
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
- ensure_complete_type(ira->codegen, container_type);
- if (type_is_invalid(container_type))
+ if ((err = ensure_complete_type(ira->codegen, container_type)))
return ira->codegen->builtin_types.entry_invalid;
uint64_t member_index;
@@ -19055,13 +19061,13 @@ static TypeTableEntry *ir_analyze_instruction_handle(IrAnalyze *ira, IrInstructi
}
static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstructionAlignOf *instruction) {
+ Error err;
IrInstruction *type_value = instruction->type_value->other;
if (type_is_invalid(type_value->value.type))
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *type_entry = ir_resolve_type(ira, type_value);
- type_ensure_zero_bits_known(ira->codegen, type_entry);
- if (type_is_invalid(type_entry))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, type_entry)))
return ira->codegen->builtin_types.entry_invalid;
switch (type_entry->id) {
@@ -19917,6 +19923,7 @@ static void buf_read_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
}
static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstructionBitCast *instruction) {
+ Error err;
IrInstruction *dest_type_value = instruction->dest_type->other;
TypeTableEntry *dest_type = ir_resolve_type(ira, dest_type_value);
if (type_is_invalid(dest_type))
@@ -19927,12 +19934,10 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc
if (type_is_invalid(src_type))
return ira->codegen->builtin_types.entry_invalid;
- ensure_complete_type(ira->codegen, dest_type);
- if (type_is_invalid(dest_type))
+ if ((err = ensure_complete_type(ira->codegen, dest_type)))
return ira->codegen->builtin_types.entry_invalid;
- ensure_complete_type(ira->codegen, src_type);
- if (type_is_invalid(src_type))
+ if ((err = ensure_complete_type(ira->codegen, src_type)))
return ira->codegen->builtin_types.entry_invalid;
if (get_codegen_ptr_type(src_type) != nullptr) {
@@ -20018,6 +20023,7 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc
}
static TypeTableEntry *ir_analyze_instruction_int_to_ptr(IrAnalyze *ira, IrInstructionIntToPtr *instruction) {
+ Error err;
IrInstruction *dest_type_value = instruction->dest_type->other;
TypeTableEntry *dest_type = ir_resolve_type(ira, dest_type_value);
if (type_is_invalid(dest_type))
@@ -20028,7 +20034,8 @@ static TypeTableEntry *ir_analyze_instruction_int_to_ptr(IrAnalyze *ira, IrInstr
return ira->codegen->builtin_types.entry_invalid;
}
- type_ensure_zero_bits_known(ira->codegen, dest_type);
+ if ((err = type_ensure_zero_bits_known(ira->codegen, dest_type)))
+ return ira->codegen->builtin_types.entry_invalid;
if (!type_has_bits(dest_type)) {
ir_add_error(ira, dest_type_value,
buf_sprintf("type '%s' has 0 bits and cannot store information", buf_ptr(&dest_type->name)));
@@ -20161,6 +20168,7 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr
}
static TypeTableEntry *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstructionPtrType *instruction) {
+ Error err;
TypeTableEntry *child_type = ir_resolve_type(ira, instruction->child_type->other);
if (type_is_invalid(child_type))
return ira->codegen->builtin_types.entry_invalid;
@@ -20178,8 +20186,7 @@ static TypeTableEntry *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstruc
if (!ir_resolve_align(ira, instruction->align_value->other, &align_bytes))
return ira->codegen->builtin_types.entry_invalid;
} else {
- type_ensure_zero_bits_known(ira->codegen, child_type);
- if (type_is_invalid(child_type))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, child_type)))
return ira->codegen->builtin_types.entry_invalid;
align_bytes = get_abi_alignment(ira->codegen, child_type);
}
@@ -20299,22 +20306,21 @@ static TypeTableEntry *ir_analyze_instruction_arg_type(IrAnalyze *ira, IrInstruc
}
static TypeTableEntry *ir_analyze_instruction_tag_type(IrAnalyze *ira, IrInstructionTagType *instruction) {
+ Error err;
IrInstruction *target_inst = instruction->target->other;
TypeTableEntry *enum_type = ir_resolve_type(ira, target_inst);
if (type_is_invalid(enum_type))
return ira->codegen->builtin_types.entry_invalid;
if (enum_type->id == TypeTableEntryIdEnum) {
- ensure_complete_type(ira->codegen, enum_type);
- if (type_is_invalid(enum_type))
+ if ((err = ensure_complete_type(ira->codegen, enum_type)))
return ira->codegen->builtin_types.entry_invalid;
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
out_val->data.x_type = enum_type->data.enumeration.tag_int_type;
return ira->codegen->builtin_types.entry_type;
} else if (enum_type->id == TypeTableEntryIdUnion) {
- ensure_complete_type(ira->codegen, enum_type);
- if (type_is_invalid(enum_type))
+ if ((err = ensure_complete_type(ira->codegen, enum_type)))
return ira->codegen->builtin_types.entry_invalid;
AstNode *decl_node = enum_type->data.unionation.decl_node;
@@ -20591,7 +20597,7 @@ static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstr
return ira->codegen->builtin_types.entry_invalid;
IrInstruction *casted_operand = ir_implicit_cast(ira, operand, operand_type);
- if (type_is_invalid(casted_ptr->value.type))
+ if (type_is_invalid(casted_operand->value.type))
return ira->codegen->builtin_types.entry_invalid;
AtomicOrder ordering;
@@ -20817,6 +20823,7 @@ static TypeTableEntry *ir_analyze_instruction_sqrt(IrAnalyze *ira, IrInstruction
}
static TypeTableEntry *ir_analyze_instruction_enum_to_int(IrAnalyze *ira, IrInstructionEnumToInt *instruction) {
+ Error err;
IrInstruction *target = instruction->target->other;
if (type_is_invalid(target->value.type))
return ira->codegen->builtin_types.entry_invalid;
@@ -20827,8 +20834,7 @@ static TypeTableEntry *ir_analyze_instruction_enum_to_int(IrAnalyze *ira, IrInst
return ira->codegen->builtin_types.entry_invalid;
}
- type_ensure_zero_bits_known(ira->codegen, target->value.type);
- if (type_is_invalid(target->value.type))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, target->value.type)))
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *tag_type = target->value.type->data.enumeration.tag_int_type;
@@ -20839,6 +20845,7 @@ static TypeTableEntry *ir_analyze_instruction_enum_to_int(IrAnalyze *ira, IrInst
}
static TypeTableEntry *ir_analyze_instruction_int_to_enum(IrAnalyze *ira, IrInstructionIntToEnum *instruction) {
+ Error err;
IrInstruction *dest_type_value = instruction->dest_type->other;
TypeTableEntry *dest_type = ir_resolve_type(ira, dest_type_value);
if (type_is_invalid(dest_type))
@@ -20850,8 +20857,7 @@ static TypeTableEntry *ir_analyze_instruction_int_to_enum(IrAnalyze *ira, IrInst
return ira->codegen->builtin_types.entry_invalid;
}
- type_ensure_zero_bits_known(ira->codegen, dest_type);
- if (type_is_invalid(dest_type))
+ if ((err = type_ensure_zero_bits_known(ira->codegen, dest_type)))
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *tag_type = dest_type->data.enumeration.tag_int_type;
diff --git a/src/result.hpp b/src/result.hpp
new file mode 100644
index 0000000000..6c9f35c0b6
--- /dev/null
+++ b/src/result.hpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Andrew Kelley
+ *
+ * This file is part of zig, which is MIT licensed.
+ * See http://opensource.org/licenses/MIT
+ */
+
+#ifndef ZIG_RESULT_HPP
+#define ZIG_RESULT_HPP
+
+#include "error.hpp"
+
+#include
+
+static inline void assertNoError(Error err) {
+ assert(err == ErrorNone);
+}
+
+template
+struct Result {
+ T data;
+ Error err;
+
+ Result(T x) : data(x), err(ErrorNone) {}
+
+ Result(Error err) : err(err) {
+ assert(err != ErrorNone);
+ }
+
+ T unwrap() {
+ assert(err == ErrorNone);
+ return data;
+ }
+};
+
+#endif
diff --git a/src/translate_c.cpp b/src/translate_c.cpp
index 0373dee85c..f521077513 100644
--- a/src/translate_c.cpp
+++ b/src/translate_c.cpp
@@ -2759,7 +2759,9 @@ static AstNode *trans_do_loop(Context *c, TransScope *parent_scope, const DoStmt
AstNode *child_statement;
child_scope = trans_stmt(c, &child_block_scope->base, stmt->getBody(), &child_statement);
if (child_scope == nullptr) return nullptr;
- body_node->data.block.statements.append(child_statement);
+ if (child_statement != nullptr) {
+ body_node->data.block.statements.append(child_statement);
+ }
}
// if (!cond) break;
@@ -2769,6 +2771,7 @@ static AstNode *trans_do_loop(Context *c, TransScope *parent_scope, const DoStmt
terminator_node->data.if_bool_expr.condition = trans_create_node_prefix_op(c, PrefixOpBoolNot, condition_node);
terminator_node->data.if_bool_expr.then_block = trans_create_node(c, NodeTypeBreak);
+ assert(terminator_node != nullptr);
body_node->data.block.statements.append(terminator_node);
while_scope->node->data.while_expr.body = body_node;
@@ -2832,7 +2835,12 @@ static AstNode *trans_for_loop(Context *c, TransScope *parent_scope, const ForSt
TransScope *body_scope = trans_stmt(c, &while_scope->base, stmt->getBody(), &body_statement);
if (body_scope == nullptr)
return nullptr;
- while_scope->node->data.while_expr.body = body_statement;
+
+ if (body_statement == nullptr) {
+ while_scope->node->data.while_expr.body = trans_create_node(c, NodeTypeBlock);
+ } else {
+ while_scope->node->data.while_expr.body = body_statement;
+ }
return loop_block_node;
}
@@ -3067,9 +3075,14 @@ static int trans_stmt_extra(Context *c, TransScope *scope, const Stmt *stmt,
trans_unary_operator(c, result_used, scope, (const UnaryOperator *)stmt));
case Stmt::DeclStmtClass:
return trans_local_declaration(c, scope, (const DeclStmt *)stmt, out_node, out_child_scope);
- case Stmt::WhileStmtClass:
- return wrap_stmt(out_node, out_child_scope, scope,
- trans_while_loop(c, scope, (const WhileStmt *)stmt));
+ case Stmt::WhileStmtClass: {
+ AstNode *while_node = trans_while_loop(c, scope, (const WhileStmt *)stmt);
+ assert(while_node->type == NodeTypeWhileExpr);
+ if (while_node->data.while_expr.body == nullptr) {
+ while_node->data.while_expr.body = trans_create_node(c, NodeTypeBlock);
+ }
+ return wrap_stmt(out_node, out_child_scope, scope, while_node);
+ }
case Stmt::IfStmtClass:
return wrap_stmt(out_node, out_child_scope, scope,
trans_if_statement(c, scope, (const IfStmt *)stmt));
@@ -3092,12 +3105,18 @@ static int trans_stmt_extra(Context *c, TransScope *scope, const Stmt *stmt,
case Stmt::UnaryExprOrTypeTraitExprClass:
return wrap_stmt(out_node, out_child_scope, scope,
trans_unary_expr_or_type_trait_expr(c, scope, (const UnaryExprOrTypeTraitExpr *)stmt));
- case Stmt::DoStmtClass:
- return wrap_stmt(out_node, out_child_scope, scope,
- trans_do_loop(c, scope, (const DoStmt *)stmt));
- case Stmt::ForStmtClass:
- return wrap_stmt(out_node, out_child_scope, scope,
- trans_for_loop(c, scope, (const ForStmt *)stmt));
+ case Stmt::DoStmtClass: {
+ AstNode *while_node = trans_do_loop(c, scope, (const DoStmt *)stmt);
+ assert(while_node->type == NodeTypeWhileExpr);
+ if (while_node->data.while_expr.body == nullptr) {
+ while_node->data.while_expr.body = trans_create_node(c, NodeTypeBlock);
+ }
+ return wrap_stmt(out_node, out_child_scope, scope, while_node);
+ }
+ case Stmt::ForStmtClass: {
+ AstNode *node = trans_for_loop(c, scope, (const ForStmt *)stmt);
+ return wrap_stmt(out_node, out_child_scope, scope, node);
+ }
case Stmt::StringLiteralClass:
return wrap_stmt(out_node, out_child_scope, scope,
trans_string_literal(c, scope, (const StringLiteral *)stmt));
diff --git a/src/util.hpp b/src/util.hpp
index b0402137bd..41f8feb591 100644
--- a/src/util.hpp
+++ b/src/util.hpp
@@ -21,6 +21,7 @@
#define ATTRIBUTE_PRINTF(a, b)
#define ATTRIBUTE_RETURNS_NOALIAS __declspec(restrict)
#define ATTRIBUTE_NORETURN __declspec(noreturn)
+#define ATTRIBUTE_MUST_USE
#else
@@ -28,6 +29,7 @@
#define ATTRIBUTE_PRINTF(a, b) __attribute__((format(printf, a, b)))
#define ATTRIBUTE_RETURNS_NOALIAS __attribute__((__malloc__))
#define ATTRIBUTE_NORETURN __attribute__((noreturn))
+#define ATTRIBUTE_MUST_USE __attribute__((warn_unused_result))
#endif
diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
index df31c88d2a..6948af43ba 100644
--- a/std/atomic/queue.zig
+++ b/std/atomic/queue.zig
@@ -1,40 +1,38 @@
+const std = @import("../index.zig");
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
+const assert = std.debug.assert;
/// Many producer, many consumer, non-allocating, thread-safe.
-/// Uses a spinlock to protect get() and put().
+/// Uses a mutex to protect access.
pub fn Queue(comptime T: type) type {
return struct {
head: ?*Node,
tail: ?*Node,
- lock: u8,
+ mutex: std.Mutex,
pub const Self = this;
-
- pub const Node = struct {
- next: ?*Node,
- data: T,
- };
+ pub const Node = std.LinkedList(T).Node;
pub fn init() Self {
return Self{
.head = null,
.tail = null,
- .lock = 0,
+ .mutex = std.Mutex.init(),
};
}
pub fn put(self: *Self, node: *Node) void {
node.next = null;
- while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
- defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+ const held = self.mutex.acquire();
+ defer held.release();
- const opt_tail = self.tail;
+ node.prev = self.tail;
self.tail = node;
- if (opt_tail) |tail| {
- tail.next = node;
+ if (node.prev) |prev_tail| {
+ prev_tail.next = node;
} else {
assert(self.head == null);
self.head = node;
@@ -42,18 +40,27 @@ pub fn Queue(comptime T: type) type {
}
pub fn get(self: *Self) ?*Node {
- while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
- defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+ const held = self.mutex.acquire();
+ defer held.release();
const head = self.head orelse return null;
self.head = head.next;
- if (head.next == null) self.tail = null;
+ if (head.next) |new_head| {
+ new_head.prev = null;
+ } else {
+ self.tail = null;
+ }
+ // This way, a get() and a remove() are thread-safe with each other.
+ head.prev = null;
+ head.next = null;
return head;
}
pub fn unget(self: *Self, node: *Node) void {
- while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
- defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+ node.prev = null;
+
+ const held = self.mutex.acquire();
+ defer held.release();
const opt_head = self.head;
self.head = node;
@@ -65,13 +72,39 @@ pub fn Queue(comptime T: type) type {
}
}
+ /// Thread-safe with get() and remove(). Returns whether node was actually removed.
+ pub fn remove(self: *Self, node: *Node) bool {
+ const held = self.mutex.acquire();
+ defer held.release();
+
+ if (node.prev == null and node.next == null and self.head != node) {
+ return false;
+ }
+
+ if (node.prev) |prev| {
+ prev.next = node.next;
+ } else {
+ self.head = node.next;
+ }
+ if (node.next) |next| {
+ next.prev = node.prev;
+ } else {
+ self.tail = node.prev;
+ }
+ node.prev = null;
+ node.next = null;
+ return true;
+ }
+
pub fn isEmpty(self: *Self) bool {
- return @atomicLoad(?*Node, &self.head, builtin.AtomicOrder.SeqCst) != null;
+ const held = self.mutex.acquire();
+ defer held.release();
+ return self.head != null;
}
pub fn dump(self: *Self) void {
- while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
- defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+ const held = self.mutex.acquire();
+ defer held.release();
std.debug.warn("head: ");
dumpRecursive(self.head, 0);
@@ -93,9 +126,6 @@ pub fn Queue(comptime T: type) type {
};
}
-const std = @import("../index.zig");
-const assert = std.debug.assert;
-
const Context = struct {
allocator: *std.mem.Allocator,
queue: *Queue(i32),
@@ -169,6 +199,7 @@ fn startPuts(ctx: *Context) u8 {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
const node = ctx.allocator.create(Queue(i32).Node{
+ .prev = undefined,
.next = undefined,
.data = x,
}) catch unreachable;
@@ -198,12 +229,14 @@ test "std.atomic.Queue single-threaded" {
var node_0 = Queue(i32).Node{
.data = 0,
.next = undefined,
+ .prev = undefined,
};
queue.put(&node_0);
var node_1 = Queue(i32).Node{
.data = 1,
.next = undefined,
+ .prev = undefined,
};
queue.put(&node_1);
@@ -212,12 +245,14 @@ test "std.atomic.Queue single-threaded" {
var node_2 = Queue(i32).Node{
.data = 2,
.next = undefined,
+ .prev = undefined,
};
queue.put(&node_2);
var node_3 = Queue(i32).Node{
.data = 3,
.next = undefined,
+ .prev = undefined,
};
queue.put(&node_3);
@@ -228,6 +263,7 @@ test "std.atomic.Queue single-threaded" {
var node_4 = Queue(i32).Node{
.data = 4,
.next = undefined,
+ .prev = undefined,
};
queue.put(&node_4);
diff --git a/std/build.zig b/std/build.zig
index 68cf13c1eb..08bb5635d9 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -267,7 +267,7 @@ pub const Builder = struct {
if (self.verbose) {
warn("rm {}\n", installed_file);
}
- _ = os.deleteFile(self.allocator, installed_file);
+ _ = os.deleteFile(installed_file);
}
// TODO remove empty directories
@@ -424,60 +424,69 @@ pub const Builder = struct {
return mode;
}
- pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) bool {
- if (self.user_input_options.put(name, UserInputOption{
- .name = name,
- .value = UserValue{ .Scalar = value },
- .used = false,
- }) catch unreachable) |*prev_value| {
- // option already exists
- switch (prev_value.value) {
- UserValue.Scalar => |s| {
- // turn it into a list
- var list = ArrayList([]const u8).init(self.allocator);
- list.append(s) catch unreachable;
- list.append(value) catch unreachable;
- _ = self.user_input_options.put(name, UserInputOption{
- .name = name,
- .value = UserValue{ .List = list },
- .used = false,
- }) catch unreachable;
- },
- UserValue.List => |*list| {
- // append to the list
- list.append(value) catch unreachable;
- _ = self.user_input_options.put(name, UserInputOption{
- .name = name,
- .value = UserValue{ .List = list.* },
- .used = false,
- }) catch unreachable;
- },
- UserValue.Flag => {
- warn("Option '-D{}={}' conflicts with flag '-D{}'.\n", name, value, name);
- return true;
- },
- }
+ pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) !bool {
+ const gop = try self.user_input_options.getOrPut(name);
+ if (!gop.found_existing) {
+ gop.kv.value = UserInputOption{
+ .name = name,
+ .value = UserValue{ .Scalar = value },
+ .used = false,
+ };
+ return false;
+ }
+
+ // option already exists
+ switch (gop.kv.value.value) {
+ UserValue.Scalar => |s| {
+ // turn it into a list
+ var list = ArrayList([]const u8).init(self.allocator);
+ list.append(s) catch unreachable;
+ list.append(value) catch unreachable;
+ _ = self.user_input_options.put(name, UserInputOption{
+ .name = name,
+ .value = UserValue{ .List = list },
+ .used = false,
+ }) catch unreachable;
+ },
+ UserValue.List => |*list| {
+ // append to the list
+ list.append(value) catch unreachable;
+ _ = self.user_input_options.put(name, UserInputOption{
+ .name = name,
+ .value = UserValue{ .List = list.* },
+ .used = false,
+ }) catch unreachable;
+ },
+ UserValue.Flag => {
+ warn("Option '-D{}={}' conflicts with flag '-D{}'.\n", name, value, name);
+ return true;
+ },
}
return false;
}
- pub fn addUserInputFlag(self: *Builder, name: []const u8) bool {
- if (self.user_input_options.put(name, UserInputOption{
- .name = name,
- .value = UserValue{ .Flag = {} },
- .used = false,
- }) catch unreachable) |*prev_value| {
- switch (prev_value.value) {
- UserValue.Scalar => |s| {
- warn("Flag '-D{}' conflicts with option '-D{}={}'.\n", name, name, s);
- return true;
- },
- UserValue.List => {
- warn("Flag '-D{}' conflicts with multiple options of the same name.\n", name);
- return true;
- },
- UserValue.Flag => {},
- }
+ pub fn addUserInputFlag(self: *Builder, name: []const u8) !bool {
+ const gop = try self.user_input_options.getOrPut(name);
+ if (!gop.found_existing) {
+ gop.kv.value = UserInputOption{
+ .name = name,
+ .value = UserValue{ .Flag = {} },
+ .used = false,
+ };
+ return false;
+ }
+
+ // option already exists
+ switch (gop.kv.value.value) {
+ UserValue.Scalar => |s| {
+ warn("Flag '-D{}' conflicts with option '-D{}={}'.\n", name, name, s);
+ return true;
+ },
+ UserValue.List => {
+ warn("Flag '-D{}' conflicts with multiple options of the same name.\n", name);
+ return true;
+ },
+ UserValue.Flag => {},
}
return false;
}
@@ -603,10 +612,10 @@ pub const Builder = struct {
}
fn copyFile(self: *Builder, source_path: []const u8, dest_path: []const u8) !void {
- return self.copyFileMode(source_path, dest_path, os.default_file_mode);
+ return self.copyFileMode(source_path, dest_path, os.File.default_mode);
}
- fn copyFileMode(self: *Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void {
+ fn copyFileMode(self: *Builder, source_path: []const u8, dest_path: []const u8, mode: os.File.Mode) !void {
if (self.verbose) {
warn("cp {} {}\n", source_path, dest_path);
}
@@ -1173,7 +1182,7 @@ pub const LibExeObjStep = struct {
if (self.build_options_contents.len() > 0) {
const build_options_file = try os.path.join(builder.allocator, builder.cache_root, builder.fmt("{}_build_options.zig", self.name));
- try std.io.writeFile(builder.allocator, build_options_file, self.build_options_contents.toSliceConst());
+ try std.io.writeFile(build_options_file, self.build_options_contents.toSliceConst());
try zig_args.append("--pkg-begin");
try zig_args.append("build_options");
try zig_args.append(builder.pathFromRoot(build_options_file));
@@ -1482,11 +1491,14 @@ pub const LibExeObjStep = struct {
}
if (!is_darwin) {
- const rpath_arg = builder.fmt("-Wl,-rpath,{}", os.path.real(builder.allocator, builder.pathFromRoot(builder.cache_root)) catch unreachable);
+ const rpath_arg = builder.fmt("-Wl,-rpath,{}", try os.path.realAlloc(
+ builder.allocator,
+ builder.pathFromRoot(builder.cache_root),
+ ));
defer builder.allocator.free(rpath_arg);
- cc_args.append(rpath_arg) catch unreachable;
+ try cc_args.append(rpath_arg);
- cc_args.append("-rdynamic") catch unreachable;
+ try cc_args.append("-rdynamic");
}
for (self.full_path_libs.toSliceConst()) |full_path_lib| {
@@ -1557,11 +1569,14 @@ pub const LibExeObjStep = struct {
cc_args.append("-o") catch unreachable;
cc_args.append(output_path) catch unreachable;
- const rpath_arg = builder.fmt("-Wl,-rpath,{}", os.path.real(builder.allocator, builder.pathFromRoot(builder.cache_root)) catch unreachable);
+ const rpath_arg = builder.fmt("-Wl,-rpath,{}", try os.path.realAlloc(
+ builder.allocator,
+ builder.pathFromRoot(builder.cache_root),
+ ));
defer builder.allocator.free(rpath_arg);
- cc_args.append(rpath_arg) catch unreachable;
+ try cc_args.append(rpath_arg);
- cc_args.append("-rdynamic") catch unreachable;
+ try cc_args.append("-rdynamic");
{
var it = self.link_libs.iterator();
@@ -1908,7 +1923,7 @@ pub const WriteFileStep = struct {
warn("unable to make path {}: {}\n", full_path_dir, @errorName(err));
return err;
};
- io.writeFile(self.builder.allocator, full_path, self.data) catch |err| {
+ io.writeFile(full_path, self.data) catch |err| {
warn("unable to write {}: {}\n", full_path, @errorName(err));
return err;
};
diff --git a/std/c/darwin.zig b/std/c/darwin.zig
index 1bd1d6c4c9..2e238e40eb 100644
--- a/std/c/darwin.zig
+++ b/std/c/darwin.zig
@@ -1,5 +1,8 @@
+const macho = @import("../macho.zig");
+
extern "c" fn __error() *c_int;
pub extern "c" fn _NSGetExecutablePath(buf: [*]u8, bufsize: *u32) c_int;
+pub extern "c" fn _dyld_get_image_header(image_index: u32) ?*mach_header;
pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: [*]u8, buf_len: usize, basep: *i64) usize;
@@ -30,10 +33,45 @@ pub extern "c" fn sysctl(name: [*]c_int, namelen: c_uint, oldp: ?*c_void, oldlen
pub extern "c" fn sysctlbyname(name: [*]const u8, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) c_int;
pub extern "c" fn sysctlnametomib(name: [*]const u8, mibp: ?*c_int, sizep: ?*usize) c_int;
+pub extern "c" fn bind(socket: c_int, address: ?*const sockaddr, address_len: socklen_t) c_int;
+pub extern "c" fn socket(domain: c_int, type: c_int, protocol: c_int) c_int;
+
+/// The value of the link editor defined symbol _MH_EXECUTE_SYM is the address
+/// of the mach header in a Mach-O executable file type. It does not appear in
+/// any file type other than a MH_EXECUTE file type. The type of the symbol is
+/// absolute as the header is not part of any section.
+pub extern "c" var _mh_execute_header: if (@sizeOf(usize) == 8) mach_header_64 else mach_header;
+
+pub const mach_header_64 = macho.mach_header_64;
+pub const mach_header = macho.mach_header;
+
pub use @import("../os/darwin/errno.zig");
pub const _errno = __error;
+pub const in_port_t = u16;
+pub const sa_family_t = u8;
+pub const socklen_t = u32;
+pub const sockaddr = extern union {
+ in: sockaddr_in,
+ in6: sockaddr_in6,
+};
+pub const sockaddr_in = extern struct {
+ len: u8,
+ family: sa_family_t,
+ port: in_port_t,
+ addr: u32,
+ zero: [8]u8,
+};
+pub const sockaddr_in6 = extern struct {
+ len: u8,
+ family: sa_family_t,
+ port: in_port_t,
+ flowinfo: u32,
+ addr: [16]u8,
+ scope_id: u32,
+};
+
pub const timeval = extern struct {
tv_sec: isize,
tv_usec: isize,
@@ -98,14 +136,6 @@ pub const dirent = extern struct {
d_name: u8, // field address is address of first byte of name
};
-pub const sockaddr = extern struct {
- sa_len: u8,
- sa_family: sa_family_t,
- sa_data: [14]u8,
-};
-
-pub const sa_family_t = u8;
-
pub const pthread_attr_t = extern struct {
__sig: c_long,
__opaque: [56]u8,
diff --git a/std/c/index.zig b/std/c/index.zig
index 7de8634d07..6b20d718ef 100644
--- a/std/c/index.zig
+++ b/std/c/index.zig
@@ -21,8 +21,10 @@ pub extern "c" fn lseek(fd: c_int, offset: isize, whence: c_int) isize;
pub extern "c" fn open(path: [*]const u8, oflag: c_int, ...) c_int;
pub extern "c" fn raise(sig: c_int) c_int;
pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize;
+pub extern "c" fn pread(fd: c_int, buf: *c_void, nbyte: usize, offset: u64) isize;
pub extern "c" fn stat(noalias path: [*]const u8, noalias buf: *Stat) c_int;
pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize;
+pub extern "c" fn pwrite(fd: c_int, buf: *const c_void, nbyte: usize, offset: u64) isize;
pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void;
pub extern "c" fn munmap(addr: *c_void, len: usize) c_int;
pub extern "c" fn unlink(path: [*]const u8) c_int;
@@ -58,6 +60,7 @@ pub extern "pthread" fn pthread_create(noalias newthread: *pthread_t, noalias at
pub extern "pthread" fn pthread_attr_init(attr: *pthread_attr_t) c_int;
pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int;
pub extern "pthread" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int;
+pub extern "pthread" fn pthread_self() pthread_t;
pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int;
pub const pthread_t = *@OpaqueType();
diff --git a/std/c/linux.zig b/std/c/linux.zig
index 2699e9bd09..b0dadf071d 100644
--- a/std/c/linux.zig
+++ b/std/c/linux.zig
@@ -8,3 +8,6 @@ pub const pthread_attr_t = extern struct {
__size: [56]u8,
__align: c_long,
};
+
+/// See std.elf for the valid `__type` constants to pass.
+pub extern fn getauxval(__type: c_ulong) c_ulong;
diff --git a/std/cstr.zig b/std/cstr.zig
index e83d5a39e9..a8aaf21279 100644
--- a/std/cstr.zig
+++ b/std/cstr.zig
@@ -9,10 +9,9 @@ pub const line_sep = switch (builtin.os) {
else => "\n",
};
+/// Deprecated, use mem.len
pub fn len(ptr: [*]const u8) usize {
- var count: usize = 0;
- while (ptr[count] != 0) : (count += 1) {}
- return count;
+ return mem.len(u8, ptr);
}
pub fn cmp(a: [*]const u8, b: [*]const u8) i8 {
@@ -27,12 +26,14 @@ pub fn cmp(a: [*]const u8, b: [*]const u8) i8 {
}
}
+/// Deprecated, use mem.toSliceConst
pub fn toSliceConst(str: [*]const u8) []const u8 {
- return str[0..len(str)];
+ return mem.toSliceConst(u8, str);
}
+/// Deprecated, use mem.toSlice
pub fn toSlice(str: [*]u8) []u8 {
- return str[0..len(str)];
+ return mem.toSlice(u8, str);
}
test "cstr fns" {
diff --git a/std/debug/index.zig b/std/debug/index.zig
index ab50d79db3..39c41d4bc1 100644
--- a/std/debug/index.zig
+++ b/std/debug/index.zig
@@ -4,8 +4,8 @@ const mem = std.mem;
const io = std.io;
const os = std.os;
const elf = std.elf;
-const DW = std.dwarf;
const macho = std.macho;
+const DW = std.dwarf;
const ArrayList = std.ArrayList;
const builtin = @import("builtin");
@@ -19,14 +19,19 @@ pub const runtime_safety = switch (builtin.mode) {
/// Tries to write to stderr, unbuffered, and ignores any error returned.
/// Does not append a newline.
-/// TODO atomic/multithread support
var stderr_file: os.File = undefined;
var stderr_file_out_stream: io.FileOutStream = undefined;
+
+/// TODO multithreaded awareness
var stderr_stream: ?*io.OutStream(io.FileOutStream.Error) = null;
+var stderr_mutex = std.Mutex.init();
pub fn warn(comptime fmt: []const u8, args: ...) void {
+ const held = stderr_mutex.acquire();
+ defer held.release();
const stderr = getStderrStream() catch return;
stderr.print(fmt, args) catch return;
}
+
pub fn getStderrStream() !*io.OutStream(io.FileOutStream.Error) {
if (stderr_stream) |st| {
return st;
@@ -39,14 +44,15 @@ pub fn getStderrStream() !*io.OutStream(io.FileOutStream.Error) {
}
}
-var self_debug_info: ?*ElfStackTrace = null;
-pub fn getSelfDebugInfo() !*ElfStackTrace {
- if (self_debug_info) |info| {
+/// TODO multithreaded awareness
+var self_debug_info: ?DebugInfo = null;
+
+pub fn getSelfDebugInfo() !*DebugInfo {
+ if (self_debug_info) |*info| {
return info;
} else {
- const info = try openSelfDebugInfo(getDebugInfoAllocator());
- self_debug_info = info;
- return info;
+ self_debug_info = try openSelfDebugInfo(getDebugInfoAllocator());
+ return &self_debug_info.?;
}
}
@@ -57,6 +63,7 @@ fn wantTtyColor() bool {
}
/// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned.
+/// TODO multithreaded awareness
pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
const stderr = getStderrStream() catch return;
const debug_info = getSelfDebugInfo() catch |err| {
@@ -70,6 +77,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
}
/// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned.
+/// TODO multithreaded awareness
pub fn dumpStackTrace(stack_trace: *const builtin.StackTrace) void {
const stderr = getStderrStream() catch return;
const debug_info = getSelfDebugInfo() catch |err| {
@@ -124,6 +132,7 @@ pub fn panic(comptime format: []const u8, args: ...) noreturn {
panicExtra(null, first_trace_addr, format, args);
}
+/// TODO multithreaded awareness
var panicking: u8 = 0; // TODO make this a bool
pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) noreturn {
@@ -152,7 +161,7 @@ const WHITE = "\x1b[37;1m";
const DIM = "\x1b[2m";
const RESET = "\x1b[0m";
-pub fn writeStackTrace(stack_trace: *const builtin.StackTrace, out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool) !void {
+pub fn writeStackTrace(stack_trace: *const builtin.StackTrace, out_stream: var, allocator: *mem.Allocator, debug_info: *DebugInfo, tty_color: bool) !void {
var frame_index: usize = undefined;
var frames_left: usize = undefined;
if (stack_trace.index < stack_trace.instruction_addresses.len) {
@@ -182,7 +191,7 @@ pub inline fn getReturnAddress(frame_count: usize) usize {
return @intToPtr(*const usize, fp + @sizeOf(usize)).*;
}
-pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool, start_addr: ?usize) !void {
+pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_info: *DebugInfo, tty_color: bool, start_addr: ?usize) !void {
const AddressState = union(enum) {
NotLookingForStartAddress,
LookingForStartAddress: usize,
@@ -215,130 +224,292 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_
}
}
-pub fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: usize, tty_color: bool) !void {
+pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: var, address: usize, tty_color: bool) !void {
switch (builtin.os) {
- builtin.Os.windows => return error.UnsupportedDebugInfo,
- builtin.Os.macosx => {
- // TODO(bnoordhuis) It's theoretically possible to obtain the
- // compilation unit from the symbtab but it's not that useful
- // in practice because the compiler dumps everything in a single
- // object file. Future improvement: use external dSYM data when
- // available.
- const unknown = macho.Symbol{
- .name = "???",
- .address = address,
- };
- const symbol = debug_info.symbol_table.search(address) orelse &unknown;
- try out_stream.print(WHITE ++ "{}" ++ RESET ++ ": " ++ DIM ++ "0x{x}" ++ " in ??? (???)" ++ RESET ++ "\n", symbol.name, address);
+ builtin.Os.macosx => return printSourceAtAddressMacOs(debug_info, out_stream, address, tty_color),
+ builtin.Os.linux => return printSourceAtAddressLinux(debug_info, out_stream, address, tty_color),
+ builtin.Os.windows => {
+ // TODO https://github.com/ziglang/zig/issues/721
+ return error.UnsupportedOperatingSystem;
},
- else => {
- const compile_unit = findCompileUnit(debug_info, address) catch {
- if (tty_color) {
- try out_stream.print("???:?:?: " ++ DIM ++ "0x{x} in ??? (???)" ++ RESET ++ "\n ???\n\n", address);
- } else {
- try out_stream.print("???:?:?: 0x{x} in ??? (???)\n ???\n\n", address);
- }
- return;
- };
- const compile_unit_name = try compile_unit.die.getAttrString(debug_info, DW.AT_name);
- if (getLineNumberInfo(debug_info, compile_unit, address - 1)) |line_info| {
- defer line_info.deinit();
- if (tty_color) {
- try out_stream.print(
- WHITE ++ "{}:{}:{}" ++ RESET ++ ": " ++ DIM ++ "0x{x} in ??? ({})" ++ RESET ++ "\n",
- line_info.file_name,
- line_info.line,
- line_info.column,
- address,
- compile_unit_name,
- );
- if (printLineFromFile(debug_info.allocator(), out_stream, line_info)) {
- if (line_info.column == 0) {
- try out_stream.write("\n");
- } else {
- {
- var col_i: usize = 1;
- while (col_i < line_info.column) : (col_i += 1) {
- try out_stream.writeByte(' ');
- }
- }
- try out_stream.write(GREEN ++ "^" ++ RESET ++ "\n");
- }
- } else |err| switch (err) {
- error.EndOfFile => {},
- else => return err,
- }
- } else {
- try out_stream.print(
- "{}:{}:{}: 0x{x} in ??? ({})\n",
- line_info.file_name,
- line_info.line,
- line_info.column,
- address,
- compile_unit_name,
- );
- }
- } else |err| switch (err) {
- error.MissingDebugInfo, error.InvalidDebugInfo => {
- try out_stream.print("0x{x} in ??? ({})\n", address, compile_unit_name);
- },
- else => return err,
+ else => return error.UnsupportedOperatingSystem,
+ }
+}
+
+fn machoSearchSymbols(symbols: []const MachoSymbol, address: usize) ?*const MachoSymbol {
+ var min: usize = 0;
+ var max: usize = symbols.len - 1; // Exclude sentinel.
+ while (min < max) {
+ const mid = min + (max - min) / 2;
+ const curr = &symbols[mid];
+ const next = &symbols[mid + 1];
+ if (address >= next.address()) {
+ min = mid + 1;
+ } else if (address < curr.address()) {
+ max = mid;
+ } else {
+ return curr;
+ }
+ }
+ return null;
+}
+
+fn printSourceAtAddressMacOs(di: *DebugInfo, out_stream: var, address: usize, tty_color: bool) !void {
+ const base_addr = @ptrToInt(&std.c._mh_execute_header);
+ const adjusted_addr = 0x100000000 + (address - base_addr);
+
+ const symbol = machoSearchSymbols(di.symbols, adjusted_addr) orelse {
+ if (tty_color) {
+ try out_stream.print("???:?:?: " ++ DIM ++ "0x{x} in ??? (???)" ++ RESET ++ "\n\n\n", address);
+ } else {
+ try out_stream.print("???:?:?: 0x{x} in ??? (???)\n\n\n", address);
+ }
+ return;
+ };
+
+ const symbol_name = mem.toSliceConst(u8, di.strings.ptr + symbol.nlist.n_strx);
+ const compile_unit_name = if (symbol.ofile) |ofile| blk: {
+ const ofile_path = mem.toSliceConst(u8, di.strings.ptr + ofile.n_strx);
+ break :blk os.path.basename(ofile_path);
+ } else "???";
+ if (getLineNumberInfoMacOs(di, symbol.*, adjusted_addr)) |line_info| {
+ defer line_info.deinit();
+ try printLineInfo(di, out_stream, line_info, address, symbol_name, compile_unit_name, tty_color);
+ } else |err| switch (err) {
+ error.MissingDebugInfo, error.InvalidDebugInfo => {
+ if (tty_color) {
+ try out_stream.print("???:?:?: " ++ DIM ++ "0x{x} in {} ({})" ++ RESET ++ "\n\n\n", address, symbol_name, compile_unit_name);
+ } else {
+ try out_stream.print("???:?:?: 0x{x} in {} ({})\n\n\n", address, symbol_name, compile_unit_name);
}
},
+ else => return err,
}
}
-pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
- switch (builtin.object_format) {
- builtin.ObjectFormat.elf => {
- const st = try allocator.create(ElfStackTrace{
- .self_exe_file = undefined,
- .elf = undefined,
- .debug_info = undefined,
- .debug_abbrev = undefined,
- .debug_str = undefined,
- .debug_line = undefined,
- .debug_ranges = null,
- .abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator),
- .compile_unit_list = ArrayList(CompileUnit).init(allocator),
- });
- errdefer allocator.destroy(st);
- st.self_exe_file = try os.openSelfExe();
- errdefer st.self_exe_file.close();
-
- try st.elf.openFile(allocator, &st.self_exe_file);
- errdefer st.elf.close();
-
- st.debug_info = (try st.elf.findSection(".debug_info")) orelse return error.MissingDebugInfo;
- st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) orelse return error.MissingDebugInfo;
- st.debug_str = (try st.elf.findSection(".debug_str")) orelse return error.MissingDebugInfo;
- st.debug_line = (try st.elf.findSection(".debug_line")) orelse return error.MissingDebugInfo;
- st.debug_ranges = (try st.elf.findSection(".debug_ranges"));
- try scanAllCompileUnits(st);
- return st;
- },
- builtin.ObjectFormat.macho => {
- var exe_file = try os.openSelfExe();
- defer exe_file.close();
-
- const st = try allocator.create(ElfStackTrace{ .symbol_table = try macho.loadSymbols(allocator, &io.FileInStream.init(&exe_file)) });
- errdefer allocator.destroy(st);
- return st;
- },
- builtin.ObjectFormat.coff => {
- return error.TodoSupportCoffDebugInfo;
- },
- builtin.ObjectFormat.wasm => {
- return error.TodoSupportCOFFDebugInfo;
- },
- builtin.ObjectFormat.unknown => {
- return error.UnknownObjectFormat;
+pub fn printSourceAtAddressLinux(debug_info: *DebugInfo, out_stream: var, address: usize, tty_color: bool) !void {
+ const compile_unit = findCompileUnit(debug_info, address) catch {
+ if (tty_color) {
+ try out_stream.print("???:?:?: " ++ DIM ++ "0x{x} in ??? (???)" ++ RESET ++ "\n\n\n", address);
+ } else {
+ try out_stream.print("???:?:?: 0x{x} in ??? (???)\n\n\n", address);
+ }
+ return;
+ };
+ const compile_unit_name = try compile_unit.die.getAttrString(debug_info, DW.AT_name);
+ if (getLineNumberInfoLinux(debug_info, compile_unit, address - 1)) |line_info| {
+ defer line_info.deinit();
+ const symbol_name = "???";
+ try printLineInfo(debug_info, out_stream, line_info, address, symbol_name, compile_unit_name, tty_color);
+ } else |err| switch (err) {
+ error.MissingDebugInfo, error.InvalidDebugInfo => {
+ if (tty_color) {
+ try out_stream.print("???:?:?: " ++ DIM ++ "0x{x} in ??? ({})" ++ RESET ++ "\n\n\n", address, compile_unit_name);
+ } else {
+ try out_stream.print("???:?:?: 0x{x} in ??? ({})\n\n\n", address, compile_unit_name);
+ }
},
+ else => return err,
}
}
-fn printLineFromFile(allocator: *mem.Allocator, out_stream: var, line_info: *const LineInfo) !void {
- var f = try os.File.openRead(allocator, line_info.file_name);
+fn printLineInfo(
+ debug_info: *DebugInfo,
+ out_stream: var,
+ line_info: LineInfo,
+ address: usize,
+ symbol_name: []const u8,
+ compile_unit_name: []const u8,
+ tty_color: bool,
+) !void {
+ if (tty_color) {
+ try out_stream.print(
+ WHITE ++ "{}:{}:{}" ++ RESET ++ ": " ++ DIM ++ "0x{x} in {} ({})" ++ RESET ++ "\n",
+ line_info.file_name,
+ line_info.line,
+ line_info.column,
+ address,
+ symbol_name,
+ compile_unit_name,
+ );
+ if (printLineFromFile(out_stream, line_info)) {
+ if (line_info.column == 0) {
+ try out_stream.write("\n");
+ } else {
+ {
+ var col_i: usize = 1;
+ while (col_i < line_info.column) : (col_i += 1) {
+ try out_stream.writeByte(' ');
+ }
+ }
+ try out_stream.write(GREEN ++ "^" ++ RESET ++ "\n");
+ }
+ } else |err| switch (err) {
+ error.EndOfFile => {},
+ else => return err,
+ }
+ } else {
+ try out_stream.print(
+ "{}:{}:{}: 0x{x} in {} ({})\n",
+ line_info.file_name,
+ line_info.line,
+ line_info.column,
+ address,
+ symbol_name,
+ compile_unit_name,
+ );
+ }
+}
+
+// TODO use this as the explicit error set of openSelfDebugInfo
+pub const OpenSelfDebugInfoError = error{
+ MissingDebugInfo,
+ OutOfMemory,
+ UnsupportedOperatingSystem,
+};
+
+pub fn openSelfDebugInfo(allocator: *mem.Allocator) !DebugInfo {
+ switch (builtin.os) {
+ builtin.Os.linux => return openSelfDebugInfoLinux(allocator),
+ builtin.Os.macosx, builtin.Os.ios => return openSelfDebugInfoMacOs(allocator),
+ builtin.Os.windows => {
+ // TODO: https://github.com/ziglang/zig/issues/721
+ return error.UnsupportedOperatingSystem;
+ },
+ else => return error.UnsupportedOperatingSystem,
+ }
+}
+
+fn openSelfDebugInfoLinux(allocator: *mem.Allocator) !DebugInfo {
+ var di = DebugInfo{
+ .self_exe_file = undefined,
+ .elf = undefined,
+ .debug_info = undefined,
+ .debug_abbrev = undefined,
+ .debug_str = undefined,
+ .debug_line = undefined,
+ .debug_ranges = null,
+ .abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator),
+ .compile_unit_list = ArrayList(CompileUnit).init(allocator),
+ };
+ di.self_exe_file = try os.openSelfExe();
+ errdefer di.self_exe_file.close();
+
+ try di.elf.openFile(allocator, &di.self_exe_file);
+ errdefer di.elf.close();
+
+ di.debug_info = (try di.elf.findSection(".debug_info")) orelse return error.MissingDebugInfo;
+ di.debug_abbrev = (try di.elf.findSection(".debug_abbrev")) orelse return error.MissingDebugInfo;
+ di.debug_str = (try di.elf.findSection(".debug_str")) orelse return error.MissingDebugInfo;
+ di.debug_line = (try di.elf.findSection(".debug_line")) orelse return error.MissingDebugInfo;
+ di.debug_ranges = (try di.elf.findSection(".debug_ranges"));
+ try scanAllCompileUnits(&di);
+ return di;
+}
+
+pub fn findElfSection(elf: *Elf, name: []const u8) ?*elf.Shdr {
+ var file_stream = io.FileInStream.init(elf.in_file);
+ const in = &file_stream.stream;
+
+ section_loop: for (elf.section_headers) |*elf_section| {
+ if (elf_section.sh_type == SHT_NULL) continue;
+
+ const name_offset = elf.string_section.offset + elf_section.name;
+ try elf.in_file.seekTo(name_offset);
+
+ for (name) |expected_c| {
+ const target_c = try in.readByte();
+ if (target_c == 0 or expected_c != target_c) continue :section_loop;
+ }
+
+ {
+ const null_byte = try in.readByte();
+ if (null_byte == 0) return elf_section;
+ }
+ }
+
+ return null;
+}
+
+fn openSelfDebugInfoMacOs(allocator: *mem.Allocator) !DebugInfo {
+ const hdr = &std.c._mh_execute_header;
+ assert(hdr.magic == std.macho.MH_MAGIC_64);
+
+ const hdr_base = @ptrCast([*]u8, hdr);
+ var ptr = hdr_base + @sizeOf(macho.mach_header_64);
+ var ncmd: u32 = hdr.ncmds;
+ const symtab = while (ncmd != 0) : (ncmd -= 1) {
+ const lc = @ptrCast(*std.macho.load_command, ptr);
+ switch (lc.cmd) {
+ std.macho.LC_SYMTAB => break @ptrCast(*std.macho.symtab_command, ptr),
+ else => {},
+ }
+ ptr += lc.cmdsize; // TODO https://github.com/ziglang/zig/issues/1403
+ } else {
+ return error.MissingDebugInfo;
+ };
+ const syms = @ptrCast([*]macho.nlist_64, hdr_base + symtab.symoff)[0..symtab.nsyms];
+ const strings = @ptrCast([*]u8, hdr_base + symtab.stroff)[0..symtab.strsize];
+
+ const symbols_buf = try allocator.alloc(MachoSymbol, syms.len);
+
+ var ofile: ?*macho.nlist_64 = null;
+ var reloc: u64 = 0;
+ var symbol_index: usize = 0;
+ var last_len: u64 = 0;
+ for (syms) |*sym| {
+ if (sym.n_type & std.macho.N_STAB != 0) {
+ switch (sym.n_type) {
+ std.macho.N_OSO => {
+ ofile = sym;
+ reloc = 0;
+ },
+ std.macho.N_FUN => {
+ if (sym.n_sect == 0) {
+ last_len = sym.n_value;
+ } else {
+ symbols_buf[symbol_index] = MachoSymbol{
+ .nlist = sym,
+ .ofile = ofile,
+ .reloc = reloc,
+ };
+ symbol_index += 1;
+ }
+ },
+ std.macho.N_BNSYM => {
+ if (reloc == 0) {
+ reloc = sym.n_value;
+ }
+ },
+ else => continue,
+ }
+ }
+ }
+ const sentinel = try allocator.createOne(macho.nlist_64);
+ sentinel.* = macho.nlist_64{
+ .n_strx = 0,
+ .n_type = 36,
+ .n_sect = 0,
+ .n_desc = 0,
+ .n_value = symbols_buf[symbol_index - 1].nlist.n_value + last_len,
+ };
+
+ const symbols = allocator.shrink(MachoSymbol, symbols_buf, symbol_index);
+
+ // Even though lld emits symbols in ascending order, this debug code
+ // should work for programs linked in any valid way.
+ // This sort is so that we can binary search later.
+ std.sort.sort(MachoSymbol, symbols, MachoSymbol.addressLessThan);
+
+ return DebugInfo{
+ .ofiles = DebugInfo.OFileTable.init(allocator),
+ .symbols = symbols,
+ .strings = strings,
+ };
+}
+
+fn printLineFromFile(out_stream: var, line_info: *const LineInfo) !void {
+ var f = try os.File.openRead(line_info.file_name);
defer f.close();
// TODO fstat and make sure that the file has the correct size
@@ -369,12 +540,42 @@ fn printLineFromFile(allocator: *mem.Allocator, out_stream: var, line_info: *con
}
}
-pub const ElfStackTrace = switch (builtin.os) {
- builtin.Os.macosx => struct {
- symbol_table: macho.SymbolTable,
+const MachoSymbol = struct {
+ nlist: *macho.nlist_64,
+ ofile: ?*macho.nlist_64,
+ reloc: u64,
- pub fn close(self: *ElfStackTrace) void {
- self.symbol_table.deinit();
+ /// Returns the address from the macho file
+ fn address(self: MachoSymbol) u64 {
+ return self.nlist.n_value;
+ }
+
+ fn addressLessThan(lhs: MachoSymbol, rhs: MachoSymbol) bool {
+ return lhs.address() < rhs.address();
+ }
+};
+
+const MachOFile = struct {
+ bytes: []align(@alignOf(macho.mach_header_64)) const u8,
+ sect_debug_info: ?*const macho.section_64,
+ sect_debug_line: ?*const macho.section_64,
+};
+
+pub const DebugInfo = switch (builtin.os) {
+ builtin.Os.macosx => struct {
+ symbols: []const MachoSymbol,
+ strings: []const u8,
+ ofiles: OFileTable,
+
+ const OFileTable = std.HashMap(
+ *macho.nlist_64,
+ MachOFile,
+ std.hash_map.getHashPtrAddrFn(*macho.nlist_64),
+ std.hash_map.getTrivialEqlFn(*macho.nlist_64),
+ );
+
+ pub fn allocator(self: DebugInfo) *mem.Allocator {
+ return self.ofiles.allocator;
}
},
else => struct {
@@ -388,17 +589,17 @@ pub const ElfStackTrace = switch (builtin.os) {
abbrev_table_list: ArrayList(AbbrevTableHeader),
compile_unit_list: ArrayList(CompileUnit),
- pub fn allocator(self: *const ElfStackTrace) *mem.Allocator {
+ pub fn allocator(self: DebugInfo) *mem.Allocator {
return self.abbrev_table_list.allocator;
}
- pub fn readString(self: *ElfStackTrace) ![]u8 {
+ pub fn readString(self: *DebugInfo) ![]u8 {
var in_file_stream = io.FileInStream.init(&self.self_exe_file);
const in_stream = &in_file_stream.stream;
return readStringRaw(self.allocator(), in_stream);
}
- pub fn close(self: *ElfStackTrace) void {
+ pub fn close(self: *DebugInfo) void {
self.self_exe_file.close();
self.elf.close();
}
@@ -505,7 +706,7 @@ const Die = struct {
};
}
- fn getAttrString(self: *const Die, st: *ElfStackTrace, id: u64) ![]u8 {
+ fn getAttrString(self: *const Die, st: *DebugInfo, id: u64) ![]u8 {
const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.String => |value| value,
@@ -620,7 +821,7 @@ fn readStringRaw(allocator: *mem.Allocator, in_stream: var) ![]u8 {
return buf.toSlice();
}
-fn getString(st: *ElfStackTrace, offset: u64) ![]u8 {
+fn getString(st: *DebugInfo, offset: u64) ![]u8 {
const pos = st.debug_str.offset + offset;
try st.self_exe_file.seekTo(pos);
return st.readString();
@@ -672,14 +873,10 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, comptime T: type
const ParseFormValueError = error{
EndOfStream,
- Io,
- BadFd,
- Unexpected,
InvalidDebugInfo,
EndOfFile,
- IsDir,
OutOfMemory,
-};
+} || std.os.File.ReadError;
fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue {
return switch (form_id) {
@@ -731,7 +928,7 @@ fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64
};
}
-fn parseAbbrevTable(st: *ElfStackTrace) !AbbrevTable {
+fn parseAbbrevTable(st: *DebugInfo) !AbbrevTable {
const in_file = &st.self_exe_file;
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
@@ -761,7 +958,7 @@ fn parseAbbrevTable(st: *ElfStackTrace) !AbbrevTable {
/// Gets an already existing AbbrevTable given the abbrev_offset, or if not found,
/// seeks in the stream and parses it.
-fn getAbbrevTable(st: *ElfStackTrace, abbrev_offset: u64) !*const AbbrevTable {
+fn getAbbrevTable(st: *DebugInfo, abbrev_offset: u64) !*const AbbrevTable {
for (st.abbrev_table_list.toSlice()) |*header| {
if (header.offset == abbrev_offset) {
return &header.table;
@@ -782,7 +979,7 @@ fn getAbbrevTableEntry(abbrev_table: *const AbbrevTable, abbrev_code: u64) ?*con
return null;
}
-fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) !Die {
+fn parseDie(st: *DebugInfo, abbrev_table: *const AbbrevTable, is_64: bool) !Die {
const in_file = &st.self_exe_file;
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
@@ -804,12 +1001,210 @@ fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) !
return result;
}
-fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, target_address: usize) !LineInfo {
- const compile_unit_cwd = try compile_unit.die.getAttrString(st, DW.AT_comp_dir);
+fn getLineNumberInfoMacOs(di: *DebugInfo, symbol: MachoSymbol, target_address: usize) !LineInfo {
+ const ofile = symbol.ofile orelse return error.MissingDebugInfo;
+ const gop = try di.ofiles.getOrPut(ofile);
+ const mach_o_file = if (gop.found_existing) &gop.kv.value else blk: {
+ errdefer _ = di.ofiles.remove(ofile);
+ const ofile_path = mem.toSliceConst(u8, di.strings.ptr + ofile.n_strx);
- const in_file = &st.self_exe_file;
- const debug_line_end = st.debug_line.offset + st.debug_line.size;
- var this_offset = st.debug_line.offset;
+ gop.kv.value = MachOFile{
+ .bytes = try std.io.readFileAllocAligned(di.ofiles.allocator, ofile_path, @alignOf(macho.mach_header_64)),
+ .sect_debug_info = null,
+ .sect_debug_line = null,
+ };
+ const hdr = @ptrCast(*const macho.mach_header_64, gop.kv.value.bytes.ptr);
+ if (hdr.magic != std.macho.MH_MAGIC_64) return error.InvalidDebugInfo;
+
+ const hdr_base = @ptrCast([*]const u8, hdr);
+ var ptr = hdr_base + @sizeOf(macho.mach_header_64);
+ var ncmd: u32 = hdr.ncmds;
+ const segcmd = while (ncmd != 0) : (ncmd -= 1) {
+ const lc = @ptrCast(*const std.macho.load_command, ptr);
+ switch (lc.cmd) {
+ std.macho.LC_SEGMENT_64 => break @ptrCast(*const std.macho.segment_command_64, ptr),
+ else => {},
+ }
+ ptr += lc.cmdsize; // TODO https://github.com/ziglang/zig/issues/1403
+ } else {
+ return error.MissingDebugInfo;
+ };
+ const sections = @alignCast(@alignOf(macho.section_64), @ptrCast([*]const macho.section_64, ptr + @sizeOf(std.macho.segment_command_64)))[0..segcmd.nsects];
+ for (sections) |*sect| {
+ if (sect.flags & macho.SECTION_TYPE == macho.S_REGULAR and
+ (sect.flags & macho.SECTION_ATTRIBUTES) & macho.S_ATTR_DEBUG == macho.S_ATTR_DEBUG)
+ {
+ const sect_name = mem.toSliceConst(u8, §.sectname);
+ if (mem.eql(u8, sect_name, "__debug_line")) {
+ gop.kv.value.sect_debug_line = sect;
+ } else if (mem.eql(u8, sect_name, "__debug_info")) {
+ gop.kv.value.sect_debug_info = sect;
+ }
+ }
+ }
+
+ break :blk &gop.kv.value;
+ };
+
+ const sect_debug_line = mach_o_file.sect_debug_line orelse return error.MissingDebugInfo;
+ var ptr = mach_o_file.bytes.ptr + sect_debug_line.offset;
+
+ var is_64: bool = undefined;
+ const unit_length = try readInitialLengthMem(&ptr, &is_64);
+ if (unit_length == 0) return error.MissingDebugInfo;
+
+ const version = readIntMem(&ptr, u16, builtin.Endian.Little);
+ // TODO support 3 and 5
+ if (version != 2 and version != 4) return error.InvalidDebugInfo;
+
+ const prologue_length = if (is_64)
+ readIntMem(&ptr, u64, builtin.Endian.Little)
+ else
+ readIntMem(&ptr, u32, builtin.Endian.Little);
+ const prog_start = ptr + prologue_length;
+
+ const minimum_instruction_length = readByteMem(&ptr);
+ if (minimum_instruction_length == 0) return error.InvalidDebugInfo;
+
+ if (version >= 4) {
+ // maximum_operations_per_instruction
+ ptr += 1;
+ }
+
+ const default_is_stmt = readByteMem(&ptr) != 0;
+ const line_base = readByteSignedMem(&ptr);
+
+ const line_range = readByteMem(&ptr);
+ if (line_range == 0) return error.InvalidDebugInfo;
+
+ const opcode_base = readByteMem(&ptr);
+
+ const standard_opcode_lengths = ptr[0 .. opcode_base - 1];
+ ptr += opcode_base - 1;
+
+ var include_directories = ArrayList([]const u8).init(di.allocator());
+ try include_directories.append("");
+ while (true) {
+ const dir = readStringMem(&ptr);
+ if (dir.len == 0) break;
+ try include_directories.append(dir);
+ }
+
+ var file_entries = ArrayList(FileEntry).init(di.allocator());
+ var prog = LineNumberProgram.init(default_is_stmt, include_directories.toSliceConst(), &file_entries, target_address);
+
+ while (true) {
+ const file_name = readStringMem(&ptr);
+ if (file_name.len == 0) break;
+ const dir_index = try readULeb128Mem(&ptr);
+ const mtime = try readULeb128Mem(&ptr);
+ const len_bytes = try readULeb128Mem(&ptr);
+ try file_entries.append(FileEntry{
+ .file_name = file_name,
+ .dir_index = dir_index,
+ .mtime = mtime,
+ .len_bytes = len_bytes,
+ });
+ }
+
+ ptr = prog_start;
+ while (true) {
+ const opcode = readByteMem(&ptr);
+
+ if (opcode == DW.LNS_extended_op) {
+ const op_size = try readULeb128Mem(&ptr);
+ if (op_size < 1) return error.InvalidDebugInfo;
+ var sub_op = readByteMem(&ptr);
+ switch (sub_op) {
+ DW.LNE_end_sequence => {
+ prog.end_sequence = true;
+ if (try prog.checkLineMatch()) |info| return info;
+ return error.MissingDebugInfo;
+ },
+ DW.LNE_set_address => {
+ const addr = readIntMem(&ptr, usize, builtin.Endian.Little);
+ prog.address = symbol.reloc + addr;
+ },
+ DW.LNE_define_file => {
+ const file_name = readStringMem(&ptr);
+ const dir_index = try readULeb128Mem(&ptr);
+ const mtime = try readULeb128Mem(&ptr);
+ const len_bytes = try readULeb128Mem(&ptr);
+ try file_entries.append(FileEntry{
+ .file_name = file_name,
+ .dir_index = dir_index,
+ .mtime = mtime,
+ .len_bytes = len_bytes,
+ });
+ },
+ else => {
+ ptr += op_size - 1;
+ },
+ }
+ } else if (opcode >= opcode_base) {
+ // special opcodes
+ const adjusted_opcode = opcode - opcode_base;
+ const inc_addr = minimum_instruction_length * (adjusted_opcode / line_range);
+ const inc_line = i32(line_base) + i32(adjusted_opcode % line_range);
+ prog.line += inc_line;
+ prog.address += inc_addr;
+ if (try prog.checkLineMatch()) |info| return info;
+ prog.basic_block = false;
+ } else {
+ switch (opcode) {
+ DW.LNS_copy => {
+ if (try prog.checkLineMatch()) |info| return info;
+ prog.basic_block = false;
+ },
+ DW.LNS_advance_pc => {
+ const arg = try readULeb128Mem(&ptr);
+ prog.address += arg * minimum_instruction_length;
+ },
+ DW.LNS_advance_line => {
+ const arg = try readILeb128Mem(&ptr);
+ prog.line += arg;
+ },
+ DW.LNS_set_file => {
+ const arg = try readULeb128Mem(&ptr);
+ prog.file = arg;
+ },
+ DW.LNS_set_column => {
+ const arg = try readULeb128Mem(&ptr);
+ prog.column = arg;
+ },
+ DW.LNS_negate_stmt => {
+ prog.is_stmt = !prog.is_stmt;
+ },
+ DW.LNS_set_basic_block => {
+ prog.basic_block = true;
+ },
+ DW.LNS_const_add_pc => {
+ const inc_addr = minimum_instruction_length * ((255 - opcode_base) / line_range);
+ prog.address += inc_addr;
+ },
+ DW.LNS_fixed_advance_pc => {
+ const arg = readIntMem(&ptr, u16, builtin.Endian.Little);
+ prog.address += arg;
+ },
+ DW.LNS_set_prologue_end => {},
+ else => {
+ if (opcode - 1 >= standard_opcode_lengths.len) return error.InvalidDebugInfo;
+ const len_bytes = standard_opcode_lengths[opcode - 1];
+ ptr += len_bytes;
+ },
+ }
+ }
+ }
+
+ return error.MissingDebugInfo;
+}
+
+fn getLineNumberInfoLinux(di: *DebugInfo, compile_unit: *const CompileUnit, target_address: usize) !LineInfo {
+ const compile_unit_cwd = try compile_unit.die.getAttrString(di, DW.AT_comp_dir);
+
+ const in_file = &di.self_exe_file;
+ const debug_line_end = di.debug_line.offset + di.debug_line.size;
+ var this_offset = di.debug_line.offset;
var this_index: usize = 0;
var in_file_stream = io.FileInStream.init(in_file);
@@ -828,11 +1223,11 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
continue;
}
- const version = try in_stream.readInt(st.elf.endian, u16);
+ const version = try in_stream.readInt(di.elf.endian, u16);
// TODO support 3 and 5
if (version != 2 and version != 4) return error.InvalidDebugInfo;
- const prologue_length = if (is_64) try in_stream.readInt(st.elf.endian, u64) else try in_stream.readInt(st.elf.endian, u32);
+ const prologue_length = if (is_64) try in_stream.readInt(di.elf.endian, u64) else try in_stream.readInt(di.elf.endian, u32);
const prog_start_offset = (try in_file.getPos()) + prologue_length;
const minimum_instruction_length = try in_stream.readByte();
@@ -851,7 +1246,7 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
const opcode_base = try in_stream.readByte();
- const standard_opcode_lengths = try st.allocator().alloc(u8, opcode_base - 1);
+ const standard_opcode_lengths = try di.allocator().alloc(u8, opcode_base - 1);
{
var i: usize = 0;
@@ -860,19 +1255,19 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
}
}
- var include_directories = ArrayList([]u8).init(st.allocator());
+ var include_directories = ArrayList([]u8).init(di.allocator());
try include_directories.append(compile_unit_cwd);
while (true) {
- const dir = try st.readString();
+ const dir = try di.readString();
if (dir.len == 0) break;
try include_directories.append(dir);
}
- var file_entries = ArrayList(FileEntry).init(st.allocator());
+ var file_entries = ArrayList(FileEntry).init(di.allocator());
var prog = LineNumberProgram.init(default_is_stmt, include_directories.toSliceConst(), &file_entries, target_address);
while (true) {
- const file_name = try st.readString();
+ const file_name = try di.readString();
if (file_name.len == 0) break;
const dir_index = try readULeb128(in_stream);
const mtime = try readULeb128(in_stream);
@@ -890,11 +1285,10 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
while (true) {
const opcode = try in_stream.readByte();
- var sub_op: u8 = undefined; // TODO move this to the correct scope and fix the compiler crash
if (opcode == DW.LNS_extended_op) {
const op_size = try readULeb128(in_stream);
if (op_size < 1) return error.InvalidDebugInfo;
- sub_op = try in_stream.readByte();
+ var sub_op = try in_stream.readByte();
switch (sub_op) {
DW.LNE_end_sequence => {
prog.end_sequence = true;
@@ -902,11 +1296,11 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
return error.MissingDebugInfo;
},
DW.LNE_set_address => {
- const addr = try in_stream.readInt(st.elf.endian, usize);
+ const addr = try in_stream.readInt(di.elf.endian, usize);
prog.address = addr;
},
DW.LNE_define_file => {
- const file_name = try st.readString();
+ const file_name = try di.readString();
const dir_index = try readULeb128(in_stream);
const mtime = try readULeb128(in_stream);
const len_bytes = try readULeb128(in_stream);
@@ -964,7 +1358,7 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
prog.address += inc_addr;
},
DW.LNS_fixed_advance_pc => {
- const arg = try in_stream.readInt(st.elf.endian, u16);
+ const arg = try in_stream.readInt(di.elf.endian, u16);
prog.address += arg;
},
DW.LNS_set_prologue_end => {},
@@ -983,7 +1377,7 @@ fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, targe
return error.MissingDebugInfo;
}
-fn scanAllCompileUnits(st: *ElfStackTrace) !void {
+fn scanAllCompileUnits(st: *DebugInfo) !void {
const debug_info_end = st.debug_info.offset + st.debug_info.size;
var this_unit_offset = st.debug_info.offset;
var cu_index: usize = 0;
@@ -1053,7 +1447,7 @@ fn scanAllCompileUnits(st: *ElfStackTrace) !void {
}
}
-fn findCompileUnit(st: *ElfStackTrace, target_address: u64) !*const CompileUnit {
+fn findCompileUnit(st: *DebugInfo, target_address: u64) !*const CompileUnit {
var in_file_stream = io.FileInStream.init(&st.self_exe_file);
const in_stream = &in_file_stream.stream;
for (st.compile_unit_list.toSlice()) |*compile_unit| {
@@ -1087,6 +1481,89 @@ fn findCompileUnit(st: *ElfStackTrace, target_address: u64) !*const CompileUnit
return error.MissingDebugInfo;
}
+fn readIntMem(ptr: *[*]const u8, comptime T: type, endian: builtin.Endian) T {
+ const result = mem.readInt(ptr.*[0..@sizeOf(T)], T, endian);
+ ptr.* += @sizeOf(T);
+ return result;
+}
+
+fn readByteMem(ptr: *[*]const u8) u8 {
+ const result = ptr.*[0];
+ ptr.* += 1;
+ return result;
+}
+
+fn readByteSignedMem(ptr: *[*]const u8) i8 {
+ return @bitCast(i8, readByteMem(ptr));
+}
+
+fn readInitialLengthMem(ptr: *[*]const u8, is_64: *bool) !u64 {
+ const first_32_bits = mem.readIntLE(u32, ptr.*[0..4]);
+ is_64.* = (first_32_bits == 0xffffffff);
+ if (is_64.*) {
+ ptr.* += 4;
+ const result = mem.readIntLE(u64, ptr.*[0..8]);
+ ptr.* += 8;
+ return result;
+ } else {
+ if (first_32_bits >= 0xfffffff0) return error.InvalidDebugInfo;
+ ptr.* += 4;
+ return u64(first_32_bits);
+ }
+}
+
+fn readStringMem(ptr: *[*]const u8) []const u8 {
+ const result = mem.toSliceConst(u8, ptr.*);
+ ptr.* += result.len + 1;
+ return result;
+}
+
+fn readULeb128Mem(ptr: *[*]const u8) !u64 {
+ var result: u64 = 0;
+ var shift: usize = 0;
+ var i: usize = 0;
+
+ while (true) {
+ const byte = ptr.*[i];
+ i += 1;
+
+ var operand: u64 = undefined;
+
+ if (@shlWithOverflow(u64, byte & 0b01111111, @intCast(u6, shift), &operand)) return error.InvalidDebugInfo;
+
+ result |= operand;
+
+ if ((byte & 0b10000000) == 0) {
+ ptr.* += i;
+ return result;
+ }
+
+ shift += 7;
+ }
+}
+fn readILeb128Mem(ptr: *[*]const u8) !i64 {
+ var result: i64 = 0;
+ var shift: usize = 0;
+ var i: usize = 0;
+
+ while (true) {
+ const byte = ptr.*[i];
+ i += 1;
+
+ var operand: i64 = undefined;
+ if (@shlWithOverflow(i64, byte & 0b01111111, @intCast(u6, shift), &operand)) return error.InvalidDebugInfo;
+
+ result |= operand;
+ shift += 7;
+
+ if ((byte & 0b10000000) == 0) {
+ if (shift < @sizeOf(i64) * 8 and (byte & 0b01000000) != 0) result |= -(i64(1) << @intCast(u6, shift));
+ ptr.* += i;
+ return result;
+ }
+ }
+}
+
fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool) !u64 {
const first_32_bits = try in_stream.readIntLe(u32);
is_64.* = (first_32_bits == 0xffffffff);
@@ -1143,7 +1620,7 @@ pub const global_allocator = &global_fixed_allocator.allocator;
var global_fixed_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(global_allocator_mem[0..]);
var global_allocator_mem: [100 * 1024]u8 = undefined;
-// TODO make thread safe
+/// TODO multithreaded awareness
var debug_info_allocator: ?*mem.Allocator = null;
var debug_info_direct_allocator: std.heap.DirectAllocator = undefined;
var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
diff --git a/std/elf.zig b/std/elf.zig
index 8e6445c631..3d81555319 100644
--- a/std/elf.zig
+++ b/std/elf.zig
@@ -869,6 +869,11 @@ pub const Phdr = switch (@sizeOf(usize)) {
8 => Elf64_Phdr,
else => @compileError("expected pointer size of 32 or 64"),
};
+pub const Shdr = switch (@sizeOf(usize)) {
+ 4 => Elf32_Shdr,
+ 8 => Elf64_Shdr,
+ else => @compileError("expected pointer size of 32 or 64"),
+};
pub const Sym = switch (@sizeOf(usize)) {
4 => Elf32_Sym,
8 => Elf64_Sym,
diff --git a/std/event.zig b/std/event.zig
index 1e52086286..bd3262a575 100644
--- a/std/event.zig
+++ b/std/event.zig
@@ -1,17 +1,23 @@
-pub const Locked = @import("event/locked.zig").Locked;
-pub const Loop = @import("event/loop.zig").Loop;
-pub const Lock = @import("event/lock.zig").Lock;
-pub const tcp = @import("event/tcp.zig");
pub const Channel = @import("event/channel.zig").Channel;
-pub const Group = @import("event/group.zig").Group;
pub const Future = @import("event/future.zig").Future;
+pub const Group = @import("event/group.zig").Group;
+pub const Lock = @import("event/lock.zig").Lock;
+pub const Locked = @import("event/locked.zig").Locked;
+pub const RwLock = @import("event/rwlock.zig").RwLock;
+pub const RwLocked = @import("event/rwlocked.zig").RwLocked;
+pub const Loop = @import("event/loop.zig").Loop;
+pub const fs = @import("event/fs.zig");
+pub const tcp = @import("event/tcp.zig");
test "import event tests" {
- _ = @import("event/locked.zig");
- _ = @import("event/loop.zig");
- _ = @import("event/lock.zig");
- _ = @import("event/tcp.zig");
_ = @import("event/channel.zig");
- _ = @import("event/group.zig");
+ _ = @import("event/fs.zig");
_ = @import("event/future.zig");
+ _ = @import("event/group.zig");
+ _ = @import("event/lock.zig");
+ _ = @import("event/locked.zig");
+ _ = @import("event/rwlock.zig");
+ _ = @import("event/rwlocked.zig");
+ _ = @import("event/loop.zig");
+ _ = @import("event/tcp.zig");
}
diff --git a/std/event/channel.zig b/std/event/channel.zig
index 71e97f6e78..9ea75a2dd8 100644
--- a/std/event/channel.zig
+++ b/std/event/channel.zig
@@ -5,7 +5,7 @@ const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
const Loop = std.event.Loop;
-/// many producer, many consumer, thread-safe, lock-free, runtime configurable buffer size
+/// many producer, many consumer, thread-safe, runtime configurable buffer size
/// when buffer is empty, consumers suspend and are resumed by producers
/// when buffer is full, producers suspend and are resumed by consumers
pub fn Channel(comptime T: type) type {
@@ -13,6 +13,7 @@ pub fn Channel(comptime T: type) type {
loop: *Loop,
getters: std.atomic.Queue(GetNode),
+ or_null_queue: std.atomic.Queue(*std.atomic.Queue(GetNode).Node),
putters: std.atomic.Queue(PutNode),
get_count: usize,
put_count: usize,
@@ -26,8 +27,22 @@ pub fn Channel(comptime T: type) type {
const SelfChannel = this;
const GetNode = struct {
- ptr: *T,
tick_node: *Loop.NextTickNode,
+ data: Data,
+
+ const Data = union(enum) {
+ Normal: Normal,
+ OrNull: OrNull,
+ };
+
+ const Normal = struct {
+ ptr: *T,
+ };
+
+ const OrNull = struct {
+ ptr: *?T,
+ or_null: *std.atomic.Queue(*std.atomic.Queue(GetNode).Node).Node,
+ };
};
const PutNode = struct {
data: T,
@@ -48,6 +63,7 @@ pub fn Channel(comptime T: type) type {
.need_dispatch = 0,
.getters = std.atomic.Queue(GetNode).init(),
.putters = std.atomic.Queue(PutNode).init(),
+ .or_null_queue = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).init(),
.get_count = 0,
.put_count = 0,
});
@@ -71,18 +87,29 @@ pub fn Channel(comptime T: type) type {
/// puts a data item in the channel. The promise completes when the value has been added to the
/// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter.
pub async fn put(self: *SelfChannel, data: T) void {
+ // TODO fix this workaround
+ suspend {
+ resume @handle();
+ }
+
+ var my_tick_node = Loop.NextTickNode.init(@handle());
+ var queue_node = std.atomic.Queue(PutNode).Node.init(PutNode{
+ .tick_node = &my_tick_node,
+ .data = data,
+ });
+
+ // TODO test canceling a put()
+ errdefer {
+ _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ const need_dispatch = !self.putters.remove(&queue_node);
+ self.loop.cancelOnNextTick(&my_tick_node);
+ if (need_dispatch) {
+ // oops we made the put_count incorrect for a period of time. fix by dispatching.
+ _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ self.dispatch();
+ }
+ }
suspend {
- var my_tick_node = Loop.NextTickNode{
- .next = undefined,
- .data = @handle(),
- };
- var queue_node = std.atomic.Queue(PutNode).Node{
- .data = PutNode{
- .tick_node = &my_tick_node,
- .data = data,
- },
- .next = undefined,
- };
self.putters.put(&queue_node);
_ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
@@ -93,21 +120,35 @@ pub fn Channel(comptime T: type) type {
/// await this function to get an item from the channel. If the buffer is empty, the promise will
/// complete when the next item is put in the channel.
pub async fn get(self: *SelfChannel) T {
+ // TODO fix this workaround
+ suspend {
+ resume @handle();
+ }
+
// TODO integrate this function with named return values
// so we can get rid of this extra result copy
var result: T = undefined;
+ var my_tick_node = Loop.NextTickNode.init(@handle());
+ var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode{
+ .tick_node = &my_tick_node,
+ .data = GetNode.Data{
+ .Normal = GetNode.Normal{ .ptr = &result },
+ },
+ });
+
+ // TODO test canceling a get()
+ errdefer {
+ _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ const need_dispatch = !self.getters.remove(&queue_node);
+ self.loop.cancelOnNextTick(&my_tick_node);
+ if (need_dispatch) {
+ // oops we made the get_count incorrect for a period of time. fix by dispatching.
+ _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ self.dispatch();
+ }
+ }
+
suspend {
- var my_tick_node = Loop.NextTickNode{
- .next = undefined,
- .data = @handle(),
- };
- var queue_node = std.atomic.Queue(GetNode).Node{
- .data = GetNode{
- .ptr = &result,
- .tick_node = &my_tick_node,
- },
- .next = undefined,
- };
self.getters.put(&queue_node);
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
@@ -116,6 +157,64 @@ pub fn Channel(comptime T: type) type {
return result;
}
+ //pub async fn select(comptime EnumUnion: type, channels: ...) EnumUnion {
+ // assert(@memberCount(EnumUnion) == channels.len); // enum union and channels mismatch
+ // assert(channels.len != 0); // enum unions cannot have 0 fields
+ // if (channels.len == 1) {
+ // const result = await (async channels[0].get() catch unreachable);
+ // return @unionInit(EnumUnion, @memberName(EnumUnion, 0), result);
+ // }
+ //}
+
+ /// Await this function to get an item from the channel. If the buffer is empty and there are no
+ /// puts waiting, this returns null.
+ /// Await is necessary for locking purposes. The function will be resumed after checking the channel
+ /// for data and will not wait for data to be available.
+ pub async fn getOrNull(self: *SelfChannel) ?T {
+ // TODO fix this workaround
+ suspend {
+ resume @handle();
+ }
+
+ // TODO integrate this function with named return values
+ // so we can get rid of this extra result copy
+ var result: ?T = null;
+ var my_tick_node = Loop.NextTickNode.init(@handle());
+ var or_null_node = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).Node.init(undefined);
+ var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode{
+ .tick_node = &my_tick_node,
+ .data = GetNode.Data{
+ .OrNull = GetNode.OrNull{
+ .ptr = &result,
+ .or_null = &or_null_node,
+ },
+ },
+ });
+ or_null_node.data = &queue_node;
+
+ // TODO test canceling getOrNull
+ errdefer {
+ _ = self.or_null_queue.remove(&or_null_node);
+ _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ const need_dispatch = !self.getters.remove(&queue_node);
+ self.loop.cancelOnNextTick(&my_tick_node);
+ if (need_dispatch) {
+ // oops we made the get_count incorrect for a period of time. fix by dispatching.
+ _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ self.dispatch();
+ }
+ }
+
+ suspend {
+ self.getters.put(&queue_node);
+ _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ self.or_null_queue.put(&or_null_node);
+
+ self.dispatch();
+ }
+ return result;
+ }
+
fn dispatch(self: *SelfChannel) void {
// set the "need dispatch" flag
_ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
@@ -139,7 +238,15 @@ pub fn Channel(comptime T: type) type {
if (get_count == 0) break :one_dispatch;
const get_node = &self.getters.get().?.data;
- get_node.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
+ switch (get_node.data) {
+ GetNode.Data.Normal => |info| {
+ info.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
+ },
+ GetNode.Data.OrNull => |info| {
+ _ = self.or_null_queue.remove(info.or_null);
+ info.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
+ },
+ }
self.loop.onNextTick(get_node.tick_node);
self.buffer_len -= 1;
@@ -151,7 +258,15 @@ pub fn Channel(comptime T: type) type {
const get_node = &self.getters.get().?.data;
const put_node = &self.putters.get().?.data;
- get_node.ptr.* = put_node.data;
+ switch (get_node.data) {
+ GetNode.Data.Normal => |info| {
+ info.ptr.* = put_node.data;
+ },
+ GetNode.Data.OrNull => |info| {
+ _ = self.or_null_queue.remove(info.or_null);
+ info.ptr.* = put_node.data;
+ },
+ }
self.loop.onNextTick(get_node.tick_node);
self.loop.onNextTick(put_node.tick_node);
@@ -176,6 +291,16 @@ pub fn Channel(comptime T: type) type {
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
_ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ // All the "get or null" functions should resume now.
+ var remove_count: usize = 0;
+ while (self.or_null_queue.get()) |or_null_node| {
+ remove_count += @boolToInt(self.getters.remove(or_null_node.data));
+ self.loop.onNextTick(or_null_node.data.data.tick_node);
+ }
+ if (remove_count != 0) {
+ _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, remove_count, AtomicOrder.SeqCst);
+ }
+
// clear need-dispatch flag
const need_dispatch = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
if (need_dispatch != 0) continue;
@@ -226,6 +351,15 @@ async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void {
const value2_promise = try async channel.get();
const value2 = await value2_promise;
assert(value2 == 4567);
+
+ const value3_promise = try async channel.getOrNull();
+ const value3 = await value3_promise;
+ assert(value3 == null);
+
+ const last_put = try async testPut(channel, 4444);
+ const value4 = await try async channel.getOrNull();
+ assert(value4.? == 4444);
+ await last_put;
}
async fn testChannelPutter(channel: *Channel(i32)) void {
@@ -233,3 +367,6 @@ async fn testChannelPutter(channel: *Channel(i32)) void {
await (async channel.put(4567) catch @panic("out of memory"));
}
+async fn testPut(channel: *Channel(i32), value: i32) void {
+ await (async channel.put(value) catch @panic("out of memory"));
+}
diff --git a/std/event/fs.zig b/std/event/fs.zig
new file mode 100644
index 0000000000..5e7e24ff43
--- /dev/null
+++ b/std/event/fs.zig
@@ -0,0 +1,1347 @@
+const builtin = @import("builtin");
+const std = @import("../index.zig");
+const event = std.event;
+const assert = std.debug.assert;
+const os = std.os;
+const mem = std.mem;
+const posix = os.posix;
+const windows = os.windows;
+const Loop = event.Loop;
+
+pub const RequestNode = std.atomic.Queue(Request).Node;
+
+pub const Request = struct {
+ msg: Msg,
+ finish: Finish,
+
+ pub const Finish = union(enum) {
+ TickNode: Loop.NextTickNode,
+ DeallocCloseOperation: *CloseOperation,
+ NoAction,
+ };
+
+ pub const Msg = union(enum) {
+ PWriteV: PWriteV,
+ PReadV: PReadV,
+ Open: Open,
+ Close: Close,
+ WriteFile: WriteFile,
+ End, // special - means the fs thread should exit
+
+ pub const PWriteV = struct {
+ fd: os.FileHandle,
+ iov: []os.posix.iovec_const,
+ offset: usize,
+ result: Error!void,
+
+ pub const Error = os.File.WriteError;
+ };
+
+ pub const PReadV = struct {
+ fd: os.FileHandle,
+ iov: []os.posix.iovec,
+ offset: usize,
+ result: Error!usize,
+
+ pub const Error = os.File.ReadError;
+ };
+
+ pub const Open = struct {
+ /// must be null terminated. TODO https://github.com/ziglang/zig/issues/265
+ path: []const u8,
+ flags: u32,
+ mode: os.File.Mode,
+ result: Error!os.FileHandle,
+
+ pub const Error = os.File.OpenError;
+ };
+
+ pub const WriteFile = struct {
+ /// must be null terminated. TODO https://github.com/ziglang/zig/issues/265
+ path: []const u8,
+ contents: []const u8,
+ mode: os.File.Mode,
+ result: Error!void,
+
+ pub const Error = os.File.OpenError || os.File.WriteError;
+ };
+
+ pub const Close = struct {
+ fd: os.FileHandle,
+ };
+ };
+};
+
+/// data - just the inner references - must live until pwritev promise completes.
+pub async fn pwritev(loop: *Loop, fd: os.FileHandle, data: []const []const u8, offset: usize) !void {
+ switch (builtin.os) {
+ builtin.Os.macosx,
+ builtin.Os.linux,
+ => return await (async pwritevPosix(loop, fd, data, offset) catch unreachable),
+ builtin.Os.windows => return await (async pwritevWindows(loop, fd, data, offset) catch unreachable),
+ else => @compileError("Unsupported OS"),
+ }
+}
+
+/// data - just the inner references - must live until pwritev promise completes.
+pub async fn pwritevWindows(loop: *Loop, fd: os.FileHandle, data: []const []const u8, offset: usize) !void {
+ if (data.len == 0) return;
+ if (data.len == 1) return await (async pwriteWindows(loop, fd, data[0], offset) catch unreachable);
+
+ const data_copy = try std.mem.dupe(loop.allocator, []const u8, data);
+ defer loop.allocator.free(data_copy);
+
+ // TODO do these in parallel
+ var off = offset;
+ for (data_copy) |buf| {
+ try await (async pwriteWindows(loop, fd, buf, off) catch unreachable);
+ off += buf.len;
+ }
+}
+
+pub async fn pwriteWindows(loop: *Loop, fd: os.FileHandle, data: []const u8, offset: u64) os.WindowsWriteError!void {
+ // workaround for https://github.com/ziglang/zig/issues/1194
+ suspend {
+ resume @handle();
+ }
+
+ var resume_node = Loop.ResumeNode.Basic{
+ .base = Loop.ResumeNode{
+ .id = Loop.ResumeNode.Id.Basic,
+ .handle = @handle(),
+ },
+ };
+ const completion_key = @ptrToInt(&resume_node.base);
+ // TODO support concurrent async ops on the file handle
+ // we can do this by ignoring completion key and using @fieldParentPtr with the *Overlapped
+ _ = try os.windowsCreateIoCompletionPort(fd, loop.os_data.io_port, completion_key, undefined);
+ var overlapped = windows.OVERLAPPED{
+ .Internal = 0,
+ .InternalHigh = 0,
+ .Offset = @truncate(u32, offset),
+ .OffsetHigh = @truncate(u32, offset >> 32),
+ .hEvent = null,
+ };
+ loop.beginOneEvent();
+ errdefer loop.finishOneEvent();
+
+ errdefer {
+ _ = windows.CancelIoEx(fd, &overlapped);
+ }
+ suspend {
+ _ = windows.WriteFile(fd, data.ptr, @intCast(windows.DWORD, data.len), null, &overlapped);
+ }
+ var bytes_transferred: windows.DWORD = undefined;
+ if (windows.GetOverlappedResult(fd, &overlapped, &bytes_transferred, windows.FALSE) == 0) {
+ const err = windows.GetLastError();
+ return switch (err) {
+ windows.ERROR.IO_PENDING => unreachable,
+ windows.ERROR.INVALID_USER_BUFFER => error.SystemResources,
+ windows.ERROR.NOT_ENOUGH_MEMORY => error.SystemResources,
+ windows.ERROR.OPERATION_ABORTED => error.OperationAborted,
+ windows.ERROR.NOT_ENOUGH_QUOTA => error.SystemResources,
+ windows.ERROR.BROKEN_PIPE => error.BrokenPipe,
+ else => os.unexpectedErrorWindows(err),
+ };
+ }
+}
+
+/// data - just the inner references - must live until pwritev promise completes.
+pub async fn pwritevPosix(loop: *Loop, fd: os.FileHandle, data: []const []const u8, offset: usize) !void {
+ // workaround for https://github.com/ziglang/zig/issues/1194
+ suspend {
+ resume @handle();
+ }
+
+ const iovecs = try loop.allocator.alloc(os.posix.iovec_const, data.len);
+ defer loop.allocator.free(iovecs);
+
+ for (data) |buf, i| {
+ iovecs[i] = os.posix.iovec_const{
+ .iov_base = buf.ptr,
+ .iov_len = buf.len,
+ };
+ }
+
+ var req_node = RequestNode{
+ .prev = null,
+ .next = null,
+ .data = Request{
+ .msg = Request.Msg{
+ .PWriteV = Request.Msg.PWriteV{
+ .fd = fd,
+ .iov = iovecs,
+ .offset = offset,
+ .result = undefined,
+ },
+ },
+ .finish = Request.Finish{
+ .TickNode = Loop.NextTickNode{
+ .prev = null,
+ .next = null,
+ .data = @handle(),
+ },
+ },
+ },
+ };
+
+ errdefer loop.posixFsCancel(&req_node);
+
+ suspend {
+ loop.posixFsRequest(&req_node);
+ }
+
+ return req_node.data.msg.PWriteV.result;
+}
+
+/// data - just the inner references - must live until preadv promise completes.
+pub async fn preadv(loop: *Loop, fd: os.FileHandle, data: []const []u8, offset: usize) !usize {
+ assert(data.len != 0);
+ switch (builtin.os) {
+ builtin.Os.macosx,
+ builtin.Os.linux,
+ => return await (async preadvPosix(loop, fd, data, offset) catch unreachable),
+ builtin.Os.windows => return await (async preadvWindows(loop, fd, data, offset) catch unreachable),
+ else => @compileError("Unsupported OS"),
+ }
+}
+
+pub async fn preadvWindows(loop: *Loop, fd: os.FileHandle, data: []const []u8, offset: u64) !usize {
+ assert(data.len != 0);
+ if (data.len == 1) return await (async preadWindows(loop, fd, data[0], offset) catch unreachable);
+
+ const data_copy = try std.mem.dupe(loop.allocator, []u8, data);
+ defer loop.allocator.free(data_copy);
+
+ // TODO do these in parallel?
+ var off: usize = 0;
+ var iov_i: usize = 0;
+ var inner_off: usize = 0;
+ while (true) {
+ const v = data_copy[iov_i];
+ const amt_read = try await (async preadWindows(loop, fd, v[inner_off .. v.len - inner_off], offset + off) catch unreachable);
+ off += amt_read;
+ inner_off += amt_read;
+ if (inner_off == v.len) {
+ iov_i += 1;
+ inner_off = 0;
+ if (iov_i == data_copy.len) {
+ return off;
+ }
+ }
+ if (amt_read == 0) return off; // EOF
+ }
+}
+
+pub async fn preadWindows(loop: *Loop, fd: os.FileHandle, data: []u8, offset: u64) !usize {
+ // workaround for https://github.com/ziglang/zig/issues/1194
+ suspend {
+ resume @handle();
+ }
+
+ var resume_node = Loop.ResumeNode.Basic{
+ .base = Loop.ResumeNode{
+ .id = Loop.ResumeNode.Id.Basic,
+ .handle = @handle(),
+ },
+ };
+ const completion_key = @ptrToInt(&resume_node.base);
+ // TODO support concurrent async ops on the file handle
+ // we can do this by ignoring completion key and using @fieldParentPtr with the *Overlapped
+ _ = try os.windowsCreateIoCompletionPort(fd, loop.os_data.io_port, completion_key, undefined);
+ var overlapped = windows.OVERLAPPED{
+ .Internal = 0,
+ .InternalHigh = 0,
+ .Offset = @truncate(u32, offset),
+ .OffsetHigh = @truncate(u32, offset >> 32),
+ .hEvent = null,
+ };
+ loop.beginOneEvent();
+ errdefer loop.finishOneEvent();
+
+ errdefer {
+ _ = windows.CancelIoEx(fd, &overlapped);
+ }
+ suspend {
+ _ = windows.ReadFile(fd, data.ptr, @intCast(windows.DWORD, data.len), null, &overlapped);
+ }
+ var bytes_transferred: windows.DWORD = undefined;
+ if (windows.GetOverlappedResult(fd, &overlapped, &bytes_transferred, windows.FALSE) == 0) {
+ const err = windows.GetLastError();
+ return switch (err) {
+ windows.ERROR.IO_PENDING => unreachable,
+ windows.ERROR.OPERATION_ABORTED => error.OperationAborted,
+ windows.ERROR.BROKEN_PIPE => error.BrokenPipe,
+ else => os.unexpectedErrorWindows(err),
+ };
+ }
+ return usize(bytes_transferred);
+}
+
+/// data - just the inner references - must live until preadv promise completes.
+pub async fn preadvPosix(loop: *Loop, fd: os.FileHandle, data: []const []u8, offset: usize) !usize {
+ // workaround for https://github.com/ziglang/zig/issues/1194
+ suspend {
+ resume @handle();
+ }
+
+ const iovecs = try loop.allocator.alloc(os.posix.iovec, data.len);
+ defer loop.allocator.free(iovecs);
+
+ for (data) |buf, i| {
+ iovecs[i] = os.posix.iovec{
+ .iov_base = buf.ptr,
+ .iov_len = buf.len,
+ };
+ }
+
+ var req_node = RequestNode{
+ .prev = null,
+ .next = null,
+ .data = Request{
+ .msg = Request.Msg{
+ .PReadV = Request.Msg.PReadV{
+ .fd = fd,
+ .iov = iovecs,
+ .offset = offset,
+ .result = undefined,
+ },
+ },
+ .finish = Request.Finish{
+ .TickNode = Loop.NextTickNode{
+ .prev = null,
+ .next = null,
+ .data = @handle(),
+ },
+ },
+ },
+ };
+
+ errdefer loop.posixFsCancel(&req_node);
+
+ suspend {
+ loop.posixFsRequest(&req_node);
+ }
+
+ return req_node.data.msg.PReadV.result;
+}
+
+pub async fn openPosix(
+ loop: *Loop,
+ path: []const u8,
+ flags: u32,
+ mode: os.File.Mode,
+) os.File.OpenError!os.FileHandle {
+ // workaround for https://github.com/ziglang/zig/issues/1194
+ suspend {
+ resume @handle();
+ }
+
+ const path_c = try std.os.toPosixPath(path);
+
+ var req_node = RequestNode{
+ .prev = null,
+ .next = null,
+ .data = Request{
+ .msg = Request.Msg{
+ .Open = Request.Msg.Open{
+ .path = path_c[0..path.len],
+ .flags = flags,
+ .mode = mode,
+ .result = undefined,
+ },
+ },
+ .finish = Request.Finish{
+ .TickNode = Loop.NextTickNode{
+ .prev = null,
+ .next = null,
+ .data = @handle(),
+ },
+ },
+ },
+ };
+
+ errdefer loop.posixFsCancel(&req_node);
+
+ suspend {
+ loop.posixFsRequest(&req_node);
+ }
+
+ return req_node.data.msg.Open.result;
+}
+
+pub async fn openRead(loop: *Loop, path: []const u8) os.File.OpenError!os.FileHandle {
+ switch (builtin.os) {
+ builtin.Os.macosx, builtin.Os.linux => {
+ const flags = posix.O_LARGEFILE | posix.O_RDONLY | posix.O_CLOEXEC;
+ return await (async openPosix(loop, path, flags, os.File.default_mode) catch unreachable);
+ },
+
+ builtin.Os.windows => return os.windowsOpen(
+ path,
+ windows.GENERIC_READ,
+ windows.FILE_SHARE_READ,
+ windows.OPEN_EXISTING,
+ windows.FILE_ATTRIBUTE_NORMAL | windows.FILE_FLAG_OVERLAPPED,
+ ),
+
+ else => @compileError("Unsupported OS"),
+ }
+}
+
+/// Creates if does not exist. Truncates the file if it exists.
+/// Uses the default mode.
+pub async fn openWrite(loop: *Loop, path: []const u8) os.File.OpenError!os.FileHandle {
+ return await (async openWriteMode(loop, path, os.File.default_mode) catch unreachable);
+}
+
+/// Creates if does not exist. Truncates the file if it exists.
+pub async fn openWriteMode(loop: *Loop, path: []const u8, mode: os.File.Mode) os.File.OpenError!os.FileHandle {
+ switch (builtin.os) {
+ builtin.Os.macosx,
+ builtin.Os.linux,
+ => {
+ const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_TRUNC;
+ return await (async openPosix(loop, path, flags, os.File.default_mode) catch unreachable);
+ },
+ builtin.Os.windows => return os.windowsOpen(
+ path,
+ windows.GENERIC_WRITE,
+ windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
+ windows.CREATE_ALWAYS,
+ windows.FILE_ATTRIBUTE_NORMAL | windows.FILE_FLAG_OVERLAPPED,
+ ),
+ else => @compileError("Unsupported OS"),
+ }
+}
+
+/// Creates if does not exist. Does not truncate.
+pub async fn openReadWrite(
+ loop: *Loop,
+ path: []const u8,
+ mode: os.File.Mode,
+) os.File.OpenError!os.FileHandle {
+ switch (builtin.os) {
+ builtin.Os.macosx, builtin.Os.linux => {
+ const flags = posix.O_LARGEFILE | posix.O_RDWR | posix.O_CREAT | posix.O_CLOEXEC;
+ return await (async openPosix(loop, path, flags, mode) catch unreachable);
+ },
+
+ builtin.Os.windows => return os.windowsOpen(
+ path,
+ windows.GENERIC_WRITE | windows.GENERIC_READ,
+ windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
+ windows.OPEN_ALWAYS,
+ windows.FILE_ATTRIBUTE_NORMAL | windows.FILE_FLAG_OVERLAPPED,
+ ),
+
+ else => @compileError("Unsupported OS"),
+ }
+}
+
+/// This abstraction helps to close file handles in defer expressions
+/// without the possibility of failure and without the use of suspend points.
+/// Start a `CloseOperation` before opening a file, so that you can defer
+/// `CloseOperation.finish`.
+/// If you call `setHandle` then finishing will close the fd; otherwise finishing
+/// will deallocate the `CloseOperation`.
+pub const CloseOperation = struct {
+ loop: *Loop,
+ os_data: OsData,
+
+ const OsData = switch (builtin.os) {
+ builtin.Os.linux, builtin.Os.macosx => OsDataPosix,
+
+ builtin.Os.windows => struct {
+ handle: ?os.FileHandle,
+ },
+
+ else => @compileError("Unsupported OS"),
+ };
+
+ const OsDataPosix = struct {
+ have_fd: bool,
+ close_req_node: RequestNode,
+ };
+
+ pub fn start(loop: *Loop) (error{OutOfMemory}!*CloseOperation) {
+ const self = try loop.allocator.createOne(CloseOperation);
+ self.* = CloseOperation{
+ .loop = loop,
+ .os_data = switch (builtin.os) {
+ builtin.Os.linux, builtin.Os.macosx => initOsDataPosix(self),
+ builtin.Os.windows => OsData{ .handle = null },
+ else => @compileError("Unsupported OS"),
+ },
+ };
+ return self;
+ }
+
+ fn initOsDataPosix(self: *CloseOperation) OsData {
+ return OsData{
+ .have_fd = false,
+ .close_req_node = RequestNode{
+ .prev = null,
+ .next = null,
+ .data = Request{
+ .msg = Request.Msg{
+ .Close = Request.Msg.Close{ .fd = undefined },
+ },
+ .finish = Request.Finish{ .DeallocCloseOperation = self },
+ },
+ },
+ };
+ }
+
+ /// Defer this after creating.
+ pub fn finish(self: *CloseOperation) void {
+ switch (builtin.os) {
+ builtin.Os.linux,
+ builtin.Os.macosx,
+ => {
+ if (self.os_data.have_fd) {
+ self.loop.posixFsRequest(&self.os_data.close_req_node);
+ } else {
+ self.loop.allocator.destroy(self);
+ }
+ },
+ builtin.Os.windows => {
+ if (self.os_data.handle) |handle| {
+ os.close(handle);
+ }
+ self.loop.allocator.destroy(self);
+ },
+ else => @compileError("Unsupported OS"),
+ }
+ }
+
+ pub fn setHandle(self: *CloseOperation, handle: os.FileHandle) void {
+ switch (builtin.os) {
+ builtin.Os.linux,
+ builtin.Os.macosx,
+ => {
+ self.os_data.close_req_node.data.msg.Close.fd = handle;
+ self.os_data.have_fd = true;
+ },
+ builtin.Os.windows => {
+ self.os_data.handle = handle;
+ },
+ else => @compileError("Unsupported OS"),
+ }
+ }
+
+ /// Undo a `setHandle`.
+ pub fn clearHandle(self: *CloseOperation) void {
+ switch (builtin.os) {
+ builtin.Os.linux,
+ builtin.Os.macosx,
+ => {
+ self.os_data.have_fd = false;
+ },
+ builtin.Os.windows => {
+ self.os_data.handle = null;
+ },
+ else => @compileError("Unsupported OS"),
+ }
+ }
+
+ pub fn getHandle(self: *CloseOperation) os.FileHandle {
+ switch (builtin.os) {
+ builtin.Os.linux,
+ builtin.Os.macosx,
+ => {
+ assert(self.os_data.have_fd);
+ return self.os_data.close_req_node.data.msg.Close.fd;
+ },
+ builtin.Os.windows => {
+ return self.os_data.handle.?;
+ },
+ else => @compileError("Unsupported OS"),
+ }
+ }
+};
+
+/// contents must remain alive until writeFile completes.
+/// TODO make this atomic or provide writeFileAtomic and rename this one to writeFileTruncate
+pub async fn writeFile(loop: *Loop, path: []const u8, contents: []const u8) !void {
+ return await (async writeFileMode(loop, path, contents, os.File.default_mode) catch unreachable);
+}
+
+/// contents must remain alive until writeFileMode completes.
+pub async fn writeFileMode(loop: *Loop, path: []const u8, contents: []const u8, mode: os.File.Mode) !void {
+ switch (builtin.os) {
+ builtin.Os.linux,
+ builtin.Os.macosx,
+ => return await (async writeFileModeThread(loop, path, contents, mode) catch unreachable),
+ builtin.Os.windows => return await (async writeFileWindows(loop, path, contents) catch unreachable),
+ else => @compileError("Unsupported OS"),
+ }
+}
+
+async fn writeFileWindows(loop: *Loop, path: []const u8, contents: []const u8) !void {
+ const handle = try os.windowsOpen(
+ path,
+ windows.GENERIC_WRITE,
+ windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
+ windows.CREATE_ALWAYS,
+ windows.FILE_ATTRIBUTE_NORMAL | windows.FILE_FLAG_OVERLAPPED,
+ );
+ defer os.close(handle);
+
+ try await (async pwriteWindows(loop, handle, contents, 0) catch unreachable);
+}
+
+async fn writeFileModeThread(loop: *Loop, path: []const u8, contents: []const u8, mode: os.File.Mode) !void {
+ // workaround for https://github.com/ziglang/zig/issues/1194
+ suspend {
+ resume @handle();
+ }
+
+ const path_with_null = try std.cstr.addNullByte(loop.allocator, path);
+ defer loop.allocator.free(path_with_null);
+
+ var req_node = RequestNode{
+ .prev = null,
+ .next = null,
+ .data = Request{
+ .msg = Request.Msg{
+ .WriteFile = Request.Msg.WriteFile{
+ .path = path_with_null[0..path.len],
+ .contents = contents,
+ .mode = mode,
+ .result = undefined,
+ },
+ },
+ .finish = Request.Finish{
+ .TickNode = Loop.NextTickNode{
+ .prev = null,
+ .next = null,
+ .data = @handle(),
+ },
+ },
+ },
+ };
+
+ errdefer loop.posixFsCancel(&req_node);
+
+ suspend {
+ loop.posixFsRequest(&req_node);
+ }
+
+ return req_node.data.msg.WriteFile.result;
+}
+
+/// The promise resumes when the file handle has been opened and its contents fully read, but
+/// before the file handle is closed.
+/// Caller owns returned memory.
+pub async fn readFile(loop: *Loop, file_path: []const u8, max_size: usize) ![]u8 {
+ var close_op = try CloseOperation.start(loop);
+ defer close_op.finish();
+
+ const fd = try await (async openRead(loop, file_path) catch unreachable);
+ close_op.setHandle(fd);
+
+ var list = std.ArrayList(u8).init(loop.allocator);
+ defer list.deinit();
+
+ while (true) {
+ try list.ensureCapacity(list.len + os.page_size);
+ const buf = list.items[list.len..];
+ const buf_array = [][]u8{buf};
+ const amt = try await (async preadv(loop, fd, buf_array, list.len) catch unreachable);
+ list.len += amt;
+ if (list.len > max_size) {
+ return error.FileTooBig;
+ }
+ if (amt < buf.len) {
+ return list.toOwnedSlice();
+ }
+ }
+}
+
+pub const WatchEventId = enum {
+ CloseWrite,
+ Delete,
+};
+
+pub const WatchEventError = error{
+ UserResourceLimitReached,
+ SystemResources,
+ AccessDenied,
+ Unexpected, // TODO remove this possibility
+};
+
+pub fn Watch(comptime V: type) type {
+ return struct {
+ channel: *event.Channel(Event.Error!Event),
+ os_data: OsData,
+
+ const OsData = switch (builtin.os) {
+ builtin.Os.macosx => struct {
+ file_table: FileTable,
+ table_lock: event.Lock,
+
+ const FileTable = std.AutoHashMap([]const u8, *Put);
+ const Put = struct {
+ putter: promise,
+ value_ptr: *V,
+ };
+ },
+
+ builtin.Os.linux => LinuxOsData,
+ builtin.Os.windows => WindowsOsData,
+
+ else => @compileError("Unsupported OS"),
+ };
+
+ const WindowsOsData = struct {
+ table_lock: event.Lock,
+ dir_table: DirTable,
+ all_putters: std.atomic.Queue(promise),
+ ref_count: std.atomic.Int(usize),
+
+ const DirTable = std.AutoHashMap([]const u8, *Dir);
+ const FileTable = std.AutoHashMap([]const u16, V);
+
+ const Dir = struct {
+ putter: promise,
+ file_table: FileTable,
+ table_lock: event.Lock,
+ };
+ };
+
+ const LinuxOsData = struct {
+ putter: promise,
+ inotify_fd: i32,
+ wd_table: WdTable,
+ table_lock: event.Lock,
+
+ const WdTable = std.AutoHashMap(i32, Dir);
+ const FileTable = std.AutoHashMap([]const u8, V);
+
+ const Dir = struct {
+ dirname: []const u8,
+ file_table: FileTable,
+ };
+ };
+
+ const FileToHandle = std.AutoHashMap([]const u8, promise);
+
+ const Self = this;
+
+ pub const Event = struct {
+ id: Id,
+ data: V,
+
+ pub const Id = WatchEventId;
+ pub const Error = WatchEventError;
+ };
+
+ pub fn create(loop: *Loop, event_buf_count: usize) !*Self {
+ const channel = try event.Channel(Self.Event.Error!Self.Event).create(loop, event_buf_count);
+ errdefer channel.destroy();
+
+ switch (builtin.os) {
+ builtin.Os.linux => {
+ const inotify_fd = try os.linuxINotifyInit1(os.linux.IN_NONBLOCK | os.linux.IN_CLOEXEC);
+ errdefer os.close(inotify_fd);
+
+ var result: *Self = undefined;
+ _ = try async linuxEventPutter(inotify_fd, channel, &result);
+ return result;
+ },
+
+ builtin.Os.windows => {
+ const self = try loop.allocator.createOne(Self);
+ errdefer loop.allocator.destroy(self);
+ self.* = Self{
+ .channel = channel,
+ .os_data = OsData{
+ .table_lock = event.Lock.init(loop),
+ .dir_table = OsData.DirTable.init(loop.allocator),
+ .ref_count = std.atomic.Int(usize).init(1),
+ .all_putters = std.atomic.Queue(promise).init(),
+ },
+ };
+ return self;
+ },
+
+ builtin.Os.macosx => {
+ const self = try loop.allocator.createOne(Self);
+ errdefer loop.allocator.destroy(self);
+
+ self.* = Self{
+ .channel = channel,
+ .os_data = OsData{
+ .table_lock = event.Lock.init(loop),
+ .file_table = OsData.FileTable.init(loop.allocator),
+ },
+ };
+ return self;
+ },
+ else => @compileError("Unsupported OS"),
+ }
+ }
+
+ /// All addFile calls and removeFile calls must have completed.
+ pub fn destroy(self: *Self) void {
+ switch (builtin.os) {
+ builtin.Os.macosx => {
+ // TODO we need to cancel the coroutines before destroying the lock
+ self.os_data.table_lock.deinit();
+ var it = self.os_data.file_table.iterator();
+ while (it.next()) |entry| {
+ cancel entry.value.putter;
+ self.channel.loop.allocator.free(entry.key);
+ }
+ self.channel.destroy();
+ },
+ builtin.Os.linux => cancel self.os_data.putter,
+ builtin.Os.windows => {
+ while (self.os_data.all_putters.get()) |putter_node| {
+ cancel putter_node.data;
+ }
+ self.deref();
+ },
+ else => @compileError("Unsupported OS"),
+ }
+ }
+
+ fn ref(self: *Self) void {
+ _ = self.os_data.ref_count.incr();
+ }
+
+ fn deref(self: *Self) void {
+ if (self.os_data.ref_count.decr() == 1) {
+ const allocator = self.channel.loop.allocator;
+ self.os_data.table_lock.deinit();
+ var it = self.os_data.dir_table.iterator();
+ while (it.next()) |entry| {
+ allocator.free(entry.key);
+ allocator.destroy(entry.value);
+ }
+ self.os_data.dir_table.deinit();
+ self.channel.destroy();
+ allocator.destroy(self);
+ }
+ }
+
+ pub async fn addFile(self: *Self, file_path: []const u8, value: V) !?V {
+ switch (builtin.os) {
+ builtin.Os.macosx => return await (async addFileMacosx(self, file_path, value) catch unreachable),
+ builtin.Os.linux => return await (async addFileLinux(self, file_path, value) catch unreachable),
+ builtin.Os.windows => return await (async addFileWindows(self, file_path, value) catch unreachable),
+ else => @compileError("Unsupported OS"),
+ }
+ }
+
+ async fn addFileMacosx(self: *Self, file_path: []const u8, value: V) !?V {
+ const resolved_path = try os.path.resolve(self.channel.loop.allocator, file_path);
+ var resolved_path_consumed = false;
+ defer if (!resolved_path_consumed) self.channel.loop.allocator.free(resolved_path);
+
+ var close_op = try CloseOperation.start(self.channel.loop);
+ var close_op_consumed = false;
+ defer if (!close_op_consumed) close_op.finish();
+
+ const flags = posix.O_SYMLINK | posix.O_EVTONLY;
+ const mode = 0;
+ const fd = try await (async openPosix(self.channel.loop, resolved_path, flags, mode) catch unreachable);
+ close_op.setHandle(fd);
+
+ var put_data: *OsData.Put = undefined;
+ const putter = try async self.kqPutEvents(close_op, value, &put_data);
+ close_op_consumed = true;
+ errdefer cancel putter;
+
+ const result = blk: {
+ const held = await (async self.os_data.table_lock.acquire() catch unreachable);
+ defer held.release();
+
+ const gop = try self.os_data.file_table.getOrPut(resolved_path);
+ if (gop.found_existing) {
+ const prev_value = gop.kv.value.value_ptr.*;
+ cancel gop.kv.value.putter;
+ gop.kv.value = put_data;
+ break :blk prev_value;
+ } else {
+ resolved_path_consumed = true;
+ gop.kv.value = put_data;
+ break :blk null;
+ }
+ };
+
+ return result;
+ }
+
+ async fn kqPutEvents(self: *Self, close_op: *CloseOperation, value: V, out_put: **OsData.Put) void {
+ // TODO https://github.com/ziglang/zig/issues/1194
+ suspend {
+ resume @handle();
+ }
+
+ var value_copy = value;
+ var put = OsData.Put{
+ .putter = @handle(),
+ .value_ptr = &value_copy,
+ };
+ out_put.* = &put;
+ self.channel.loop.beginOneEvent();
+
+ defer {
+ close_op.finish();
+ self.channel.loop.finishOneEvent();
+ }
+
+ while (true) {
+ if (await (async self.channel.loop.bsdWaitKev(
+ @intCast(usize, close_op.getHandle()),
+ posix.EVFILT_VNODE,
+ posix.NOTE_WRITE | posix.NOTE_DELETE,
+ ) catch unreachable)) |kev| {
+ // TODO handle EV_ERROR
+ if (kev.fflags & posix.NOTE_DELETE != 0) {
+ await (async self.channel.put(Self.Event{
+ .id = Event.Id.Delete,
+ .data = value_copy,
+ }) catch unreachable);
+ } else if (kev.fflags & posix.NOTE_WRITE != 0) {
+ await (async self.channel.put(Self.Event{
+ .id = Event.Id.CloseWrite,
+ .data = value_copy,
+ }) catch unreachable);
+ }
+ } else |err| switch (err) {
+ error.EventNotFound => unreachable,
+ error.ProcessNotFound => unreachable,
+ error.AccessDenied, error.SystemResources => {
+ // TODO https://github.com/ziglang/zig/issues/769
+ const casted_err = @errSetCast(error{
+ AccessDenied,
+ SystemResources,
+ }, err);
+ await (async self.channel.put(casted_err) catch unreachable);
+ },
+ }
+ }
+ }
+
+ async fn addFileLinux(self: *Self, file_path: []const u8, value: V) !?V {
+ const value_copy = value;
+
+ const dirname = os.path.dirname(file_path) orelse ".";
+ const dirname_with_null = try std.cstr.addNullByte(self.channel.loop.allocator, dirname);
+ var dirname_with_null_consumed = false;
+ defer if (!dirname_with_null_consumed) self.channel.loop.allocator.free(dirname_with_null);
+
+ const basename = os.path.basename(file_path);
+ const basename_with_null = try std.cstr.addNullByte(self.channel.loop.allocator, basename);
+ var basename_with_null_consumed = false;
+ defer if (!basename_with_null_consumed) self.channel.loop.allocator.free(basename_with_null);
+
+ const wd = try os.linuxINotifyAddWatchC(
+ self.os_data.inotify_fd,
+ dirname_with_null.ptr,
+ os.linux.IN_CLOSE_WRITE | os.linux.IN_ONLYDIR | os.linux.IN_EXCL_UNLINK,
+ );
+ // wd is either a newly created watch or an existing one.
+
+ const held = await (async self.os_data.table_lock.acquire() catch unreachable);
+ defer held.release();
+
+ const gop = try self.os_data.wd_table.getOrPut(wd);
+ if (!gop.found_existing) {
+ gop.kv.value = OsData.Dir{
+ .dirname = dirname_with_null,
+ .file_table = OsData.FileTable.init(self.channel.loop.allocator),
+ };
+ dirname_with_null_consumed = true;
+ }
+ const dir = &gop.kv.value;
+
+ const file_table_gop = try dir.file_table.getOrPut(basename_with_null);
+ if (file_table_gop.found_existing) {
+ const prev_value = file_table_gop.kv.value;
+ file_table_gop.kv.value = value_copy;
+ return prev_value;
+ } else {
+ file_table_gop.kv.value = value_copy;
+ basename_with_null_consumed = true;
+ return null;
+ }
+ }
+
+ async fn addFileWindows(self: *Self, file_path: []const u8, value: V) !?V {
+ const value_copy = value;
+ // TODO we might need to convert dirname and basename to canonical file paths ("short"?)
+
+ const dirname = try std.mem.dupe(self.channel.loop.allocator, u8, os.path.dirname(file_path) orelse ".");
+ var dirname_consumed = false;
+ defer if (!dirname_consumed) self.channel.loop.allocator.free(dirname);
+
+ const dirname_utf16le = try std.unicode.utf8ToUtf16LeWithNull(self.channel.loop.allocator, dirname);
+ defer self.channel.loop.allocator.free(dirname_utf16le);
+
+ // TODO https://github.com/ziglang/zig/issues/265
+ const basename = os.path.basename(file_path);
+ const basename_utf16le_null = try std.unicode.utf8ToUtf16LeWithNull(self.channel.loop.allocator, basename);
+ var basename_utf16le_null_consumed = false;
+ defer if (!basename_utf16le_null_consumed) self.channel.loop.allocator.free(basename_utf16le_null);
+ const basename_utf16le_no_null = basename_utf16le_null[0 .. basename_utf16le_null.len - 1];
+
+ const dir_handle = windows.CreateFileW(
+ dirname_utf16le.ptr,
+ windows.FILE_LIST_DIRECTORY,
+ windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE | windows.FILE_SHARE_WRITE,
+ null,
+ windows.OPEN_EXISTING,
+ windows.FILE_FLAG_BACKUP_SEMANTICS | windows.FILE_FLAG_OVERLAPPED,
+ null,
+ );
+ if (dir_handle == windows.INVALID_HANDLE_VALUE) {
+ const err = windows.GetLastError();
+ switch (err) {
+ windows.ERROR.FILE_NOT_FOUND => return error.FileNotFound,
+ windows.ERROR.PATH_NOT_FOUND => return error.FileNotFound,
+ else => return os.unexpectedErrorWindows(err),
+ }
+ }
+ var dir_handle_consumed = false;
+ defer if (!dir_handle_consumed) os.close(dir_handle);
+
+ const held = await (async self.os_data.table_lock.acquire() catch unreachable);
+ defer held.release();
+
+ const gop = try self.os_data.dir_table.getOrPut(dirname);
+ if (gop.found_existing) {
+ const dir = gop.kv.value;
+ const held_dir_lock = await (async dir.table_lock.acquire() catch unreachable);
+ defer held_dir_lock.release();
+
+ const file_gop = try dir.file_table.getOrPut(basename_utf16le_no_null);
+ if (file_gop.found_existing) {
+ const prev_value = file_gop.kv.value;
+ file_gop.kv.value = value_copy;
+ return prev_value;
+ } else {
+ file_gop.kv.value = value_copy;
+ basename_utf16le_null_consumed = true;
+ return null;
+ }
+ } else {
+ errdefer _ = self.os_data.dir_table.remove(dirname);
+ const dir = try self.channel.loop.allocator.createOne(OsData.Dir);
+ errdefer self.channel.loop.allocator.destroy(dir);
+
+ dir.* = OsData.Dir{
+ .file_table = OsData.FileTable.init(self.channel.loop.allocator),
+ .table_lock = event.Lock.init(self.channel.loop),
+ .putter = undefined,
+ };
+ gop.kv.value = dir;
+ assert((try dir.file_table.put(basename_utf16le_no_null, value_copy)) == null);
+ basename_utf16le_null_consumed = true;
+
+ dir.putter = try async self.windowsDirReader(dir_handle, dir);
+ dir_handle_consumed = true;
+
+ dirname_consumed = true;
+
+ return null;
+ }
+ }
+
+ async fn windowsDirReader(self: *Self, dir_handle: windows.HANDLE, dir: *OsData.Dir) void {
+ // TODO https://github.com/ziglang/zig/issues/1194
+ suspend {
+ resume @handle();
+ }
+
+ self.ref();
+ defer self.deref();
+
+ defer os.close(dir_handle);
+
+ var putter_node = std.atomic.Queue(promise).Node{
+ .data = @handle(),
+ .prev = null,
+ .next = null,
+ };
+ self.os_data.all_putters.put(&putter_node);
+ defer _ = self.os_data.all_putters.remove(&putter_node);
+
+ var resume_node = Loop.ResumeNode.Basic{
+ .base = Loop.ResumeNode{
+ .id = Loop.ResumeNode.Id.Basic,
+ .handle = @handle(),
+ },
+ };
+ const completion_key = @ptrToInt(&resume_node.base);
+ var overlapped = windows.OVERLAPPED{
+ .Internal = 0,
+ .InternalHigh = 0,
+ .Offset = 0,
+ .OffsetHigh = 0,
+ .hEvent = null,
+ };
+ var event_buf: [4096]u8 align(@alignOf(windows.FILE_NOTIFY_INFORMATION)) = undefined;
+
+ // TODO handle this error not in the channel but in the setup
+ _ = os.windowsCreateIoCompletionPort(
+ dir_handle,
+ self.channel.loop.os_data.io_port,
+ completion_key,
+ undefined,
+ ) catch |err| {
+ await (async self.channel.put(err) catch unreachable);
+ return;
+ };
+
+ while (true) {
+ {
+ // TODO only 1 beginOneEvent for the whole coroutine
+ self.channel.loop.beginOneEvent();
+ errdefer self.channel.loop.finishOneEvent();
+ errdefer {
+ _ = windows.CancelIoEx(dir_handle, &overlapped);
+ }
+ suspend {
+ _ = windows.ReadDirectoryChangesW(
+ dir_handle,
+ &event_buf,
+ @intCast(windows.DWORD, event_buf.len),
+ windows.FALSE, // watch subtree
+ windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME |
+ windows.FILE_NOTIFY_CHANGE_ATTRIBUTES | windows.FILE_NOTIFY_CHANGE_SIZE |
+ windows.FILE_NOTIFY_CHANGE_LAST_WRITE | windows.FILE_NOTIFY_CHANGE_LAST_ACCESS |
+ windows.FILE_NOTIFY_CHANGE_CREATION | windows.FILE_NOTIFY_CHANGE_SECURITY,
+ null, // number of bytes transferred (unused for async)
+ &overlapped,
+ null, // completion routine - unused because we use IOCP
+ );
+ }
+ }
+ var bytes_transferred: windows.DWORD = undefined;
+ if (windows.GetOverlappedResult(dir_handle, &overlapped, &bytes_transferred, windows.FALSE) == 0) {
+ const errno = windows.GetLastError();
+ const err = switch (errno) {
+ else => os.unexpectedErrorWindows(errno),
+ };
+ await (async self.channel.put(err) catch unreachable);
+ } else {
+ // can't use @bytesToSlice because of the special variable length name field
+ var ptr = event_buf[0..].ptr;
+ const end_ptr = ptr + bytes_transferred;
+ var ev: *windows.FILE_NOTIFY_INFORMATION = undefined;
+ while (@ptrToInt(ptr) < @ptrToInt(end_ptr)) : (ptr += ev.NextEntryOffset) {
+ ev = @ptrCast(*windows.FILE_NOTIFY_INFORMATION, ptr);
+ const emit = switch (ev.Action) {
+ windows.FILE_ACTION_REMOVED => WatchEventId.Delete,
+ windows.FILE_ACTION_MODIFIED => WatchEventId.CloseWrite,
+ else => null,
+ };
+ if (emit) |id| {
+ const basename_utf16le = ([*]u16)(&ev.FileName)[0 .. ev.FileNameLength / 2];
+ const user_value = blk: {
+ const held = await (async dir.table_lock.acquire() catch unreachable);
+ defer held.release();
+
+ if (dir.file_table.get(basename_utf16le)) |entry| {
+ break :blk entry.value;
+ } else {
+ break :blk null;
+ }
+ };
+ if (user_value) |v| {
+ await (async self.channel.put(Event{
+ .id = id,
+ .data = v,
+ }) catch unreachable);
+ }
+ }
+ if (ev.NextEntryOffset == 0) break;
+ }
+ }
+ }
+ }
+
+ pub async fn removeFile(self: *Self, file_path: []const u8) ?V {
+ @panic("TODO");
+ }
+
+ async fn linuxEventPutter(inotify_fd: i32, channel: *event.Channel(Event.Error!Event), out_watch: **Self) void {
+ // TODO https://github.com/ziglang/zig/issues/1194
+ suspend {
+ resume @handle();
+ }
+
+ const loop = channel.loop;
+
+ var watch = Self{
+ .channel = channel,
+ .os_data = OsData{
+ .putter = @handle(),
+ .inotify_fd = inotify_fd,
+ .wd_table = OsData.WdTable.init(loop.allocator),
+ .table_lock = event.Lock.init(loop),
+ },
+ };
+ out_watch.* = &watch;
+
+ loop.beginOneEvent();
+
+ defer {
+ watch.os_data.table_lock.deinit();
+ var wd_it = watch.os_data.wd_table.iterator();
+ while (wd_it.next()) |wd_entry| {
+ var file_it = wd_entry.value.file_table.iterator();
+ while (file_it.next()) |file_entry| {
+ loop.allocator.free(file_entry.key);
+ }
+ loop.allocator.free(wd_entry.value.dirname);
+ }
+ loop.finishOneEvent();
+ os.close(inotify_fd);
+ channel.destroy();
+ }
+
+ var event_buf: [4096]u8 align(@alignOf(os.linux.inotify_event)) = undefined;
+
+ while (true) {
+ const rc = os.linux.read(inotify_fd, &event_buf, event_buf.len);
+ const errno = os.linux.getErrno(rc);
+ switch (errno) {
+ 0 => {
+ // can't use @bytesToSlice because of the special variable length name field
+ var ptr = event_buf[0..].ptr;
+ const end_ptr = ptr + event_buf.len;
+ var ev: *os.linux.inotify_event = undefined;
+ while (@ptrToInt(ptr) < @ptrToInt(end_ptr)) : (ptr += @sizeOf(os.linux.inotify_event) + ev.len) {
+ ev = @ptrCast(*os.linux.inotify_event, ptr);
+ if (ev.mask & os.linux.IN_CLOSE_WRITE == os.linux.IN_CLOSE_WRITE) {
+ const basename_ptr = ptr + @sizeOf(os.linux.inotify_event);
+ const basename_with_null = basename_ptr[0 .. std.cstr.len(basename_ptr) + 1];
+ const user_value = blk: {
+ const held = await (async watch.os_data.table_lock.acquire() catch unreachable);
+ defer held.release();
+
+ const dir = &watch.os_data.wd_table.get(ev.wd).?.value;
+ if (dir.file_table.get(basename_with_null)) |entry| {
+ break :blk entry.value;
+ } else {
+ break :blk null;
+ }
+ };
+ if (user_value) |v| {
+ await (async channel.put(Event{
+ .id = WatchEventId.CloseWrite,
+ .data = v,
+ }) catch unreachable);
+ }
+ }
+ }
+ },
+ os.linux.EINTR => continue,
+ os.linux.EINVAL => unreachable,
+ os.linux.EFAULT => unreachable,
+ os.linux.EAGAIN => {
+ (await (async loop.linuxWaitFd(
+ inotify_fd,
+ os.linux.EPOLLET | os.linux.EPOLLIN,
+ ) catch unreachable)) catch |err| {
+ const transformed_err = switch (err) {
+ error.InvalidFileDescriptor => unreachable,
+ error.FileDescriptorAlreadyPresentInSet => unreachable,
+ error.InvalidSyscall => unreachable,
+ error.OperationCausesCircularLoop => unreachable,
+ error.FileDescriptorNotRegistered => unreachable,
+ error.SystemResources => error.SystemResources,
+ error.UserResourceLimitReached => error.UserResourceLimitReached,
+ error.FileDescriptorIncompatibleWithEpoll => unreachable,
+ error.Unexpected => unreachable,
+ };
+ await (async channel.put(transformed_err) catch unreachable);
+ };
+ },
+ else => unreachable,
+ }
+ }
+ }
+ };
+}
+
+const test_tmp_dir = "std_event_fs_test";
+
+test "write a file, watch it, write it again" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ // TODO move this into event loop too
+ try os.makePath(allocator, test_tmp_dir);
+ defer os.deleteTree(allocator, test_tmp_dir) catch {};
+
+ var loop: Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ var result: error!void = error.ResultNeverWritten;
+ const handle = try async testFsWatchCantFail(&loop, &result);
+ defer cancel handle;
+
+ loop.run();
+ return result;
+}
+
+async fn testFsWatchCantFail(loop: *Loop, result: *(error!void)) void {
+ result.* = await async testFsWatch(loop) catch unreachable;
+}
+
+async fn testFsWatch(loop: *Loop) !void {
+ const file_path = try os.path.join(loop.allocator, test_tmp_dir, "file.txt");
+ defer loop.allocator.free(file_path);
+
+ const contents =
+ \\line 1
+ \\line 2
+ ;
+ const line2_offset = 7;
+
+ // first just write then read the file
+ try await try async writeFile(loop, file_path, contents);
+
+ const read_contents = try await try async readFile(loop, file_path, 1024 * 1024);
+ assert(mem.eql(u8, read_contents, contents));
+
+ // now watch the file
+ var watch = try Watch(void).create(loop, 0);
+ defer watch.destroy();
+
+ assert((try await try async watch.addFile(file_path, {})) == null);
+
+ const ev = try async watch.channel.get();
+ var ev_consumed = false;
+ defer if (!ev_consumed) cancel ev;
+
+ // overwrite line 2
+ const fd = try await try async openReadWrite(loop, file_path, os.File.default_mode);
+ {
+ defer os.close(fd);
+
+ try await try async pwritev(loop, fd, []const []const u8{"lorem ipsum"}, line2_offset);
+ }
+
+ ev_consumed = true;
+ switch ((try await ev).id) {
+ WatchEventId.CloseWrite => {},
+ WatchEventId.Delete => @panic("wrong event"),
+ }
+ const contents_updated = try await try async readFile(loop, file_path, 1024 * 1024);
+ assert(mem.eql(u8, contents_updated,
+ \\line 1
+ \\lorem ipsum
+ ));
+
+ // TODO test deleting the file and then re-adding it. we should get events for both
+}
diff --git a/std/event/group.zig b/std/event/group.zig
index 6c7fc63699..2b5a517b2f 100644
--- a/std/event/group.zig
+++ b/std/event/group.zig
@@ -29,6 +29,17 @@ pub fn Group(comptime ReturnType: type) type {
};
}
+ /// Cancel all the outstanding promises. Can be called even if wait was already called.
+ pub fn deinit(self: *Self) void {
+ while (self.coro_stack.pop()) |node| {
+ cancel node.data;
+ }
+ while (self.alloc_stack.pop()) |node| {
+ cancel node.data;
+ self.lock.loop.allocator.destroy(node);
+ }
+ }
+
/// Add a promise to the group. Thread-safe.
pub fn add(self: *Self, handle: promise->ReturnType) (error{OutOfMemory}!void) {
const node = try self.lock.loop.allocator.create(Stack.Node{
@@ -88,7 +99,7 @@ pub fn Group(comptime ReturnType: type) type {
await node.data;
} else {
(await node.data) catch |err| {
- self.cancelAll();
+ self.deinit();
return err;
};
}
@@ -100,25 +111,12 @@ pub fn Group(comptime ReturnType: type) type {
await handle;
} else {
(await handle) catch |err| {
- self.cancelAll();
+ self.deinit();
return err;
};
}
}
}
-
- /// Cancel all the outstanding promises. May only be called if wait was never called.
- /// TODO These should be `cancelasync` not `cancel`.
- /// See https://github.com/ziglang/zig/issues/1261
- pub fn cancelAll(self: *Self) void {
- while (self.coro_stack.pop()) |node| {
- cancel node.data;
- }
- while (self.alloc_stack.pop()) |node| {
- cancel node.data;
- self.lock.loop.allocator.destroy(node);
- }
- }
};
}
diff --git a/std/event/lock.zig b/std/event/lock.zig
index c4cb1a3f0e..2ee9dc981f 100644
--- a/std/event/lock.zig
+++ b/std/event/lock.zig
@@ -9,6 +9,7 @@ const Loop = std.event.Loop;
/// Thread-safe async/await lock.
/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
/// are resumed when the lock is released, in order.
+/// Allows only one actor to hold the lock.
pub const Lock = struct {
loop: *Loop,
shared_bit: u8, // TODO make this a bool
@@ -90,13 +91,14 @@ pub const Lock = struct {
}
pub async fn acquire(self: *Lock) Held {
+ // TODO explicitly put this memory in the coroutine frame #1194
suspend {
- // TODO explicitly put this memory in the coroutine frame #1194
- var my_tick_node = Loop.NextTickNode{
- .data = @handle(),
- .next = undefined,
- };
+ resume @handle();
+ }
+ var my_tick_node = Loop.NextTickNode.init(@handle());
+ errdefer _ = self.queue.remove(&my_tick_node); // TODO test canceling an acquire
+ suspend {
self.queue.put(&my_tick_node);
// At this point, we are in the queue, so we might have already been resumed and this coroutine
@@ -146,6 +148,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void {
}
const handle1 = async lockRunner(lock) catch @panic("out of memory");
var tick_node1 = Loop.NextTickNode{
+ .prev = undefined,
.next = undefined,
.data = handle1,
};
@@ -153,6 +156,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void {
const handle2 = async lockRunner(lock) catch @panic("out of memory");
var tick_node2 = Loop.NextTickNode{
+ .prev = undefined,
.next = undefined,
.data = handle2,
};
@@ -160,6 +164,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void {
const handle3 = async lockRunner(lock) catch @panic("out of memory");
var tick_node3 = Loop.NextTickNode{
+ .prev = undefined,
.next = undefined,
.data = handle3,
};
diff --git a/std/event/loop.zig b/std/event/loop.zig
index 8b1b2e53db..733112549d 100644
--- a/std/event/loop.zig
+++ b/std/event/loop.zig
@@ -2,10 +2,12 @@ const std = @import("../index.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
-const posix = std.os.posix;
-const windows = std.os.windows;
const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
+const fs = std.event.fs;
+const os = std.os;
+const posix = os.posix;
+const windows = os.windows;
pub const Loop = struct {
allocator: *mem.Allocator,
@@ -13,7 +15,7 @@ pub const Loop = struct {
os_data: OsData,
final_resume_node: ResumeNode,
pending_event_count: usize,
- extra_threads: []*std.os.Thread,
+ extra_threads: []*os.Thread,
// pre-allocated eventfds. all permanently active.
// this is how we send promises to be resumed on other threads.
@@ -50,6 +52,22 @@ pub const Loop = struct {
base: ResumeNode,
kevent: posix.Kevent,
};
+
+ pub const Basic = switch (builtin.os) {
+ builtin.Os.macosx => MacOsBasic,
+ builtin.Os.linux => struct {
+ base: ResumeNode,
+ },
+ builtin.Os.windows => struct {
+ base: ResumeNode,
+ },
+ else => @compileError("unsupported OS"),
+ };
+
+ const MacOsBasic = struct {
+ base: ResumeNode,
+ kev: posix.Kevent,
+ };
};
/// After initialization, call run().
@@ -65,7 +83,7 @@ pub const Loop = struct {
/// TODO copy elision / named return values so that the threads referencing *Loop
/// have the correct pointer value.
pub fn initMultiThreaded(self: *Loop, allocator: *mem.Allocator) !void {
- const core_count = try std.os.cpuCount(allocator);
+ const core_count = try os.cpuCount(allocator);
return self.initInternal(allocator, core_count);
}
@@ -92,7 +110,7 @@ pub const Loop = struct {
);
errdefer self.allocator.free(self.eventfd_resume_nodes);
- self.extra_threads = try self.allocator.alloc(*std.os.Thread, extra_thread_count);
+ self.extra_threads = try self.allocator.alloc(*os.Thread, extra_thread_count);
errdefer self.allocator.free(self.extra_threads);
try self.initOsData(extra_thread_count);
@@ -104,17 +122,30 @@ pub const Loop = struct {
self.allocator.free(self.extra_threads);
}
- const InitOsDataError = std.os.LinuxEpollCreateError || mem.Allocator.Error || std.os.LinuxEventFdError ||
- std.os.SpawnThreadError || std.os.LinuxEpollCtlError || std.os.BsdKEventError ||
- std.os.WindowsCreateIoCompletionPortError;
+ const InitOsDataError = os.LinuxEpollCreateError || mem.Allocator.Error || os.LinuxEventFdError ||
+ os.SpawnThreadError || os.LinuxEpollCtlError || os.BsdKEventError ||
+ os.WindowsCreateIoCompletionPortError;
const wakeup_bytes = []u8{0x1} ** 8;
fn initOsData(self: *Loop, extra_thread_count: usize) InitOsDataError!void {
switch (builtin.os) {
builtin.Os.linux => {
+ self.os_data.fs_queue = std.atomic.Queue(fs.Request).init();
+ self.os_data.fs_queue_item = 0;
+ // we need another thread for the file system because Linux does not have an async
+ // file system I/O API.
+ self.os_data.fs_end_request = fs.RequestNode{
+ .prev = undefined,
+ .next = undefined,
+ .data = fs.Request{
+ .msg = fs.Request.Msg.End,
+ .finish = fs.Request.Finish.NoAction,
+ },
+ };
+
errdefer {
- while (self.available_eventfd_resume_nodes.pop()) |node| std.os.close(node.data.eventfd);
+ while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
}
for (self.eventfd_resume_nodes) |*eventfd_node| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
@@ -123,7 +154,7 @@ pub const Loop = struct {
.id = ResumeNode.Id.EventFd,
.handle = undefined,
},
- .eventfd = try std.os.linuxEventFd(1, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK),
+ .eventfd = try os.linuxEventFd(1, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK),
.epoll_op = posix.EPOLL_CTL_ADD,
},
.next = undefined,
@@ -131,44 +162,62 @@ pub const Loop = struct {
self.available_eventfd_resume_nodes.push(eventfd_node);
}
- self.os_data.epollfd = try std.os.linuxEpollCreate(posix.EPOLL_CLOEXEC);
- errdefer std.os.close(self.os_data.epollfd);
+ self.os_data.epollfd = try os.linuxEpollCreate(posix.EPOLL_CLOEXEC);
+ errdefer os.close(self.os_data.epollfd);
- self.os_data.final_eventfd = try std.os.linuxEventFd(0, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK);
- errdefer std.os.close(self.os_data.final_eventfd);
+ self.os_data.final_eventfd = try os.linuxEventFd(0, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK);
+ errdefer os.close(self.os_data.final_eventfd);
self.os_data.final_eventfd_event = posix.epoll_event{
.events = posix.EPOLLIN,
.data = posix.epoll_data{ .ptr = @ptrToInt(&self.final_resume_node) },
};
- try std.os.linuxEpollCtl(
+ try os.linuxEpollCtl(
self.os_data.epollfd,
posix.EPOLL_CTL_ADD,
self.os_data.final_eventfd,
&self.os_data.final_eventfd_event,
);
+ self.os_data.fs_thread = try os.spawnThread(self, posixFsRun);
+ errdefer {
+ self.posixFsRequest(&self.os_data.fs_end_request);
+ self.os_data.fs_thread.wait();
+ }
+
var extra_thread_index: usize = 0;
errdefer {
// writing 8 bytes to an eventfd cannot fail
- std.os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
+ os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
while (extra_thread_index != 0) {
extra_thread_index -= 1;
self.extra_threads[extra_thread_index].wait();
}
}
while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
- self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun);
+ self.extra_threads[extra_thread_index] = try os.spawnThread(self, workerRun);
}
},
builtin.Os.macosx => {
- self.os_data.kqfd = try std.os.bsdKQueue();
- errdefer std.os.close(self.os_data.kqfd);
+ self.os_data.kqfd = try os.bsdKQueue();
+ errdefer os.close(self.os_data.kqfd);
- self.os_data.kevents = try self.allocator.alloc(posix.Kevent, extra_thread_count);
- errdefer self.allocator.free(self.os_data.kevents);
+ self.os_data.fs_kqfd = try os.bsdKQueue();
+ errdefer os.close(self.os_data.fs_kqfd);
- const eventlist = ([*]posix.Kevent)(undefined)[0..0];
+ self.os_data.fs_queue = std.atomic.Queue(fs.Request).init();
+ // we need another thread for the file system because Darwin does not have an async
+ // file system I/O API.
+ self.os_data.fs_end_request = fs.RequestNode{
+ .prev = undefined,
+ .next = undefined,
+ .data = fs.Request{
+ .msg = fs.Request.Msg.End,
+ .finish = fs.Request.Finish.NoAction,
+ },
+ };
+
+ const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
for (self.eventfd_resume_nodes) |*eventfd_node, i| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
@@ -191,18 +240,9 @@ pub const Loop = struct {
};
self.available_eventfd_resume_nodes.push(eventfd_node);
const kevent_array = (*[1]posix.Kevent)(&eventfd_node.data.kevent);
- _ = try std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null);
+ _ = try os.bsdKEvent(self.os_data.kqfd, kevent_array, empty_kevs, null);
eventfd_node.data.kevent.flags = posix.EV_CLEAR | posix.EV_ENABLE;
eventfd_node.data.kevent.fflags = posix.NOTE_TRIGGER;
- // this one is for waiting for events
- self.os_data.kevents[i] = posix.Kevent{
- .ident = i,
- .filter = posix.EVFILT_USER,
- .flags = 0,
- .fflags = 0,
- .data = 0,
- .udata = @ptrToInt(&eventfd_node.data.base),
- };
}
// Pre-add so that we cannot get error.SystemResources
@@ -215,31 +255,55 @@ pub const Loop = struct {
.data = 0,
.udata = @ptrToInt(&self.final_resume_node),
};
- const kevent_array = (*[1]posix.Kevent)(&self.os_data.final_kevent);
- _ = try std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null);
+ const final_kev_arr = (*[1]posix.Kevent)(&self.os_data.final_kevent);
+ _ = try os.bsdKEvent(self.os_data.kqfd, final_kev_arr, empty_kevs, null);
self.os_data.final_kevent.flags = posix.EV_ENABLE;
self.os_data.final_kevent.fflags = posix.NOTE_TRIGGER;
+ self.os_data.fs_kevent_wake = posix.Kevent{
+ .ident = 0,
+ .filter = posix.EVFILT_USER,
+ .flags = posix.EV_ADD | posix.EV_ENABLE,
+ .fflags = posix.NOTE_TRIGGER,
+ .data = 0,
+ .udata = undefined,
+ };
+
+ self.os_data.fs_kevent_wait = posix.Kevent{
+ .ident = 0,
+ .filter = posix.EVFILT_USER,
+ .flags = posix.EV_ADD | posix.EV_CLEAR,
+ .fflags = 0,
+ .data = 0,
+ .udata = undefined,
+ };
+
+ self.os_data.fs_thread = try os.spawnThread(self, posixFsRun);
+ errdefer {
+ self.posixFsRequest(&self.os_data.fs_end_request);
+ self.os_data.fs_thread.wait();
+ }
+
var extra_thread_index: usize = 0;
errdefer {
- _ = std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null) catch unreachable;
+ _ = os.bsdKEvent(self.os_data.kqfd, final_kev_arr, empty_kevs, null) catch unreachable;
while (extra_thread_index != 0) {
extra_thread_index -= 1;
self.extra_threads[extra_thread_index].wait();
}
}
while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
- self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun);
+ self.extra_threads[extra_thread_index] = try os.spawnThread(self, workerRun);
}
},
builtin.Os.windows => {
- self.os_data.io_port = try std.os.windowsCreateIoCompletionPort(
+ self.os_data.io_port = try os.windowsCreateIoCompletionPort(
windows.INVALID_HANDLE_VALUE,
null,
undefined,
- undefined,
+ @maxValue(windows.DWORD),
);
- errdefer std.os.close(self.os_data.io_port);
+ errdefer os.close(self.os_data.io_port);
for (self.eventfd_resume_nodes) |*eventfd_node, i| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
@@ -262,7 +326,7 @@ pub const Loop = struct {
while (i < extra_thread_index) : (i += 1) {
while (true) {
const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1);
- std.os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue;
+ os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue;
break;
}
}
@@ -272,7 +336,7 @@ pub const Loop = struct {
}
}
while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
- self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun);
+ self.extra_threads[extra_thread_index] = try os.spawnThread(self, workerRun);
}
},
else => {},
@@ -282,63 +346,113 @@ pub const Loop = struct {
fn deinitOsData(self: *Loop) void {
switch (builtin.os) {
builtin.Os.linux => {
- std.os.close(self.os_data.final_eventfd);
- while (self.available_eventfd_resume_nodes.pop()) |node| std.os.close(node.data.eventfd);
- std.os.close(self.os_data.epollfd);
+ os.close(self.os_data.final_eventfd);
+ while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
+ os.close(self.os_data.epollfd);
self.allocator.free(self.eventfd_resume_nodes);
},
builtin.Os.macosx => {
- self.allocator.free(self.os_data.kevents);
- std.os.close(self.os_data.kqfd);
+ os.close(self.os_data.kqfd);
+ os.close(self.os_data.fs_kqfd);
},
builtin.Os.windows => {
- std.os.close(self.os_data.io_port);
+ os.close(self.os_data.io_port);
},
else => {},
}
}
/// resume_node must live longer than the promise that it holds a reference to.
- pub fn addFd(self: *Loop, fd: i32, resume_node: *ResumeNode) !void {
- _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
- errdefer {
- self.finishOneEvent();
- }
- try self.modFd(
+ /// flags must contain EPOLLET
+ pub fn linuxAddFd(self: *Loop, fd: i32, resume_node: *ResumeNode, flags: u32) !void {
+ assert(flags & posix.EPOLLET == posix.EPOLLET);
+ self.beginOneEvent();
+ errdefer self.finishOneEvent();
+ try self.linuxModFd(
fd,
posix.EPOLL_CTL_ADD,
- std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | std.os.linux.EPOLLET,
+ flags,
resume_node,
);
}
- pub fn modFd(self: *Loop, fd: i32, op: u32, events: u32, resume_node: *ResumeNode) !void {
- var ev = std.os.linux.epoll_event{
- .events = events,
- .data = std.os.linux.epoll_data{ .ptr = @ptrToInt(resume_node) },
+ pub fn linuxModFd(self: *Loop, fd: i32, op: u32, flags: u32, resume_node: *ResumeNode) !void {
+ assert(flags & posix.EPOLLET == posix.EPOLLET);
+ var ev = os.linux.epoll_event{
+ .events = flags,
+ .data = os.linux.epoll_data{ .ptr = @ptrToInt(resume_node) },
};
- try std.os.linuxEpollCtl(self.os_data.epollfd, op, fd, &ev);
+ try os.linuxEpollCtl(self.os_data.epollfd, op, fd, &ev);
}
- pub fn removeFd(self: *Loop, fd: i32) void {
- self.removeFdNoCounter(fd);
+ pub fn linuxRemoveFd(self: *Loop, fd: i32) void {
+ os.linuxEpollCtl(self.os_data.epollfd, os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
self.finishOneEvent();
}
- fn removeFdNoCounter(self: *Loop, fd: i32) void {
- std.os.linuxEpollCtl(self.os_data.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
- }
-
- pub async fn waitFd(self: *Loop, fd: i32) !void {
- defer self.removeFd(fd);
+ pub async fn linuxWaitFd(self: *Loop, fd: i32, flags: u32) !void {
+ defer self.linuxRemoveFd(fd);
suspend {
// TODO explicitly put this memory in the coroutine frame #1194
- var resume_node = ResumeNode{
+ var resume_node = ResumeNode.Basic{
+ .base = ResumeNode{
+ .id = ResumeNode.Id.Basic,
+ .handle = @handle(),
+ },
+ };
+ try self.linuxAddFd(fd, &resume_node.base, flags);
+ }
+ }
+
+ pub async fn bsdWaitKev(self: *Loop, ident: usize, filter: i16, fflags: u32) !posix.Kevent {
+ // TODO #1194
+ suspend {
+ resume @handle();
+ }
+ var resume_node = ResumeNode.Basic{
+ .base = ResumeNode{
.id = ResumeNode.Id.Basic,
.handle = @handle(),
- };
- try self.addFd(fd, &resume_node);
+ },
+ .kev = undefined,
+ };
+ defer self.bsdRemoveKev(ident, filter);
+ suspend {
+ try self.bsdAddKev(&resume_node, ident, filter, fflags);
}
+ return resume_node.kev;
+ }
+
+ /// resume_node must live longer than the promise that it holds a reference to.
+ pub fn bsdAddKev(self: *Loop, resume_node: *ResumeNode.Basic, ident: usize, filter: i16, fflags: u32) !void {
+ self.beginOneEvent();
+ errdefer self.finishOneEvent();
+ var kev = posix.Kevent{
+ .ident = ident,
+ .filter = filter,
+ .flags = posix.EV_ADD | posix.EV_ENABLE | posix.EV_CLEAR,
+ .fflags = fflags,
+ .data = 0,
+ .udata = @ptrToInt(&resume_node.base),
+ };
+ const kevent_array = (*[1]posix.Kevent)(&kev);
+ const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
+ _ = try os.bsdKEvent(self.os_data.kqfd, kevent_array, empty_kevs, null);
+ }
+
+ pub fn bsdRemoveKev(self: *Loop, ident: usize, filter: i16) void {
+ var kev = posix.Kevent{
+ .ident = ident,
+ .filter = filter,
+ .flags = posix.EV_DELETE,
+ .fflags = 0,
+ .data = 0,
+ .udata = 0,
+ };
+ const kevent_array = (*[1]posix.Kevent)(&kev);
+ const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
+ _ = os.bsdKEvent(self.os_data.kqfd, kevent_array, empty_kevs, null) catch undefined;
+ self.finishOneEvent();
}
fn dispatch(self: *Loop) void {
@@ -352,8 +466,8 @@ pub const Loop = struct {
switch (builtin.os) {
builtin.Os.macosx => {
const kevent_array = (*[1]posix.Kevent)(&eventfd_node.kevent);
- const eventlist = ([*]posix.Kevent)(undefined)[0..0];
- _ = std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null) catch {
+ const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
+ _ = os.bsdKEvent(self.os_data.kqfd, kevent_array, empty_kevs, null) catch {
self.next_tick_queue.unget(next_tick_node);
self.available_eventfd_resume_nodes.push(resume_stack_node);
return;
@@ -361,9 +475,9 @@ pub const Loop = struct {
},
builtin.Os.linux => {
// the pending count is already accounted for
- const epoll_events = posix.EPOLLONESHOT | std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT |
- std.os.linux.EPOLLET;
- self.modFd(
+ const epoll_events = posix.EPOLLONESHOT | os.linux.EPOLLIN | os.linux.EPOLLOUT |
+ os.linux.EPOLLET;
+ self.linuxModFd(
eventfd_node.eventfd,
eventfd_node.epoll_op,
epoll_events,
@@ -379,7 +493,7 @@ pub const Loop = struct {
// the consumer code can decide whether to read the completion key.
// it has to do this for normal I/O, so we match that behavior here.
const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1);
- std.os.windowsPostQueuedCompletionStatus(
+ os.windowsPostQueuedCompletionStatus(
self.os_data.io_port,
undefined,
eventfd_node.completion_key,
@@ -397,15 +511,29 @@ pub const Loop = struct {
/// Bring your own linked list node. This means it can't fail.
pub fn onNextTick(self: *Loop, node: *NextTickNode) void {
- _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ self.beginOneEvent(); // finished in dispatch()
self.next_tick_queue.put(node);
self.dispatch();
}
+ pub fn cancelOnNextTick(self: *Loop, node: *NextTickNode) void {
+ if (self.next_tick_queue.remove(node)) {
+ self.finishOneEvent();
+ }
+ }
+
pub fn run(self: *Loop) void {
self.finishOneEvent(); // the reference we start with
self.workerRun();
+
+ switch (builtin.os) {
+ builtin.Os.linux,
+ builtin.Os.macosx,
+ => self.os_data.fs_thread.wait(),
+ else => {},
+ }
+
for (self.extra_threads) |extra_thread| {
extra_thread.wait();
}
@@ -420,6 +548,7 @@ pub const Loop = struct {
suspend {
handle.* = @handle();
var my_tick_node = Loop.NextTickNode{
+ .prev = undefined,
.next = undefined,
.data = @handle(),
};
@@ -441,6 +570,7 @@ pub const Loop = struct {
pub async fn yield(self: *Loop) void {
suspend {
var my_tick_node = Loop.NextTickNode{
+ .prev = undefined,
.next = undefined,
.data = @handle(),
};
@@ -448,20 +578,28 @@ pub const Loop = struct {
}
}
- fn finishOneEvent(self: *Loop) void {
- if (@atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst) == 1) {
+ /// call finishOneEvent when done
+ pub fn beginOneEvent(self: *Loop) void {
+ _ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+ }
+
+ pub fn finishOneEvent(self: *Loop) void {
+ const prev = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
+ if (prev == 1) {
// cause all the threads to stop
switch (builtin.os) {
builtin.Os.linux => {
+ self.posixFsRequest(&self.os_data.fs_end_request);
// writing 8 bytes to an eventfd cannot fail
- std.os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
+ os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
return;
},
builtin.Os.macosx => {
+ self.posixFsRequest(&self.os_data.fs_end_request);
const final_kevent = (*[1]posix.Kevent)(&self.os_data.final_kevent);
- const eventlist = ([*]posix.Kevent)(undefined)[0..0];
+ const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
// cannot fail because we already added it and this just enables it
- _ = std.os.bsdKEvent(self.os_data.kqfd, final_kevent, eventlist, null) catch unreachable;
+ _ = os.bsdKEvent(self.os_data.kqfd, final_kevent, empty_kevs, null) catch unreachable;
return;
},
builtin.Os.windows => {
@@ -469,7 +607,7 @@ pub const Loop = struct {
while (i < self.extra_threads.len + 1) : (i += 1) {
while (true) {
const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1);
- std.os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue;
+ os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue;
break;
}
}
@@ -492,8 +630,8 @@ pub const Loop = struct {
switch (builtin.os) {
builtin.Os.linux => {
// only process 1 event so we don't steal from other threads
- var events: [1]std.os.linux.epoll_event = undefined;
- const count = std.os.linuxEpollWait(self.os_data.epollfd, events[0..], -1);
+ var events: [1]os.linux.epoll_event = undefined;
+ const count = os.linuxEpollWait(self.os_data.epollfd, events[0..], -1);
for (events[0..count]) |ev| {
const resume_node = @intToPtr(*ResumeNode, ev.data.ptr);
const handle = resume_node.handle;
@@ -516,13 +654,17 @@ pub const Loop = struct {
},
builtin.Os.macosx => {
var eventlist: [1]posix.Kevent = undefined;
- const count = std.os.bsdKEvent(self.os_data.kqfd, self.os_data.kevents, eventlist[0..], null) catch unreachable;
+ const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
+ const count = os.bsdKEvent(self.os_data.kqfd, empty_kevs, eventlist[0..], null) catch unreachable;
for (eventlist[0..count]) |ev| {
const resume_node = @intToPtr(*ResumeNode, ev.udata);
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
- ResumeNode.Id.Basic => {},
+ ResumeNode.Id.Basic => {
+ const basic_node = @fieldParentPtr(ResumeNode.Basic, "base", resume_node);
+ basic_node.kev = ev;
+ },
ResumeNode.Id.Stop => return,
ResumeNode.Id.EventFd => {
const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
@@ -541,9 +683,10 @@ pub const Loop = struct {
while (true) {
var nbytes: windows.DWORD = undefined;
var overlapped: ?*windows.OVERLAPPED = undefined;
- switch (std.os.windowsGetQueuedCompletionStatus(self.os_data.io_port, &nbytes, &completion_key, &overlapped, windows.INFINITE)) {
- std.os.WindowsWaitResult.Aborted => return,
- std.os.WindowsWaitResult.Normal => {},
+ switch (os.windowsGetQueuedCompletionStatus(self.os_data.io_port, &nbytes, &completion_key, &overlapped, windows.INFINITE)) {
+ os.WindowsWaitResult.Aborted => return,
+ os.WindowsWaitResult.Normal => {},
+ os.WindowsWaitResult.Cancelled => continue,
}
if (overlapped != null) break;
}
@@ -560,21 +703,101 @@ pub const Loop = struct {
},
}
resume handle;
- if (resume_node_id == ResumeNode.Id.EventFd) {
- self.finishOneEvent();
- }
+ self.finishOneEvent();
},
else => @compileError("unsupported OS"),
}
}
}
+ fn posixFsRequest(self: *Loop, request_node: *fs.RequestNode) void {
+ self.beginOneEvent(); // finished in posixFsRun after processing the msg
+ self.os_data.fs_queue.put(request_node);
+ switch (builtin.os) {
+ builtin.Os.macosx => {
+ const fs_kevs = (*[1]posix.Kevent)(&self.os_data.fs_kevent_wake);
+ const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
+ _ = os.bsdKEvent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable;
+ },
+ builtin.Os.linux => {
+ _ = @atomicRmw(u8, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ const rc = os.linux.futex_wake(@ptrToInt(&self.os_data.fs_queue_item), os.linux.FUTEX_WAKE, 1);
+ switch (os.linux.getErrno(rc)) {
+ 0 => {},
+ posix.EINVAL => unreachable,
+ else => unreachable,
+ }
+ },
+ else => @compileError("Unsupported OS"),
+ }
+ }
+
+ fn posixFsCancel(self: *Loop, request_node: *fs.RequestNode) void {
+ if (self.os_data.fs_queue.remove(request_node)) {
+ self.finishOneEvent();
+ }
+ }
+
+ fn posixFsRun(self: *Loop) void {
+ while (true) {
+ if (builtin.os == builtin.Os.linux) {
+ _ = @atomicRmw(u8, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+ }
+ while (self.os_data.fs_queue.get()) |node| {
+ switch (node.data.msg) {
+ @TagType(fs.Request.Msg).End => return,
+ @TagType(fs.Request.Msg).PWriteV => |*msg| {
+ msg.result = os.posix_pwritev(msg.fd, msg.iov.ptr, msg.iov.len, msg.offset);
+ },
+ @TagType(fs.Request.Msg).PReadV => |*msg| {
+ msg.result = os.posix_preadv(msg.fd, msg.iov.ptr, msg.iov.len, msg.offset);
+ },
+ @TagType(fs.Request.Msg).Open => |*msg| {
+ msg.result = os.posixOpenC(msg.path.ptr, msg.flags, msg.mode);
+ },
+ @TagType(fs.Request.Msg).Close => |*msg| os.close(msg.fd),
+ @TagType(fs.Request.Msg).WriteFile => |*msg| blk: {
+ const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT |
+ posix.O_CLOEXEC | posix.O_TRUNC;
+ const fd = os.posixOpenC(msg.path.ptr, flags, msg.mode) catch |err| {
+ msg.result = err;
+ break :blk;
+ };
+ defer os.close(fd);
+ msg.result = os.posixWrite(fd, msg.contents);
+ },
+ }
+ switch (node.data.finish) {
+ @TagType(fs.Request.Finish).TickNode => |*tick_node| self.onNextTick(tick_node),
+ @TagType(fs.Request.Finish).DeallocCloseOperation => |close_op| {
+ self.allocator.destroy(close_op);
+ },
+ @TagType(fs.Request.Finish).NoAction => {},
+ }
+ self.finishOneEvent();
+ }
+ switch (builtin.os) {
+ builtin.Os.linux => {
+ const rc = os.linux.futex_wait(@ptrToInt(&self.os_data.fs_queue_item), os.linux.FUTEX_WAIT, 0, null);
+ switch (os.linux.getErrno(rc)) {
+ 0 => continue,
+ posix.EINTR => continue,
+ posix.EAGAIN => continue,
+ else => unreachable,
+ }
+ },
+ builtin.Os.macosx => {
+ const fs_kevs = (*[1]posix.Kevent)(&self.os_data.fs_kevent_wait);
+ var out_kevs: [1]posix.Kevent = undefined;
+ _ = os.bsdKEvent(self.os_data.fs_kqfd, fs_kevs, out_kevs[0..], null) catch unreachable;
+ },
+ else => @compileError("Unsupported OS"),
+ }
+ }
+ }
+
const OsData = switch (builtin.os) {
- builtin.Os.linux => struct {
- epollfd: i32,
- final_eventfd: i32,
- final_eventfd_event: std.os.linux.epoll_event,
- },
+ builtin.Os.linux => LinuxOsData,
builtin.Os.macosx => MacOsData,
builtin.Os.windows => struct {
io_port: windows.HANDLE,
@@ -586,7 +809,22 @@ pub const Loop = struct {
const MacOsData = struct {
kqfd: i32,
final_kevent: posix.Kevent,
- kevents: []posix.Kevent,
+ fs_kevent_wake: posix.Kevent,
+ fs_kevent_wait: posix.Kevent,
+ fs_thread: *os.Thread,
+ fs_kqfd: i32,
+ fs_queue: std.atomic.Queue(fs.Request),
+ fs_end_request: fs.RequestNode,
+ };
+
+ const LinuxOsData = struct {
+ epollfd: i32,
+ final_eventfd: i32,
+ final_eventfd_event: os.linux.epoll_event,
+ fs_thread: *os.Thread,
+ fs_queue_item: u8,
+ fs_queue: std.atomic.Queue(fs.Request),
+ fs_end_request: fs.RequestNode,
};
};
diff --git a/std/event/rwlock.zig b/std/event/rwlock.zig
new file mode 100644
index 0000000000..186c81eb76
--- /dev/null
+++ b/std/event/rwlock.zig
@@ -0,0 +1,296 @@
+const std = @import("../index.zig");
+const builtin = @import("builtin");
+const assert = std.debug.assert;
+const mem = std.mem;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const AtomicOrder = builtin.AtomicOrder;
+const Loop = std.event.Loop;
+
+/// Thread-safe async/await lock.
+/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
+/// are resumed when the lock is released, in order.
+/// Many readers can hold the lock at the same time; however locking for writing is exclusive.
+/// When a read lock is held, it will not be released until the reader queue is empty.
+/// When a write lock is held, it will not be released until the writer queue is empty.
+pub const RwLock = struct {
+ loop: *Loop,
+ shared_state: u8, // TODO make this an enum
+ writer_queue: Queue,
+ reader_queue: Queue,
+ writer_queue_empty_bit: u8, // TODO make this a bool
+ reader_queue_empty_bit: u8, // TODO make this a bool
+ reader_lock_count: usize,
+
+ const State = struct {
+ const Unlocked = 0;
+ const WriteLock = 1;
+ const ReadLock = 2;
+ };
+
+ const Queue = std.atomic.Queue(promise);
+
+ pub const HeldRead = struct {
+ lock: *RwLock,
+
+ pub fn release(self: HeldRead) void {
+ // If other readers still hold the lock, we're done.
+ if (@atomicRmw(usize, &self.lock.reader_lock_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst) != 1) {
+ return;
+ }
+
+ _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ if (@cmpxchgStrong(u8, &self.lock.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
+ // Didn't unlock. Someone else's problem.
+ return;
+ }
+
+ self.lock.commonPostUnlock();
+ }
+ };
+
+ pub const HeldWrite = struct {
+ lock: *RwLock,
+
+ pub fn release(self: HeldWrite) void {
+ // See if we can leave it locked for writing, and pass the lock to the next writer
+ // in the queue to grab the lock.
+ if (self.lock.writer_queue.get()) |node| {
+ self.lock.loop.onNextTick(node);
+ return;
+ }
+
+ // We need to release the write lock. Check if any readers are waiting to grab the lock.
+ if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
+ // Switch to a read lock.
+ _ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.ReadLock, AtomicOrder.SeqCst);
+ while (self.lock.reader_queue.get()) |node| {
+ self.lock.loop.onNextTick(node);
+ }
+ return;
+ }
+
+ _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst);
+
+ self.lock.commonPostUnlock();
+ }
+ };
+
+ pub fn init(loop: *Loop) RwLock {
+ return RwLock{
+ .loop = loop,
+ .shared_state = State.Unlocked,
+ .writer_queue = Queue.init(),
+ .writer_queue_empty_bit = 1,
+ .reader_queue = Queue.init(),
+ .reader_queue_empty_bit = 1,
+ .reader_lock_count = 0,
+ };
+ }
+
+ /// Must be called when not locked. Not thread safe.
+ /// All calls to acquire() and release() must complete before calling deinit().
+ pub fn deinit(self: *RwLock) void {
+ assert(self.shared_state == State.Unlocked);
+ while (self.writer_queue.get()) |node| cancel node.data;
+ while (self.reader_queue.get()) |node| cancel node.data;
+ }
+
+ pub async fn acquireRead(self: *RwLock) HeldRead {
+ _ = @atomicRmw(usize, &self.reader_lock_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
+
+ suspend {
+ // TODO explicitly put this memory in the coroutine frame #1194
+ var my_tick_node = Loop.NextTickNode{
+ .data = @handle(),
+ .prev = undefined,
+ .next = undefined,
+ };
+
+ self.reader_queue.put(&my_tick_node);
+
+ // At this point, we are in the reader_queue, so we might have already been resumed and this coroutine
+ // frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame.
+
+            // We set this bit so that later we can rely on the fact that if reader_queue_empty_bit is 1,
+            // some actor will attempt to grab the lock.
+ _ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+
+ // Here we don't care if we are the one to do the locking or if it was already locked for reading.
+ const have_read_lock = if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |old_state| old_state == State.ReadLock else true;
+ if (have_read_lock) {
+ // Give out all the read locks.
+ if (self.reader_queue.get()) |first_node| {
+ while (self.reader_queue.get()) |node| {
+ self.loop.onNextTick(node);
+ }
+ resume first_node.data;
+ }
+ }
+ }
+ return HeldRead{ .lock = self };
+ }
+
+ pub async fn acquireWrite(self: *RwLock) HeldWrite {
+ suspend {
+ // TODO explicitly put this memory in the coroutine frame #1194
+ var my_tick_node = Loop.NextTickNode{
+ .data = @handle(),
+ .prev = undefined,
+ .next = undefined,
+ };
+
+ self.writer_queue.put(&my_tick_node);
+
+ // At this point, we are in the writer_queue, so we might have already been resumed and this coroutine
+ // frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame.
+
+            // We set this bit so that later we can rely on the fact that if writer_queue_empty_bit is 1,
+            // some actor will attempt to grab the lock.
+ _ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
+
+ // Here we must be the one to acquire the write lock. It cannot already be locked.
+ if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null) {
+ // We now have a write lock.
+ if (self.writer_queue.get()) |node| {
+ // Whether this node is us or someone else, we tail resume it.
+ resume node.data;
+ }
+ }
+ }
+ return HeldWrite{ .lock = self };
+ }
+
+ fn commonPostUnlock(self: *RwLock) void {
+ while (true) {
+ // There might be a writer_queue item or a reader_queue item
+ // If we check and both are empty, we can be done, because the other actors will try to
+ // obtain the lock.
+ // But if there's a writer_queue item or a reader_queue item,
+ // we are the actor which must loop and attempt to grab the lock again.
+ if (@atomicLoad(u8, &self.writer_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
+ if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
+ // We did not obtain the lock. Great, the queues are someone else's problem.
+ return;
+ }
+ // If there's an item in the writer queue, give them the lock, and we're done.
+ if (self.writer_queue.get()) |node| {
+ self.loop.onNextTick(node);
+ return;
+ }
+ // Release the lock again.
+ _ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ _ = @atomicRmw(u8, &self.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst);
+ continue;
+ }
+
+ if (@atomicLoad(u8, &self.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
+ if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
+ // We did not obtain the lock. Great, the queues are someone else's problem.
+ return;
+ }
+ // If there are any items in the reader queue, give out all the reader locks, and we're done.
+ if (self.reader_queue.get()) |first_node| {
+ self.loop.onNextTick(first_node);
+ while (self.reader_queue.get()) |node| {
+ self.loop.onNextTick(node);
+ }
+ return;
+ }
+ // Release the lock again.
+ _ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ if (@cmpxchgStrong(u8, &self.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
+ // Didn't unlock. Someone else's problem.
+ return;
+ }
+ continue;
+ }
+ return;
+ }
+ }
+};
+
+test "std.event.RwLock" {
+ var da = std.heap.DirectAllocator.init();
+ defer da.deinit();
+
+ const allocator = &da.allocator;
+
+ var loop: Loop = undefined;
+ try loop.initMultiThreaded(allocator);
+ defer loop.deinit();
+
+ var lock = RwLock.init(&loop);
+ defer lock.deinit();
+
+ const handle = try async testLock(&loop, &lock);
+ defer cancel handle;
+ loop.run();
+
+ const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
+ assert(mem.eql(i32, shared_test_data, expected_result));
+}
+
+async fn testLock(loop: *Loop, lock: *RwLock) void {
+ // TODO explicitly put next tick node memory in the coroutine frame #1194
+ suspend {
+ resume @handle();
+ }
+
+ var read_nodes: [100]Loop.NextTickNode = undefined;
+ for (read_nodes) |*read_node| {
+ read_node.data = async readRunner(lock) catch @panic("out of memory");
+ loop.onNextTick(read_node);
+ }
+
+ var write_nodes: [shared_it_count]Loop.NextTickNode = undefined;
+ for (write_nodes) |*write_node| {
+ write_node.data = async writeRunner(lock) catch @panic("out of memory");
+ loop.onNextTick(write_node);
+ }
+
+ for (write_nodes) |*write_node| {
+ await @ptrCast(promise->void, write_node.data);
+ }
+ for (read_nodes) |*read_node| {
+ await @ptrCast(promise->void, read_node.data);
+ }
+}
+
+const shared_it_count = 10;
+var shared_test_data = [1]i32{0} ** 10;
+var shared_test_index: usize = 0;
+var shared_count: usize = 0;
+
+async fn writeRunner(lock: *RwLock) void {
+ suspend; // resumed by onNextTick
+
+ var i: usize = 0;
+ while (i < shared_test_data.len) : (i += 1) {
+ std.os.time.sleep(0, 100000);
+ const lock_promise = async lock.acquireWrite() catch @panic("out of memory");
+ const handle = await lock_promise;
+ defer handle.release();
+
+ shared_count += 1;
+ while (shared_test_index < shared_test_data.len) : (shared_test_index += 1) {
+ shared_test_data[shared_test_index] = shared_test_data[shared_test_index] + 1;
+ }
+ shared_test_index = 0;
+ }
+}
+
+async fn readRunner(lock: *RwLock) void {
+ suspend; // resumed by onNextTick
+ std.os.time.sleep(0, 1);
+
+ var i: usize = 0;
+ while (i < shared_test_data.len) : (i += 1) {
+ const lock_promise = async lock.acquireRead() catch @panic("out of memory");
+ const handle = await lock_promise;
+ defer handle.release();
+
+ assert(shared_test_index == 0);
+ assert(shared_test_data[i] == @intCast(i32, shared_count));
+ }
+}
diff --git a/std/event/rwlocked.zig b/std/event/rwlocked.zig
new file mode 100644
index 0000000000..ef7e83d20c
--- /dev/null
+++ b/std/event/rwlocked.zig
@@ -0,0 +1,58 @@
+const std = @import("../index.zig");
+const RwLock = std.event.RwLock;
+const Loop = std.event.Loop;
+
+/// Thread-safe async/await RW lock that protects one piece of data.
+/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
+/// are resumed when the lock is released, in order.
+pub fn RwLocked(comptime T: type) type {
+ return struct {
+ lock: RwLock,
+ locked_data: T,
+
+ const Self = this;
+
+ pub const HeldReadLock = struct {
+ value: *const T,
+ held: RwLock.HeldRead,
+
+ pub fn release(self: HeldReadLock) void {
+ self.held.release();
+ }
+ };
+
+ pub const HeldWriteLock = struct {
+ value: *T,
+ held: RwLock.HeldWrite,
+
+ pub fn release(self: HeldWriteLock) void {
+ self.held.release();
+ }
+ };
+
+ pub fn init(loop: *Loop, data: T) Self {
+ return Self{
+ .lock = RwLock.init(loop),
+ .locked_data = data,
+ };
+ }
+
+ pub fn deinit(self: *Self) void {
+ self.lock.deinit();
+ }
+
+ pub async fn acquireRead(self: *Self) HeldReadLock {
+ return HeldReadLock{
+ .held = await (async self.lock.acquireRead() catch unreachable),
+ .value = &self.locked_data,
+ };
+ }
+
+ pub async fn acquireWrite(self: *Self) HeldWriteLock {
+ return HeldWriteLock{
+ .held = await (async self.lock.acquireWrite() catch unreachable),
+ .value = &self.locked_data,
+ };
+ }
+ };
+}
diff --git a/std/event/tcp.zig b/std/event/tcp.zig
index ea803a9322..19cce4a5e5 100644
--- a/std/event/tcp.zig
+++ b/std/event/tcp.zig
@@ -55,13 +55,13 @@ pub const Server = struct {
errdefer cancel self.accept_coro.?;
self.listen_resume_node.handle = self.accept_coro.?;
- try self.loop.addFd(sockfd, &self.listen_resume_node);
+ try self.loop.linuxAddFd(sockfd, &self.listen_resume_node, posix.EPOLLIN | posix.EPOLLOUT | posix.EPOLLET);
errdefer self.loop.removeFd(sockfd);
}
/// Stop listening
pub fn close(self: *Server) void {
- self.loop.removeFd(self.sockfd.?);
+ self.loop.linuxRemoveFd(self.sockfd.?);
std.os.close(self.sockfd.?);
}
@@ -116,7 +116,7 @@ pub async fn connect(loop: *Loop, _address: *const std.net.Address) !std.os.File
errdefer std.os.close(sockfd);
try std.os.posixConnectAsync(sockfd, &address.os_addr);
- try await try async loop.waitFd(sockfd);
+ try await try async loop.linuxWaitFd(sockfd, posix.EPOLLIN | posix.EPOLLOUT | posix.EPOLLET);
try std.os.posixGetSockOptConnectError(sockfd);
return std.os.File.openHandle(sockfd);
@@ -181,4 +181,3 @@ async fn doAsyncTest(loop: *Loop, address: *const std.net.Address, server: *Serv
assert(mem.eql(u8, msg, "hello from server\n"));
server.close();
}
-
diff --git a/std/fmt/errol/index.zig b/std/fmt/errol/index.zig
index 3222913107..8b1ffa3622 100644
--- a/std/fmt/errol/index.zig
+++ b/std/fmt/errol/index.zig
@@ -253,11 +253,7 @@ fn gethi(in: f64) f64 {
/// Normalize the number by factoring in the error.
/// @hp: The float pair.
fn hpNormalize(hp: *HP) void {
- // Required to avoid segfaults causing buffer overrun during errol3 digit output termination.
- @setFloatMode(this, @import("builtin").FloatMode.Strict);
-
const val = hp.val;
-
hp.val += hp.off;
hp.off += val - hp.val;
}
diff --git a/std/fmt/index.zig b/std/fmt/index.zig
index f4f9efee37..82e9a5ba39 100644
--- a/std/fmt/index.zig
+++ b/std/fmt/index.zig
@@ -146,6 +146,45 @@ pub fn formatType(
builtin.TypeId.Promise => {
return format(context, Errors, output, "promise@{x}", @ptrToInt(value));
},
+ builtin.TypeId.Enum, builtin.TypeId.Union, builtin.TypeId.Struct => {
+ const has_cust_fmt = comptime cf: {
+ const info = @typeInfo(T);
+ const defs = switch (info) {
+ builtin.TypeId.Struct => |s| s.defs,
+ builtin.TypeId.Union => |u| u.defs,
+ builtin.TypeId.Enum => |e| e.defs,
+ else => unreachable,
+ };
+
+ for (defs) |def| {
+ if (mem.eql(u8, def.name, "format")) {
+ break :cf true;
+ }
+ }
+ break :cf false;
+ };
+
+ if (has_cust_fmt) return value.format(fmt, context, Errors, output);
+ try output(context, @typeName(T));
+ if (comptime @typeId(T) == builtin.TypeId.Enum) {
+ try output(context, ".");
+ try formatType(@tagName(value), "", context, Errors, output);
+ return;
+ }
+ comptime var field_i = 0;
+ inline while (field_i < @memberCount(T)) : (field_i += 1) {
+ if (field_i == 0) {
+ try output(context, "{ .");
+ } else {
+ try output(context, ", .");
+ }
+ try output(context, @memberName(T, field_i));
+ try output(context, " = ");
+ try formatType(@field(value, @memberName(T, field_i)), "", context, Errors, output);
+ }
+ try output(context, " }");
+ return;
+ },
builtin.TypeId.Pointer => |ptr_info| switch (ptr_info.size) {
builtin.TypeInfo.Pointer.Size.One => switch (@typeInfo(ptr_info.child)) {
builtin.TypeId.Array => |info| {
@@ -155,31 +194,13 @@ pub fn formatType(
return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
},
builtin.TypeId.Enum, builtin.TypeId.Union, builtin.TypeId.Struct => {
- const has_cust_fmt = comptime cf: {
- const info = @typeInfo(T.Child);
- const defs = switch (info) {
- builtin.TypeId.Struct => |s| s.defs,
- builtin.TypeId.Union => |u| u.defs,
- builtin.TypeId.Enum => |e| e.defs,
- else => unreachable,
- };
-
- for (defs) |def| {
- if (mem.eql(u8, def.name, "format")) {
- break :cf true;
- }
- }
- break :cf false;
- };
-
- if (has_cust_fmt) return value.format(fmt, context, Errors, output);
- return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
+ return formatType(value.*, fmt, context, Errors, output);
},
else => return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)),
},
builtin.TypeInfo.Pointer.Size.Many => {
if (ptr_info.child == u8) {
- if (fmt[0] == 's') {
+ if (fmt.len > 0 and fmt[0] == 's') {
const len = std.cstr.len(value);
return formatText(value[0..len], fmt, context, Errors, output);
}
@@ -911,14 +932,21 @@ test "fmt.format" {
try testFmt("file size: 63MiB\n", "file size: {Bi}\n", usize(63 * 1024 * 1024));
try testFmt("file size: 66.06MB\n", "file size: {B2}\n", usize(63 * 1024 * 1024));
{
- // Dummy field because of https://github.com/ziglang/zig/issues/557.
const Struct = struct {
- unused: u8,
+ field: u8,
};
- var buf1: [32]u8 = undefined;
- const value = Struct{ .unused = 42 };
- const result = try bufPrint(buf1[0..], "pointer: {}\n", &value);
- assert(mem.startsWith(u8, result, "pointer: Struct@"));
+ const value = Struct{ .field = 42 };
+ try testFmt("struct: Struct{ .field = 42 }\n", "struct: {}\n", value);
+ try testFmt("struct: Struct{ .field = 42 }\n", "struct: {}\n", &value);
+ }
+ {
+ const Enum = enum {
+ One,
+ Two,
+ };
+ const value = Enum.Two;
+ try testFmt("enum: Enum.Two\n", "enum: {}\n", value);
+ try testFmt("enum: Enum.Two\n", "enum: {}\n", &value);
}
{
var buf1: [32]u8 = undefined;
@@ -941,6 +969,7 @@ test "fmt.format" {
{
// This fails on release due to a minor rounding difference.
// --release-fast outputs 9.999960000000001e-40 vs. the expected.
+ // TODO fix this, it should be the same in Debug and ReleaseFast
if (builtin.mode == builtin.Mode.Debug) {
var buf1: [32]u8 = undefined;
const value: f64 = 9.999960e-40;
@@ -1133,23 +1162,23 @@ test "fmt.format" {
y: f32,
pub fn format(
- self: *SelfType,
+ self: SelfType,
comptime fmt: []const u8,
context: var,
comptime Errors: type,
output: fn (@typeOf(context), []const u8) Errors!void,
) Errors!void {
- if (fmt.len > 0) {
- if (fmt.len > 1) unreachable;
- switch (fmt[0]) {
+ switch (fmt.len) {
+ 0 => return std.fmt.format(context, Errors, output, "({.3},{.3})", self.x, self.y),
+ 1 => switch (fmt[0]) {
//point format
'p' => return std.fmt.format(context, Errors, output, "({.3},{.3})", self.x, self.y),
//dimension format
'd' => return std.fmt.format(context, Errors, output, "{.3}x{.3}", self.x, self.y),
else => unreachable,
- }
+ },
+ else => unreachable,
}
- return std.fmt.format(context, Errors, output, "({.3},{.3})", self.x, self.y);
}
};
@@ -1160,6 +1189,10 @@ test "fmt.format" {
};
try testFmt("point: (10.200,2.220)\n", "point: {}\n", &value);
try testFmt("dim: 10.200x2.220\n", "dim: {d}\n", &value);
+
+ // same thing but not passing a pointer
+ try testFmt("point: (10.200,2.220)\n", "point: {}\n", value);
+ try testFmt("dim: 10.200x2.220\n", "dim: {d}\n", value);
}
}
diff --git a/std/hash_map.zig b/std/hash_map.zig
index cebd5272c0..9654d612a5 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -9,6 +9,10 @@ const builtin = @import("builtin");
const want_modification_safety = builtin.mode != builtin.Mode.ReleaseFast;
const debug_u32 = if (want_modification_safety) u32 else void;
+pub fn AutoHashMap(comptime K: type, comptime V: type) type {
+ return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K));
+}
+
pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u32, comptime eql: fn (a: K, b: K) bool) type {
return struct {
entries: []Entry,
@@ -20,13 +24,22 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
const Self = this;
- pub const Entry = struct {
- used: bool,
- distance_from_start_index: usize,
+ pub const KV = struct {
key: K,
value: V,
};
+ const Entry = struct {
+ used: bool,
+ distance_from_start_index: usize,
+ kv: KV,
+ };
+
+ pub const GetOrPutResult = struct {
+ kv: *KV,
+ found_existing: bool,
+ };
+
pub const Iterator = struct {
hm: *const Self,
// how many items have we returned
@@ -36,7 +49,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
// used to detect concurrent modification
initial_modification_count: debug_u32,
- pub fn next(it: *Iterator) ?*Entry {
+ pub fn next(it: *Iterator) ?*KV {
if (want_modification_safety) {
assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
}
@@ -46,7 +59,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
if (entry.used) {
it.index += 1;
it.count += 1;
- return entry;
+ return &entry.kv;
}
}
unreachable; // no next item
@@ -71,7 +84,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
}
- pub fn deinit(hm: *const Self) void {
+ pub fn deinit(hm: Self) void {
hm.allocator.free(hm.entries);
}
@@ -84,34 +97,65 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
hm.incrementModificationCount();
}
- pub fn count(hm: *const Self) usize {
- return hm.size;
+ pub fn count(self: Self) usize {
+ return self.size;
}
- /// Returns the value that was already there.
- pub fn put(hm: *Self, key: K, value: *const V) !?V {
- if (hm.entries.len == 0) {
- try hm.initCapacity(16);
+ /// If the key already exists, this function cannot fail.
+ /// If there is an existing item with `key`, then the result
+ /// kv pointer points to it, and found_existing is true.
+ /// Otherwise, puts a new item with undefined value, and
+ /// the kv pointer points to it. Caller should then initialize
+ /// the data.
+ pub fn getOrPut(self: *Self, key: K) !GetOrPutResult {
+ // TODO this implementation can be improved - we should only
+ // have to hash once and find the entry once.
+ if (self.get(key)) |kv| {
+ return GetOrPutResult{
+ .kv = kv,
+ .found_existing = true,
+ };
+ }
+ self.incrementModificationCount();
+ try self.ensureCapacity();
+ const put_result = self.internalPut(key);
+ assert(put_result.old_kv == null);
+ return GetOrPutResult{
+ .kv = &put_result.new_entry.kv,
+ .found_existing = false,
+ };
+ }
+
+ fn ensureCapacity(self: *Self) !void {
+ if (self.entries.len == 0) {
+ return self.initCapacity(16);
}
- hm.incrementModificationCount();
// if we get too full (60%), double the capacity
- if (hm.size * 5 >= hm.entries.len * 3) {
- const old_entries = hm.entries;
- try hm.initCapacity(hm.entries.len * 2);
+ if (self.size * 5 >= self.entries.len * 3) {
+ const old_entries = self.entries;
+ try self.initCapacity(self.entries.len * 2);
// dump all of the old elements into the new table
for (old_entries) |*old_entry| {
if (old_entry.used) {
- _ = hm.internalPut(old_entry.key, old_entry.value);
+ self.internalPut(old_entry.kv.key).new_entry.kv.value = old_entry.kv.value;
}
}
- hm.allocator.free(old_entries);
+ self.allocator.free(old_entries);
}
-
- return hm.internalPut(key, value);
}
- pub fn get(hm: *const Self, key: K) ?*Entry {
+ /// Returns the kv pair previously stored under this key, or null if the key was not present.
+ pub fn put(self: *Self, key: K, value: V) !?KV {
+ self.incrementModificationCount();
+ try self.ensureCapacity();
+
+ const put_result = self.internalPut(key);
+ put_result.new_entry.kv.value = value;
+ return put_result.old_kv;
+ }
+
+ pub fn get(hm: *const Self, key: K) ?*KV {
if (hm.entries.len == 0) {
return null;
}
@@ -122,7 +166,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return hm.get(key) != null;
}
- pub fn remove(hm: *Self, key: K) ?*Entry {
+ pub fn remove(hm: *Self, key: K) ?*KV {
if (hm.entries.len == 0) return null;
hm.incrementModificationCount();
const start_index = hm.keyToIndex(key);
@@ -134,7 +178,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
if (!entry.used) return null;
- if (!eql(entry.key, key)) continue;
+ if (!eql(entry.kv.key, key)) continue;
while (roll_over < hm.entries.len) : (roll_over += 1) {
const next_index = (start_index + roll_over + 1) % hm.entries.len;
@@ -142,7 +186,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
if (!next_entry.used or next_entry.distance_from_start_index == 0) {
entry.used = false;
hm.size -= 1;
- return entry;
+ return &entry.kv;
}
entry.* = next_entry.*;
entry.distance_from_start_index -= 1;
@@ -163,6 +207,16 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
}
+ pub fn clone(self: Self) !Self {
+ var other = Self.init(self.allocator);
+ try other.initCapacity(self.entries.len);
+ var it = self.iterator();
+ while (it.next()) |entry| {
+ assert((try other.put(entry.key, entry.value)) == null);
+ }
+ return other;
+ }
+
fn initCapacity(hm: *Self, capacity: usize) !void {
hm.entries = try hm.allocator.alloc(Entry, capacity);
hm.size = 0;
@@ -178,60 +232,81 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
}
- /// Returns the value that was already there.
- fn internalPut(hm: *Self, orig_key: K, orig_value: *const V) ?V {
+ const InternalPutResult = struct {
+ new_entry: *Entry,
+ old_kv: ?KV,
+ };
+
+ /// Returns a pointer to the new entry.
+ /// Asserts that there is enough space for the new item.
+ fn internalPut(self: *Self, orig_key: K) InternalPutResult {
var key = orig_key;
- var value = orig_value.*;
- const start_index = hm.keyToIndex(key);
+ var value: V = undefined;
+ const start_index = self.keyToIndex(key);
var roll_over: usize = 0;
var distance_from_start_index: usize = 0;
- while (roll_over < hm.entries.len) : ({
+ var got_result_entry = false;
+ var result = InternalPutResult{
+ .new_entry = undefined,
+ .old_kv = null,
+ };
+ while (roll_over < self.entries.len) : ({
roll_over += 1;
distance_from_start_index += 1;
}) {
- const index = (start_index + roll_over) % hm.entries.len;
- const entry = &hm.entries[index];
+ const index = (start_index + roll_over) % self.entries.len;
+ const entry = &self.entries[index];
- if (entry.used and !eql(entry.key, key)) {
+ if (entry.used and !eql(entry.kv.key, key)) {
if (entry.distance_from_start_index < distance_from_start_index) {
// robin hood to the rescue
const tmp = entry.*;
- hm.max_distance_from_start_index = math.max(hm.max_distance_from_start_index, distance_from_start_index);
+ self.max_distance_from_start_index = math.max(self.max_distance_from_start_index, distance_from_start_index);
+ if (!got_result_entry) {
+ got_result_entry = true;
+ result.new_entry = entry;
+ }
entry.* = Entry{
.used = true,
.distance_from_start_index = distance_from_start_index,
- .key = key,
- .value = value,
+ .kv = KV{
+ .key = key,
+ .value = value,
+ },
};
- key = tmp.key;
- value = tmp.value;
+ key = tmp.kv.key;
+ value = tmp.kv.value;
distance_from_start_index = tmp.distance_from_start_index;
}
continue;
}
- var result: ?V = null;
if (entry.used) {
- result = entry.value;
+ result.old_kv = entry.kv;
} else {
// adding an entry. otherwise overwriting old value with
// same key
- hm.size += 1;
+ self.size += 1;
}
- hm.max_distance_from_start_index = math.max(distance_from_start_index, hm.max_distance_from_start_index);
+ self.max_distance_from_start_index = math.max(distance_from_start_index, self.max_distance_from_start_index);
+ if (!got_result_entry) {
+ result.new_entry = entry;
+ }
entry.* = Entry{
.used = true,
.distance_from_start_index = distance_from_start_index,
- .key = key,
- .value = value,
+ .kv = KV{
+ .key = key,
+ .value = value,
+ },
};
return result;
}
unreachable; // put into a full map
}
- fn internalGet(hm: *const Self, key: K) ?*Entry {
+ fn internalGet(hm: Self, key: K) ?*KV {
const start_index = hm.keyToIndex(key);
{
var roll_over: usize = 0;
@@ -240,13 +315,13 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
const entry = &hm.entries[index];
if (!entry.used) return null;
- if (eql(entry.key, key)) return entry;
+ if (eql(entry.kv.key, key)) return &entry.kv;
}
}
return null;
}
- fn keyToIndex(hm: *const Self, key: K) usize {
+ fn keyToIndex(hm: Self, key: K) usize {
return usize(hash(key)) % hm.entries.len;
}
};
@@ -256,7 +331,7 @@ test "basic hash map usage" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var map = HashMap(i32, i32, hash_i32, eql_i32).init(&direct_allocator.allocator);
+ var map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
defer map.deinit();
assert((try map.put(1, 11)) == null);
@@ -265,8 +340,19 @@ test "basic hash map usage" {
assert((try map.put(4, 44)) == null);
assert((try map.put(5, 55)) == null);
- assert((try map.put(5, 66)).? == 55);
- assert((try map.put(5, 55)).? == 66);
+ assert((try map.put(5, 66)).?.value == 55);
+ assert((try map.put(5, 55)).?.value == 66);
+
+ const gop1 = try map.getOrPut(5);
+ assert(gop1.found_existing == true);
+ assert(gop1.kv.value == 55);
+ gop1.kv.value = 77;
+ assert(map.get(5).?.value == 77);
+
+ const gop2 = try map.getOrPut(99);
+ assert(gop2.found_existing == false);
+ gop2.kv.value = 42;
+ assert(map.get(99).?.value == 42);
assert(map.contains(2));
assert(map.get(2).?.value == 22);
@@ -279,7 +365,7 @@ test "iterator hash map" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
- var reset_map = HashMap(i32, i32, hash_i32, eql_i32).init(&direct_allocator.allocator);
+ var reset_map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
defer reset_map.deinit();
assert((try reset_map.put(1, 11)) == null);
@@ -287,14 +373,14 @@ test "iterator hash map" {
assert((try reset_map.put(3, 33)) == null);
var keys = []i32{
- 1,
- 2,
3,
+ 2,
+ 1,
};
var values = []i32{
- 11,
- 22,
33,
+ 22,
+ 11,
};
var it = reset_map.iterator();
@@ -322,10 +408,140 @@ test "iterator hash map" {
assert(entry.value == values[0]);
}
-fn hash_i32(x: i32) u32 {
- return @bitCast(u32, x);
+pub fn getHashPtrAddrFn(comptime K: type) (fn (K) u32) {
+ return struct {
+ fn hash(key: K) u32 {
+ return getAutoHashFn(usize)(@ptrToInt(key));
+ }
+ }.hash;
}
-fn eql_i32(a: i32, b: i32) bool {
- return a == b;
+pub fn getTrivialEqlFn(comptime K: type) (fn (K, K) bool) {
+ return struct {
+ fn eql(a: K, b: K) bool {
+ return a == b;
+ }
+ }.eql;
+}
+
+pub fn getAutoHashFn(comptime K: type) (fn (K) u32) {
+ return struct {
+ fn hash(key: K) u32 {
+ comptime var rng = comptime std.rand.DefaultPrng.init(0);
+ return autoHash(key, &rng.random, u32);
+ }
+ }.hash;
+}
+
+pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) {
+ return struct {
+ fn eql(a: K, b: K) bool {
+ return autoEql(a, b);
+ }
+ }.eql;
+}
+
+// TODO improve these hash functions
+pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type) HashInt {
+ switch (@typeInfo(@typeOf(key))) {
+ builtin.TypeId.NoReturn,
+ builtin.TypeId.Opaque,
+ builtin.TypeId.Undefined,
+ builtin.TypeId.ArgTuple,
+ => @compileError("cannot hash this type"),
+
+ builtin.TypeId.Void,
+ builtin.TypeId.Null,
+ => return 0,
+
+ builtin.TypeId.Int => |info| {
+ const unsigned_x = @bitCast(@IntType(false, info.bits), key);
+ if (info.bits <= HashInt.bit_count) {
+ return HashInt(unsigned_x) ^ comptime rng.scalar(HashInt);
+ } else {
+ return @truncate(HashInt, unsigned_x ^ comptime rng.scalar(@typeOf(unsigned_x)));
+ }
+ },
+
+ builtin.TypeId.Float => |info| {
+ return autoHash(@bitCast(@IntType(false, info.bits), key), rng);
+ },
+ builtin.TypeId.Bool => return autoHash(@boolToInt(key), rng),
+ builtin.TypeId.Enum => return autoHash(@enumToInt(key), rng),
+ builtin.TypeId.ErrorSet => return autoHash(@errorToInt(key), rng),
+ builtin.TypeId.Promise, builtin.TypeId.Fn => return autoHash(@ptrToInt(key), rng),
+
+ builtin.TypeId.Namespace,
+ builtin.TypeId.Block,
+ builtin.TypeId.BoundFn,
+ builtin.TypeId.ComptimeFloat,
+ builtin.TypeId.ComptimeInt,
+ builtin.TypeId.Type,
+ => return 0,
+
+ builtin.TypeId.Pointer => |info| switch (info.size) {
+ builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto hash for single item pointers"),
+ builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto hash for many item pointers"),
+ builtin.TypeInfo.Pointer.Size.Slice => {
+ const interval = std.math.max(1, key.len / 256);
+ var i: usize = 0;
+ var h = comptime rng.scalar(HashInt);
+ while (i < key.len) : (i += interval) {
+ h ^= autoHash(key[i], rng, HashInt);
+ }
+ return h;
+ },
+ },
+
+ builtin.TypeId.Optional => @compileError("TODO auto hash for optionals"),
+ builtin.TypeId.Array => @compileError("TODO auto hash for arrays"),
+ builtin.TypeId.Struct => @compileError("TODO auto hash for structs"),
+ builtin.TypeId.Union => @compileError("TODO auto hash for unions"),
+ builtin.TypeId.ErrorUnion => @compileError("TODO auto hash for unions"),
+ }
+}
+
+pub fn autoEql(a: var, b: @typeOf(a)) bool {
+ switch (@typeInfo(@typeOf(a))) {
+ builtin.TypeId.NoReturn,
+ builtin.TypeId.Opaque,
+ builtin.TypeId.Undefined,
+ builtin.TypeId.ArgTuple,
+ => @compileError("cannot test equality of this type"),
+ builtin.TypeId.Void,
+ builtin.TypeId.Null,
+ => return true,
+ builtin.TypeId.Bool,
+ builtin.TypeId.Int,
+ builtin.TypeId.Float,
+ builtin.TypeId.ComptimeFloat,
+ builtin.TypeId.ComptimeInt,
+ builtin.TypeId.Namespace,
+ builtin.TypeId.Block,
+ builtin.TypeId.Promise,
+ builtin.TypeId.Enum,
+ builtin.TypeId.BoundFn,
+ builtin.TypeId.Fn,
+ builtin.TypeId.ErrorSet,
+ builtin.TypeId.Type,
+ => return a == b,
+
+ builtin.TypeId.Pointer => |info| switch (info.size) {
+ builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto eql for single item pointers"),
+ builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto eql for many item pointers"),
+ builtin.TypeInfo.Pointer.Size.Slice => {
+ if (a.len != b.len) return false;
+ for (a) |a_item, i| {
+ if (!autoEql(a_item, b[i])) return false;
+ }
+ return true;
+ },
+ },
+
+ builtin.TypeId.Optional => @compileError("TODO auto eql for optionals"),
+ builtin.TypeId.Array => @compileError("TODO auto eql for arrays"),
+ builtin.TypeId.Struct => @compileError("TODO auto eql for structs"),
+ builtin.TypeId.Union => @compileError("TODO auto eql for unions"),
+ builtin.TypeId.ErrorUnion => @compileError("TODO auto eql for unions"),
+ }
}
diff --git a/std/index.zig b/std/index.zig
index 2f4cfb7553..8dfc59b1d2 100644
--- a/std/index.zig
+++ b/std/index.zig
@@ -5,10 +5,11 @@ pub const BufSet = @import("buf_set.zig").BufSet;
pub const Buffer = @import("buffer.zig").Buffer;
pub const BufferOutStream = @import("buffer.zig").BufferOutStream;
pub const HashMap = @import("hash_map.zig").HashMap;
+pub const AutoHashMap = @import("hash_map.zig").AutoHashMap;
pub const LinkedList = @import("linked_list.zig").LinkedList;
-pub const IntrusiveLinkedList = @import("linked_list.zig").IntrusiveLinkedList;
pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
pub const DynLib = @import("dynamic_library.zig").DynLib;
+pub const Mutex = @import("mutex.zig").Mutex;
pub const atomic = @import("atomic/index.zig");
pub const base64 = @import("base64.zig");
@@ -23,6 +24,7 @@ pub const empty_import = @import("empty.zig");
pub const event = @import("event.zig");
pub const fmt = @import("fmt/index.zig");
pub const hash = @import("hash/index.zig");
+pub const hash_map = @import("hash_map.zig");
pub const heap = @import("heap.zig");
pub const io = @import("io.zig");
pub const json = @import("json.zig");
@@ -32,6 +34,7 @@ pub const mem = @import("mem.zig");
pub const net = @import("net.zig");
pub const os = @import("os/index.zig");
pub const rand = @import("rand/index.zig");
+pub const rb = @import("rb.zig");
pub const sort = @import("sort.zig");
pub const unicode = @import("unicode.zig");
pub const zig = @import("zig/index.zig");
@@ -48,6 +51,7 @@ test "std" {
_ = @import("hash_map.zig");
_ = @import("linked_list.zig");
_ = @import("segmented_list.zig");
+ _ = @import("mutex.zig");
_ = @import("base64.zig");
_ = @import("build.zig");
diff --git a/std/io.zig b/std/io.zig
index ff73c04f78..369f6eede3 100644
--- a/std/io.zig
+++ b/std/io.zig
@@ -207,6 +207,12 @@ pub fn InStream(comptime ReadError: type) type {
_ = try self.readByte();
}
}
+
+ pub fn readStruct(self: *Self, comptime T: type, ptr: *T) !void {
+ // Only extern and packed structs have defined in-memory layout.
+ assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
+ return self.readNoEof(@sliceToBytes((*[1]T)(ptr)[0..]));
+ }
};
}
@@ -254,9 +260,8 @@ pub fn OutStream(comptime WriteError: type) type {
};
}
-/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
-pub fn writeFile(allocator: *mem.Allocator, path: []const u8, data: []const u8) !void {
- var file = try File.openWrite(allocator, path);
+pub fn writeFile(path: []const u8, data: []const u8) !void {
+ var file = try File.openWrite(path);
defer file.close();
try file.write(data);
}
@@ -268,7 +273,7 @@ pub fn readFileAlloc(allocator: *mem.Allocator, path: []const u8) ![]u8 {
/// On success, caller owns returned buffer.
pub fn readFileAllocAligned(allocator: *mem.Allocator, path: []const u8, comptime A: u29) ![]align(A) u8 {
- var file = try File.openRead(allocator, path);
+ var file = try File.openRead(path);
defer file.close();
const size = try file.getEndPos();
@@ -415,13 +420,12 @@ pub fn PeekStream(comptime buffer_size: usize, comptime InStreamError: type) typ
self.at_end = (read < left);
return pos + read;
}
-
};
}
pub const SliceInStream = struct {
const Self = this;
- pub const Error = error { };
+ pub const Error = error{};
pub const Stream = InStream(Error);
pub stream: Stream,
@@ -481,13 +485,12 @@ pub const SliceOutStream = struct {
assert(self.pos <= self.slice.len);
- const n =
- if (self.pos + bytes.len <= self.slice.len)
- bytes.len
- else
- self.slice.len - self.pos;
+ const n = if (self.pos + bytes.len <= self.slice.len)
+ bytes.len
+ else
+ self.slice.len - self.pos;
- std.mem.copy(u8, self.slice[self.pos..self.pos + n], bytes[0..n]);
+ std.mem.copy(u8, self.slice[self.pos .. self.pos + n], bytes[0..n]);
self.pos += n;
if (n < bytes.len) {
@@ -586,7 +589,7 @@ pub const BufferedAtomicFile = struct {
});
errdefer allocator.destroy(self);
- self.atomic_file = try os.AtomicFile.init(allocator, dest_path, os.default_file_mode);
+ self.atomic_file = try os.AtomicFile.init(allocator, dest_path, os.File.default_mode);
errdefer self.atomic_file.deinit();
self.file_stream = FileOutStream.init(&self.atomic_file.file);
diff --git a/std/io_test.zig b/std/io_test.zig
index 56f8a9a6ad..7a44032673 100644
--- a/std/io_test.zig
+++ b/std/io_test.zig
@@ -16,7 +16,7 @@ test "write a file, read it, then delete it" {
prng.random.bytes(data[0..]);
const tmp_file_name = "temp_test_file.txt";
{
- var file = try os.File.openWrite(allocator, tmp_file_name);
+ var file = try os.File.openWrite(tmp_file_name);
defer file.close();
var file_out_stream = io.FileOutStream.init(&file);
@@ -28,7 +28,7 @@ test "write a file, read it, then delete it" {
try buf_stream.flush();
}
{
- var file = try os.File.openRead(allocator, tmp_file_name);
+ var file = try os.File.openRead(tmp_file_name);
defer file.close();
const file_size = try file.getEndPos();
@@ -45,7 +45,7 @@ test "write a file, read it, then delete it" {
assert(mem.eql(u8, contents["begin".len .. contents.len - "end".len], data));
assert(mem.eql(u8, contents[contents.len - "end".len ..], "end"));
}
- try os.deleteFile(allocator, tmp_file_name);
+ try os.deleteFile(tmp_file_name);
}
test "BufferOutStream" {
@@ -63,7 +63,7 @@ test "BufferOutStream" {
}
test "SliceInStream" {
- const bytes = []const u8 { 1, 2, 3, 4, 5, 6, 7 };
+ const bytes = []const u8{ 1, 2, 3, 4, 5, 6, 7 };
var ss = io.SliceInStream.init(bytes);
var dest: [4]u8 = undefined;
@@ -81,7 +81,7 @@ test "SliceInStream" {
}
test "PeekStream" {
- const bytes = []const u8 { 1, 2, 3, 4, 5, 6, 7, 8 };
+ const bytes = []const u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
var ss = io.SliceInStream.init(bytes);
var ps = io.PeekStream(2, io.SliceInStream.Error).init(&ss.stream);
diff --git a/std/json.zig b/std/json.zig
index e62d5a3466..5fc2274985 100644
--- a/std/json.zig
+++ b/std/json.zig
@@ -1318,7 +1318,7 @@ pub const Parser = struct {
_ = p.stack.pop();
var object = &p.stack.items[p.stack.len - 1].Object;
- _ = try object.put(key, value);
+ _ = try object.put(key, value.*);
p.state = State.ObjectKey;
},
// Array Parent -> [ ..., , value ]
diff --git a/std/linked_list.zig b/std/linked_list.zig
index 62cd5ca2bb..130ddbce5d 100644
--- a/std/linked_list.zig
+++ b/std/linked_list.zig
@@ -4,18 +4,8 @@ const assert = debug.assert;
const mem = std.mem;
const Allocator = mem.Allocator;
-/// Generic non-intrusive doubly linked list.
-pub fn LinkedList(comptime T: type) type {
- return BaseLinkedList(T, void, "");
-}
-
-/// Generic intrusive doubly linked list.
-pub fn IntrusiveLinkedList(comptime ParentType: type, comptime field_name: []const u8) type {
- return BaseLinkedList(void, ParentType, field_name);
-}
-
/// Generic doubly linked list.
-fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_name: []const u8) type {
+pub fn LinkedList(comptime T: type) type {
return struct {
const Self = this;
@@ -25,23 +15,13 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
next: ?*Node,
data: T,
- pub fn init(value: *const T) Node {
+ pub fn init(data: T) Node {
return Node{
.prev = null,
.next = null,
- .data = value.*,
+ .data = data,
};
}
-
- pub fn initIntrusive() Node {
- // TODO: when #678 is solved this can become `init`.
- return Node.init({});
- }
-
- pub fn toData(node: *Node) *ParentType {
- comptime assert(isIntrusive());
- return @fieldParentPtr(ParentType, field_name, node);
- }
};
first: ?*Node,
@@ -60,10 +40,6 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
};
}
- fn isIntrusive() bool {
- return ParentType != void or field_name.len != 0;
- }
-
/// Insert a new node after an existing one.
///
/// Arguments:
@@ -192,7 +168,6 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Returns:
/// A pointer to the new node.
pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
- comptime assert(!isIntrusive());
return allocator.create(Node(undefined));
}
@@ -202,7 +177,6 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// node: Pointer to the node to deallocate.
/// allocator: Dynamic memory allocator.
pub fn destroyNode(list: *Self, node: *Node, allocator: *Allocator) void {
- comptime assert(!isIntrusive());
allocator.destroy(node);
}
@@ -214,8 +188,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the new node.
- pub fn createNode(list: *Self, data: *const T, allocator: *Allocator) !*Node {
- comptime assert(!isIntrusive());
+ pub fn createNode(list: *Self, data: T, allocator: *Allocator) !*Node {
var node = try list.allocateNode(allocator);
node.* = Node.init(data);
return node;
@@ -274,69 +247,3 @@ test "basic linked list test" {
assert(list.last.?.data == 4);
assert(list.len == 2);
}
-
-const ElementList = IntrusiveLinkedList(Element, "link");
-const Element = struct {
- value: u32,
- link: IntrusiveLinkedList(Element, "link").Node,
-};
-
-test "basic intrusive linked list test" {
- const allocator = debug.global_allocator;
- var list = ElementList.init();
-
- var one = Element{
- .value = 1,
- .link = ElementList.Node.initIntrusive(),
- };
- var two = Element{
- .value = 2,
- .link = ElementList.Node.initIntrusive(),
- };
- var three = Element{
- .value = 3,
- .link = ElementList.Node.initIntrusive(),
- };
- var four = Element{
- .value = 4,
- .link = ElementList.Node.initIntrusive(),
- };
- var five = Element{
- .value = 5,
- .link = ElementList.Node.initIntrusive(),
- };
-
- list.append(&two.link); // {2}
- list.append(&five.link); // {2, 5}
- list.prepend(&one.link); // {1, 2, 5}
- list.insertBefore(&five.link, &four.link); // {1, 2, 4, 5}
- list.insertAfter(&two.link, &three.link); // {1, 2, 3, 4, 5}
-
- // Traverse forwards.
- {
- var it = list.first;
- var index: u32 = 1;
- while (it) |node| : (it = node.next) {
- assert(node.toData().value == index);
- index += 1;
- }
- }
-
- // Traverse backwards.
- {
- var it = list.last;
- var index: u32 = 1;
- while (it) |node| : (it = node.prev) {
- assert(node.toData().value == (6 - index));
- index += 1;
- }
- }
-
- var first = list.popFirst(); // {2, 3, 4, 5}
- var last = list.pop(); // {2, 3, 4}
- list.remove(&three.link); // {2, 4}
-
- assert(list.first.?.toData().value == 2);
- assert(list.last.?.toData().value == 4);
- assert(list.len == 2);
-}
diff --git a/std/macho.zig b/std/macho.zig
index ddc4d334e4..4325810b03 100644
--- a/std/macho.zig
+++ b/std/macho.zig
@@ -1,16 +1,18 @@
-const builtin = @import("builtin");
-const std = @import("index.zig");
-const io = std.io;
-const mem = std.mem;
-const MH_MAGIC_64 = 0xFEEDFACF;
-const MH_PIE = 0x200000;
-const LC_SYMTAB = 2;
-
-const MachHeader64 = packed struct {
+pub const mach_header = extern struct {
magic: u32,
- cputype: u32,
- cpusubtype: u32,
+ cputype: cpu_type_t,
+ cpusubtype: cpu_subtype_t,
+ filetype: u32,
+ ncmds: u32,
+ sizeofcmds: u32,
+ flags: u32,
+};
+
+pub const mach_header_64 = extern struct {
+ magic: u32,
+ cputype: cpu_type_t,
+ cpusubtype: cpu_subtype_t,
filetype: u32,
ncmds: u32,
sizeofcmds: u32,
@@ -18,19 +20,138 @@ const MachHeader64 = packed struct {
reserved: u32,
};
-const LoadCommand = packed struct {
+pub const load_command = extern struct {
cmd: u32,
cmdsize: u32,
};
-const SymtabCommand = packed struct {
- symoff: u32,
- nsyms: u32,
- stroff: u32,
- strsize: u32,
+
+/// The symtab_command contains the offsets and sizes of the link-edit 4.3BSD
+/// "stab" style symbol table information as described in the header files
+/// and .
+pub const symtab_command = extern struct {
+ cmd: u32, /// LC_SYMTAB
+ cmdsize: u32, /// sizeof(struct symtab_command)
+ symoff: u32, /// symbol table offset
+ nsyms: u32, /// number of symbol table entries
+ stroff: u32, /// string table offset
+ strsize: u32, /// string table size in bytes
};
-const Nlist64 = packed struct {
+/// The linkedit_data_command contains the offsets and sizes of a blob
+/// of data in the __LINKEDIT segment.
+const linkedit_data_command = extern struct {
+ cmd: u32,/// LC_CODE_SIGNATURE, LC_SEGMENT_SPLIT_INFO, LC_FUNCTION_STARTS, LC_DATA_IN_CODE, LC_DYLIB_CODE_SIGN_DRS or LC_LINKER_OPTIMIZATION_HINT.
+ cmdsize: u32, /// sizeof(struct linkedit_data_command)
+ dataoff: u32 , /// file offset of data in __LINKEDIT segment
+ datasize: u32 , /// file size of data in __LINKEDIT segment
+};
+
+/// The segment load command indicates that a part of this file is to be
+/// mapped into the task's address space. The size of this segment in memory,
+/// vmsize, maybe equal to or larger than the amount to map from this file,
+/// filesize. The file is mapped starting at fileoff to the beginning of
+/// the segment in memory, vmaddr. The rest of the memory of the segment,
+/// if any, is allocated zero fill on demand. The segment's maximum virtual
+/// memory protection and initial virtual memory protection are specified
+/// by the maxprot and initprot fields. If the segment has sections then the
+/// section structures directly follow the segment command and their size is
+/// reflected in cmdsize.
+pub const segment_command = extern struct {
+ cmd: u32,/// LC_SEGMENT
+ cmdsize: u32,/// includes sizeof section structs
+ segname: [16]u8,/// segment name
+ vmaddr: u32,/// memory address of this segment
+ vmsize: u32,/// memory size of this segment
+ fileoff: u32,/// file offset of this segment
+ filesize: u32,/// amount to map from the file
+ maxprot: vm_prot_t,/// maximum VM protection
+ initprot: vm_prot_t,/// initial VM protection
+ nsects: u32,/// number of sections in segment
+ flags: u32,
+};
+
+/// The 64-bit segment load command indicates that a part of this file is to be
+/// mapped into a 64-bit task's address space. If the 64-bit segment has
+/// sections then section_64 structures directly follow the 64-bit segment
+/// command and their size is reflected in cmdsize.
+pub const segment_command_64 = extern struct {
+ cmd: u32, /// LC_SEGMENT_64
+ cmdsize: u32, /// includes sizeof section_64 structs
+ segname: [16]u8, /// segment name
+ vmaddr: u64, /// memory address of this segment
+ vmsize: u64, /// memory size of this segment
+ fileoff: u64, /// file offset of this segment
+ filesize: u64, /// amount to map from the file
+ maxprot: vm_prot_t, /// maximum VM protection
+ initprot: vm_prot_t, /// initial VM protection
+ nsects: u32, /// number of sections in segment
+ flags: u32,
+};
+
+/// A segment is made up of zero or more sections. Non-MH_OBJECT files have
+/// all of their segments with the proper sections in each, and padded to the
+/// specified segment alignment when produced by the link editor. The first
+/// segment of a MH_EXECUTE and MH_FVMLIB format file contains the mach_header
+/// and load commands of the object file before its first section. The zero
+/// fill sections are always last in their segment (in all formats). This
+/// allows the zeroed segment padding to be mapped into memory where zero fill
+/// sections might be. The gigabyte zero fill sections, those with the section
+/// type S_GB_ZEROFILL, can only be in a segment with sections of this type.
+/// These segments are then placed after all other segments.
+///
+/// The MH_OBJECT format has all of its sections in one segment for
+/// compactness. There is no padding to a specified segment boundary and the
+/// mach_header and load commands are not part of the segment.
+///
+/// Sections with the same section name, sectname, going into the same segment,
+/// segname, are combined by the link editor. The resulting section is aligned
+/// to the maximum alignment of the combined sections and is the new section's
+/// alignment. The combined sections are aligned to their original alignment in
+/// the combined section. Any padded bytes to get the specified alignment are
+/// zeroed.
+///
+/// The format of the relocation entries referenced by the reloff and nreloc
+/// fields of the section structure for mach object files is described in the
+/// header file .
+pub const @"section" = extern struct {
+ sectname: [16]u8, /// name of this section
+ segname: [16]u8, /// segment this section goes in
+ addr: u32, /// memory address of this section
+ size: u32, /// size in bytes of this section
+ offset: u32, /// file offset of this section
+ @"align": u32, /// section alignment (power of 2)
+ reloff: u32, /// file offset of relocation entries
+ nreloc: u32, /// number of relocation entries
+ flags: u32, /// flags (section type and attributes
+ reserved1: u32, /// reserved (for offset or index)
+ reserved2: u32, /// reserved (for count or sizeof)
+};
+
+pub const section_64 = extern struct {
+ sectname: [16]u8, /// name of this section
+ segname: [16]u8, /// segment this section goes in
+ addr: u64, /// memory address of this section
+ size: u64, /// size in bytes of this section
+ offset: u32, /// file offset of this section
+ @"align": u32, /// section alignment (power of 2)
+ reloff: u32, /// file offset of relocation entries
+ nreloc: u32, /// number of relocation entries
+ flags: u32, /// flags (section type and attributes
+ reserved1: u32, /// reserved (for offset or index)
+ reserved2: u32, /// reserved (for count or sizeof)
+ reserved3: u32, /// reserved
+};
+
+pub const nlist = extern struct {
+ n_strx: u32,
+ n_type: u8,
+ n_sect: u8,
+ n_desc: i16,
+ n_value: u32,
+};
+
+pub const nlist_64 = extern struct {
n_strx: u32,
n_type: u8,
n_sect: u8,
@@ -38,135 +159,190 @@ const Nlist64 = packed struct {
n_value: u64,
};
-pub const Symbol = struct {
- name: []const u8,
- address: u64,
+/// After MacOS X 10.1 when a new load command is added that is required to be
+/// understood by the dynamic linker for the image to execute properly the
+/// LC_REQ_DYLD bit will be or'ed into the load command constant. If the dynamic
+/// linker sees such a load command it it does not understand will issue a
+/// "unknown load command required for execution" error and refuse to use the
+/// image. Other load commands without this bit that are not understood will
+/// simply be ignored.
+pub const LC_REQ_DYLD = 0x80000000;
- fn addressLessThan(lhs: Symbol, rhs: Symbol) bool {
- return lhs.address < rhs.address;
- }
-};
+pub const LC_SEGMENT = 0x1; /// segment of this file to be mapped
+pub const LC_SYMTAB = 0x2; /// link-edit stab symbol table info
+pub const LC_SYMSEG = 0x3; /// link-edit gdb symbol table info (obsolete)
+pub const LC_THREAD = 0x4; /// thread
+pub const LC_UNIXTHREAD = 0x5; /// unix thread (includes a stack)
+pub const LC_LOADFVMLIB = 0x6; /// load a specified fixed VM shared library
+pub const LC_IDFVMLIB = 0x7; /// fixed VM shared library identification
+pub const LC_IDENT = 0x8; /// object identification info (obsolete)
+pub const LC_FVMFILE = 0x9; /// fixed VM file inclusion (internal use)
+pub const LC_PREPAGE = 0xa; /// prepage command (internal use)
+pub const LC_DYSYMTAB = 0xb; /// dynamic link-edit symbol table info
+pub const LC_LOAD_DYLIB = 0xc; /// load a dynamically linked shared library
+pub const LC_ID_DYLIB = 0xd; /// dynamically linked shared lib ident
+pub const LC_LOAD_DYLINKER = 0xe; /// load a dynamic linker
+pub const LC_ID_DYLINKER = 0xf; /// dynamic linker identification
+pub const LC_PREBOUND_DYLIB = 0x10; /// modules prebound for a dynamically
+pub const LC_ROUTINES = 0x11; /// image routines
+pub const LC_SUB_FRAMEWORK = 0x12; /// sub framework
+pub const LC_SUB_UMBRELLA = 0x13; /// sub umbrella
+pub const LC_SUB_CLIENT = 0x14; /// sub client
+pub const LC_SUB_LIBRARY = 0x15; /// sub library
+pub const LC_TWOLEVEL_HINTS = 0x16; /// two-level namespace lookup hints
+pub const LC_PREBIND_CKSUM = 0x17; /// prebind checksum
-pub const SymbolTable = struct {
- allocator: *mem.Allocator,
- symbols: []const Symbol,
- strings: []const u8,
+/// load a dynamically linked shared library that is allowed to be missing
+/// (all symbols are weak imported).
+pub const LC_LOAD_WEAK_DYLIB = (0x18 | LC_REQ_DYLD);
- // Doubles as an eyecatcher to calculate the PIE slide, see loadSymbols().
- // Ideally we'd use _mh_execute_header because it's always at 0x100000000
- // in the image but as it's located in a different section than executable
- // code, its displacement is different.
- pub fn deinit(self: *SymbolTable) void {
- self.allocator.free(self.symbols);
- self.symbols = []const Symbol{};
+pub const LC_SEGMENT_64 = 0x19; /// 64-bit segment of this file to be mapped
+pub const LC_ROUTINES_64 = 0x1a; /// 64-bit image routines
+pub const LC_UUID = 0x1b; /// the uuid
+pub const LC_RPATH = (0x1c | LC_REQ_DYLD); /// runpath additions
+pub const LC_CODE_SIGNATURE = 0x1d; /// local of code signature
+pub const LC_SEGMENT_SPLIT_INFO = 0x1e; /// local of info to split segments
+pub const LC_REEXPORT_DYLIB = (0x1f | LC_REQ_DYLD); /// load and re-export dylib
+pub const LC_LAZY_LOAD_DYLIB = 0x20; /// delay load of dylib until first use
+pub const LC_ENCRYPTION_INFO = 0x21; /// encrypted segment information
+pub const LC_DYLD_INFO = 0x22; /// compressed dyld information
+pub const LC_DYLD_INFO_ONLY = (0x22|LC_REQ_DYLD); /// compressed dyld information only
+pub const LC_LOAD_UPWARD_DYLIB = (0x23 | LC_REQ_DYLD); /// load upward dylib
+pub const LC_VERSION_MIN_MACOSX = 0x24; /// build for MacOSX min OS version
+pub const LC_VERSION_MIN_IPHONEOS = 0x25; /// build for iPhoneOS min OS version
+pub const LC_FUNCTION_STARTS = 0x26; /// compressed table of function start addresses
+pub const LC_DYLD_ENVIRONMENT = 0x27; /// string for dyld to treat like environment variable
+pub const LC_MAIN = (0x28|LC_REQ_DYLD); /// replacement for LC_UNIXTHREAD
+pub const LC_DATA_IN_CODE = 0x29; /// table of non-instructions in __text
+pub const LC_SOURCE_VERSION = 0x2A; /// source version used to build binary
+pub const LC_DYLIB_CODE_SIGN_DRS = 0x2B; /// Code signing DRs copied from linked dylibs
+pub const LC_ENCRYPTION_INFO_64 = 0x2C; /// 64-bit encrypted segment information
+pub const LC_LINKER_OPTION = 0x2D; /// linker options in MH_OBJECT files
+pub const LC_LINKER_OPTIMIZATION_HINT = 0x2E; /// optimization hints in MH_OBJECT files
+pub const LC_VERSION_MIN_TVOS = 0x2F; /// build for AppleTV min OS version
+pub const LC_VERSION_MIN_WATCHOS = 0x30; /// build for Watch min OS version
+pub const LC_NOTE = 0x31; /// arbitrary data included within a Mach-O file
+pub const LC_BUILD_VERSION = 0x32; /// build for platform min OS version
- self.allocator.free(self.strings);
- self.strings = []const u8{};
- }
+pub const MH_MAGIC = 0xfeedface; /// the mach magic number
+pub const MH_CIGAM = 0xcefaedfe; /// NXSwapInt(MH_MAGIC)
- pub fn search(self: *const SymbolTable, address: usize) ?*const Symbol {
- var min: usize = 0;
- var max: usize = self.symbols.len - 1; // Exclude sentinel.
- while (min < max) {
- const mid = min + (max - min) / 2;
- const curr = &self.symbols[mid];
- const next = &self.symbols[mid + 1];
- if (address >= next.address) {
- min = mid + 1;
- } else if (address < curr.address) {
- max = mid;
- } else {
- return curr;
- }
- }
- return null;
- }
-};
+pub const MH_MAGIC_64 = 0xfeedfacf; /// the 64-bit mach magic number
+pub const MH_CIGAM_64 = 0xcffaedfe; /// NXSwapInt(MH_MAGIC_64)
-pub fn loadSymbols(allocator: *mem.Allocator, in: *io.FileInStream) !SymbolTable {
- var file = in.file;
- try file.seekTo(0);
+pub const MH_OBJECT = 0x1; /// relocatable object file
+pub const MH_EXECUTE = 0x2; /// demand paged executable file
+pub const MH_FVMLIB = 0x3; /// fixed VM shared library file
+pub const MH_CORE = 0x4; /// core file
+pub const MH_PRELOAD = 0x5; /// preloaded executable file
+pub const MH_DYLIB = 0x6; /// dynamically bound shared library
+pub const MH_DYLINKER = 0x7; /// dynamic link editor
+pub const MH_BUNDLE = 0x8; /// dynamically bound bundle file
+pub const MH_DYLIB_STUB = 0x9; /// shared library stub for static linking only, no section contents
+pub const MH_DSYM = 0xa; /// companion file with only debug sections
+pub const MH_KEXT_BUNDLE = 0xb; /// x86_64 kexts
- var hdr: MachHeader64 = undefined;
- try readOneNoEof(in, MachHeader64, &hdr);
- if (hdr.magic != MH_MAGIC_64) return error.MissingDebugInfo;
- const is_pie = MH_PIE == (hdr.flags & MH_PIE);
+// Constants for the flags field of the mach_header
- var pos: usize = @sizeOf(@typeOf(hdr));
- var ncmd: u32 = hdr.ncmds;
- while (ncmd != 0) : (ncmd -= 1) {
- try file.seekTo(pos);
- var lc: LoadCommand = undefined;
- try readOneNoEof(in, LoadCommand, &lc);
- if (lc.cmd == LC_SYMTAB) break;
- pos += lc.cmdsize;
- } else {
- return error.MissingDebugInfo;
- }
+pub const MH_NOUNDEFS = 0x1; /// the object file has no undefined references
+pub const MH_INCRLINK = 0x2; /// the object file is the output of an incremental link against a base file and can't be link edited again
+pub const MH_DYLDLINK = 0x4; /// the object file is input for the dynamic linker and can't be staticly link edited again
+pub const MH_BINDATLOAD = 0x8; /// the object file's undefined references are bound by the dynamic linker when loaded.
+pub const MH_PREBOUND = 0x10; /// the file has its dynamic undefined references prebound.
+pub const MH_SPLIT_SEGS = 0x20; /// the file has its read-only and read-write segments split
+pub const MH_LAZY_INIT = 0x40; /// the shared library init routine is to be run lazily via catching memory faults to its writeable segments (obsolete)
+pub const MH_TWOLEVEL = 0x80; /// the image is using two-level name space bindings
+pub const MH_FORCE_FLAT = 0x100; /// the executable is forcing all images to use flat name space bindings
+pub const MH_NOMULTIDEFS = 0x200; /// this umbrella guarantees no multiple defintions of symbols in its sub-images so the two-level namespace hints can always be used.
+pub const MH_NOFIXPREBINDING = 0x400; /// do not have dyld notify the prebinding agent about this executable
+pub const MH_PREBINDABLE = 0x800; /// the binary is not prebound but can have its prebinding redone. only used when MH_PREBOUND is not set.
+pub const MH_ALLMODSBOUND = 0x1000; /// indicates that this binary binds to all two-level namespace modules of its dependent libraries. only used when MH_PREBINDABLE and MH_TWOLEVEL are both set.
+pub const MH_SUBSECTIONS_VIA_SYMBOLS = 0x2000;/// safe to divide up the sections into sub-sections via symbols for dead code stripping
+pub const MH_CANONICAL = 0x4000; /// the binary has been canonicalized via the unprebind operation
+pub const MH_WEAK_DEFINES = 0x8000; /// the final linked image contains external weak symbols
+pub const MH_BINDS_TO_WEAK = 0x10000; /// the final linked image uses weak symbols
- var cmd: SymtabCommand = undefined;
- try readOneNoEof(in, SymtabCommand, &cmd);
+pub const MH_ALLOW_STACK_EXECUTION = 0x20000;/// When this bit is set, all stacks in the task will be given stack execution privilege. Only used in MH_EXECUTE filetypes.
+pub const MH_ROOT_SAFE = 0x40000; /// When this bit is set, the binary declares it is safe for use in processes with uid zero
+
+pub const MH_SETUID_SAFE = 0x80000; /// When this bit is set, the binary declares it is safe for use in processes when issetugid() is true
- try file.seekTo(cmd.symoff);
- var syms = try allocator.alloc(Nlist64, cmd.nsyms);
- defer allocator.free(syms);
- try readNoEof(in, Nlist64, syms);
+pub const MH_NO_REEXPORTED_DYLIBS = 0x100000; /// When this bit is set on a dylib, the static linker does not need to examine dependent dylibs to see if any are re-exported
+pub const MH_PIE = 0x200000; /// When this bit is set, the OS will load the main executable at a random address. Only used in MH_EXECUTE filetypes.
+pub const MH_DEAD_STRIPPABLE_DYLIB = 0x400000; /// Only for use on dylibs. When linking against a dylib that has this bit set, the static linker will automatically not create a LC_LOAD_DYLIB load command to the dylib if no symbols are being referenced from the dylib.
+pub const MH_HAS_TLV_DESCRIPTORS = 0x800000; /// Contains a section of type S_THREAD_LOCAL_VARIABLES
- try file.seekTo(cmd.stroff);
- var strings = try allocator.alloc(u8, cmd.strsize);
- errdefer allocator.free(strings);
- try in.stream.readNoEof(strings);
+pub const MH_NO_HEAP_EXECUTION = 0x1000000; /// When this bit is set, the OS will run the main executable with a non-executable heap even on platforms (e.g. i386) that don't require it. Only used in MH_EXECUTE filetypes.
- var nsyms: usize = 0;
- for (syms) |sym|
- if (isSymbol(sym)) nsyms += 1;
- if (nsyms == 0) return error.MissingDebugInfo;
+pub const MH_APP_EXTENSION_SAFE = 0x02000000; /// The code was linked for use in an application extension.
- var symbols = try allocator.alloc(Symbol, nsyms + 1); // Room for sentinel.
- errdefer allocator.free(symbols);
+pub const MH_NLIST_OUTOFSYNC_WITH_DYLDINFO = 0x04000000; /// The external symbols listed in the nlist symbol table do not include all the symbols listed in the dyld info.
- var pie_slide: usize = 0;
- var nsym: usize = 0;
- for (syms) |sym| {
- if (!isSymbol(sym)) continue;
- const start = sym.n_strx;
- const end = mem.indexOfScalarPos(u8, strings, start, 0).?;
- const name = strings[start..end];
- const address = sym.n_value;
- symbols[nsym] = Symbol{ .name = name, .address = address };
- nsym += 1;
- if (is_pie and mem.eql(u8, name, "_SymbolTable_deinit")) {
- pie_slide = @ptrToInt(SymbolTable.deinit) - address;
- }
- }
- // Effectively a no-op, lld emits symbols in ascending order.
- std.sort.sort(Symbol, symbols[0..nsyms], Symbol.addressLessThan);
+/// The flags field of a section structure is separated into two parts a section
+/// type and section attributes. The section types are mutually exclusive (it
+/// can only have one type) but the section attributes are not (it may have more
+/// than one attribute).
+/// 256 section types
+pub const SECTION_TYPE = 0x000000ff;
+pub const SECTION_ATTRIBUTES = 0xffffff00; /// 24 section attributes
- // Insert the sentinel. Since we don't know where the last function ends,
- // we arbitrarily limit it to the start address + 4 KB.
- const top = symbols[nsyms - 1].address + 4096;
- symbols[nsyms] = Symbol{ .name = "", .address = top };
+pub const S_REGULAR = 0x0; /// regular section
+pub const S_ZEROFILL = 0x1; /// zero fill on demand section
+pub const S_CSTRING_LITERALS = 0x2; /// section with only literal C string
+pub const S_4BYTE_LITERALS = 0x3; /// section with only 4 byte literals
+pub const S_8BYTE_LITERALS = 0x4; /// section with only 8 byte literals
+pub const S_LITERAL_POINTERS = 0x5; /// section with only pointers to
- if (pie_slide != 0) {
- for (symbols) |*symbol|
- symbol.address += pie_slide;
- }
- return SymbolTable{
- .allocator = allocator,
- .symbols = symbols,
- .strings = strings,
- };
-}
+pub const N_STAB = 0xe0; /// if any of these bits set, a symbolic debugging entry
+pub const N_PEXT = 0x10; /// private external symbol bit
+pub const N_TYPE = 0x0e; /// mask for the type bits
+pub const N_EXT = 0x01; /// external symbol bit, set for external symbols
-fn readNoEof(in: *io.FileInStream, comptime T: type, result: []T) !void {
- return in.stream.readNoEof(@sliceToBytes(result));
-}
-fn readOneNoEof(in: *io.FileInStream, comptime T: type, result: *T) !void {
- return readNoEof(in, T, (*[1]T)(result)[0..]);
-}
-fn isSymbol(sym: *const Nlist64) bool {
- return sym.n_value != 0 and sym.n_desc == 0;
-}
+pub const N_GSYM = 0x20; /// global symbol: name,,NO_SECT,type,0
+pub const N_FNAME = 0x22; /// procedure name (f77 kludge): name,,NO_SECT,0,0
+pub const N_FUN = 0x24; /// procedure: name,,n_sect,linenumber,address
+pub const N_STSYM = 0x26; /// static symbol: name,,n_sect,type,address
+pub const N_LCSYM = 0x28; /// .lcomm symbol: name,,n_sect,type,address
+pub const N_BNSYM = 0x2e; /// begin nsect sym: 0,,n_sect,0,address
+pub const N_AST = 0x32; /// AST file path: name,,NO_SECT,0,0
+pub const N_OPT = 0x3c; /// emitted with gcc2_compiled and in gcc source
+pub const N_RSYM = 0x40; /// register sym: name,,NO_SECT,type,register
+pub const N_SLINE = 0x44; /// src line: 0,,n_sect,linenumber,address
+pub const N_ENSYM = 0x4e; /// end nsect sym: 0,,n_sect,0,address
+pub const N_SSYM = 0x60; /// structure elt: name,,NO_SECT,type,struct_offset
+pub const N_SO = 0x64; /// source file name: name,,n_sect,0,address
+pub const N_OSO = 0x66; /// object file name: name,,0,0,st_mtime
+pub const N_LSYM = 0x80; /// local sym: name,,NO_SECT,type,offset
+pub const N_BINCL = 0x82; /// include file beginning: name,,NO_SECT,0,sum
+pub const N_SOL = 0x84; /// #included file name: name,,n_sect,0,address
+pub const N_PARAMS = 0x86; /// compiler parameters: name,,NO_SECT,0,0
+pub const N_VERSION = 0x88; /// compiler version: name,,NO_SECT,0,0
+pub const N_OLEVEL = 0x8A; /// compiler -O level: name,,NO_SECT,0,0
+pub const N_PSYM = 0xa0; /// parameter: name,,NO_SECT,type,offset
+pub const N_EINCL = 0xa2; /// include file end: name,,NO_SECT,0,0
+pub const N_ENTRY = 0xa4; /// alternate entry: name,,n_sect,linenumber,address
+pub const N_LBRAC = 0xc0; /// left bracket: 0,,NO_SECT,nesting level,address
+pub const N_EXCL = 0xc2; /// deleted include file: name,,NO_SECT,0,sum
+pub const N_RBRAC = 0xe0; /// right bracket: 0,,NO_SECT,nesting level,address
+pub const N_BCOMM = 0xe2; /// begin common: name,,NO_SECT,0,0
+pub const N_ECOMM = 0xe4; /// end common: name,,n_sect,0,0
+pub const N_ECOML = 0xe8; /// end common (local name): 0,,n_sect,0,address
+pub const N_LENG = 0xfe; /// second stab entry with length information
+
+/// If a segment contains any sections marked with S_ATTR_DEBUG then all
+/// sections in that segment must have this attribute. No section other than
+/// a section marked with this attribute may reference the contents of this
+/// section. A section with this attribute may contain no symbols and must have
+/// a section type S_REGULAR. The static linker will not copy section contents
+/// from sections with this attribute into its output file. These sections
+/// generally contain DWARF debugging info.
+pub const S_ATTR_DEBUG = 0x02000000; /// a debug section
+
+pub const cpu_type_t = integer_t;
+pub const cpu_subtype_t = integer_t;
+pub const integer_t = c_int;
+pub const vm_prot_t = c_int;
+
diff --git a/std/math/ceil.zig b/std/math/ceil.zig
index 1c429504e8..8a5221d862 100644
--- a/std/math/ceil.zig
+++ b/std/math/ceil.zig
@@ -61,10 +61,8 @@ fn ceil64(x: f64) f64 {
}
if (u >> 63 != 0) {
- @setFloatMode(this, builtin.FloatMode.Strict);
y = x - math.f64_toint + math.f64_toint - x;
} else {
- @setFloatMode(this, builtin.FloatMode.Strict);
y = x + math.f64_toint - math.f64_toint - x;
}
diff --git a/std/math/complex/exp.zig b/std/math/complex/exp.zig
index 48fb132d97..e696ee42b4 100644
--- a/std/math/complex/exp.zig
+++ b/std/math/complex/exp.zig
@@ -17,8 +17,6 @@ pub fn exp(z: var) @typeOf(z) {
}
fn exp32(z: Complex(f32)) Complex(f32) {
- @setFloatMode(this, @import("builtin").FloatMode.Strict);
-
const exp_overflow = 0x42b17218; // max_exp * ln2 ~= 88.72283955
const cexp_overflow = 0x43400074; // (max_exp - min_denom_exp) * ln2
diff --git a/std/math/cos.zig b/std/math/cos.zig
index 71d5e4a8f6..b6a2fbffe6 100644
--- a/std/math/cos.zig
+++ b/std/math/cos.zig
@@ -37,8 +37,6 @@ const C5 = 4.16666666666665929218E-2;
//
// This may have slight differences on some edge cases and may need to replaced if so.
fn cos32(x_: f32) f32 {
- @setFloatMode(this, @import("builtin").FloatMode.Strict);
-
const pi4a = 7.85398125648498535156e-1;
const pi4b = 3.77489470793079817668E-8;
const pi4c = 2.69515142907905952645E-15;
diff --git a/std/math/exp.zig b/std/math/exp.zig
index d6185d4f0b..cf8fd62d80 100644
--- a/std/math/exp.zig
+++ b/std/math/exp.zig
@@ -18,8 +18,6 @@ pub fn exp(x: var) @typeOf(x) {
}
fn exp32(x_: f32) f32 {
- @setFloatMode(this, builtin.FloatMode.Strict);
-
const half = []f32{ 0.5, -0.5 };
const ln2hi = 6.9314575195e-1;
const ln2lo = 1.4286067653e-6;
@@ -95,8 +93,6 @@ fn exp32(x_: f32) f32 {
}
fn exp64(x_: f64) f64 {
- @setFloatMode(this, builtin.FloatMode.Strict);
-
const half = []const f64{ 0.5, -0.5 };
const ln2hi: f64 = 6.93147180369123816490e-01;
const ln2lo: f64 = 1.90821492927058770002e-10;
diff --git a/std/math/exp2.zig b/std/math/exp2.zig
index d590b0b60b..3d8e5d692e 100644
--- a/std/math/exp2.zig
+++ b/std/math/exp2.zig
@@ -36,8 +36,6 @@ const exp2ft = []const f64{
};
fn exp2_32(x: f32) f32 {
- @setFloatMode(this, @import("builtin").FloatMode.Strict);
-
const tblsiz = @intCast(u32, exp2ft.len);
const redux: f32 = 0x1.8p23 / @intToFloat(f32, tblsiz);
const P1: f32 = 0x1.62e430p-1;
@@ -353,8 +351,6 @@ const exp2dt = []f64{
};
fn exp2_64(x: f64) f64 {
- @setFloatMode(this, @import("builtin").FloatMode.Strict);
-
const tblsiz = @intCast(u32, exp2dt.len / 2);
const redux: f64 = 0x1.8p52 / @intToFloat(f64, tblsiz);
const P1: f64 = 0x1.62e42fefa39efp-1;
diff --git a/std/math/expm1.zig b/std/math/expm1.zig
index 6fa0194b32..6729417f60 100644
--- a/std/math/expm1.zig
+++ b/std/math/expm1.zig
@@ -19,8 +19,6 @@ pub fn expm1(x: var) @typeOf(x) {
}
fn expm1_32(x_: f32) f32 {
- @setFloatMode(this, builtin.FloatMode.Strict);
-
if (math.isNan(x_))
return math.nan(f32);
@@ -149,8 +147,6 @@ fn expm1_32(x_: f32) f32 {
}
fn expm1_64(x_: f64) f64 {
- @setFloatMode(this, builtin.FloatMode.Strict);
-
if (math.isNan(x_))
return math.nan(f64);
diff --git a/std/math/floor.zig b/std/math/floor.zig
index 0858598eea..6ce462b10f 100644
--- a/std/math/floor.zig
+++ b/std/math/floor.zig
@@ -97,10 +97,8 @@ fn floor64(x: f64) f64 {
}
if (u >> 63 != 0) {
- @setFloatMode(this, builtin.FloatMode.Strict);
y = x - math.f64_toint + math.f64_toint - x;
} else {
- @setFloatMode(this, builtin.FloatMode.Strict);
y = x + math.f64_toint - math.f64_toint - x;
}
diff --git a/std/math/ln.zig b/std/math/ln.zig
index e78cc379e0..a560fee8ec 100644
--- a/std/math/ln.zig
+++ b/std/math/ln.zig
@@ -35,8 +35,6 @@ pub fn ln(x: var) @typeOf(x) {
}
pub fn ln_32(x_: f32) f32 {
- @setFloatMode(this, @import("builtin").FloatMode.Strict);
-
const ln2_hi: f32 = 6.9313812256e-01;
const ln2_lo: f32 = 9.0580006145e-06;
const Lg1: f32 = 0xaaaaaa.0p-24;
@@ -89,8 +87,6 @@ pub fn ln_32(x_: f32) f32 {
}
pub fn ln_64(x_: f64) f64 {
- @setFloatMode(this, @import("builtin").FloatMode.Strict);
-
const ln2_hi: f64 = 6.93147180369123816490e-01;
const ln2_lo: f64 = 1.90821492927058770002e-10;
const Lg1: f64 = 6.666666666666735130e-01;
diff --git a/std/math/pow.zig b/std/math/pow.zig
index 7fc334c06b..c764b58182 100644
--- a/std/math/pow.zig
+++ b/std/math/pow.zig
@@ -28,8 +28,6 @@ const assert = std.debug.assert;
// This implementation is taken from the go stlib, musl is a bit more complex.
pub fn pow(comptime T: type, x: T, y: T) T {
- @setFloatMode(this, @import("builtin").FloatMode.Strict);
-
if (T != f32 and T != f64) {
@compileError("pow not implemented for " ++ @typeName(T));
}
diff --git a/std/math/round.zig b/std/math/round.zig
index c8d9eb4fd4..4fe35365c8 100644
--- a/std/math/round.zig
+++ b/std/math/round.zig
@@ -35,11 +35,7 @@ fn round32(x_: f32) f32 {
return 0 * @bitCast(f32, u);
}
- {
- @setFloatMode(this, builtin.FloatMode.Strict);
- y = x + math.f32_toint - math.f32_toint - x;
- }
-
+ y = x + math.f32_toint - math.f32_toint - x;
if (y > 0.5) {
y = y + x - 1;
} else if (y <= -0.5) {
@@ -72,11 +68,7 @@ fn round64(x_: f64) f64 {
return 0 * @bitCast(f64, u);
}
- {
- @setFloatMode(this, builtin.FloatMode.Strict);
- y = x + math.f64_toint - math.f64_toint - x;
- }
-
+ y = x + math.f64_toint - math.f64_toint - x;
if (y > 0.5) {
y = y + x - 1;
} else if (y <= -0.5) {
diff --git a/std/math/sin.zig b/std/math/sin.zig
index 3796d74812..15b2f9f17a 100644
--- a/std/math/sin.zig
+++ b/std/math/sin.zig
@@ -38,8 +38,6 @@ const C5 = 4.16666666666665929218E-2;
//
// This may have slight differences on some edge cases and may need to replaced if so.
fn sin32(x_: f32) f32 {
- @setFloatMode(this, @import("builtin").FloatMode.Strict);
-
const pi4a = 7.85398125648498535156e-1;
const pi4b = 3.77489470793079817668E-8;
const pi4c = 2.69515142907905952645E-15;
diff --git a/std/math/sinh.zig b/std/math/sinh.zig
index bb3af280ab..3105b9a26e 100644
--- a/std/math/sinh.zig
+++ b/std/math/sinh.zig
@@ -54,8 +54,6 @@ fn sinh32(x: f32) f32 {
}
fn sinh64(x: f64) f64 {
- @setFloatMode(this, @import("builtin").FloatMode.Strict);
-
const u = @bitCast(u64, x);
const w = @intCast(u32, u >> 32);
const ax = @bitCast(f64, u & (@maxValue(u64) >> 1));
diff --git a/std/math/tan.zig b/std/math/tan.zig
index ff3ed06186..a71a17e625 100644
--- a/std/math/tan.zig
+++ b/std/math/tan.zig
@@ -31,8 +31,6 @@ const Tq4 = -5.38695755929454629881E7;
//
// This may have slight differences on some edge cases and may need to replaced if so.
fn tan32(x_: f32) f32 {
- @setFloatMode(this, @import("builtin").FloatMode.Strict);
-
const pi4a = 7.85398125648498535156e-1;
const pi4b = 3.77489470793079817668E-8;
const pi4c = 2.69515142907905952645E-15;
diff --git a/std/mem.zig b/std/mem.zig
index 43961a6d14..4390f8ad5b 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -135,6 +135,12 @@ pub const Allocator = struct {
}
};
+pub const Compare = enum {
+ LessThan,
+ Equal,
+ GreaterThan,
+};
+
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.
/// dest.ptr must be <= src.ptr.
@@ -169,16 +175,64 @@ pub fn set(comptime T: type, dest: []T, value: T) void {
d.* = value;
}
-/// Returns true if lhs < rhs, false otherwise
-pub fn lessThan(comptime T: type, lhs: []const T, rhs: []const T) bool {
+pub fn secureZero(comptime T: type, s: []T) void {
+ // NOTE: We do not use a volatile slice cast here since LLVM cannot
+ // see that it can be replaced by a memset.
+ const ptr = @ptrCast([*]volatile u8, s.ptr);
+ const length = s.len * @sizeOf(T);
+ @memset(ptr, 0, length);
+}
+
+test "mem.secureZero" {
+ var a = []u8{0xfe} ** 8;
+ var b = []u8{0xfe} ** 8;
+
+ set(u8, a[0..], 0);
+ secureZero(u8, b[0..]);
+
+ assert(eql(u8, a[0..], b[0..]));
+}
+
+pub fn compare(comptime T: type, lhs: []const T, rhs: []const T) Compare {
const n = math.min(lhs.len, rhs.len);
var i: usize = 0;
while (i < n) : (i += 1) {
- if (lhs[i] == rhs[i]) continue;
- return lhs[i] < rhs[i];
+ if (lhs[i] == rhs[i]) {
+ continue;
+ } else if (lhs[i] < rhs[i]) {
+ return Compare.LessThan;
+ } else if (lhs[i] > rhs[i]) {
+ return Compare.GreaterThan;
+ } else {
+ unreachable;
+ }
}
- return lhs.len < rhs.len;
+ if (lhs.len == rhs.len) {
+ return Compare.Equal;
+ } else if (lhs.len < rhs.len) {
+ return Compare.LessThan;
+ } else if (lhs.len > rhs.len) {
+ return Compare.GreaterThan;
+ }
+ unreachable;
+}
+
+test "mem.compare" {
+ assert(compare(u8, "abcd", "bee") == Compare.LessThan);
+ assert(compare(u8, "abc", "abc") == Compare.Equal);
+ assert(compare(u8, "abc", "abc0") == Compare.LessThan);
+ assert(compare(u8, "", "") == Compare.Equal);
+ assert(compare(u8, "", "a") == Compare.LessThan);
+}
+
+/// Returns true if lhs < rhs, false otherwise
+pub fn lessThan(comptime T: type, lhs: []const T, rhs: []const T) bool {
+ var result = compare(T, lhs, rhs);
+ if (result == Compare.LessThan) {
+ return true;
+ } else
+ return false;
}
test "mem.lessThan" {
@@ -198,6 +252,20 @@ pub fn eql(comptime T: type, a: []const T, b: []const T) bool {
return true;
}
+pub fn len(comptime T: type, ptr: [*]const T) usize {
+ var count: usize = 0;
+ while (ptr[count] != 0) : (count += 1) {}
+ return count;
+}
+
+pub fn toSliceConst(comptime T: type, ptr: [*]const T) []const T {
+ return ptr[0..len(T, ptr)];
+}
+
+pub fn toSlice(comptime T: type, ptr: [*]T) []T {
+ return ptr[0..len(T, ptr)];
+}
+
/// Returns true if all elements in a slice are equal to the scalar value provided
pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool {
for (slice) |item| {
@@ -541,7 +609,7 @@ pub fn join(allocator: *Allocator, sep: u8, strings: ...) ![]u8 {
}
}
- return buf[0..buf_index];
+ return allocator.shrink(u8, buf, buf_index);
}
test "mem.join" {
@@ -611,10 +679,38 @@ test "testWriteInt" {
comptime testWriteIntImpl();
}
fn testWriteIntImpl() void {
- var bytes: [4]u8 = undefined;
+ var bytes: [8]u8 = undefined;
+
+ writeInt(bytes[0..], u64(0x12345678CAFEBABE), builtin.Endian.Big);
+ assert(eql(u8, bytes, []u8{
+ 0x12,
+ 0x34,
+ 0x56,
+ 0x78,
+ 0xCA,
+ 0xFE,
+ 0xBA,
+ 0xBE,
+ }));
+
+ writeInt(bytes[0..], u64(0xBEBAFECA78563412), builtin.Endian.Little);
+ assert(eql(u8, bytes, []u8{
+ 0x12,
+ 0x34,
+ 0x56,
+ 0x78,
+ 0xCA,
+ 0xFE,
+ 0xBA,
+ 0xBE,
+ }));
writeInt(bytes[0..], u32(0x12345678), builtin.Endian.Big);
assert(eql(u8, bytes, []u8{
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
0x12,
0x34,
0x56,
@@ -627,10 +723,18 @@ fn testWriteIntImpl() void {
0x34,
0x56,
0x78,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
}));
writeInt(bytes[0..], u16(0x1234), builtin.Endian.Big);
assert(eql(u8, bytes, []u8{
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
0x00,
0x00,
0x12,
@@ -643,6 +747,10 @@ fn testWriteIntImpl() void {
0x12,
0x00,
0x00,
+ 0x00,
+ 0x00,
+ 0x00,
+ 0x00,
}));
}
@@ -755,3 +863,4 @@ pub fn endianSwap(comptime T: type, x: T) T {
test "std.mem.endianSwap" {
assert(endianSwap(u32, 0xDEADBEEF) == 0xEFBEADDE);
}
+
diff --git a/std/mutex.zig b/std/mutex.zig
new file mode 100644
index 0000000000..6aee87d1d7
--- /dev/null
+++ b/std/mutex.zig
@@ -0,0 +1,27 @@
+const std = @import("index.zig");
+const builtin = @import("builtin");
+const AtomicOrder = builtin.AtomicOrder;
+const AtomicRmwOp = builtin.AtomicRmwOp;
+const assert = std.debug.assert;
+
+/// TODO use syscalls instead of a spinlock
+pub const Mutex = struct {
+ lock: u8, // TODO use a bool
+
+ pub const Held = struct {
+ mutex: *Mutex,
+
+ pub fn release(self: Held) void {
+ assert(@atomicRmw(u8, &self.mutex.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+ }
+ };
+
+ pub fn init() Mutex {
+ return Mutex{ .lock = 0 };
+ }
+
+ pub fn acquire(self: *Mutex) Held {
+ while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+ return Held{ .mutex = self };
+ }
+};
diff --git a/std/os/child_process.zig b/std/os/child_process.zig
index 693129eea8..b79a8de16f 100644
--- a/std/os/child_process.zig
+++ b/std/os/child_process.zig
@@ -349,14 +349,7 @@ pub const ChildProcess = struct {
};
const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
- const dev_null_fd = if (any_ignore) blk: {
- const dev_null_path = "/dev/null";
- var fixed_buffer_mem: [dev_null_path.len + 1]u8 = undefined;
- var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- break :blk try os.posixOpen(&fixed_allocator.allocator, "/dev/null", posix.O_RDWR, 0);
- } else blk: {
- break :blk undefined;
- };
+ const dev_null_fd = if (any_ignore) try os.posixOpenC(c"/dev/null", posix.O_RDWR, 0) else undefined;
defer {
if (any_ignore) os.close(dev_null_fd);
}
@@ -453,10 +446,7 @@ pub const ChildProcess = struct {
const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
const nul_handle = if (any_ignore) blk: {
- const nul_file_path = "NUL";
- var fixed_buffer_mem: [nul_file_path.len + 1]u8 = undefined;
- var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- break :blk try os.windowsOpen(&fixed_allocator.allocator, "NUL", windows.GENERIC_READ, windows.FILE_SHARE_READ, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL);
+ break :blk try os.windowsOpen("NUL", windows.GENERIC_READ, windows.FILE_SHARE_READ, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL);
} else blk: {
break :blk undefined;
};
diff --git a/std/os/darwin.zig b/std/os/darwin.zig
index cf67b01d5a..935d28d6f1 100644
--- a/std/os/darwin.zig
+++ b/std/os/darwin.zig
@@ -482,91 +482,98 @@ pub const NOTE_MACH_CONTINUOUS_TIME = 0x00000080;
/// data is mach absolute time units
pub const NOTE_MACHTIME = 0x00000100;
-pub const AF_UNSPEC: c_int = 0;
-pub const AF_LOCAL: c_int = 1;
-pub const AF_UNIX: c_int = AF_LOCAL;
-pub const AF_INET: c_int = 2;
-pub const AF_SYS_CONTROL: c_int = 2;
-pub const AF_IMPLINK: c_int = 3;
-pub const AF_PUP: c_int = 4;
-pub const AF_CHAOS: c_int = 5;
-pub const AF_NS: c_int = 6;
-pub const AF_ISO: c_int = 7;
-pub const AF_OSI: c_int = AF_ISO;
-pub const AF_ECMA: c_int = 8;
-pub const AF_DATAKIT: c_int = 9;
-pub const AF_CCITT: c_int = 10;
-pub const AF_SNA: c_int = 11;
-pub const AF_DECnet: c_int = 12;
-pub const AF_DLI: c_int = 13;
-pub const AF_LAT: c_int = 14;
-pub const AF_HYLINK: c_int = 15;
-pub const AF_APPLETALK: c_int = 16;
-pub const AF_ROUTE: c_int = 17;
-pub const AF_LINK: c_int = 18;
-pub const AF_XTP: c_int = 19;
-pub const AF_COIP: c_int = 20;
-pub const AF_CNT: c_int = 21;
-pub const AF_RTIP: c_int = 22;
-pub const AF_IPX: c_int = 23;
-pub const AF_SIP: c_int = 24;
-pub const AF_PIP: c_int = 25;
-pub const AF_ISDN: c_int = 28;
-pub const AF_E164: c_int = AF_ISDN;
-pub const AF_KEY: c_int = 29;
-pub const AF_INET6: c_int = 30;
-pub const AF_NATM: c_int = 31;
-pub const AF_SYSTEM: c_int = 32;
-pub const AF_NETBIOS: c_int = 33;
-pub const AF_PPP: c_int = 34;
-pub const AF_MAX: c_int = 40;
+pub const AF_UNSPEC = 0;
+pub const AF_LOCAL = 1;
+pub const AF_UNIX = AF_LOCAL;
+pub const AF_INET = 2;
+pub const AF_SYS_CONTROL = 2;
+pub const AF_IMPLINK = 3;
+pub const AF_PUP = 4;
+pub const AF_CHAOS = 5;
+pub const AF_NS = 6;
+pub const AF_ISO = 7;
+pub const AF_OSI = AF_ISO;
+pub const AF_ECMA = 8;
+pub const AF_DATAKIT = 9;
+pub const AF_CCITT = 10;
+pub const AF_SNA = 11;
+pub const AF_DECnet = 12;
+pub const AF_DLI = 13;
+pub const AF_LAT = 14;
+pub const AF_HYLINK = 15;
+pub const AF_APPLETALK = 16;
+pub const AF_ROUTE = 17;
+pub const AF_LINK = 18;
+pub const AF_XTP = 19;
+pub const AF_COIP = 20;
+pub const AF_CNT = 21;
+pub const AF_RTIP = 22;
+pub const AF_IPX = 23;
+pub const AF_SIP = 24;
+pub const AF_PIP = 25;
+pub const AF_ISDN = 28;
+pub const AF_E164 = AF_ISDN;
+pub const AF_KEY = 29;
+pub const AF_INET6 = 30;
+pub const AF_NATM = 31;
+pub const AF_SYSTEM = 32;
+pub const AF_NETBIOS = 33;
+pub const AF_PPP = 34;
+pub const AF_MAX = 40;
-pub const PF_UNSPEC: c_int = AF_UNSPEC;
-pub const PF_LOCAL: c_int = AF_LOCAL;
-pub const PF_UNIX: c_int = PF_LOCAL;
-pub const PF_INET: c_int = AF_INET;
-pub const PF_IMPLINK: c_int = AF_IMPLINK;
-pub const PF_PUP: c_int = AF_PUP;
-pub const PF_CHAOS: c_int = AF_CHAOS;
-pub const PF_NS: c_int = AF_NS;
-pub const PF_ISO: c_int = AF_ISO;
-pub const PF_OSI: c_int = AF_ISO;
-pub const PF_ECMA: c_int = AF_ECMA;
-pub const PF_DATAKIT: c_int = AF_DATAKIT;
-pub const PF_CCITT: c_int = AF_CCITT;
-pub const PF_SNA: c_int = AF_SNA;
-pub const PF_DECnet: c_int = AF_DECnet;
-pub const PF_DLI: c_int = AF_DLI;
-pub const PF_LAT: c_int = AF_LAT;
-pub const PF_HYLINK: c_int = AF_HYLINK;
-pub const PF_APPLETALK: c_int = AF_APPLETALK;
-pub const PF_ROUTE: c_int = AF_ROUTE;
-pub const PF_LINK: c_int = AF_LINK;
-pub const PF_XTP: c_int = AF_XTP;
-pub const PF_COIP: c_int = AF_COIP;
-pub const PF_CNT: c_int = AF_CNT;
-pub const PF_SIP: c_int = AF_SIP;
-pub const PF_IPX: c_int = AF_IPX;
-pub const PF_RTIP: c_int = AF_RTIP;
-pub const PF_PIP: c_int = AF_PIP;
-pub const PF_ISDN: c_int = AF_ISDN;
-pub const PF_KEY: c_int = AF_KEY;
-pub const PF_INET6: c_int = AF_INET6;
-pub const PF_NATM: c_int = AF_NATM;
-pub const PF_SYSTEM: c_int = AF_SYSTEM;
-pub const PF_NETBIOS: c_int = AF_NETBIOS;
-pub const PF_PPP: c_int = AF_PPP;
-pub const PF_MAX: c_int = AF_MAX;
+pub const PF_UNSPEC = AF_UNSPEC;
+pub const PF_LOCAL = AF_LOCAL;
+pub const PF_UNIX = PF_LOCAL;
+pub const PF_INET = AF_INET;
+pub const PF_IMPLINK = AF_IMPLINK;
+pub const PF_PUP = AF_PUP;
+pub const PF_CHAOS = AF_CHAOS;
+pub const PF_NS = AF_NS;
+pub const PF_ISO = AF_ISO;
+pub const PF_OSI = AF_ISO;
+pub const PF_ECMA = AF_ECMA;
+pub const PF_DATAKIT = AF_DATAKIT;
+pub const PF_CCITT = AF_CCITT;
+pub const PF_SNA = AF_SNA;
+pub const PF_DECnet = AF_DECnet;
+pub const PF_DLI = AF_DLI;
+pub const PF_LAT = AF_LAT;
+pub const PF_HYLINK = AF_HYLINK;
+pub const PF_APPLETALK = AF_APPLETALK;
+pub const PF_ROUTE = AF_ROUTE;
+pub const PF_LINK = AF_LINK;
+pub const PF_XTP = AF_XTP;
+pub const PF_COIP = AF_COIP;
+pub const PF_CNT = AF_CNT;
+pub const PF_SIP = AF_SIP;
+pub const PF_IPX = AF_IPX;
+pub const PF_RTIP = AF_RTIP;
+pub const PF_PIP = AF_PIP;
+pub const PF_ISDN = AF_ISDN;
+pub const PF_KEY = AF_KEY;
+pub const PF_INET6 = AF_INET6;
+pub const PF_NATM = AF_NATM;
+pub const PF_SYSTEM = AF_SYSTEM;
+pub const PF_NETBIOS = AF_NETBIOS;
+pub const PF_PPP = AF_PPP;
+pub const PF_MAX = AF_MAX;
-pub const SYSPROTO_EVENT: c_int = 1;
-pub const SYSPROTO_CONTROL: c_int = 2;
+pub const SYSPROTO_EVENT = 1;
+pub const SYSPROTO_CONTROL = 2;
-pub const SOCK_STREAM: c_int = 1;
-pub const SOCK_DGRAM: c_int = 2;
-pub const SOCK_RAW: c_int = 3;
-pub const SOCK_RDM: c_int = 4;
-pub const SOCK_SEQPACKET: c_int = 5;
-pub const SOCK_MAXADDRLEN: c_int = 255;
+pub const SOCK_STREAM = 1;
+pub const SOCK_DGRAM = 2;
+pub const SOCK_RAW = 3;
+pub const SOCK_RDM = 4;
+pub const SOCK_SEQPACKET = 5;
+pub const SOCK_MAXADDRLEN = 255;
+
+pub const IPPROTO_ICMP = 1;
+pub const IPPROTO_ICMPV6 = 58;
+pub const IPPROTO_TCP = 6;
+pub const IPPROTO_UDP = 17;
+pub const IPPROTO_IP = 0;
+pub const IPPROTO_IPV6 = 41;
fn wstatus(x: i32) i32 {
return x & 0o177;
@@ -605,6 +612,11 @@ pub fn abort() noreturn {
c.abort();
}
+// int bind(int socket, const struct sockaddr *address, socklen_t address_len)
+pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
+ return errnoWrap(c.bind(@bitCast(c_int, fd), addr, len));
+}
+
pub fn exit(code: i32) noreturn {
c.exit(code);
}
@@ -634,6 +646,10 @@ pub fn read(fd: i32, buf: [*]u8, nbyte: usize) usize {
return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte));
}
+pub fn pread(fd: i32, buf: [*]u8, nbyte: usize, offset: u64) usize {
+ return errnoWrap(c.pread(fd, @ptrCast(*c_void, buf), nbyte, offset));
+}
+
pub fn stat(noalias path: [*]const u8, noalias buf: *stat) usize {
return errnoWrap(c.stat(path, buf));
}
@@ -642,6 +658,10 @@ pub fn write(fd: i32, buf: [*]const u8, nbyte: usize) usize {
return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte));
}
+pub fn pwrite(fd: i32, buf: [*]const u8, nbyte: usize, offset: u64) usize {
+ return errnoWrap(c.pwrite(fd, @ptrCast(*const c_void, buf), nbyte, offset));
+}
+
pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
const ptr_result = c.mmap(
@ptrCast(*c_void, address),
@@ -805,6 +825,20 @@ pub fn sigaction(sig: u5, noalias act: *const Sigaction, noalias oact: ?*Sigacti
return result;
}
+pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
+ return errnoWrap(c.socket(@bitCast(c_int, domain), @bitCast(c_int, socket_type), @bitCast(c_int, protocol)));
+}
+
+pub const iovec = extern struct {
+ iov_base: [*]u8,
+ iov_len: usize,
+};
+
+pub const iovec_const = extern struct {
+ iov_base: [*]const u8,
+ iov_len: usize,
+};
+
pub const sigset_t = c.sigset_t;
pub const empty_sigset = sigset_t(0);
@@ -812,8 +846,13 @@ pub const timespec = c.timespec;
pub const Stat = c.Stat;
pub const dirent = c.dirent;
+pub const in_port_t = c.in_port_t;
pub const sa_family_t = c.sa_family_t;
+pub const socklen_t = c.socklen_t;
+
pub const sockaddr = c.sockaddr;
+pub const sockaddr_in = c.sockaddr_in;
+pub const sockaddr_in6 = c.sockaddr_in6;
/// Renamed from `kevent` to `Kevent` to avoid conflict with the syscall.
pub const Kevent = c.Kevent;
diff --git a/std/os/file.zig b/std/os/file.zig
index 6998ba00d1..1f5ce7cf9d 100644
--- a/std/os/file.zig
+++ b/std/os/file.zig
@@ -7,6 +7,7 @@ const assert = std.debug.assert;
const posix = os.posix;
const windows = os.windows;
const Os = builtin.Os;
+const windows_util = @import("windows/util.zig");
const is_posix = builtin.os != builtin.Os.windows;
const is_windows = builtin.os == builtin.Os.windows;
@@ -15,18 +16,39 @@ pub const File = struct {
/// The OS-specific file descriptor or file handle.
handle: os.FileHandle,
+ pub const Mode = switch (builtin.os) {
+ Os.windows => void,
+ else => u32,
+ };
+
+ pub const default_mode = switch (builtin.os) {
+ Os.windows => {},
+ else => 0o666,
+ };
+
pub const OpenError = os.WindowsOpenError || os.PosixOpenError;
- /// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
- /// Call close to clean up.
- pub fn openRead(allocator: *mem.Allocator, path: []const u8) OpenError!File {
+ /// `openRead` except with a null terminated path
+ pub fn openReadC(path: [*]const u8) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_RDONLY;
- const fd = try os.posixOpen(allocator, path, flags, 0);
+ const fd = try os.posixOpenC(path, flags, 0);
return openHandle(fd);
- } else if (is_windows) {
+ }
+ if (is_windows) {
+ return openRead(mem.toSliceConst(u8, path));
+ }
+ @compileError("Unsupported OS");
+ }
+
+ /// Call close to clean up.
+ pub fn openRead(path: []const u8) OpenError!File {
+ if (is_posix) {
+ const path_c = try os.toPosixPath(path);
+ return openReadC(&path_c);
+ }
+ if (is_windows) {
const handle = try os.windowsOpen(
- allocator,
path,
windows.GENERIC_READ,
windows.FILE_SHARE_READ,
@@ -34,28 +56,25 @@ pub const File = struct {
windows.FILE_ATTRIBUTE_NORMAL,
);
return openHandle(handle);
- } else {
- @compileError("TODO implement openRead for this OS");
}
+ @compileError("Unsupported OS");
}
- /// Calls `openWriteMode` with os.default_file_mode for the mode.
- pub fn openWrite(allocator: *mem.Allocator, path: []const u8) OpenError!File {
- return openWriteMode(allocator, path, os.default_file_mode);
+ /// Calls `openWriteMode` with os.File.default_mode for the mode.
+ pub fn openWrite(path: []const u8) OpenError!File {
+ return openWriteMode(path, os.File.default_mode);
}
/// If the path does not exist it will be created.
/// If a file already exists in the destination it will be truncated.
- /// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
- pub fn openWriteMode(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
+ pub fn openWriteMode(path: []const u8, file_mode: Mode) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_TRUNC;
- const fd = try os.posixOpen(allocator, path, flags, file_mode);
+ const fd = try os.posixOpen(path, flags, file_mode);
return openHandle(fd);
} else if (is_windows) {
const handle = try os.windowsOpen(
- allocator,
path,
windows.GENERIC_WRITE,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
@@ -70,16 +89,14 @@ pub const File = struct {
/// If the path does not exist it will be created.
/// If a file already exists in the destination this returns OpenError.PathAlreadyExists
- /// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
- pub fn openWriteNoClobber(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
+ pub fn openWriteNoClobber(path: []const u8, file_mode: Mode) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_EXCL;
- const fd = try os.posixOpen(allocator, path, flags, file_mode);
+ const fd = try os.posixOpen(path, flags, file_mode);
return openHandle(fd);
} else if (is_windows) {
const handle = try os.windowsOpen(
- allocator,
path,
windows.GENERIC_WRITE,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
@@ -98,23 +115,43 @@ pub const File = struct {
pub const AccessError = error{
PermissionDenied,
- NotFound,
+ FileNotFound,
NameTooLong,
- BadMode,
- BadPathName,
- Io,
+ InputOutput,
SystemResources,
- OutOfMemory,
+ BadPathName,
+
+ /// On Windows, file paths must be valid Unicode.
+ InvalidUtf8,
Unexpected,
};
- pub fn access(allocator: *mem.Allocator, path: []const u8) AccessError!void {
- const path_with_null = try std.cstr.addNullByte(allocator, path);
- defer allocator.free(path_with_null);
+ /// Call from Windows-specific code if you already have a UTF-16LE encoded, null terminated string.
+ /// Otherwise use `access` or `accessC`.
+ pub fn accessW(path: [*]const u16) AccessError!void {
+ if (os.windows.GetFileAttributesW(path) != os.windows.INVALID_FILE_ATTRIBUTES) {
+ return;
+ }
+ const err = windows.GetLastError();
+ switch (err) {
+ windows.ERROR.FILE_NOT_FOUND => return error.FileNotFound,
+ windows.ERROR.PATH_NOT_FOUND => return error.FileNotFound,
+ windows.ERROR.ACCESS_DENIED => return error.PermissionDenied,
+ else => return os.unexpectedErrorWindows(err),
+ }
+ }
+
+ /// Call if you have a UTF-8 encoded, null-terminated string.
+ /// Otherwise use `access` or `accessW`.
+ pub fn accessC(path: [*]const u8) AccessError!void {
+ if (is_windows) {
+ const path_w = try windows_util.cStrToPrefixedFileW(path);
+ return accessW(&path_w);
+ }
if (is_posix) {
- const result = posix.access(path_with_null.ptr, posix.F_OK);
+ const result = posix.access(path, posix.F_OK);
const err = posix.getErrno(result);
switch (err) {
0 => return,
@@ -122,32 +159,33 @@ pub const File = struct {
posix.EROFS => return error.PermissionDenied,
posix.ELOOP => return error.PermissionDenied,
posix.ETXTBSY => return error.PermissionDenied,
- posix.ENOTDIR => return error.NotFound,
- posix.ENOENT => return error.NotFound,
+ posix.ENOTDIR => return error.FileNotFound,
+ posix.ENOENT => return error.FileNotFound,
posix.ENAMETOOLONG => return error.NameTooLong,
posix.EINVAL => unreachable,
- posix.EFAULT => return error.BadPathName,
- posix.EIO => return error.Io,
+ posix.EFAULT => unreachable,
+ posix.EIO => return error.InputOutput,
posix.ENOMEM => return error.SystemResources,
else => return os.unexpectedErrorPosix(err),
}
- } else if (is_windows) {
- if (os.windows.GetFileAttributesA(path_with_null.ptr) != os.windows.INVALID_FILE_ATTRIBUTES) {
- return;
- }
-
- const err = windows.GetLastError();
- switch (err) {
- windows.ERROR.FILE_NOT_FOUND,
- windows.ERROR.PATH_NOT_FOUND,
- => return error.NotFound,
- windows.ERROR.ACCESS_DENIED => return error.PermissionDenied,
- else => return os.unexpectedErrorWindows(err),
- }
- } else {
- @compileError("TODO implement access for this OS");
}
+ @compileError("Unsupported OS");
+ }
+
+ pub fn access(path: []const u8) AccessError!void {
+ if (is_windows) {
+ const path_w = try windows_util.sliceToPrefixedFileW(path);
+ return accessW(&path_w);
+ }
+ if (is_posix) {
+ var path_with_null: [posix.PATH_MAX]u8 = undefined;
+ if (path.len >= posix.PATH_MAX) return error.NameTooLong;
+ mem.copy(u8, path_with_null[0..], path);
+ path_with_null[path.len] = 0;
+ return accessC(&path_with_null);
+ }
+ @compileError("Unsupported OS");
}
/// Upon success, the stream is in an uninitialized state. To continue using it,
@@ -169,7 +207,9 @@ pub const File = struct {
const err = posix.getErrno(result);
if (err > 0) {
return switch (err) {
- posix.EBADF => error.BadFd,
+ // We do not make this an error code because if you get EBADF it's always a bug,
+ // since the fd could have been reused.
+ posix.EBADF => unreachable,
posix.EINVAL => error.Unseekable,
posix.EOVERFLOW => error.Unseekable,
posix.ESPIPE => error.Unseekable,
@@ -182,7 +222,7 @@ pub const File = struct {
if (windows.SetFilePointerEx(self.handle, amount, null, windows.FILE_CURRENT) == 0) {
const err = windows.GetLastError();
return switch (err) {
- windows.ERROR.INVALID_PARAMETER => error.BadFd,
+ windows.ERROR.INVALID_PARAMETER => unreachable,
else => os.unexpectedErrorWindows(err),
};
}
@@ -199,7 +239,9 @@ pub const File = struct {
const err = posix.getErrno(result);
if (err > 0) {
return switch (err) {
- posix.EBADF => error.BadFd,
+ // We do not make this an error code because if you get EBADF it's always a bug,
+ // since the fd could have been reused.
+ posix.EBADF => unreachable,
posix.EINVAL => error.Unseekable,
posix.EOVERFLOW => error.Unseekable,
posix.ESPIPE => error.Unseekable,
@@ -213,7 +255,7 @@ pub const File = struct {
if (windows.SetFilePointerEx(self.handle, ipos, null, windows.FILE_BEGIN) == 0) {
const err = windows.GetLastError();
return switch (err) {
- windows.ERROR.INVALID_PARAMETER => error.BadFd,
+ windows.ERROR.INVALID_PARAMETER => unreachable,
else => os.unexpectedErrorWindows(err),
};
}
@@ -229,7 +271,9 @@ pub const File = struct {
const err = posix.getErrno(result);
if (err > 0) {
return switch (err) {
- posix.EBADF => error.BadFd,
+ // We do not make this an error code because if you get EBADF it's always a bug,
+ // since the fd could have been reused.
+ posix.EBADF => unreachable,
posix.EINVAL => error.Unseekable,
posix.EOVERFLOW => error.Unseekable,
posix.ESPIPE => error.Unseekable,
@@ -244,7 +288,7 @@ pub const File = struct {
if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) {
const err = windows.GetLastError();
return switch (err) {
- windows.ERROR.INVALID_PARAMETER => error.BadFd,
+ windows.ERROR.INVALID_PARAMETER => unreachable,
else => os.unexpectedErrorWindows(err),
};
}
@@ -277,18 +321,19 @@ pub const File = struct {
}
pub const ModeError = error{
- BadFd,
SystemResources,
Unexpected,
};
- pub fn mode(self: *File) ModeError!os.FileMode {
+ pub fn mode(self: *File) ModeError!Mode {
if (is_posix) {
var stat: posix.Stat = undefined;
const err = posix.getErrno(posix.fstat(self.handle, &stat));
if (err > 0) {
return switch (err) {
- posix.EBADF => error.BadFd,
+ // We do not make this an error code because if you get EBADF it's always a bug,
+ // since the fd could have been reused.
+ posix.EBADF => unreachable,
posix.ENOMEM => error.SystemResources,
else => os.unexpectedErrorPosix(err),
};
@@ -296,7 +341,7 @@ pub const File = struct {
// TODO: we should be able to cast u16 to ModeError!u32, making this
// explicit cast not necessary
- return os.FileMode(stat.mode);
+ return Mode(stat.mode);
} else if (is_windows) {
return {};
} else {
@@ -305,9 +350,11 @@ pub const File = struct {
}
pub const ReadError = error{
- BadFd,
- Io,
+ FileClosed,
+ InputOutput,
IsDir,
+ WouldBlock,
+ SystemResources,
Unexpected,
};
@@ -323,9 +370,12 @@ pub const File = struct {
posix.EINTR => continue,
posix.EINVAL => unreachable,
posix.EFAULT => unreachable,
- posix.EBADF => return error.BadFd,
- posix.EIO => return error.Io,
+ posix.EAGAIN => return error.WouldBlock,
+ posix.EBADF => return error.FileClosed,
+ posix.EIO => return error.InputOutput,
posix.EISDIR => return error.IsDir,
+ posix.ENOBUFS => return error.SystemResources,
+ posix.ENOMEM => return error.SystemResources,
else => return os.unexpectedErrorPosix(read_err),
}
}
@@ -338,7 +388,7 @@ pub const File = struct {
while (index < buffer.len) {
const want_read_count = @intCast(windows.DWORD, math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index));
var amt_read: windows.DWORD = undefined;
- if (windows.ReadFile(self.handle, @ptrCast(*c_void, buffer.ptr + index), want_read_count, &amt_read, null) == 0) {
+ if (windows.ReadFile(self.handle, buffer.ptr + index, want_read_count, &amt_read, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.OPERATION_ABORTED => continue,
diff --git a/std/os/get_app_data_dir.zig b/std/os/get_app_data_dir.zig
index e8ae5dd490..da9c6c3cb4 100644
--- a/std/os/get_app_data_dir.zig
+++ b/std/os/get_app_data_dir.zig
@@ -10,6 +10,7 @@ pub const GetAppDataDirError = error{
};
/// Caller owns returned memory.
+/// TODO determine if we can remove the allocator requirement
pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataDirError![]u8 {
switch (builtin.os) {
builtin.Os.windows => {
@@ -22,7 +23,7 @@ pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataD
)) {
os.windows.S_OK => {
defer os.windows.CoTaskMemFree(@ptrCast(*c_void, dir_path_ptr));
- const global_dir = unicode.utf16leToUtf8(allocator, utf16lePtrSlice(dir_path_ptr)) catch |err| switch (err) {
+ const global_dir = unicode.utf16leToUtf8Alloc(allocator, utf16lePtrSlice(dir_path_ptr)) catch |err| switch (err) {
error.UnexpectedSecondSurrogateHalf => return error.AppDataDirUnavailable,
error.ExpectedSecondSurrogateHalf => return error.AppDataDirUnavailable,
error.DanglingSurrogateHalf => return error.AppDataDirUnavailable,
diff --git a/std/os/index.zig b/std/os/index.zig
index 425a900a71..29d887e214 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -38,17 +38,16 @@ pub const path = @import("path.zig");
pub const File = @import("file.zig").File;
pub const time = @import("time.zig");
-pub const FileMode = switch (builtin.os) {
- Os.windows => void,
- else => u32,
-};
-
-pub const default_file_mode = switch (builtin.os) {
- Os.windows => {},
- else => 0o666,
-};
-
pub const page_size = 4 * 1024;
+pub const MAX_PATH_BYTES = switch (builtin.os) {
+ Os.linux, Os.macosx, Os.ios => posix.PATH_MAX,
+ // Each UTF-16LE character may be expanded to 3 UTF-8 bytes.
+ // If it would require 4 UTF-8 bytes, then there would be a surrogate
+ // pair in the UTF-16LE, and we (over)account 3 bytes for it that way.
+ // +1 for the null byte at the end, which can be encoded in 1 byte.
+ Os.windows => windows_util.PATH_MAX_WIDE * 3 + 1,
+ else => @compileError("Unsupported OS"),
+};
pub const UserInfo = @import("get_user_id.zig").UserInfo;
pub const getUserInfo = @import("get_user_id.zig").getUserInfo;
@@ -160,7 +159,7 @@ test "os.getRandomBytes" {
try getRandomBytes(buf_b[0..]);
// Check if random (not 100% conclusive)
- assert( !mem.eql(u8, buf_a, buf_b) );
+ assert(!mem.eql(u8, buf_a, buf_b));
}
/// Raises a signal in the current kernel thread, ending its execution.
@@ -256,6 +255,67 @@ pub fn posixRead(fd: i32, buf: []u8) !void {
}
}
+/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
+pub fn posix_preadv(fd: i32, iov: [*]const posix.iovec, count: usize, offset: u64) !usize {
+ switch (builtin.os) {
+ builtin.Os.macosx => {
+ // Darwin does not have preadv but it does have pread.
+ var off: usize = 0;
+ var iov_i: usize = 0;
+ var inner_off: usize = 0;
+ while (true) {
+ const v = iov[iov_i];
+ const rc = darwin.pread(fd, v.iov_base + inner_off, v.iov_len - inner_off, offset + off);
+ const err = darwin.getErrno(rc);
+ switch (err) {
+ 0 => {
+ off += rc;
+ inner_off += rc;
+ if (inner_off == v.iov_len) {
+ iov_i += 1;
+ inner_off = 0;
+ if (iov_i == count) {
+ return off;
+ }
+ }
+ if (rc == 0) return off; // EOF
+ continue;
+ },
+ posix.EINTR => continue,
+ posix.EINVAL => unreachable,
+ posix.EFAULT => unreachable,
+ posix.ESPIPE => unreachable, // fd is not seekable
+ posix.EAGAIN => return error.WouldBlock,
+ posix.EBADF => return error.FileClosed,
+ posix.EIO => return error.InputOutput,
+ posix.EISDIR => return error.IsDir,
+ posix.ENOBUFS => return error.SystemResources,
+ posix.ENOMEM => return error.SystemResources,
+ else => return unexpectedErrorPosix(err),
+ }
+ }
+ },
+ builtin.Os.linux, builtin.Os.freebsd => while (true) {
+ const rc = posix.preadv(fd, iov, count, offset);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => return rc,
+ posix.EINTR => continue,
+ posix.EINVAL => unreachable,
+ posix.EFAULT => unreachable,
+ posix.EAGAIN => return error.WouldBlock,
+ posix.EBADF => return error.FileClosed,
+ posix.EIO => return error.InputOutput,
+ posix.EISDIR => return error.IsDir,
+ posix.ENOBUFS => return error.SystemResources,
+ posix.ENOMEM => return error.SystemResources,
+ else => return unexpectedErrorPosix(err),
+ }
+ },
+ else => @compileError("Unsupported OS"),
+ }
+}
+
pub const PosixWriteError = error{
WouldBlock,
FileClosed,
@@ -266,6 +326,8 @@ pub const PosixWriteError = error{
NoSpaceLeft,
AccessDenied,
BrokenPipe,
+
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -300,8 +362,72 @@ pub fn posixWrite(fd: i32, bytes: []const u8) !void {
}
}
+pub fn posix_pwritev(fd: i32, iov: [*]const posix.iovec_const, count: usize, offset: u64) PosixWriteError!void {
+ switch (builtin.os) {
+ builtin.Os.macosx => {
+ // Darwin does not have pwritev but it does have pwrite.
+ var off: usize = 0;
+ var iov_i: usize = 0;
+ var inner_off: usize = 0;
+ while (true) {
+ const v = iov[iov_i];
+ const rc = darwin.pwrite(fd, v.iov_base + inner_off, v.iov_len - inner_off, offset + off);
+ const err = darwin.getErrno(rc);
+ switch (err) {
+ 0 => {
+ off += rc;
+ inner_off += rc;
+ if (inner_off == v.iov_len) {
+ iov_i += 1;
+ inner_off = 0;
+ if (iov_i == count) {
+ return;
+ }
+ }
+ continue;
+ },
+ posix.EINTR => continue,
+ posix.ESPIPE => unreachable, // fd is not seekable
+ posix.EINVAL => unreachable,
+ posix.EFAULT => unreachable,
+ posix.EAGAIN => return PosixWriteError.WouldBlock,
+ posix.EBADF => return PosixWriteError.FileClosed,
+ posix.EDESTADDRREQ => return PosixWriteError.DestinationAddressRequired,
+ posix.EDQUOT => return PosixWriteError.DiskQuota,
+ posix.EFBIG => return PosixWriteError.FileTooBig,
+ posix.EIO => return PosixWriteError.InputOutput,
+ posix.ENOSPC => return PosixWriteError.NoSpaceLeft,
+ posix.EPERM => return PosixWriteError.AccessDenied,
+ posix.EPIPE => return PosixWriteError.BrokenPipe,
+ else => return unexpectedErrorPosix(err),
+ }
+ }
+ },
+ builtin.Os.linux => while (true) {
+ const rc = posix.pwritev(fd, iov, count, offset);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => return,
+ posix.EINTR => continue,
+ posix.EINVAL => unreachable,
+ posix.EFAULT => unreachable,
+ posix.EAGAIN => return PosixWriteError.WouldBlock,
+ posix.EBADF => return PosixWriteError.FileClosed,
+ posix.EDESTADDRREQ => return PosixWriteError.DestinationAddressRequired,
+ posix.EDQUOT => return PosixWriteError.DiskQuota,
+ posix.EFBIG => return PosixWriteError.FileTooBig,
+ posix.EIO => return PosixWriteError.InputOutput,
+ posix.ENOSPC => return PosixWriteError.NoSpaceLeft,
+ posix.EPERM => return PosixWriteError.AccessDenied,
+ posix.EPIPE => return PosixWriteError.BrokenPipe,
+ else => return unexpectedErrorPosix(err),
+ }
+ },
+ else => @compileError("Unsupported OS"),
+ }
+}
+
pub const PosixOpenError = error{
- OutOfMemory,
AccessDenied,
FileTooBig,
IsDir,
@@ -310,22 +436,22 @@ pub const PosixOpenError = error{
NameTooLong,
SystemFdQuotaExceeded,
NoDevice,
- PathNotFound,
+ FileNotFound,
SystemResources,
NoSpaceLeft,
NotDir,
PathAlreadyExists,
+
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
/// ::file_path needs to be copied in memory to add a null terminating byte.
/// Calls POSIX open, keeps trying if it gets interrupted, and translates
/// the return value into zig errors.
-pub fn posixOpen(allocator: *Allocator, file_path: []const u8, flags: u32, perm: usize) PosixOpenError!i32 {
- const path_with_null = try cstr.addNullByte(allocator, file_path);
- defer allocator.free(path_with_null);
-
- return posixOpenC(path_with_null.ptr, flags, perm);
+pub fn posixOpen(file_path: []const u8, flags: u32, perm: usize) PosixOpenError!i32 {
+ const file_path_c = try toPosixPath(file_path);
+ return posixOpenC(&file_path_c, flags, perm);
}
// TODO https://github.com/ziglang/zig/issues/265
@@ -347,7 +473,7 @@ pub fn posixOpenC(file_path: [*]const u8, flags: u32, perm: usize) !i32 {
posix.ENAMETOOLONG => return PosixOpenError.NameTooLong,
posix.ENFILE => return PosixOpenError.SystemFdQuotaExceeded,
posix.ENODEV => return PosixOpenError.NoDevice,
- posix.ENOENT => return PosixOpenError.PathNotFound,
+ posix.ENOENT => return PosixOpenError.FileNotFound,
posix.ENOMEM => return PosixOpenError.SystemResources,
posix.ENOSPC => return PosixOpenError.NoSpaceLeft,
posix.ENOTDIR => return PosixOpenError.NotDir,
@@ -360,6 +486,16 @@ pub fn posixOpenC(file_path: [*]const u8, flags: u32, perm: usize) !i32 {
}
}
+/// Used to convert a slice to a null terminated slice on the stack.
+/// TODO well defined copy elision
+pub fn toPosixPath(file_path: []const u8) ![posix.PATH_MAX]u8 {
+ var path_with_null: [posix.PATH_MAX]u8 = undefined;
+ if (file_path.len >= posix.PATH_MAX) return error.NameTooLong;
+ mem.copy(u8, path_with_null[0..], file_path);
+ path_with_null[file_path.len] = 0;
+ return path_with_null;
+}
+
pub fn posixDup2(old_fd: i32, new_fd: i32) !void {
while (true) {
const err = posix.getErrno(posix.dup2(old_fd, new_fd));
@@ -475,6 +611,8 @@ pub const PosixExecveError = error{
FileNotFound,
NotDir,
FileBusy,
+
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -497,6 +635,35 @@ fn posixExecveErrnoToErr(err: usize) PosixExecveError {
pub var linux_aux_raw = []usize{0} ** 38;
pub var posix_environ_raw: [][*]u8 = undefined;
+/// See std.elf for the constants.
+pub fn linuxGetAuxVal(index: usize) usize {
+ if (builtin.link_libc) {
+ return usize(std.c.getauxval(index));
+ } else {
+ return linux_aux_raw[index];
+ }
+}
+
+pub fn getBaseAddress() usize {
+ switch (builtin.os) {
+ builtin.Os.linux => {
+ const base = linuxGetAuxVal(std.elf.AT_BASE);
+ if (base != 0) {
+ return base;
+ }
+ const phdr = linuxGetAuxVal(std.elf.AT_PHDR);
+ const ElfHeader = switch (@sizeOf(usize)) {
+ 4 => std.elf.Elf32_Ehdr,
+ 8 => std.elf.Elf64_Ehdr,
+ else => @compileError("Unsupported architecture"),
+ };
+ return phdr - @sizeOf(ElfHeader);
+ },
+ builtin.Os.macosx => return @ptrToInt(&std.c._mh_execute_header),
+ else => @compileError("Unsupported OS"),
+ }
+}
+
/// Caller must free result when done.
/// TODO make this go through libc when we have it
pub fn getEnvMap(allocator: *Allocator) !BufMap {
@@ -603,43 +770,39 @@ pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwned
}
/// Caller must free the returned memory.
-pub fn getCwd(allocator: *Allocator) ![]u8 {
+pub fn getCwdAlloc(allocator: *Allocator) ![]u8 {
+ var buf: [MAX_PATH_BYTES]u8 = undefined;
+ return mem.dupe(allocator, u8, try getCwd(&buf));
+}
+
+pub const GetCwdError = error{Unexpected};
+
+/// The result is a slice of out_buffer.
+pub fn getCwd(out_buffer: *[MAX_PATH_BYTES]u8) GetCwdError![]u8 {
switch (builtin.os) {
Os.windows => {
- var buf = try allocator.alloc(u8, 256);
- errdefer allocator.free(buf);
-
- while (true) {
- const result = windows.GetCurrentDirectoryA(@intCast(windows.WORD, buf.len), buf.ptr);
-
- if (result == 0) {
- const err = windows.GetLastError();
- return switch (err) {
- else => unexpectedErrorWindows(err),
- };
+ var utf16le_buf: [windows_util.PATH_MAX_WIDE]u16 = undefined;
+ const casted_len = @intCast(windows.DWORD, utf16le_buf.len); // TODO shouldn't need this cast
+ const casted_ptr = ([*]u16)(&utf16le_buf); // TODO shouldn't need this cast
+ const result = windows.GetCurrentDirectoryW(casted_len, casted_ptr);
+ if (result == 0) {
+ const err = windows.GetLastError();
+ switch (err) {
+ else => return unexpectedErrorWindows(err),
}
-
- if (result > buf.len) {
- buf = try allocator.realloc(u8, buf, result);
- continue;
- }
-
- return allocator.shrink(u8, buf, result);
}
+ assert(result <= utf16le_buf.len);
+ const utf16le_slice = utf16le_buf[0..result];
+ // Trust that Windows gives us valid UTF-16LE.
+ const end_index = std.unicode.utf16leToUtf8(out_buffer, utf16le_slice) catch unreachable;
+ return out_buffer[0..end_index];
},
else => {
- var buf = try allocator.alloc(u8, 1024);
- errdefer allocator.free(buf);
- while (true) {
- const err = posix.getErrno(posix.getcwd(buf.ptr, buf.len));
- if (err == posix.ERANGE) {
- buf = try allocator.realloc(u8, buf, buf.len * 2);
- continue;
- } else if (err > 0) {
- return unexpectedErrorPosix(err);
- }
-
- return allocator.shrink(u8, buf, cstr.len(buf.ptr));
+ const err = posix.getErrno(posix.getcwd(out_buffer, out_buffer.len));
+ switch (err) {
+ 0 => return cstr.toSlice(out_buffer),
+ posix.ERANGE => unreachable,
+ else => return unexpectedErrorPosix(err),
}
},
}
@@ -647,7 +810,9 @@ pub fn getCwd(allocator: *Allocator) ![]u8 {
test "os.getCwd" {
// at least call it so it gets compiled
- _ = getCwd(debug.global_allocator);
+ _ = getCwdAlloc(debug.global_allocator);
+ var buf: [MAX_PATH_BYTES]u8 = undefined;
+ _ = getCwd(&buf);
}
pub const SymLinkError = PosixSymLinkError || WindowsSymLinkError;
@@ -662,6 +827,8 @@ pub fn symLink(allocator: *Allocator, existing_path: []const u8, new_path: []con
pub const WindowsSymLinkError = error{
OutOfMemory,
+
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -692,6 +859,8 @@ pub const PosixSymLinkError = error{
NoSpaceLeft,
ReadOnlyFileSystem,
NotDir,
+
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -750,7 +919,7 @@ pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path:
b64_fs_encoder.encode(tmp_path[dirname.len + 1 ..], rand_buf);
if (symLink(allocator, existing_path, tmp_path)) {
- return rename(allocator, tmp_path, new_path);
+ return rename(tmp_path, new_path);
} else |err| switch (err) {
error.PathAlreadyExists => continue,
else => return err, // TODO zig should know this set does not include PathAlreadyExists
@@ -769,70 +938,75 @@ pub const DeleteFileError = error{
NotDir,
SystemResources,
ReadOnlyFileSystem,
- OutOfMemory,
+ /// On Windows, file paths must be valid Unicode.
+ InvalidUtf8,
+
+ /// On Windows, file paths cannot contain these characters:
+ /// '/', '*', '?', '"', '<', '>', '|'
+ BadPathName,
+
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
-pub fn deleteFile(allocator: *Allocator, file_path: []const u8) DeleteFileError!void {
+pub fn deleteFile(file_path: []const u8) DeleteFileError!void {
if (builtin.os == Os.windows) {
- return deleteFileWindows(allocator, file_path);
+ return deleteFileWindows(file_path);
} else {
- return deleteFilePosix(allocator, file_path);
+ return deleteFilePosix(file_path);
}
}
-pub fn deleteFileWindows(allocator: *Allocator, file_path: []const u8) !void {
- const buf = try allocator.alloc(u8, file_path.len + 1);
- defer allocator.free(buf);
+pub fn deleteFileWindows(file_path: []const u8) !void {
+ const file_path_w = try windows_util.sliceToPrefixedFileW(file_path);
- mem.copy(u8, buf, file_path);
- buf[file_path.len] = 0;
-
- if (windows.DeleteFileA(buf.ptr) == 0) {
+ if (windows.DeleteFileW(&file_path_w) == 0) {
const err = windows.GetLastError();
- return switch (err) {
- windows.ERROR.FILE_NOT_FOUND => error.FileNotFound,
- windows.ERROR.ACCESS_DENIED => error.AccessDenied,
- windows.ERROR.FILENAME_EXCED_RANGE, windows.ERROR.INVALID_PARAMETER => error.NameTooLong,
- else => unexpectedErrorWindows(err),
- };
+ switch (err) {
+ windows.ERROR.FILE_NOT_FOUND => return error.FileNotFound,
+ windows.ERROR.ACCESS_DENIED => return error.AccessDenied,
+ windows.ERROR.FILENAME_EXCED_RANGE => return error.NameTooLong,
+ windows.ERROR.INVALID_PARAMETER => return error.NameTooLong,
+ else => return unexpectedErrorWindows(err),
+ }
}
}
-pub fn deleteFilePosix(allocator: *Allocator, file_path: []const u8) !void {
- const buf = try allocator.alloc(u8, file_path.len + 1);
- defer allocator.free(buf);
-
- mem.copy(u8, buf, file_path);
- buf[file_path.len] = 0;
-
- const err = posix.getErrno(posix.unlink(buf.ptr));
- if (err > 0) {
- return switch (err) {
- posix.EACCES, posix.EPERM => error.AccessDenied,
- posix.EBUSY => error.FileBusy,
- posix.EFAULT, posix.EINVAL => unreachable,
- posix.EIO => error.FileSystem,
- posix.EISDIR => error.IsDir,
- posix.ELOOP => error.SymLinkLoop,
- posix.ENAMETOOLONG => error.NameTooLong,
- posix.ENOENT => error.FileNotFound,
- posix.ENOTDIR => error.NotDir,
- posix.ENOMEM => error.SystemResources,
- posix.EROFS => error.ReadOnlyFileSystem,
- else => unexpectedErrorPosix(err),
- };
+pub fn deleteFilePosixC(file_path: [*]const u8) !void {
+ const err = posix.getErrno(posix.unlink(file_path));
+ switch (err) {
+ 0 => return,
+ posix.EACCES => return error.AccessDenied,
+ posix.EPERM => return error.AccessDenied,
+ posix.EBUSY => return error.FileBusy,
+ posix.EFAULT => unreachable,
+ posix.EINVAL => unreachable,
+ posix.EIO => return error.FileSystem,
+ posix.EISDIR => return error.IsDir,
+ posix.ELOOP => return error.SymLinkLoop,
+ posix.ENAMETOOLONG => return error.NameTooLong,
+ posix.ENOENT => return error.FileNotFound,
+ posix.ENOTDIR => return error.NotDir,
+ posix.ENOMEM => return error.SystemResources,
+ posix.EROFS => return error.ReadOnlyFileSystem,
+ else => return unexpectedErrorPosix(err),
}
}
+pub fn deleteFilePosix(file_path: []const u8) !void {
+ const file_path_c = try toPosixPath(file_path);
+ return deleteFilePosixC(&file_path_c);
+}
+
/// Guaranteed to be atomic. However until https://patchwork.kernel.org/patch/9636735/ is
/// merged and readily available,
/// there is a possibility of power loss or application termination leaving temporary files present
/// in the same directory as dest_path.
/// Destination file will have the same mode as the source file.
+/// TODO investigate if this can work with no allocator
pub fn copyFile(allocator: *Allocator, source_path: []const u8, dest_path: []const u8) !void {
- var in_file = try os.File.openRead(allocator, source_path);
+ var in_file = try os.File.openRead(source_path);
defer in_file.close();
const mode = try in_file.mode();
@@ -853,8 +1027,9 @@ pub fn copyFile(allocator: *Allocator, source_path: []const u8, dest_path: []con
/// Guaranteed to be atomic. However until https://patchwork.kernel.org/patch/9636735/ is
/// merged and readily available,
/// there is a possibility of power loss or application termination leaving temporary files present
-pub fn copyFileMode(allocator: *Allocator, source_path: []const u8, dest_path: []const u8, mode: FileMode) !void {
- var in_file = try os.File.openRead(allocator, source_path);
+/// TODO investigate if this can work with no allocator
+pub fn copyFileMode(allocator: *Allocator, source_path: []const u8, dest_path: []const u8, mode: File.Mode) !void {
+ var in_file = try os.File.openRead(source_path);
defer in_file.close();
var atomic_file = try AtomicFile.init(allocator, dest_path, mode);
@@ -871,6 +1046,7 @@ pub fn copyFileMode(allocator: *Allocator, source_path: []const u8, dest_path: [
}
pub const AtomicFile = struct {
+ /// TODO investigate if we can make this work with no allocator
allocator: *Allocator,
file: os.File,
tmp_path: []u8,
@@ -879,7 +1055,7 @@ pub const AtomicFile = struct {
/// dest_path must remain valid for the lifetime of AtomicFile
/// call finish to atomically replace dest_path with contents
- pub fn init(allocator: *Allocator, dest_path: []const u8, mode: FileMode) !AtomicFile {
+ pub fn init(allocator: *Allocator, dest_path: []const u8, mode: File.Mode) !AtomicFile {
const dirname = os.path.dirname(dest_path);
var rand_buf: [12]u8 = undefined;
@@ -898,7 +1074,7 @@ pub const AtomicFile = struct {
try getRandomBytes(rand_buf[0..]);
b64_fs_encoder.encode(tmp_path[dirname_component_len..], rand_buf);
- const file = os.File.openWriteNoClobber(allocator, tmp_path, mode) catch |err| switch (err) {
+ const file = os.File.openWriteNoClobber(tmp_path, mode) catch |err| switch (err) {
error.PathAlreadyExists => continue,
// TODO zig should figure out that this error set does not include PathAlreadyExists since
// it is handled in the above switch
@@ -919,7 +1095,7 @@ pub const AtomicFile = struct {
pub fn deinit(self: *AtomicFile) void {
if (!self.finished) {
self.file.close();
- deleteFile(self.allocator, self.tmp_path) catch {};
+ deleteFile(self.tmp_path) catch {};
self.allocator.free(self.tmp_path);
self.finished = true;
}
@@ -928,70 +1104,72 @@ pub const AtomicFile = struct {
pub fn finish(self: *AtomicFile) !void {
assert(!self.finished);
self.file.close();
- try rename(self.allocator, self.tmp_path, self.dest_path);
+ try rename(self.tmp_path, self.dest_path);
self.allocator.free(self.tmp_path);
self.finished = true;
}
};
-pub fn rename(allocator: *Allocator, old_path: []const u8, new_path: []const u8) !void {
- const full_buf = try allocator.alloc(u8, old_path.len + new_path.len + 2);
- defer allocator.free(full_buf);
-
- const old_buf = full_buf;
- mem.copy(u8, old_buf, old_path);
- old_buf[old_path.len] = 0;
-
- const new_buf = full_buf[old_path.len + 1 ..];
- mem.copy(u8, new_buf, new_path);
- new_buf[new_path.len] = 0;
+pub fn renameC(old_path: [*]const u8, new_path: [*]const u8) !void {
+ if (is_windows) {
+ @compileError("TODO implement for windows");
+ } else {
+ const err = posix.getErrno(posix.rename(old_path, new_path));
+ switch (err) {
+ 0 => return,
+ posix.EACCES => return error.AccessDenied,
+ posix.EPERM => return error.AccessDenied,
+ posix.EBUSY => return error.FileBusy,
+ posix.EDQUOT => return error.DiskQuota,
+ posix.EFAULT => unreachable,
+ posix.EINVAL => unreachable,
+ posix.EISDIR => return error.IsDir,
+ posix.ELOOP => return error.SymLinkLoop,
+ posix.EMLINK => return error.LinkQuotaExceeded,
+ posix.ENAMETOOLONG => return error.NameTooLong,
+ posix.ENOENT => return error.FileNotFound,
+ posix.ENOTDIR => return error.NotDir,
+ posix.ENOMEM => return error.SystemResources,
+ posix.ENOSPC => return error.NoSpaceLeft,
+ posix.EEXIST => return error.PathAlreadyExists,
+ posix.ENOTEMPTY => return error.PathAlreadyExists,
+ posix.EROFS => return error.ReadOnlyFileSystem,
+ posix.EXDEV => return error.RenameAcrossMountPoints,
+ else => return unexpectedErrorPosix(err),
+ }
+ }
+}
+pub fn rename(old_path: []const u8, new_path: []const u8) !void {
if (is_windows) {
const flags = windows.MOVEFILE_REPLACE_EXISTING | windows.MOVEFILE_WRITE_THROUGH;
- if (windows.MoveFileExA(old_buf.ptr, new_buf.ptr, flags) == 0) {
+ const old_path_w = try windows_util.sliceToPrefixedFileW(old_path);
+ const new_path_w = try windows_util.sliceToPrefixedFileW(new_path);
+ if (windows.MoveFileExW(&old_path_w, &new_path_w, flags) == 0) {
const err = windows.GetLastError();
- return switch (err) {
- else => unexpectedErrorWindows(err),
- };
+ switch (err) {
+ else => return unexpectedErrorWindows(err),
+ }
}
} else {
- const err = posix.getErrno(posix.rename(old_buf.ptr, new_buf.ptr));
- if (err > 0) {
- return switch (err) {
- posix.EACCES, posix.EPERM => error.AccessDenied,
- posix.EBUSY => error.FileBusy,
- posix.EDQUOT => error.DiskQuota,
- posix.EFAULT, posix.EINVAL => unreachable,
- posix.EISDIR => error.IsDir,
- posix.ELOOP => error.SymLinkLoop,
- posix.EMLINK => error.LinkQuotaExceeded,
- posix.ENAMETOOLONG => error.NameTooLong,
- posix.ENOENT => error.FileNotFound,
- posix.ENOTDIR => error.NotDir,
- posix.ENOMEM => error.SystemResources,
- posix.ENOSPC => error.NoSpaceLeft,
- posix.EEXIST, posix.ENOTEMPTY => error.PathAlreadyExists,
- posix.EROFS => error.ReadOnlyFileSystem,
- posix.EXDEV => error.RenameAcrossMountPoints,
- else => unexpectedErrorPosix(err),
- };
- }
+ const old_path_c = try toPosixPath(old_path);
+ const new_path_c = try toPosixPath(new_path);
+ return renameC(&old_path_c, &new_path_c);
}
}
-pub fn makeDir(allocator: *Allocator, dir_path: []const u8) !void {
+pub fn makeDir(dir_path: []const u8) !void {
if (is_windows) {
- return makeDirWindows(allocator, dir_path);
+ return makeDirWindows(dir_path);
} else {
- return makeDirPosix(allocator, dir_path);
+ return makeDirPosix(dir_path);
}
}
-pub fn makeDirWindows(allocator: *Allocator, dir_path: []const u8) !void {
- const path_buf = try cstr.addNullByte(allocator, dir_path);
- defer allocator.free(path_buf);
+pub fn makeDirWindows(dir_path: []const u8) !void {
+ const dir_path_w = try windows_util.sliceToPrefixedFileW(dir_path);
- if (windows.CreateDirectoryA(path_buf.ptr, null) == 0) {
+ if (windows.CreateDirectoryW(&dir_path_w, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.ALREADY_EXISTS => error.PathAlreadyExists,
@@ -1001,54 +1179,57 @@ pub fn makeDirWindows(allocator: *Allocator, dir_path: []const u8) !void {
}
}
-pub fn makeDirPosix(allocator: *Allocator, dir_path: []const u8) !void {
- const path_buf = try cstr.addNullByte(allocator, dir_path);
- defer allocator.free(path_buf);
-
- const err = posix.getErrno(posix.mkdir(path_buf.ptr, 0o755));
- if (err > 0) {
- return switch (err) {
- posix.EACCES, posix.EPERM => error.AccessDenied,
- posix.EDQUOT => error.DiskQuota,
- posix.EEXIST => error.PathAlreadyExists,
- posix.EFAULT => unreachable,
- posix.ELOOP => error.SymLinkLoop,
- posix.EMLINK => error.LinkQuotaExceeded,
- posix.ENAMETOOLONG => error.NameTooLong,
- posix.ENOENT => error.FileNotFound,
- posix.ENOMEM => error.SystemResources,
- posix.ENOSPC => error.NoSpaceLeft,
- posix.ENOTDIR => error.NotDir,
- posix.EROFS => error.ReadOnlyFileSystem,
- else => unexpectedErrorPosix(err),
- };
+pub fn makeDirPosixC(dir_path: [*]const u8) !void {
+ const err = posix.getErrno(posix.mkdir(dir_path, 0o755));
+ switch (err) {
+ 0 => return,
+ posix.EACCES => return error.AccessDenied,
+ posix.EPERM => return error.AccessDenied,
+ posix.EDQUOT => return error.DiskQuota,
+ posix.EEXIST => return error.PathAlreadyExists,
+ posix.EFAULT => unreachable,
+ posix.ELOOP => return error.SymLinkLoop,
+ posix.EMLINK => return error.LinkQuotaExceeded,
+ posix.ENAMETOOLONG => return error.NameTooLong,
+ posix.ENOENT => return error.FileNotFound,
+ posix.ENOMEM => return error.SystemResources,
+ posix.ENOSPC => return error.NoSpaceLeft,
+ posix.ENOTDIR => return error.NotDir,
+ posix.EROFS => return error.ReadOnlyFileSystem,
+ else => return unexpectedErrorPosix(err),
}
}
+pub fn makeDirPosix(dir_path: []const u8) !void {
+ const dir_path_c = try toPosixPath(dir_path);
+ return makeDirPosixC(&dir_path_c);
+}
+
/// Calls makeDir recursively to make an entire path. Returns success if the path
/// already exists and is a directory.
+/// TODO determine if we can remove the allocator requirement from this function
pub fn makePath(allocator: *Allocator, full_path: []const u8) !void {
const resolved_path = try path.resolve(allocator, full_path);
defer allocator.free(resolved_path);
var end_index: usize = resolved_path.len;
while (true) {
- makeDir(allocator, resolved_path[0..end_index]) catch |err| {
- if (err == error.PathAlreadyExists) {
+ makeDir(resolved_path[0..end_index]) catch |err| switch (err) {
+ error.PathAlreadyExists => {
// TODO stat the file and return an error if it's not a directory
// this is important because otherwise a dangling symlink
// could cause an infinite loop
if (end_index == resolved_path.len) return;
- } else if (err == error.FileNotFound) {
+ },
+ error.FileNotFound => {
// march end_index backward until next path component
while (true) {
end_index -= 1;
if (os.path.isSep(resolved_path[end_index])) break;
}
continue;
- } else {
- return err;
- }
+ },
+ else => return err,
};
if (end_index == resolved_path.len) return;
// march end_index forward until next path component
@@ -1071,6 +1252,7 @@ pub const DeleteDirError = error{
ReadOnlyFileSystem,
OutOfMemory,
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -1129,7 +1311,6 @@ const DeleteTreeError = error{
NameTooLong,
SystemFdQuotaExceeded,
NoDevice,
- PathNotFound,
SystemResources,
NoSpaceLeft,
PathAlreadyExists,
@@ -1139,20 +1320,30 @@ const DeleteTreeError = error{
FileSystem,
FileBusy,
DirNotEmpty,
+
+ /// On Windows, file paths must be valid Unicode.
+ InvalidUtf8,
+
+ /// On Windows, file paths cannot contain these characters:
+ /// '/', '*', '?', '"', '<', '>', '|'
+ BadPathName,
+
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
+
+/// TODO determine if we can remove the allocator requirement
pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!void {
start_over: while (true) {
var got_access_denied = false;
// First, try deleting the item as a file. This way we don't follow sym links.
- if (deleteFile(allocator, full_path)) {
+ if (deleteFile(full_path)) {
return;
} else |err| switch (err) {
error.FileNotFound => return,
error.IsDir => {},
error.AccessDenied => got_access_denied = true,
- error.OutOfMemory,
error.SymLinkLoop,
error.NameTooLong,
error.SystemResources,
@@ -1160,6 +1351,8 @@ pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!
error.NotDir,
error.FileSystem,
error.FileBusy,
+ error.InvalidUtf8,
+ error.BadPathName,
error.Unexpected,
=> return err,
}
@@ -1181,7 +1374,7 @@ pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!
error.NameTooLong,
error.SystemFdQuotaExceeded,
error.NoDevice,
- error.PathNotFound,
+ error.FileNotFound,
error.SystemResources,
error.NoSpaceLeft,
error.PathAlreadyExists,
@@ -1251,7 +1444,7 @@ pub const Dir = struct {
};
pub const OpenError = error{
- PathNotFound,
+ FileNotFound,
NotDir,
AccessDenied,
FileTooBig,
@@ -1266,9 +1459,11 @@ pub const Dir = struct {
PathAlreadyExists,
OutOfMemory,
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
+ /// TODO remove the allocator requirement from this API
pub fn open(allocator: *Allocator, dir_path: []const u8) OpenError!Dir {
return Dir{
.allocator = allocator,
@@ -1284,7 +1479,6 @@ pub const Dir = struct {
},
Os.macosx, Os.ios => Handle{
.fd = try posixOpen(
- allocator,
dir_path,
posix.O_RDONLY | posix.O_NONBLOCK | posix.O_DIRECTORY | posix.O_CLOEXEC,
0,
@@ -1296,7 +1490,6 @@ pub const Dir = struct {
},
Os.linux => Handle{
.fd = try posixOpen(
- allocator,
dir_path,
posix.O_RDONLY | posix.O_DIRECTORY | posix.O_CLOEXEC,
0,
@@ -1493,39 +1686,32 @@ pub fn changeCurDir(allocator: *Allocator, dir_path: []const u8) !void {
}
/// Read value of a symbolic link.
-pub fn readLink(allocator: *Allocator, pathname: []const u8) ![]u8 {
- const path_buf = try allocator.alloc(u8, pathname.len + 1);
- defer allocator.free(path_buf);
-
- mem.copy(u8, path_buf, pathname);
- path_buf[pathname.len] = 0;
-
- var result_buf = try allocator.alloc(u8, 1024);
- errdefer allocator.free(result_buf);
- while (true) {
- const ret_val = posix.readlink(path_buf.ptr, result_buf.ptr, result_buf.len);
- const err = posix.getErrno(ret_val);
- if (err > 0) {
- return switch (err) {
- posix.EACCES => error.AccessDenied,
- posix.EFAULT, posix.EINVAL => unreachable,
- posix.EIO => error.FileSystem,
- posix.ELOOP => error.SymLinkLoop,
- posix.ENAMETOOLONG => error.NameTooLong,
- posix.ENOENT => error.FileNotFound,
- posix.ENOMEM => error.SystemResources,
- posix.ENOTDIR => error.NotDir,
- else => unexpectedErrorPosix(err),
- };
- }
- if (ret_val == result_buf.len) {
- result_buf = try allocator.realloc(u8, result_buf, result_buf.len * 2);
- continue;
- }
- return allocator.shrink(u8, result_buf, ret_val);
+/// The return value is a slice of out_buffer.
+pub fn readLinkC(out_buffer: *[posix.PATH_MAX]u8, pathname: [*]const u8) ![]u8 {
+ const rc = posix.readlink(pathname, out_buffer, out_buffer.len);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => return out_buffer[0..rc],
+ posix.EACCES => return error.AccessDenied,
+ posix.EFAULT => unreachable,
+ posix.EINVAL => unreachable,
+ posix.EIO => return error.FileSystem,
+ posix.ELOOP => return error.SymLinkLoop,
+ posix.ENAMETOOLONG => unreachable, // out_buffer is at least PATH_MAX
+ posix.ENOENT => return error.FileNotFound,
+ posix.ENOMEM => return error.SystemResources,
+ posix.ENOTDIR => return error.NotDir,
+ else => return unexpectedErrorPosix(err),
}
}
+/// Read value of a symbolic link.
+/// The return value is a slice of out_buffer.
+pub fn readLink(out_buffer: *[posix.PATH_MAX]u8, file_path: []const u8) ![]u8 {
+ const file_path_c = try toPosixPath(file_path);
+ return readLinkC(out_buffer, &file_path_c);
+}
+
pub fn posix_setuid(uid: u32) !void {
const err = posix.getErrno(posix.setuid(uid));
if (err == 0) return;
@@ -1572,6 +1758,8 @@ pub fn posix_setregid(rgid: u32, egid: u32) !void {
pub const WindowsGetStdHandleErrs = error{
NoStdHandles,
+
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -1899,7 +2087,7 @@ pub fn unexpectedErrorPosix(errno: usize) UnexpectedError {
/// Call this when you made a windows DLL call or something that does SetLastError
/// and you get an unexpected error.
pub fn unexpectedErrorWindows(err: windows.DWORD) UnexpectedError {
- if (unexpected_error_tracing) {
+ if (unexpected_error_tracing) {
debug.warn("unexpected GetLastError(): {}\n", err);
debug.dumpCurrentStackTrace(null);
}
@@ -1908,17 +2096,12 @@ pub fn unexpectedErrorWindows(err: windows.DWORD) UnexpectedError {
pub fn openSelfExe() !os.File {
switch (builtin.os) {
- Os.linux => {
- const proc_file_path = "/proc/self/exe";
- var fixed_buffer_mem: [proc_file_path.len + 1]u8 = undefined;
- var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- return os.File.openRead(&fixed_allocator.allocator, proc_file_path);
- },
+ Os.linux => return os.File.openReadC(c"/proc/self/exe"),
Os.macosx, Os.ios => {
- var fixed_buffer_mem: [darwin.PATH_MAX * 2]u8 = undefined;
- var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
- const self_exe_path = try selfExePath(&fixed_allocator.allocator);
- return os.File.openRead(&fixed_allocator.allocator, self_exe_path);
+ var buf: [MAX_PATH_BYTES]u8 = undefined;
+ const self_exe_path = try selfExePath(&buf);
+ buf[self_exe_path.len] = 0;
+ return os.File.openReadC(self_exe_path.ptr);
},
else => @compileError("Unsupported OS"),
}
@@ -1927,7 +2110,7 @@ pub fn openSelfExe() !os.File {
test "openSelfExe" {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => (try openSelfExe()).close(),
- else => return, // Unsupported OS.
+ else => return error.SkipZigTest, // Unsupported OS
}
}
@@ -1936,69 +2119,68 @@ test "openSelfExe" {
/// If you only want an open file handle, use openSelfExe.
/// This function may return an error if the current executable
/// was deleted after spawning.
-/// Caller owns returned memory.
-pub fn selfExePath(allocator: *mem.Allocator) ![]u8 {
+/// Returned value is a slice of out_buffer.
+///
+/// On Linux, depends on procfs being mounted. If the currently executing binary has
+/// been deleted, the file path looks something like `/a/b/c/exe (deleted)`.
+/// TODO make the return type of this a null terminated pointer
+pub fn selfExePath(out_buffer: *[MAX_PATH_BYTES]u8) ![]u8 {
switch (builtin.os) {
- Os.linux => {
- // If the currently executing binary has been deleted,
- // the file path looks something like `/a/b/c/exe (deleted)`
- return readLink(allocator, "/proc/self/exe");
- },
+ Os.linux => return readLink(out_buffer, "/proc/self/exe"),
Os.windows => {
- var out_path = try Buffer.initSize(allocator, 0xff);
- errdefer out_path.deinit();
- while (true) {
- const dword_len = try math.cast(windows.DWORD, out_path.len());
- const copied_amt = windows.GetModuleFileNameA(null, out_path.ptr(), dword_len);
- if (copied_amt <= 0) {
- const err = windows.GetLastError();
- return switch (err) {
- else => unexpectedErrorWindows(err),
- };
+ var utf16le_buf: [windows_util.PATH_MAX_WIDE]u16 = undefined;
+ const casted_len = @intCast(windows.DWORD, utf16le_buf.len); // TODO shouldn't need this cast
+ const rc = windows.GetModuleFileNameW(null, &utf16le_buf, casted_len);
+ assert(rc <= utf16le_buf.len);
+ if (rc == 0) {
+ const err = windows.GetLastError();
+ switch (err) {
+ else => return unexpectedErrorWindows(err),
}
- if (copied_amt < out_path.len()) {
- out_path.shrink(copied_amt);
- return out_path.toOwnedSlice();
- }
- const new_len = (out_path.len() << 1) | 0b1;
- try out_path.resize(new_len);
}
+ const utf16le_slice = utf16le_buf[0..rc];
+ // Trust that Windows gives us valid UTF-16LE.
+ const end_index = std.unicode.utf16leToUtf8(out_buffer, utf16le_slice) catch unreachable;
+ return out_buffer[0..end_index];
},
Os.macosx, Os.ios => {
- var u32_len: u32 = 0;
- const ret1 = c._NSGetExecutablePath(undefined, &u32_len);
- assert(ret1 != 0);
- const bytes = try allocator.alloc(u8, u32_len);
- errdefer allocator.free(bytes);
- const ret2 = c._NSGetExecutablePath(bytes.ptr, &u32_len);
- assert(ret2 == 0);
- return bytes;
+ var u32_len: u32 = @intCast(u32, out_buffer.len); // TODO shouldn't need this cast
+ const rc = c._NSGetExecutablePath(out_buffer, &u32_len);
+ if (rc != 0) return error.NameTooLong;
+ return mem.toSlice(u8, out_buffer);
},
else => @compileError("Unsupported OS"),
}
}
-/// Get the directory path that contains the current executable.
+/// `selfExeDirPath` except allocates the result on the heap.
/// Caller owns returned memory.
-pub fn selfExeDirPath(allocator: *mem.Allocator) ![]u8 {
+pub fn selfExeDirPathAlloc(allocator: *Allocator) ![]u8 {
+ var buf: [MAX_PATH_BYTES]u8 = undefined;
+ return mem.dupe(allocator, u8, try selfExeDirPath(&buf));
+}
+
+/// Get the directory path that contains the current executable.
+/// Returned value is a slice of out_buffer.
+pub fn selfExeDirPath(out_buffer: *[MAX_PATH_BYTES]u8) ![]const u8 {
switch (builtin.os) {
Os.linux => {
// If the currently executing binary has been deleted,
// the file path looks something like `/a/b/c/exe (deleted)`
// This path cannot be opened, but it's valid for determining the directory
// the executable was in when it was run.
- const full_exe_path = try readLink(allocator, "/proc/self/exe");
- errdefer allocator.free(full_exe_path);
- const dir = path.dirname(full_exe_path) orelse ".";
- return allocator.shrink(u8, full_exe_path, dir.len);
+ const full_exe_path = try readLinkC(out_buffer, c"/proc/self/exe");
+ // Assume that /proc/self/exe has an absolute path, and therefore dirname
+ // will not return null.
+ return path.dirname(full_exe_path).?;
},
Os.windows, Os.macosx, Os.ios => {
- const self_exe_path = try selfExePath(allocator);
- errdefer allocator.free(self_exe_path);
- const dirname = os.path.dirname(self_exe_path) orelse ".";
- return allocator.shrink(u8, self_exe_path, dirname.len);
+ const self_exe_path = try selfExePath(out_buffer);
+ // Assume that the OS APIs return absolute paths, and therefore dirname
+ // will not return null.
+ return path.dirname(self_exe_path).?;
},
- else => @compileError("unimplemented: std.os.selfExeDirPath for " ++ @tagName(builtin.os)),
+ else => @compileError("Unsupported OS"),
}
}
@@ -2102,6 +2284,7 @@ pub const PosixBindError = error{
/// The socket inode would reside on a read-only filesystem.
ReadOnlyFileSystem,
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -2145,6 +2328,7 @@ const PosixListenError = error{
/// The socket is not of a type that supports the listen() operation.
OperationNotSupported,
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -2198,6 +2382,7 @@ pub const PosixAcceptError = error{
/// Firewall rules forbid connection.
BlockedByFirewall,
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -2243,6 +2428,7 @@ pub const LinuxEpollCreateError = error{
/// There was insufficient memory to create the kernel object.
SystemResources,
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -2297,6 +2483,7 @@ pub const LinuxEpollCtlError = error{
/// for example, a regular file or a directory.
FileDescriptorIncompatibleWithEpoll,
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -2339,6 +2526,7 @@ pub const LinuxEventFdError = error{
ProcessFdQuotaExceeded,
SystemFdQuotaExceeded,
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -2361,6 +2549,7 @@ pub const PosixGetSockNameError = error{
/// Insufficient resources were available in the system to perform the operation.
SystemResources,
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -2414,6 +2603,7 @@ pub const PosixConnectError = error{
/// that for IP sockets the timeout may be very long when syncookies are enabled on the server.
ConnectionTimedOut,
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -2516,26 +2706,66 @@ pub const Thread = struct {
data: Data,
pub const use_pthreads = is_posix and builtin.link_libc;
+
+ /// Represents a kernel thread handle.
+ /// May be an integer or a pointer depending on the platform.
+ /// On Linux and POSIX, this is the same as Id.
+ pub const Handle = if (use_pthreads)
+ c.pthread_t
+ else switch (builtin.os) {
+ builtin.Os.linux => i32,
+ builtin.Os.windows => windows.HANDLE,
+ else => @compileError("Unsupported OS"),
+ };
+
+ /// Represents a unique ID per thread.
+ /// May be an integer or pointer depending on the platform.
+ /// On Linux and POSIX, this is the same as Handle.
+ pub const Id = switch (builtin.os) {
+ builtin.Os.windows => windows.DWORD,
+ else => Handle,
+ };
+
pub const Data = if (use_pthreads)
struct {
- handle: c.pthread_t,
+ handle: Thread.Handle,
stack_addr: usize,
stack_len: usize,
}
else switch (builtin.os) {
builtin.Os.linux => struct {
- pid: i32,
+ handle: Thread.Handle,
stack_addr: usize,
stack_len: usize,
},
builtin.Os.windows => struct {
- handle: windows.HANDLE,
+ handle: Thread.Handle,
alloc_start: *c_void,
heap_handle: windows.HANDLE,
},
else => @compileError("Unsupported OS"),
};
+ /// Returns the ID of the calling thread.
+ /// Makes a syscall every time the function is called.
+ /// On Linux and POSIX, this Id is the same as a Handle.
+ pub fn getCurrentId() Id {
+ if (use_pthreads) {
+ return c.pthread_self();
+ } else
+ return switch (builtin.os) {
+ builtin.Os.linux => linux.gettid(),
+ builtin.Os.windows => windows.GetCurrentThreadId(),
+ else => @compileError("Unsupported OS"),
+ };
+ }
+
+ /// Returns the handle of this thread.
+ /// On Linux and POSIX, this is the same as Id.
+ pub fn handle(self: Thread) Handle {
+ return self.data.handle;
+ }
+
pub fn wait(self: *const Thread) void {
if (use_pthreads) {
const err = c.pthread_join(self.data.handle, null);
@@ -2550,9 +2780,9 @@ pub const Thread = struct {
} else switch (builtin.os) {
builtin.Os.linux => {
while (true) {
- const pid_value = @atomicLoad(i32, &self.data.pid, builtin.AtomicOrder.SeqCst);
+ const pid_value = @atomicLoad(i32, &self.data.handle, builtin.AtomicOrder.SeqCst);
if (pid_value == 0) break;
- const rc = linux.futex_wait(@ptrToInt(&self.data.pid), linux.FUTEX_WAIT, pid_value, null);
+ const rc = linux.futex_wait(@ptrToInt(&self.data.handle), linux.FUTEX_WAIT, pid_value, null);
switch (linux.getErrno(rc)) {
0 => continue,
posix.EINTR => continue,
@@ -2595,6 +2825,7 @@ pub const SpawnThreadError = error{
/// Not enough userland memory to spawn the thread.
OutOfMemory,
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -2734,7 +2965,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
// use linux API directly. TODO use posix.CLONE_SETTLS and initialize thread local storage correctly
const flags = posix.CLONE_VM | posix.CLONE_FS | posix.CLONE_FILES | posix.CLONE_SIGHAND | posix.CLONE_THREAD | posix.CLONE_SYSVSEM | posix.CLONE_PARENT_SETTID | posix.CLONE_CHILD_CLEARTID | posix.CLONE_DETACHED;
const newtls: usize = 0;
- const rc = posix.clone(MainFuncs.linuxThreadMain, stack_end, flags, arg, &thread_ptr.data.pid, newtls, &thread_ptr.data.pid);
+ const rc = posix.clone(MainFuncs.linuxThreadMain, stack_end, flags, arg, &thread_ptr.data.handle, newtls, &thread_ptr.data.handle);
const err = posix.getErrno(rc);
switch (err) {
0 => return thread_ptr,
@@ -2770,7 +3001,9 @@ pub fn posixFStat(fd: i32) !posix.Stat {
const err = posix.getErrno(posix.fstat(fd, &stat));
if (err > 0) {
return switch (err) {
- posix.EBADF => error.BadFd,
+ // We do not make this an error code because if you get EBADF it's always a bug,
+ // since the fd could have been reused.
+ posix.EBADF => unreachable,
posix.ENOMEM => error.SystemResources,
else => os.unexpectedErrorPosix(err),
};
@@ -2782,6 +3015,8 @@ pub fn posixFStat(fd: i32) !posix.Stat {
pub const CpuCountError = error{
OutOfMemory,
PermissionDenied,
+
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -2852,6 +3087,7 @@ pub const BsdKQueueError = error{
/// The system-wide limit on the total number of open files has been reached.
SystemFdQuotaExceeded,
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -2903,3 +3139,44 @@ pub fn bsdKEvent(
}
}
}
+
+pub fn linuxINotifyInit1(flags: u32) !i32 {
+ const rc = linux.inotify_init1(flags);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => return @intCast(i32, rc),
+ posix.EINVAL => unreachable,
+ posix.EMFILE => return error.ProcessFdQuotaExceeded,
+ posix.ENFILE => return error.SystemFdQuotaExceeded,
+ posix.ENOMEM => return error.SystemResources,
+ else => return unexpectedErrorPosix(err),
+ }
+}
+
+pub fn linuxINotifyAddWatchC(inotify_fd: i32, pathname: [*]const u8, mask: u32) !i32 {
+ const rc = linux.inotify_add_watch(inotify_fd, pathname, mask);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => return @intCast(i32, rc),
+ posix.EACCES => return error.AccessDenied,
+ posix.EBADF => unreachable,
+ posix.EFAULT => unreachable,
+ posix.EINVAL => unreachable,
+ posix.ENAMETOOLONG => return error.NameTooLong,
+ posix.ENOENT => return error.FileNotFound,
+ posix.ENOMEM => return error.SystemResources,
+ posix.ENOSPC => return error.UserResourceLimitReached,
+ else => return unexpectedErrorPosix(err),
+ }
+}
+
+pub fn linuxINotifyRmWatch(inotify_fd: i32, wd: i32) !void {
+ const rc = linux.inotify_rm_watch(inotify_fd, wd);
+ const err = posix.getErrno(rc);
+ switch (err) {
+ 0 => return,
+ posix.EBADF => unreachable,
+ posix.EINVAL => unreachable,
+ else => return unexpectedErrorPosix(err),
+ }
+}
diff --git a/std/os/linux/index.zig b/std/os/linux/index.zig
index 15607ea6c0..c369921e14 100644
--- a/std/os/linux/index.zig
+++ b/std/os/linux/index.zig
@@ -567,6 +567,37 @@ pub const MNT_DETACH = 2;
pub const MNT_EXPIRE = 4;
pub const UMOUNT_NOFOLLOW = 8;
+pub const IN_CLOEXEC = O_CLOEXEC;
+pub const IN_NONBLOCK = O_NONBLOCK;
+
+pub const IN_ACCESS = 0x00000001;
+pub const IN_MODIFY = 0x00000002;
+pub const IN_ATTRIB = 0x00000004;
+pub const IN_CLOSE_WRITE = 0x00000008;
+pub const IN_CLOSE_NOWRITE = 0x00000010;
+pub const IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE;
+pub const IN_OPEN = 0x00000020;
+pub const IN_MOVED_FROM = 0x00000040;
+pub const IN_MOVED_TO = 0x00000080;
+pub const IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO;
+pub const IN_CREATE = 0x00000100;
+pub const IN_DELETE = 0x00000200;
+pub const IN_DELETE_SELF = 0x00000400;
+pub const IN_MOVE_SELF = 0x00000800;
+pub const IN_ALL_EVENTS = 0x00000fff;
+
+pub const IN_UNMOUNT = 0x00002000;
+pub const IN_Q_OVERFLOW = 0x00004000;
+pub const IN_IGNORED = 0x00008000;
+
+pub const IN_ONLYDIR = 0x01000000;
+pub const IN_DONT_FOLLOW = 0x02000000;
+pub const IN_EXCL_UNLINK = 0x04000000;
+pub const IN_MASK_ADD = 0x20000000;
+
+pub const IN_ISDIR = 0x40000000;
+pub const IN_ONESHOT = 0x80000000;
+
pub const S_IFMT = 0o170000;
pub const S_IFDIR = 0o040000;
@@ -692,6 +723,10 @@ pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?*timespec) us
return syscall4(SYS_futex, uaddr, futex_op, @bitCast(u32, val), @ptrToInt(timeout));
}
+pub fn futex_wake(uaddr: usize, futex_op: u32, val: i32) usize {
+ return syscall3(SYS_futex, uaddr, futex_op, @bitCast(u32, val));
+}
+
pub fn getcwd(buf: [*]u8, size: usize) usize {
return syscall2(SYS_getcwd, @ptrToInt(buf), size);
}
@@ -700,6 +735,18 @@ pub fn getdents(fd: i32, dirp: [*]u8, count: usize) usize {
return syscall3(SYS_getdents, @intCast(usize, fd), @ptrToInt(dirp), count);
}
+pub fn inotify_init1(flags: u32) usize {
+ return syscall1(SYS_inotify_init1, flags);
+}
+
+pub fn inotify_add_watch(fd: i32, pathname: [*]const u8, mask: u32) usize {
+ return syscall3(SYS_inotify_add_watch, @intCast(usize, fd), @ptrToInt(pathname), mask);
+}
+
+pub fn inotify_rm_watch(fd: i32, wd: i32) usize {
+ return syscall2(SYS_inotify_rm_watch, @intCast(usize, fd), @intCast(usize, wd));
+}
+
pub fn isatty(fd: i32) bool {
var wsz: winsize = undefined;
return syscall3(SYS_ioctl, @intCast(usize, fd), TIOCGWINSZ, @ptrToInt(&wsz)) == 0;
@@ -742,6 +789,14 @@ pub fn read(fd: i32, buf: [*]u8, count: usize) usize {
return syscall3(SYS_read, @intCast(usize, fd), @ptrToInt(buf), count);
}
+pub fn preadv(fd: i32, iov: [*]const iovec, count: usize, offset: u64) usize {
+ return syscall4(SYS_preadv, @intCast(usize, fd), @ptrToInt(iov), count, offset);
+}
+
+pub fn pwritev(fd: i32, iov: [*]const iovec_const, count: usize, offset: u64) usize {
+ return syscall4(SYS_pwritev, @intCast(usize, fd), @ptrToInt(iov), count, offset);
+}
+
// TODO https://github.com/ziglang/zig/issues/265
pub fn rmdir(path: [*]const u8) usize {
return syscall1(SYS_rmdir, @ptrToInt(path));
@@ -947,6 +1002,10 @@ pub fn getpid() i32 {
return @bitCast(i32, @truncate(u32, syscall0(SYS_getpid)));
}
+pub fn gettid() i32 {
+ return @bitCast(i32, @truncate(u32, syscall0(SYS_gettid)));
+}
+
pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize {
return syscall4(SYS_rt_sigprocmask, flags, @ptrToInt(set), @ptrToInt(oldset), NSIG / 8);
}
@@ -1060,6 +1119,11 @@ pub const iovec = extern struct {
iov_len: usize,
};
+pub const iovec_const = extern struct {
+ iov_base: [*]const u8,
+ iov_len: usize,
+};
+
pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return syscall3(SYS_getsockname, @intCast(usize, fd), @ptrToInt(addr), @ptrToInt(len));
}
@@ -1368,6 +1432,14 @@ pub fn capset(hdrp: *cap_user_header_t, datap: *const cap_user_data_t) usize {
return syscall2(SYS_capset, @ptrToInt(hdrp), @ptrToInt(datap));
}
+pub const inotify_event = extern struct {
+ wd: i32,
+ mask: u32,
+ cookie: u32,
+ len: u32,
+ //name: [?]u8,
+};
+
test "import" {
if (builtin.os == builtin.Os.linux) {
_ = @import("test.zig");
diff --git a/std/os/path.zig b/std/os/path.zig
index d3ab0c519f..b3cfec1a3a 100644
--- a/std/os/path.zig
+++ b/std/os/path.zig
@@ -11,11 +11,14 @@ const math = std.math;
const posix = os.posix;
const windows = os.windows;
const cstr = std.cstr;
+const windows_util = @import("windows/util.zig");
pub const sep_windows = '\\';
pub const sep_posix = '/';
pub const sep = if (is_windows) sep_windows else sep_posix;
+pub const sep_str = [1]u8{sep};
+
pub const delimiter_windows = ';';
pub const delimiter_posix = ':';
pub const delimiter = if (is_windows) delimiter_windows else delimiter_posix;
@@ -337,7 +340,7 @@ pub fn resolveSlice(allocator: *Allocator, paths: []const []const u8) ![]u8 {
pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(is_windows); // resolveWindows called on non windows can't use getCwd
- return os.getCwd(allocator);
+ return os.getCwdAlloc(allocator);
}
// determine which disk designator we will result with, if any
@@ -432,7 +435,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
},
WindowsPath.Kind.None => {
assert(is_windows); // resolveWindows called on non windows can't use getCwd
- const cwd = try os.getCwd(allocator);
+ const cwd = try os.getCwdAlloc(allocator);
defer allocator.free(cwd);
const parsed_cwd = windowsParsePath(cwd);
result = try allocator.alloc(u8, max_size + parsed_cwd.disk_designator.len + 1);
@@ -448,7 +451,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
} else {
assert(is_windows); // resolveWindows called on non windows can't use getCwd
// TODO call get cwd for the result_disk_designator instead of the global one
- const cwd = try os.getCwd(allocator);
+ const cwd = try os.getCwdAlloc(allocator);
defer allocator.free(cwd);
result = try allocator.alloc(u8, max_size + cwd.len + 1);
@@ -506,7 +509,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
result_index += 1;
}
- return result[0..result_index];
+ return allocator.shrink(u8, result, result_index);
}
/// This function is like a series of `cd` statements executed one after another.
@@ -516,7 +519,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(!is_windows); // resolvePosix called on windows can't use getCwd
- return os.getCwd(allocator);
+ return os.getCwdAlloc(allocator);
}
var first_index: usize = 0;
@@ -538,7 +541,7 @@ pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
result = try allocator.alloc(u8, max_size);
} else {
assert(!is_windows); // resolvePosix called on windows can't use getCwd
- const cwd = try os.getCwd(allocator);
+ const cwd = try os.getCwdAlloc(allocator);
defer allocator.free(cwd);
result = try allocator.alloc(u8, max_size + cwd.len + 1);
mem.copy(u8, result, cwd);
@@ -573,11 +576,11 @@ pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
result_index += 1;
}
- return result[0..result_index];
+ return allocator.shrink(u8, result, result_index);
}
test "os.path.resolve" {
- const cwd = try os.getCwd(debug.global_allocator);
+ const cwd = try os.getCwdAlloc(debug.global_allocator);
if (is_windows) {
if (windowsParsePath(cwd).kind == WindowsPath.Kind.Drive) {
cwd[0] = asciiUpper(cwd[0]);
@@ -591,7 +594,7 @@ test "os.path.resolve" {
test "os.path.resolveWindows" {
if (is_windows) {
- const cwd = try os.getCwd(debug.global_allocator);
+ const cwd = try os.getCwdAlloc(debug.global_allocator);
const parsed_cwd = windowsParsePath(cwd);
{
const result = testResolveWindows([][]const u8{ "/usr/local", "lib\\zig\\std\\array_list.zig" });
@@ -1073,112 +1076,148 @@ fn testRelativeWindows(from: []const u8, to: []const u8, expected_output: []cons
assert(mem.eql(u8, result, expected_output));
}
-/// Return the canonicalized absolute pathname.
-/// Expands all symbolic links and resolves references to `.`, `..`, and
-/// extra `/` characters in ::pathname.
-/// Caller must deallocate result.
-pub fn real(allocator: *Allocator, pathname: []const u8) ![]u8 {
+pub const RealError = error{
+ FileNotFound,
+ AccessDenied,
+ NameTooLong,
+ NotSupported,
+ NotDir,
+ SymLinkLoop,
+ InputOutput,
+ FileTooBig,
+ IsDir,
+ ProcessFdQuotaExceeded,
+ SystemFdQuotaExceeded,
+ NoDevice,
+ SystemResources,
+ NoSpaceLeft,
+ FileSystem,
+ BadPathName,
+
+ /// On Windows, file paths must be valid Unicode.
+ InvalidUtf8,
+
+ /// TODO remove this possibility
+ PathAlreadyExists,
+
+ /// TODO remove this possibility
+ Unexpected,
+};
+
+/// Call from Windows-specific code if you already have a UTF-16LE encoded, null terminated string.
+/// Otherwise use `real` or `realC`.
+pub fn realW(out_buffer: *[os.MAX_PATH_BYTES]u8, pathname: [*]const u16) RealError![]u8 {
+ const h_file = windows.CreateFileW(
+ pathname,
+ windows.GENERIC_READ,
+ windows.FILE_SHARE_READ,
+ null,
+ windows.OPEN_EXISTING,
+ windows.FILE_ATTRIBUTE_NORMAL,
+ null,
+ );
+ if (h_file == windows.INVALID_HANDLE_VALUE) {
+ const err = windows.GetLastError();
+ switch (err) {
+ windows.ERROR.FILE_NOT_FOUND => return error.FileNotFound,
+ windows.ERROR.ACCESS_DENIED => return error.AccessDenied,
+ windows.ERROR.FILENAME_EXCED_RANGE => return error.NameTooLong,
+ else => return os.unexpectedErrorWindows(err),
+ }
+ }
+ defer os.close(h_file);
+ var utf16le_buf: [windows_util.PATH_MAX_WIDE]u16 = undefined;
+ const casted_len = @intCast(windows.DWORD, utf16le_buf.len); // TODO shouldn't need this cast
+ const result = windows.GetFinalPathNameByHandleW(h_file, &utf16le_buf, casted_len, windows.VOLUME_NAME_DOS);
+ assert(result <= utf16le_buf.len);
+ if (result == 0) {
+ const err = windows.GetLastError();
+ switch (err) {
+ windows.ERROR.FILE_NOT_FOUND => return error.FileNotFound,
+ windows.ERROR.PATH_NOT_FOUND => return error.FileNotFound,
+ windows.ERROR.NOT_ENOUGH_MEMORY => return error.SystemResources,
+ windows.ERROR.FILENAME_EXCED_RANGE => return error.NameTooLong,
+ windows.ERROR.INVALID_PARAMETER => unreachable,
+ else => return os.unexpectedErrorWindows(err),
+ }
+ }
+ const utf16le_slice = utf16le_buf[0..result];
+
+ // windows returns \\?\ prepended to the path
+ // we strip it because nobody wants \\?\ prepended to their path
+ const prefix = []u16{ '\\', '\\', '?', '\\' };
+ const start_index = if (mem.startsWith(u16, utf16le_slice, prefix)) prefix.len else 0;
+
+ // Trust that Windows gives us valid UTF-16LE.
+ const end_index = std.unicode.utf16leToUtf8(out_buffer, utf16le_slice[start_index..]) catch unreachable;
+ return out_buffer[0..end_index];
+}
+
+/// See `real`
+/// Use this when you have a null terminated pointer path.
+pub fn realC(out_buffer: *[os.MAX_PATH_BYTES]u8, pathname: [*]const u8) RealError![]u8 {
switch (builtin.os) {
Os.windows => {
- const pathname_buf = try allocator.alloc(u8, pathname.len + 1);
- defer allocator.free(pathname_buf);
-
- mem.copy(u8, pathname_buf, pathname);
- pathname_buf[pathname.len] = 0;
-
- const h_file = windows.CreateFileA(pathname_buf.ptr, windows.GENERIC_READ, windows.FILE_SHARE_READ, null, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, null);
- if (h_file == windows.INVALID_HANDLE_VALUE) {
- const err = windows.GetLastError();
- return switch (err) {
- windows.ERROR.FILE_NOT_FOUND => error.FileNotFound,
- windows.ERROR.ACCESS_DENIED => error.AccessDenied,
- windows.ERROR.FILENAME_EXCED_RANGE => error.NameTooLong,
- else => os.unexpectedErrorWindows(err),
- };
- }
- defer os.close(h_file);
- var buf = try allocator.alloc(u8, 256);
- errdefer allocator.free(buf);
- while (true) {
- const buf_len = math.cast(windows.DWORD, buf.len) catch return error.NameTooLong;
- const result = windows.GetFinalPathNameByHandleA(h_file, buf.ptr, buf_len, windows.VOLUME_NAME_DOS);
-
- if (result == 0) {
- const err = windows.GetLastError();
- return switch (err) {
- windows.ERROR.PATH_NOT_FOUND => error.FileNotFound,
- windows.ERROR.NOT_ENOUGH_MEMORY => error.OutOfMemory,
- windows.ERROR.INVALID_PARAMETER => unreachable,
- else => os.unexpectedErrorWindows(err),
- };
- }
-
- if (result > buf.len) {
- buf = try allocator.realloc(u8, buf, result);
- continue;
- }
-
- // windows returns \\?\ prepended to the path
- // we strip it because nobody wants \\?\ prepended to their path
- const final_len = x: {
- if (result > 4 and mem.startsWith(u8, buf, "\\\\?\\")) {
- var i: usize = 4;
- while (i < result) : (i += 1) {
- buf[i - 4] = buf[i];
- }
- break :x result - 4;
- } else {
- break :x result;
- }
- };
-
- return allocator.shrink(u8, buf, final_len);
- }
+ const pathname_w = try windows_util.cStrToPrefixedFileW(pathname);
+ return realW(out_buffer, pathname_w);
},
Os.macosx, Os.ios => {
- // TODO instead of calling the libc function here, port the implementation
- // to Zig, and then remove the NameTooLong error possibility.
- const pathname_buf = try allocator.alloc(u8, pathname.len + 1);
- defer allocator.free(pathname_buf);
-
- const result_buf = try allocator.alloc(u8, posix.PATH_MAX);
- errdefer allocator.free(result_buf);
-
- mem.copy(u8, pathname_buf, pathname);
- pathname_buf[pathname.len] = 0;
-
- const err = posix.getErrno(posix.realpath(pathname_buf.ptr, result_buf.ptr));
- if (err > 0) {
- return switch (err) {
- posix.EINVAL => unreachable,
- posix.EBADF => unreachable,
- posix.EFAULT => unreachable,
- posix.EACCES => error.AccessDenied,
- posix.ENOENT => error.FileNotFound,
- posix.ENOTSUP => error.NotSupported,
- posix.ENOTDIR => error.NotDir,
- posix.ENAMETOOLONG => error.NameTooLong,
- posix.ELOOP => error.SymLinkLoop,
- posix.EIO => error.InputOutput,
- else => os.unexpectedErrorPosix(err),
- };
+ // TODO instead of calling the libc function here, port the implementation to Zig
+ const err = posix.getErrno(posix.realpath(pathname, out_buffer));
+ switch (err) {
+ 0 => return mem.toSlice(u8, out_buffer),
+ posix.EINVAL => unreachable,
+ posix.EBADF => unreachable,
+ posix.EFAULT => unreachable,
+ posix.EACCES => return error.AccessDenied,
+ posix.ENOENT => return error.FileNotFound,
+ posix.ENOTSUP => return error.NotSupported,
+ posix.ENOTDIR => return error.NotDir,
+ posix.ENAMETOOLONG => return error.NameTooLong,
+ posix.ELOOP => return error.SymLinkLoop,
+ posix.EIO => return error.InputOutput,
+ else => return os.unexpectedErrorPosix(err),
}
- return allocator.shrink(u8, result_buf, cstr.len(result_buf.ptr));
},
Os.linux => {
- const fd = try os.posixOpen(allocator, pathname, posix.O_PATH | posix.O_NONBLOCK | posix.O_CLOEXEC, 0);
+ const fd = try os.posixOpenC(pathname, posix.O_PATH | posix.O_NONBLOCK | posix.O_CLOEXEC, 0);
defer os.close(fd);
var buf: ["/proc/self/fd/-2147483648".len]u8 = undefined;
- const proc_path = fmt.bufPrint(buf[0..], "/proc/self/fd/{}", fd) catch unreachable;
+ const proc_path = fmt.bufPrint(buf[0..], "/proc/self/fd/{}\x00", fd) catch unreachable;
- return os.readLink(allocator, proc_path);
+ return os.readLinkC(out_buffer, proc_path.ptr);
},
else => @compileError("TODO implement os.path.real for " ++ @tagName(builtin.os)),
}
}
+/// Return the canonicalized absolute pathname.
+/// Expands all symbolic links and resolves references to `.`, `..`, and
+/// extra `/` characters in ::pathname.
+/// The return value is a slice of out_buffer, and not necessarily from the beginning.
+pub fn real(out_buffer: *[os.MAX_PATH_BYTES]u8, pathname: []const u8) RealError![]u8 {
+ switch (builtin.os) {
+ Os.windows => {
+ const pathname_w = try windows_util.sliceToPrefixedFileW(pathname);
+ return realW(out_buffer, &pathname_w);
+ },
+ Os.macosx, Os.ios, Os.linux => {
+ const pathname_c = try os.toPosixPath(pathname);
+ return realC(out_buffer, &pathname_c);
+ },
+ else => @compileError("Unsupported OS"),
+ }
+}
+
+/// `real`, except caller must free the returned memory.
+pub fn realAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 {
+ var buf: [os.MAX_PATH_BYTES]u8 = undefined;
+ return mem.dupe(allocator, u8, try real(&buf, pathname));
+}
+
test "os.path.real" {
// at least call it so it gets compiled
- _ = real(debug.global_allocator, "some_path");
+ var buf: [os.MAX_PATH_BYTES]u8 = undefined;
+ std.debug.assertError(real(&buf, "definitely_bogus_does_not_exist1234"), error.FileNotFound);
}
diff --git a/std/os/test.zig b/std/os/test.zig
index 9e795e8ad2..653ab13fd8 100644
--- a/std/os/test.zig
+++ b/std/os/test.zig
@@ -10,30 +10,47 @@ const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
test "makePath, put some files in it, deleteTree" {
- try os.makePath(a, "os_test_tmp/b/c");
- try io.writeFile(a, "os_test_tmp/b/c/file.txt", "nonsense");
- try io.writeFile(a, "os_test_tmp/b/file2.txt", "blah");
+ try os.makePath(a, "os_test_tmp" ++ os.path.sep_str ++ "b" ++ os.path.sep_str ++ "c");
+ try io.writeFile("os_test_tmp" ++ os.path.sep_str ++ "b" ++ os.path.sep_str ++ "c" ++ os.path.sep_str ++ "file.txt", "nonsense");
+ try io.writeFile("os_test_tmp" ++ os.path.sep_str ++ "b" ++ os.path.sep_str ++ "file2.txt", "blah");
try os.deleteTree(a, "os_test_tmp");
if (os.Dir.open(a, "os_test_tmp")) |dir| {
@panic("expected error");
} else |err| {
- assert(err == error.PathNotFound);
+ assert(err == error.FileNotFound);
}
}
test "access file" {
try os.makePath(a, "os_test_tmp");
- if (os.File.access(a, "os_test_tmp/file.txt")) |ok| {
+ if (os.File.access("os_test_tmp" ++ os.path.sep_str ++ "file.txt")) |ok| {
@panic("expected error");
} else |err| {
- assert(err == error.NotFound);
+ assert(err == error.FileNotFound);
}
- try io.writeFile(a, "os_test_tmp/file.txt", "");
- try os.File.access(a, "os_test_tmp/file.txt");
+ try io.writeFile("os_test_tmp" ++ os.path.sep_str ++ "file.txt", "");
+ try os.File.access("os_test_tmp" ++ os.path.sep_str ++ "file.txt");
try os.deleteTree(a, "os_test_tmp");
}
+fn testThreadIdFn(thread_id: *os.Thread.Id) void {
+ thread_id.* = os.Thread.getCurrentId();
+}
+
+test "std.os.Thread.getCurrentId" {
+ var thread_current_id: os.Thread.Id = undefined;
+ const thread = try os.spawnThread(&thread_current_id, testThreadIdFn);
+ const thread_id = thread.handle();
+ thread.wait();
+ switch (builtin.os) {
+ builtin.Os.windows => assert(os.Thread.getCurrentId() != thread_current_id),
+ else => {
+ assert(thread_current_id == thread_id);
+ },
+ }
+}
+
test "spawn threads" {
var shared_ctx: i32 = 1;
diff --git a/std/os/windows/index.zig b/std/os/windows/index.zig
index 90ccfaf6c5..bb055468a5 100644
--- a/std/os/windows/index.zig
+++ b/std/os/windows/index.zig
@@ -67,8 +67,9 @@ pub const INVALID_FILE_ATTRIBUTES = DWORD(@maxValue(DWORD));
pub const OVERLAPPED = extern struct {
Internal: ULONG_PTR,
InternalHigh: ULONG_PTR,
- Pointer: PVOID,
- hEvent: HANDLE,
+ Offset: DWORD,
+ OffsetHigh: DWORD,
+ hEvent: ?HANDLE,
};
pub const LPOVERLAPPED = *OVERLAPPED;
@@ -350,3 +351,15 @@ pub const E_ACCESSDENIED = @bitCast(c_long, c_ulong(0x80070005));
pub const E_HANDLE = @bitCast(c_long, c_ulong(0x80070006));
pub const E_OUTOFMEMORY = @bitCast(c_long, c_ulong(0x8007000E));
pub const E_INVALIDARG = @bitCast(c_long, c_ulong(0x80070057));
+
+pub const FILE_FLAG_BACKUP_SEMANTICS = 0x02000000;
+pub const FILE_FLAG_DELETE_ON_CLOSE = 0x04000000;
+pub const FILE_FLAG_NO_BUFFERING = 0x20000000;
+pub const FILE_FLAG_OPEN_NO_RECALL = 0x00100000;
+pub const FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000;
+pub const FILE_FLAG_OVERLAPPED = 0x40000000;
+pub const FILE_FLAG_POSIX_SEMANTICS = 0x0100000;
+pub const FILE_FLAG_RANDOM_ACCESS = 0x10000000;
+pub const FILE_FLAG_SESSION_AWARE = 0x00800000;
+pub const FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000;
+pub const FILE_FLAG_WRITE_THROUGH = 0x80000000;
diff --git a/std/os/windows/kernel32.zig b/std/os/windows/kernel32.zig
index fa3473ad05..66b5291189 100644
--- a/std/os/windows/kernel32.zig
+++ b/std/os/windows/kernel32.zig
@@ -1,14 +1,24 @@
use @import("index.zig");
+pub extern "kernel32" stdcallcc fn CancelIoEx(hFile: HANDLE, lpOverlapped: LPOVERLAPPED) BOOL;
+
pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL;
-pub extern "kernel32" stdcallcc fn CreateDirectoryA(
- lpPathName: LPCSTR,
- lpSecurityAttributes: ?*SECURITY_ATTRIBUTES,
-) BOOL;
+pub extern "kernel32" stdcallcc fn CreateDirectoryA(lpPathName: [*]const u8, lpSecurityAttributes: ?*SECURITY_ATTRIBUTES) BOOL;
+pub extern "kernel32" stdcallcc fn CreateDirectoryW(lpPathName: [*]const u16, lpSecurityAttributes: ?*SECURITY_ATTRIBUTES) BOOL;
pub extern "kernel32" stdcallcc fn CreateFileA(
- lpFileName: LPCSTR,
+ lpFileName: [*]const u8, // TODO null terminated pointer type
+ dwDesiredAccess: DWORD,
+ dwShareMode: DWORD,
+ lpSecurityAttributes: ?LPSECURITY_ATTRIBUTES,
+ dwCreationDisposition: DWORD,
+ dwFlagsAndAttributes: DWORD,
+ hTemplateFile: ?HANDLE,
+) HANDLE;
+
+pub extern "kernel32" stdcallcc fn CreateFileW(
+ lpFileName: [*]const u16, // TODO null terminated pointer type
dwDesiredAccess: DWORD,
dwShareMode: DWORD,
lpSecurityAttributes: ?LPSECURITY_ATTRIBUTES,
@@ -47,7 +57,8 @@ pub extern "kernel32" stdcallcc fn CreateIoCompletionPort(FileHandle: HANDLE, Ex
pub extern "kernel32" stdcallcc fn CreateThread(lpThreadAttributes: ?LPSECURITY_ATTRIBUTES, dwStackSize: SIZE_T, lpStartAddress: LPTHREAD_START_ROUTINE, lpParameter: ?LPVOID, dwCreationFlags: DWORD, lpThreadId: ?LPDWORD) ?HANDLE;
-pub extern "kernel32" stdcallcc fn DeleteFileA(lpFileName: LPCSTR) BOOL;
+pub extern "kernel32" stdcallcc fn DeleteFileA(lpFileName: [*]const u8) BOOL;
+pub extern "kernel32" stdcallcc fn DeleteFileW(lpFileName: [*]const u16) BOOL;
pub extern "kernel32" stdcallcc fn ExitProcess(exit_code: UINT) noreturn;
@@ -61,7 +72,11 @@ pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR;
pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: *DWORD) BOOL;
-pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: WORD, lpBuffer: ?LPSTR) DWORD;
+pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: DWORD, lpBuffer: ?[*]CHAR) DWORD;
+pub extern "kernel32" stdcallcc fn GetCurrentDirectoryW(nBufferLength: DWORD, lpBuffer: ?[*]WCHAR) DWORD;
+
+pub extern "kernel32" stdcallcc fn GetCurrentThread() HANDLE;
+pub extern "kernel32" stdcallcc fn GetCurrentThreadId() DWORD;
pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?[*]u8;
@@ -71,9 +86,11 @@ pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCo
pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) BOOL;
-pub extern "kernel32" stdcallcc fn GetFileAttributesA(lpFileName: LPCSTR) DWORD;
+pub extern "kernel32" stdcallcc fn GetFileAttributesA(lpFileName: [*]const CHAR) DWORD;
+pub extern "kernel32" stdcallcc fn GetFileAttributesW(lpFileName: [*]const WCHAR) DWORD;
-pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) DWORD;
+pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: [*]u8, nSize: DWORD) DWORD;
+pub extern "kernel32" stdcallcc fn GetModuleFileNameW(hModule: ?HMODULE, lpFilename: [*]u16, nSize: DWORD) DWORD;
pub extern "kernel32" stdcallcc fn GetLastError() DWORD;
@@ -91,6 +108,15 @@ pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA(
dwFlags: DWORD,
) DWORD;
+pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleW(
+ hFile: HANDLE,
+ lpszFilePath: [*]u16,
+ cchFilePath: DWORD,
+ dwFlags: DWORD,
+) DWORD;
+
+pub extern "kernel32" stdcallcc fn GetOverlappedResult(hFile: HANDLE, lpOverlapped: *OVERLAPPED, lpNumberOfBytesTransferred: *DWORD, bWait: BOOL) BOOL;
+
pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE;
pub extern "kernel32" stdcallcc fn GetQueuedCompletionStatus(CompletionPort: HANDLE, lpNumberOfBytesTransferred: LPDWORD, lpCompletionKey: *ULONG_PTR, lpOverlapped: *?*OVERLAPPED, dwMilliseconds: DWORD) BOOL;
@@ -101,7 +127,6 @@ pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: S
pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL;
pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void;
pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T;
-pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL;
@@ -111,9 +136,17 @@ pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBy
pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL;
+pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: ?*const c_void) BOOL;
+
pub extern "kernel32" stdcallcc fn MoveFileExA(
- lpExistingFileName: LPCSTR,
- lpNewFileName: LPCSTR,
+ lpExistingFileName: [*]const u8,
+ lpNewFileName: [*]const u8,
+ dwFlags: DWORD,
+) BOOL;
+
+pub extern "kernel32" stdcallcc fn MoveFileExW(
+ lpExistingFileName: [*]const u16,
+ lpNewFileName: [*]const u16,
dwFlags: DWORD,
) BOOL;
@@ -123,11 +156,22 @@ pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *
pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL;
+pub extern "kernel32" stdcallcc fn ReadDirectoryChangesW(
+ hDirectory: HANDLE,
+ lpBuffer: [*]align(@alignOf(FILE_NOTIFY_INFORMATION)) u8,
+ nBufferLength: DWORD,
+ bWatchSubtree: BOOL,
+ dwNotifyFilter: DWORD,
+ lpBytesReturned: ?*DWORD,
+ lpOverlapped: ?*OVERLAPPED,
+ lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE,
+) BOOL;
+
pub extern "kernel32" stdcallcc fn ReadFile(
in_hFile: HANDLE,
- out_lpBuffer: *c_void,
+ out_lpBuffer: [*]u8,
in_nNumberOfBytesToRead: DWORD,
- out_lpNumberOfBytesRead: *DWORD,
+ out_lpNumberOfBytesRead: ?*DWORD,
in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL;
@@ -150,13 +194,41 @@ pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMillis
pub extern "kernel32" stdcallcc fn WriteFile(
in_hFile: HANDLE,
- in_lpBuffer: *const c_void,
+ in_lpBuffer: [*]const u8,
in_nNumberOfBytesToWrite: DWORD,
out_lpNumberOfBytesWritten: ?*DWORD,
in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL;
+pub extern "kernel32" stdcallcc fn WriteFileEx(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpOverlapped: LPOVERLAPPED, lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE) BOOL;
+
//TODO: call unicode versions instead of relying on ANSI code page
pub extern "kernel32" stdcallcc fn LoadLibraryA(lpLibFileName: LPCSTR) ?HMODULE;
pub extern "kernel32" stdcallcc fn FreeLibrary(hModule: HMODULE) BOOL;
+
+pub const FILE_NOTIFY_INFORMATION = extern struct {
+ NextEntryOffset: DWORD,
+ Action: DWORD,
+ FileNameLength: DWORD,
+ FileName: [1]WCHAR,
+};
+
+pub const FILE_ACTION_ADDED = 0x00000001;
+pub const FILE_ACTION_REMOVED = 0x00000002;
+pub const FILE_ACTION_MODIFIED = 0x00000003;
+pub const FILE_ACTION_RENAMED_OLD_NAME = 0x00000004;
+pub const FILE_ACTION_RENAMED_NEW_NAME = 0x00000005;
+
+pub const LPOVERLAPPED_COMPLETION_ROUTINE = ?extern fn (DWORD, DWORD, *OVERLAPPED) void;
+
+pub const FILE_LIST_DIRECTORY = 1;
+
+pub const FILE_NOTIFY_CHANGE_CREATION = 64;
+pub const FILE_NOTIFY_CHANGE_SIZE = 8;
+pub const FILE_NOTIFY_CHANGE_SECURITY = 256;
+pub const FILE_NOTIFY_CHANGE_LAST_ACCESS = 32;
+pub const FILE_NOTIFY_CHANGE_LAST_WRITE = 16;
+pub const FILE_NOTIFY_CHANGE_DIR_NAME = 2;
+pub const FILE_NOTIFY_CHANGE_FILE_NAME = 1;
+pub const FILE_NOTIFY_CHANGE_ATTRIBUTES = 4;
diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig
index c9d2c3c3e6..72de896996 100644
--- a/std/os/windows/util.zig
+++ b/std/os/windows/util.zig
@@ -7,9 +7,17 @@ const mem = std.mem;
const BufMap = std.BufMap;
const cstr = std.cstr;
+// > The maximum path of 32,767 characters is approximate, because the "\\?\"
+// > prefix may be expanded to a longer string by the system at run time, and
+// > this expansion applies to the total length.
+// from https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file#maximum-path-length-limitation
+pub const PATH_MAX_WIDE = 32767;
+
pub const WaitError = error{
WaitAbandoned,
WaitTimeOut,
+
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
@@ -36,20 +44,21 @@ pub fn windowsClose(handle: windows.HANDLE) void {
pub const WriteError = error{
SystemResources,
OperationAborted,
- IoPending,
BrokenPipe,
+
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
};
pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void {
- if (windows.WriteFile(handle, @ptrCast(*const c_void, bytes.ptr), @intCast(u32, bytes.len), null, null) == 0) {
+ if (windows.WriteFile(handle, bytes.ptr, @intCast(u32, bytes.len), null, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources,
windows.ERROR.NOT_ENOUGH_MEMORY => WriteError.SystemResources,
windows.ERROR.OPERATION_ABORTED => WriteError.OperationAborted,
windows.ERROR.NOT_ENOUGH_QUOTA => WriteError.SystemResources,
- windows.ERROR.IO_PENDING => WriteError.IoPending,
+ windows.ERROR.IO_PENDING => unreachable,
windows.ERROR.BROKEN_PIPE => WriteError.BrokenPipe,
else => os.unexpectedErrorWindows(err),
};
@@ -87,37 +96,51 @@ pub fn windowsIsCygwinPty(handle: windows.HANDLE) bool {
pub const OpenError = error{
SharingViolation,
PathAlreadyExists,
+
+ /// When any of the path components cannot be found or the file component cannot
+ /// be found. Some operating systems distinguish between path components not found and
+ /// file components not found, but they are collapsed into FileNotFound to gain
+ /// consistency across operating systems.
FileNotFound,
+
AccessDenied,
PipeBusy,
+ NameTooLong,
+
+ /// On Windows, file paths must be valid Unicode.
+ InvalidUtf8,
+
+ /// On Windows, file paths cannot contain these characters:
+ /// '/', '*', '?', '"', '<', '>', '|'
+ BadPathName,
+
+ /// See https://github.com/ziglang/zig/issues/1396
Unexpected,
- OutOfMemory,
};
-/// `file_path` needs to be copied in memory to add a null terminating byte, hence the allocator.
pub fn windowsOpen(
- allocator: *mem.Allocator,
file_path: []const u8,
desired_access: windows.DWORD,
share_mode: windows.DWORD,
creation_disposition: windows.DWORD,
flags_and_attrs: windows.DWORD,
) OpenError!windows.HANDLE {
- const path_with_null = try cstr.addNullByte(allocator, file_path);
- defer allocator.free(path_with_null);
+ const file_path_w = try sliceToPrefixedFileW(file_path);
- const result = windows.CreateFileA(path_with_null.ptr, desired_access, share_mode, null, creation_disposition, flags_and_attrs, null);
+ const result = windows.CreateFileW(&file_path_w, desired_access, share_mode, null, creation_disposition, flags_and_attrs, null);
if (result == windows.INVALID_HANDLE_VALUE) {
const err = windows.GetLastError();
- return switch (err) {
- windows.ERROR.SHARING_VIOLATION => OpenError.SharingViolation,
- windows.ERROR.ALREADY_EXISTS, windows.ERROR.FILE_EXISTS => OpenError.PathAlreadyExists,
- windows.ERROR.FILE_NOT_FOUND => OpenError.FileNotFound,
- windows.ERROR.ACCESS_DENIED => OpenError.AccessDenied,
- windows.ERROR.PIPE_BUSY => OpenError.PipeBusy,
- else => os.unexpectedErrorWindows(err),
- };
+ switch (err) {
+ windows.ERROR.SHARING_VIOLATION => return OpenError.SharingViolation,
+ windows.ERROR.ALREADY_EXISTS => return OpenError.PathAlreadyExists,
+ windows.ERROR.FILE_EXISTS => return OpenError.PathAlreadyExists,
+ windows.ERROR.FILE_NOT_FOUND => return OpenError.FileNotFound,
+ windows.ERROR.PATH_NOT_FOUND => return OpenError.FileNotFound,
+ windows.ERROR.ACCESS_DENIED => return OpenError.AccessDenied,
+ windows.ERROR.PIPE_BUSY => return OpenError.PipeBusy,
+ else => return os.unexpectedErrorWindows(err),
+ }
}
return result;
@@ -193,9 +216,8 @@ pub fn windowsFindFirstFile(
if (handle == windows.INVALID_HANDLE_VALUE) {
const err = windows.GetLastError();
switch (err) {
- windows.ERROR.FILE_NOT_FOUND,
- windows.ERROR.PATH_NOT_FOUND,
- => return error.PathNotFound,
+ windows.ERROR.FILE_NOT_FOUND => return error.FileNotFound,
+ windows.ERROR.PATH_NOT_FOUND => return error.FileNotFound,
else => return os.unexpectedErrorWindows(err),
}
}
@@ -221,6 +243,7 @@ pub fn windowsCreateIoCompletionPort(file_handle: windows.HANDLE, existing_compl
const handle = windows.CreateIoCompletionPort(file_handle, existing_completion_port, completion_key, concurrent_thread_count) orelse {
const err = windows.GetLastError();
switch (err) {
+ windows.ERROR.INVALID_PARAMETER => unreachable,
else => return os.unexpectedErrorWindows(err),
}
};
@@ -238,21 +261,55 @@ pub fn windowsPostQueuedCompletionStatus(completion_port: windows.HANDLE, bytes_
}
}
-pub const WindowsWaitResult = error{
+pub const WindowsWaitResult = enum {
Normal,
Aborted,
+ Cancelled,
};
pub fn windowsGetQueuedCompletionStatus(completion_port: windows.HANDLE, bytes_transferred_count: *windows.DWORD, lpCompletionKey: *usize, lpOverlapped: *?*windows.OVERLAPPED, dwMilliseconds: windows.DWORD) WindowsWaitResult {
if (windows.GetQueuedCompletionStatus(completion_port, bytes_transferred_count, lpCompletionKey, lpOverlapped, dwMilliseconds) == windows.FALSE) {
- if (std.debug.runtime_safety) {
- const err = windows.GetLastError();
- if (err != windows.ERROR.ABANDONED_WAIT_0) {
- std.debug.warn("err: {}\n", err);
- }
- assert(err == windows.ERROR.ABANDONED_WAIT_0);
+ const err = windows.GetLastError();
+ switch (err) {
+ windows.ERROR.ABANDONED_WAIT_0 => return WindowsWaitResult.Aborted,
+ windows.ERROR.OPERATION_ABORTED => return WindowsWaitResult.Cancelled,
+ else => {
+ if (std.debug.runtime_safety) {
+ std.debug.panic("unexpected error: {}\n", err);
+ }
+ },
}
- return WindowsWaitResult.Aborted;
}
return WindowsWaitResult.Normal;
}
+
+pub fn cStrToPrefixedFileW(s: [*]const u8) ![PATH_MAX_WIDE + 1]u16 {
+ return sliceToPrefixedFileW(mem.toSliceConst(u8, s));
+}
+
+pub fn sliceToPrefixedFileW(s: []const u8) ![PATH_MAX_WIDE + 1]u16 {
+ // TODO well defined copy elision
+ var result: [PATH_MAX_WIDE + 1]u16 = undefined;
+
+ // > File I/O functions in the Windows API convert "/" to "\" as part of
+ // > converting the name to an NT-style name, except when using the "\\?\"
+ // > prefix as detailed in the following sections.
+ // from https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file#maximum-path-length-limitation
+ // Because we want the larger maximum path length for absolute paths, we
+ // disallow forward slashes in zig std lib file functions on Windows.
+ for (s) |byte|
+ switch (byte) {
+ '/', '*', '?', '"', '<', '>', '|' => return error.BadPathName,
+ else => {},
+ };
+ const start_index = if (mem.startsWith(u8, s, "\\\\") or !os.path.isAbsolute(s)) 0 else blk: {
+ const prefix = []u16{ '\\', '\\', '?', '\\' };
+ mem.copy(u16, result[0..], prefix);
+ break :blk prefix.len;
+ };
+ const end_index = start_index + try std.unicode.utf8ToUtf16Le(result[start_index..], s);
+ assert(end_index <= result.len);
+ if (end_index == result.len) return error.NameTooLong;
+ result[end_index] = 0;
+ return result;
+}
diff --git a/std/os/zen.zig b/std/os/zen.zig
index 2312b36dea..55b6d91128 100644
--- a/std/os/zen.zig
+++ b/std/os/zen.zig
@@ -1,38 +1,55 @@
+const std = @import("../index.zig");
+const assert = std.debug.assert;
+
//////////////////////////
//// IPC structures ////
//////////////////////////
pub const Message = struct {
- sender: MailboxId,
+ sender: MailboxId,
receiver: MailboxId,
- type: usize,
- payload: usize,
+ code: usize,
+ args: [5]usize,
+ payload: ?[]const u8,
pub fn from(mailbox_id: *const MailboxId) Message {
- return Message{
- .sender = MailboxId.Undefined,
- .receiver = *mailbox_id,
- .type = 0,
- .payload = 0,
+ return Message {
+ .sender = MailboxId.Undefined,
+ .receiver = mailbox_id.*,
+ .code = undefined,
+ .args = undefined,
+ .payload = null,
};
}
- pub fn to(mailbox_id: *const MailboxId, msg_type: usize) Message {
- return Message{
- .sender = MailboxId.This,
- .receiver = *mailbox_id,
- .type = msg_type,
- .payload = 0,
+ pub fn to(mailbox_id: *const MailboxId, msg_code: usize, args: ...) Message {
+ var message = Message {
+ .sender = MailboxId.This,
+ .receiver = mailbox_id.*,
+ .code = msg_code,
+ .args = undefined,
+ .payload = null,
};
+
+ assert (args.len <= message.args.len);
+ comptime var i = 0;
+ inline while (i < args.len) : (i += 1) {
+ message.args[i] = args[i];
+ }
+
+ return message;
}
- pub fn withData(mailbox_id: *const MailboxId, msg_type: usize, payload: usize) Message {
- return Message{
- .sender = MailboxId.This,
- .receiver = *mailbox_id,
- .type = msg_type,
- .payload = payload,
- };
+ pub fn as(self: *const Message, sender: *const MailboxId) Message {
+ var message = self.*;
+ message.sender = sender.*;
+ return message;
+ }
+
+ pub fn withPayload(self: *const Message, payload: []const u8) Message {
+ var message = self.*;
+ message.payload = payload;
+ return message;
}
};
@@ -63,21 +80,26 @@ pub const STDOUT_FILENO = 1;
pub const STDERR_FILENO = 2;
// FIXME: let's borrow Linux's error numbers for now.
-pub const getErrno = @import("linux/index.zig").getErrno;
use @import("linux/errno.zig");
+// Get the errno from a syscall return value, or 0 for no error.
+pub fn getErrno(r: usize) usize {
+ const signed_r = @bitCast(isize, r);
+ return if (signed_r > -4096 and signed_r < 0) @intCast(usize, -signed_r) else 0;
+}
// TODO: implement this correctly.
-pub fn read(fd: i32, buf: *u8, count: usize) usize {
+pub fn read(fd: i32, buf: [*]u8, count: usize) usize {
switch (fd) {
STDIN_FILENO => {
var i: usize = 0;
while (i < count) : (i += 1) {
send(Message.to(Server.Keyboard, 0));
+ // FIXME: we should be certain that we are receiving from Keyboard.
var message = Message.from(MailboxId.This);
- receive(*message);
+ receive(&message);
- buf[i] = u8(message.payload);
+ buf[i] = @intCast(u8, message.args[0]);
}
},
else => unreachable,
@@ -86,13 +108,11 @@ pub fn read(fd: i32, buf: *u8, count: usize) usize {
}
// TODO: implement this correctly.
-pub fn write(fd: i32, buf: *const u8, count: usize) usize {
+pub fn write(fd: i32, buf: [*]const u8, count: usize) usize {
switch (fd) {
STDOUT_FILENO, STDERR_FILENO => {
- var i: usize = 0;
- while (i < count) : (i += 1) {
- send(Message.withData(Server.Terminal, 1, buf[i]));
- }
+ send(Message.to(Server.Terminal, 1)
+ .withPayload(buf[0..count]));
},
else => unreachable,
}
@@ -104,17 +124,14 @@ pub fn write(fd: i32, buf: *const u8, count: usize) usize {
///////////////////////////
pub const Syscall = enum(usize) {
- exit = 0,
- createPort = 1,
- send = 2,
- receive = 3,
- subscribeIRQ = 4,
- inb = 5,
- map = 6,
- createThread = 7,
- createProcess = 8,
- wait = 9,
- portReady = 10,
+ exit = 0,
+ send = 1,
+ receive = 2,
+ subscribeIRQ = 3,
+ inb = 4,
+ outb = 5,
+ map = 6,
+ createThread = 7,
};
////////////////////
@@ -126,13 +143,6 @@ pub fn exit(status: i32) noreturn {
unreachable;
}
-pub fn createPort(mailbox_id: *const MailboxId) void {
- _ = switch (*mailbox_id) {
- MailboxId.Port => |id| syscall1(Syscall.createPort, id),
- else => unreachable,
- };
-}
-
pub fn send(message: *const Message) void {
_ = syscall1(Syscall.send, @ptrToInt(message));
}
@@ -146,29 +156,21 @@ pub fn subscribeIRQ(irq: u8, mailbox_id: *const MailboxId) void {
}
pub fn inb(port: u16) u8 {
- return u8(syscall1(Syscall.inb, port));
+ return @intCast(u8, syscall1(Syscall.inb, port));
+}
+
+pub fn outb(port: u16, value: u8) void {
+ _ = syscall2(Syscall.outb, port, value);
}
pub fn map(v_addr: usize, p_addr: usize, size: usize, writable: bool) bool {
- return syscall4(Syscall.map, v_addr, p_addr, size, usize(writable)) != 0;
+ return syscall4(Syscall.map, v_addr, p_addr, size, @boolToInt(writable)) != 0;
}
pub fn createThread(function: fn () void) u16 {
return u16(syscall1(Syscall.createThread, @ptrToInt(function)));
}
-pub fn createProcess(elf_addr: usize) u16 {
- return u16(syscall1(Syscall.createProcess, elf_addr));
-}
-
-pub fn wait(tid: u16) void {
- _ = syscall1(Syscall.wait, tid);
-}
-
-pub fn portReady(port: u16) bool {
- return syscall1(Syscall.portReady, port) != 0;
-}
-
/////////////////////////
//// Syscall stubs ////
/////////////////////////
diff --git a/std/rb.zig b/std/rb.zig
new file mode 100644
index 0000000000..d523069846
--- /dev/null
+++ b/std/rb.zig
@@ -0,0 +1,543 @@
+const std = @import("index.zig");
+const assert = std.debug.assert;
+const mem = std.mem; // For mem.Compare
+
+const Color = enum(u1) {
+ Black,
+ Red,
+};
+const Red = Color.Red;
+const Black = Color.Black;
+
+const ReplaceError = error {
+ NotEqual,
+};
+
+/// Insert this into your struct that you want to add to a red-black tree.
+/// Do not use a pointer. Turn the *rb.Node results of the functions in rb
+/// (after resolving optionals) to your structure using @fieldParentPtr(). Example:
+///
+/// const Number = struct {
+/// node: rb.Node,
+/// value: i32,
+/// };
+/// fn number(node: *Node) *Number {
+/// return @fieldParentPtr(Number, "node", node);
+/// }
+pub const Node = struct {
+ left: ?*Node,
+ right: ?*Node,
+ parent_and_color: usize, // parent | color
+
+ pub fn next(constnode: *Node) ?*Node {
+ var node = constnode;
+
+ if (node.right) |right| {
+ var n = right;
+ while (n.left) |left|
+ n = left;
+ return n;
+ }
+
+ while (true) {
+ var parent = node.get_parent();
+ if (parent) |p| {
+ if (node != p.right)
+ return p;
+ node = p;
+ } else
+ return null;
+ }
+ }
+
+ pub fn prev(constnode: *Node) ?*Node {
+ var node = constnode;
+
+ if (node.left) |left| {
+ var n = left;
+ while (n.right) |right|
+ n = right;
+ return n;
+ }
+
+ while (true) {
+ var parent = node.get_parent();
+ if (parent) |p| {
+ if (node != p.left)
+ return p;
+ node = p;
+ } else
+ return null;
+ }
+ }
+
+ pub fn is_root(node: *Node) bool {
+ return node.get_parent() == null;
+ }
+
+ fn is_red(node: *Node) bool {
+ return node.get_color() == Red;
+ }
+
+ fn is_black(node: *Node) bool {
+ return node.get_color() == Black;
+ }
+
+ fn set_parent(node: *Node, parent: ?*Node) void {
+ node.parent_and_color = @ptrToInt(parent) | (node.parent_and_color & 1);
+ }
+
+ fn get_parent(node: *Node) ?*Node {
+ const mask: usize = 1;
+ comptime {
+ assert(@alignOf(*Node) >= 2);
+ }
+ return @intToPtr(*Node, node.parent_and_color & ~mask);
+ }
+
+ fn set_color(node: *Node, color: Color) void {
+ const mask: usize = 1;
+ node.parent_and_color = (node.parent_and_color & ~mask) | @enumToInt(color);
+ }
+
+ fn get_color(node: *Node) Color {
+ return @intToEnum(Color, @intCast(u1, node.parent_and_color & 1));
+ }
+
+ fn set_child(node: *Node, child: ?*Node, is_left: bool) void {
+ if (is_left) {
+ node.left = child;
+ } else {
+ node.right = child;
+ }
+ }
+
+ fn get_first(nodeconst: *Node) *Node {
+ var node = nodeconst;
+ while (node.left) |left| {
+ node = left;
+ }
+ return node;
+ }
+
+ fn get_last(node: *Node) *Node {
+ while (node.right) |right| {
+ node = right;
+ }
+ return node;
+ }
+};
+
+pub const Tree = struct {
+ root: ?*Node,
+ compareFn: fn(*Node, *Node) mem.Compare,
+
+ /// If you have a need for a version that caches this, please file a bug.
+ pub fn first(tree: *Tree) ?*Node {
+ var node: *Node = tree.root orelse return null;
+
+ while (node.left) |left| {
+ node = left;
+ }
+
+ return node;
+ }
+
+ pub fn last(tree: *Tree) ?*Node {
+ var node: *Node = tree.root orelse return null;
+
+ while (node.right) |right| {
+ node = right;
+ }
+
+ return node;
+ }
+
+ /// Duplicate keys are not allowed. The item with the same key already in the
+ /// tree will be returned, and the item will not be inserted.
+ pub fn insert(tree: *Tree, node_const: *Node) ?*Node {
+ var node = node_const;
+ var maybe_key: ?*Node = undefined;
+ var maybe_parent: ?*Node = undefined;
+ var is_left: bool = undefined;
+
+ maybe_key = do_lookup(node, tree, &maybe_parent, &is_left);
+ if (maybe_key) |key| {
+ return key;
+ }
+
+ node.left = null;
+ node.right = null;
+ node.set_color(Red);
+ node.set_parent(maybe_parent);
+
+ if (maybe_parent) |parent| {
+ parent.set_child(node, is_left);
+ } else {
+ tree.root = node;
+ }
+
+ while (node.get_parent()) |*parent| {
+ if (parent.*.is_black())
+ break;
+ // parent is red at this point, so it cannot be the root (the root is always black); hence a grandparent exists
+ var grandpa = parent.*.get_parent() orelse unreachable;
+
+ if (parent.* == grandpa.left) {
+ var maybe_uncle = grandpa.right;
+
+ if (maybe_uncle) |uncle| {
+ if (uncle.is_black())
+ break;
+
+ parent.*.set_color(Black);
+ uncle.set_color(Black);
+ grandpa.set_color(Red);
+ node = grandpa;
+ } else {
+ if (node == parent.*.right) {
+ rotate_left(parent.*, tree);
+ node = parent.*;
+ parent.* = node.get_parent().?; // Just rotated
+ }
+ parent.*.set_color(Black);
+ grandpa.set_color(Red);
+ rotate_right(grandpa, tree);
+ }
+ } else {
+ var maybe_uncle = grandpa.left;
+
+ if (maybe_uncle) |uncle| {
+ if (uncle.is_black())
+ break;
+
+ parent.*.set_color(Black);
+ uncle.set_color(Black);
+ grandpa.set_color(Red);
+ node = grandpa;
+ } else {
+ if (node == parent.*.left) {
+ rotate_right(parent.*, tree);
+ node = parent.*;
+ parent.* = node.get_parent().?; // Just rotated
+ }
+ parent.*.set_color(Black);
+ grandpa.set_color(Red);
+ rotate_left(grandpa, tree);
+ }
+ }
+ }
+ // This was an insert, there is at least one node.
+ tree.root.?.set_color(Black);
+ return null;
+ }
+
+ pub fn lookup(tree: *Tree, key: *Node) ?*Node {
+ var parent: *Node = undefined;
+ var is_left: bool = undefined;
+
+ return do_lookup(key, tree, &parent, &is_left);
+ }
+
+ pub fn remove(tree: *Tree, nodeconst: *Node) void {
+ var node = nodeconst;
+ // newnode starts out aliasing node, so it is unsafe to access node once newnode has been modified
+ var newnode: ?*Node = nodeconst;
+ var maybe_parent: ?*Node = node.get_parent();
+ var color: Color = undefined;
+ var next: *Node = undefined;
+
+ // Handle the childless node separately so the branches below can assume at least one child (avoids optional unwraps)
+ if (node.left == null and node.right == null) {
+ if (maybe_parent) |parent| {
+ parent.set_child(null, parent.left == node);
+ } else
+ tree.root = null;
+ color = node.get_color();
+ newnode = null;
+ } else {
+ if (node.left == null) {
+ next = node.right.?; // Not both null as per above
+ } else if (node.right == null) {
+ next = node.left.?; // Not both null as per above
+ } else
+ next = node.right.?.get_first(); // Just checked for null above
+
+ if (maybe_parent) |parent| {
+ parent.set_child(next, parent.left == node);
+ } else
+ tree.root = next;
+
+ if (node.left != null and node.right != null) {
+ const left = node.left.?;
+ const right = node.right.?;
+
+ color = next.get_color();
+ next.set_color(node.get_color());
+
+ next.left = left;
+ left.set_parent(next);
+
+ if (next != right) {
+ var parent = next.get_parent().?; // Was traversed via child node (right/left)
+ next.set_parent(node.get_parent());
+
+ newnode = next.right;
+ parent.left = node;
+
+ next.right = right;
+ right.set_parent(next);
+ } else {
+ next.set_parent(maybe_parent);
+ maybe_parent = next;
+ newnode = next.right;
+ }
+ } else {
+ color = node.get_color();
+ newnode = next;
+ }
+ }
+
+ if (newnode) |n|
+ n.set_parent(maybe_parent);
+
+ if (color == Red)
+ return;
+ if (newnode) |n| {
+ n.set_color(Black);
+ return;
+ }
+
+ while (node == tree.root) {
+ // If not root, there must be parent
+ var parent = maybe_parent.?;
+ if (node == parent.left) {
+ var sibling = parent.right.?; // Same number of black nodes.
+
+ if (sibling.is_red()) {
+ sibling.set_color(Black);
+ parent.set_color(Red);
+ rotate_left(parent, tree);
+ sibling = parent.right.?; // Just rotated
+ }
+ if ((if (sibling.left) |n| n.is_black() else true) and
+ (if (sibling.right) |n| n.is_black() else true)) {
+ sibling.set_color(Red);
+ node = parent;
+ maybe_parent = parent.get_parent();
+ continue;
+ }
+ if (if (sibling.right) |n| n.is_black() else true) {
+ sibling.left.?.set_color(Black); // Same number of black nodes.
+ sibling.set_color(Red);
+ rotate_right(sibling, tree);
+ sibling = parent.right.?; // Just rotated
+ }
+ sibling.set_color(parent.get_color());
+ parent.set_color(Black);
+ sibling.right.?.set_color(Black); // Same number of black nodes.
+ rotate_left(parent, tree);
+ newnode = tree.root;
+ break;
+ } else {
+ var sibling = parent.left.?; // Same number of black nodes.
+
+ if (sibling.is_red()) {
+ sibling.set_color(Black);
+ parent.set_color(Red);
+ rotate_right(parent, tree);
+ sibling = parent.left.?; // Just rotated
+ }
+ if ((if (sibling.left) |n| n.is_black() else true) and
+ (if (sibling.right) |n| n.is_black() else true)) {
+ sibling.set_color(Red);
+ node = parent;
+ maybe_parent = parent.get_parent();
+ continue;
+ }
+ if (if (sibling.left) |n| n.is_black() else true) {
+ sibling.right.?.set_color(Black); // Same number of black nodes
+ sibling.set_color(Red);
+ rotate_left(sibling, tree);
+ sibling = parent.left.?; // Just rotated
+ }
+ sibling.set_color(parent.get_color());
+ parent.set_color(Black);
+ sibling.left.?.set_color(Black); // Same number of black nodes
+ rotate_right(parent, tree);
+ newnode = tree.root;
+ break;
+ }
+
+ if (node.is_red())
+ break;
+ }
+
+ if (newnode) |n|
+ n.set_color(Black);
+ }
+
+ /// This is a shortcut to avoid removing and re-inserting an item with the same key.
+ pub fn replace(tree: *Tree, old: *Node, newconst: *Node) !void {
+ var new = newconst;
+
+ // I assume this can get optimized out if the caller already knows.
+ if (tree.compareFn(old, new) != mem.Compare.Equal) return ReplaceError.NotEqual;
+
+ if (old.get_parent()) |parent| {
+ parent.set_child(new, parent.left == old);
+ } else
+ tree.root = new;
+
+ if (old.left) |left|
+ left.set_parent(new);
+ if (old.right) |right|
+ right.set_parent(new);
+
+ new.* = old.*;
+ }
+
+ pub fn init(tree: *Tree, f: fn(*Node, *Node) mem.Compare) void {
+ tree.root = null;
+ tree.compareFn = f;
+ }
+};
+
+fn rotate_left(node: *Node, tree: *Tree) void {
+ var p: *Node = node;
+ var q: *Node = node.right orelse unreachable;
+ var parent: *Node = undefined;
+
+ if (!p.is_root()) {
+ parent = p.get_parent().?;
+ if (parent.left == p) {
+ parent.left = q;
+ } else {
+ parent.right = q;
+ }
+ q.set_parent(parent);
+ } else {
+ tree.root = q;
+ q.set_parent(null);
+ }
+ p.set_parent(q);
+
+ p.right = q.left;
+ if (p.right) |right| {
+ right.set_parent(p);
+ }
+ q.left = p;
+}
+
+fn rotate_right(node: *Node, tree: *Tree) void {
+ var p: *Node = node;
+ var q: *Node = node.left orelse unreachable;
+ var parent: *Node = undefined;
+
+ if (!p.is_root()) {
+ parent = p.get_parent().?;
+ if (parent.left == p) {
+ parent.left = q;
+ } else {
+ parent.right = q;
+ }
+ q.set_parent(parent);
+ } else {
+ tree.root = q;
+ q.set_parent(null);
+ }
+ p.set_parent(q);
+
+ p.left = q.right;
+ if (p.left) |left| {
+ left.set_parent(p);
+ }
+ q.right = p;
+}
+
+fn do_lookup(key: *Node, tree: *Tree, pparent: *?*Node, is_left: *bool) ?*Node {
+ var maybe_node: ?*Node = tree.root;
+
+ pparent.* = null;
+ is_left.* = false;
+
+ while (maybe_node) |node| {
+ var res: mem.Compare = tree.compareFn(node, key);
+ if (res == mem.Compare.Equal) {
+ return node;
+ }
+ pparent.* = node;
+ if (res == mem.Compare.GreaterThan) {
+ is_left.* = true;
+ maybe_node = node.left;
+ } else if (res == mem.Compare.LessThan) {
+ is_left.* = false;
+ maybe_node = node.right;
+ } else {
+ unreachable;
+ }
+ }
+ return null;
+}
+
+const testNumber = struct {
+ node: Node,
+ value: usize,
+};
+
+fn testGetNumber(node: *Node) *testNumber {
+ return @fieldParentPtr(testNumber, "node", node);
+}
+
+fn testCompare(l: *Node, r: *Node) mem.Compare {
+ var left = testGetNumber(l);
+ var right = testGetNumber(r);
+
+ if (left.value < right.value) {
+ return mem.Compare.LessThan;
+ } else if (left.value == right.value) {
+ return mem.Compare.Equal;
+ } else if (left.value > right.value) {
+ return mem.Compare.GreaterThan;
+ }
+ unreachable;
+}
+
+test "rb" {
+ var tree: Tree = undefined;
+ var ns: [10]testNumber = undefined;
+ ns[0].value = 42;
+ ns[1].value = 41;
+ ns[2].value = 40;
+ ns[3].value = 39;
+ ns[4].value = 38;
+ ns[5].value = 39;
+ ns[6].value = 3453;
+ ns[7].value = 32345;
+ ns[8].value = 392345;
+ ns[9].value = 4;
+
+ var dup: testNumber = undefined;
+ dup.value = 32345;
+
+ tree.init(testCompare);
+ _ = tree.insert(&ns[1].node);
+ _ = tree.insert(&ns[2].node);
+ _ = tree.insert(&ns[3].node);
+ _ = tree.insert(&ns[4].node);
+ _ = tree.insert(&ns[5].node);
+ _ = tree.insert(&ns[6].node);
+ _ = tree.insert(&ns[7].node);
+ _ = tree.insert(&ns[8].node);
+ _ = tree.insert(&ns[9].node);
+ tree.remove(&ns[3].node);
+ assert(tree.insert(&dup.node) == &ns[7].node);
+ try tree.replace(&ns[7].node, &dup.node);
+
+ var num: *testNumber = undefined;
+ num = testGetNumber(tree.first().?);
+ while (num.node.next() != null) {
+ assert(testGetNumber(num.node.next().?).value > num.value);
+ num = testGetNumber(num.node.next().?);
+ }
+}
diff --git a/std/segmented_list.zig b/std/segmented_list.zig
index 6e3f32e9d6..c6d8effdd2 100644
--- a/std/segmented_list.zig
+++ b/std/segmented_list.zig
@@ -2,7 +2,7 @@ const std = @import("index.zig");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
-// Imagine that `fn at(self: &Self, index: usize) &T` is a customer asking for a box
+// Imagine that `fn at(self: *Self, index: usize) &T` is a customer asking for a box
// from a warehouse, based on a flat array, boxes ordered from 0 to N - 1.
// But the warehouse actually stores boxes in shelves of increasing powers of 2 sizes.
// So when the customer requests a box index, we have to translate it to shelf index
@@ -93,6 +93,14 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
pub const prealloc_count = prealloc_item_count;
+ fn AtType(comptime SelfType: type) type {
+ if (@typeInfo(SelfType).Pointer.is_const) {
+ return *const T;
+ } else {
+ return *T;
+ }
+ }
+
/// Deinitialize with `deinit`
pub fn init(allocator: *Allocator) Self {
return Self{
@@ -109,7 +117,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
self.* = undefined;
}
- pub fn at(self: *Self, i: usize) *T {
+ pub fn at(self: var, i: usize) AtType(@typeOf(self)) {
assert(i < self.len);
return self.uncheckedAt(i);
}
@@ -133,7 +141,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
if (self.len == 0) return null;
const index = self.len - 1;
- const result = self.uncheckedAt(index).*;
+ const result = uncheckedAt(self, index).*;
self.len = index;
return result;
}
@@ -141,7 +149,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
pub fn addOne(self: *Self) !*T {
const new_length = self.len + 1;
try self.growCapacity(new_length);
- const result = self.uncheckedAt(self.len);
+ const result = uncheckedAt(self, self.len);
self.len = new_length;
return result;
}
@@ -193,7 +201,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
self.dynamic_segments = self.allocator.shrink([*]T, self.dynamic_segments, new_cap_shelf_count);
}
- pub fn uncheckedAt(self: *Self, index: usize) *T {
+ pub fn uncheckedAt(self: var, index: usize) AtType(@typeOf(self)) {
if (index < prealloc_item_count) {
return &self.prealloc_segment[index];
}
diff --git a/std/special/bootstrap.zig b/std/special/bootstrap.zig
index 5c8a330a92..47b57c6c23 100644
--- a/std/special/bootstrap.zig
+++ b/std/special/bootstrap.zig
@@ -13,17 +13,11 @@ comptime {
@export("main", main, strong_linkage);
} else if (builtin.os == builtin.Os.windows) {
@export("WinMainCRTStartup", WinMainCRTStartup, strong_linkage);
- } else if (builtin.os == builtin.Os.zen) {
- @export("_start", zen_start, strong_linkage);
} else {
@export("_start", _start, strong_linkage);
}
}
-extern fn zen_start() noreturn {
- std.os.posix.exit(@inlineCall(callMain));
-}
-
nakedcc fn _start() noreturn {
switch (builtin.arch) {
builtin.Arch.x86_64 => {
diff --git a/std/special/build_runner.zig b/std/special/build_runner.zig
index 2f073b3e98..982c60aed8 100644
--- a/std/special/build_runner.zig
+++ b/std/special/build_runner.zig
@@ -72,10 +72,10 @@ pub fn main() !void {
if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| {
const option_name = option_contents[0..name_end];
const option_value = option_contents[name_end + 1 ..];
- if (builder.addUserInputOption(option_name, option_value))
+ if (try builder.addUserInputOption(option_name, option_value))
return usageAndErr(&builder, false, try stderr_stream);
} else {
- if (builder.addUserInputFlag(option_contents))
+ if (try builder.addUserInputFlag(option_contents))
return usageAndErr(&builder, false, try stderr_stream);
}
} else if (mem.startsWith(u8, arg, "-")) {
diff --git a/std/unicode.zig b/std/unicode.zig
index 8a9d4a9214..105c38627f 100644
--- a/std/unicode.zig
+++ b/std/unicode.zig
@@ -188,6 +188,7 @@ pub const Utf8View = struct {
return Utf8View{ .bytes = s };
}
+ /// TODO: https://github.com/ziglang/zig/issues/425
pub fn initComptime(comptime s: []const u8) Utf8View {
if (comptime init(s)) |r| {
return r;
@@ -199,7 +200,7 @@ pub const Utf8View = struct {
}
}
- pub fn iterator(s: *const Utf8View) Utf8Iterator {
+ pub fn iterator(s: Utf8View) Utf8Iterator {
return Utf8Iterator{
.bytes = s.bytes,
.i = 0,
@@ -217,7 +218,6 @@ const Utf8Iterator = struct {
}
const cp_len = utf8ByteSequenceLength(it.bytes[it.i]) catch unreachable;
-
it.i += cp_len;
return it.bytes[it.i - cp_len .. it.i];
}
@@ -235,6 +235,38 @@ const Utf8Iterator = struct {
}
};
+pub const Utf16LeIterator = struct {
+ bytes: []const u8,
+ i: usize,
+
+ pub fn init(s: []const u16) Utf16LeIterator {
+ return Utf16LeIterator{
+ .bytes = @sliceToBytes(s),
+ .i = 0,
+ };
+ }
+
+ pub fn nextCodepoint(it: *Utf16LeIterator) !?u32 {
+ assert(it.i <= it.bytes.len);
+ if (it.i == it.bytes.len) return null;
+ const c0: u32 = mem.readIntLE(u16, it.bytes[it.i .. it.i + 2]);
+ if (c0 & ~u32(0x03ff) == 0xd800) {
+ // surrogate pair
+ it.i += 2;
+ if (it.i >= it.bytes.len) return error.DanglingSurrogateHalf;
+ const c1: u32 = mem.readIntLE(u16, it.bytes[it.i .. it.i + 2]);
+ if (c1 & ~u32(0x03ff) != 0xdc00) return error.ExpectedSecondSurrogateHalf;
+ it.i += 2;
+ return 0x10000 + (((c0 & 0x03ff) << 10) | (c1 & 0x03ff));
+ } else if (c0 & ~u32(0x03ff) == 0xdc00) {
+ return error.UnexpectedSecondSurrogateHalf;
+ } else {
+ it.i += 2;
+ return c0;
+ }
+ }
+};
+
test "utf8 encode" {
comptime testUtf8Encode() catch unreachable;
try testUtf8Encode();
@@ -445,42 +477,34 @@ fn testDecode(bytes: []const u8) !u32 {
return utf8Decode(bytes);
}
-// TODO: make this API on top of a non-allocating Utf16LeView
-pub fn utf16leToUtf8(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 {
+/// Caller must free returned memory.
+pub fn utf16leToUtf8Alloc(allocator: *mem.Allocator, utf16le: []const u16) ![]u8 {
var result = std.ArrayList(u8).init(allocator);
// optimistically guess that it will all be ascii.
try result.ensureCapacity(utf16le.len);
-
- const utf16le_as_bytes = @sliceToBytes(utf16le);
- var i: usize = 0;
var out_index: usize = 0;
- while (i < utf16le_as_bytes.len) : (i += 2) {
- // decode
- const c0: u32 = mem.readIntLE(u16, utf16le_as_bytes[i..i + 2]);
- var codepoint: u32 = undefined;
- if (c0 & ~u32(0x03ff) == 0xd800) {
- // surrogate pair
- i += 2;
- if (i >= utf16le_as_bytes.len) return error.DanglingSurrogateHalf;
- const c1: u32 = mem.readIntLE(u16, utf16le_as_bytes[i..i + 2]);
- if (c1 & ~u32(0x03ff) != 0xdc00) return error.ExpectedSecondSurrogateHalf;
- codepoint = 0x10000 + (((c0 & 0x03ff) << 10) | (c1 & 0x03ff));
- } else if (c0 & ~u32(0x03ff) == 0xdc00) {
- return error.UnexpectedSecondSurrogateHalf;
- } else {
- codepoint = c0;
- }
-
- // encode
+ var it = Utf16LeIterator.init(utf16le);
+ while (try it.nextCodepoint()) |codepoint| {
const utf8_len = utf8CodepointSequenceLength(codepoint) catch unreachable;
try result.resize(result.len + utf8_len);
- _ = utf8Encode(codepoint, result.items[out_index..]) catch unreachable;
+ assert((utf8Encode(codepoint, result.items[out_index..]) catch unreachable) == utf8_len);
out_index += utf8_len;
}
return result.toOwnedSlice();
}
+/// Asserts that the output buffer is big enough.
+/// Returns end byte index into utf8.
+pub fn utf16leToUtf8(utf8: []u8, utf16le: []const u16) !usize {
+ var end_index: usize = 0;
+ var it = Utf16LeIterator.init(utf16le);
+ while (try it.nextCodepoint()) |codepoint| {
+ end_index += try utf8Encode(codepoint, utf8[end_index..]);
+ }
+ return end_index;
+}
+
test "utf16leToUtf8" {
var utf16le: [2]u16 = undefined;
const utf16le_as_bytes = @sliceToBytes(utf16le[0..]);
@@ -488,14 +512,14 @@ test "utf16leToUtf8" {
{
mem.writeInt(utf16le_as_bytes[0..], u16('A'), builtin.Endian.Little);
mem.writeInt(utf16le_as_bytes[2..], u16('a'), builtin.Endian.Little);
- const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le);
+ const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, utf16le);
assert(mem.eql(u8, utf8, "Aa"));
}
{
mem.writeInt(utf16le_as_bytes[0..], u16(0x80), builtin.Endian.Little);
mem.writeInt(utf16le_as_bytes[2..], u16(0xffff), builtin.Endian.Little);
- const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le);
+ const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, utf16le);
assert(mem.eql(u8, utf8, "\xc2\x80" ++ "\xef\xbf\xbf"));
}
@@ -503,7 +527,7 @@ test "utf16leToUtf8" {
// the values just outside the surrogate half range
mem.writeInt(utf16le_as_bytes[0..], u16(0xd7ff), builtin.Endian.Little);
mem.writeInt(utf16le_as_bytes[2..], u16(0xe000), builtin.Endian.Little);
- const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le);
+ const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, utf16le);
assert(mem.eql(u8, utf8, "\xed\x9f\xbf" ++ "\xee\x80\x80"));
}
@@ -511,7 +535,7 @@ test "utf16leToUtf8" {
// smallest surrogate pair
mem.writeInt(utf16le_as_bytes[0..], u16(0xd800), builtin.Endian.Little);
mem.writeInt(utf16le_as_bytes[2..], u16(0xdc00), builtin.Endian.Little);
- const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le);
+ const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, utf16le);
assert(mem.eql(u8, utf8, "\xf0\x90\x80\x80"));
}
@@ -519,14 +543,48 @@ test "utf16leToUtf8" {
// largest surrogate pair
mem.writeInt(utf16le_as_bytes[0..], u16(0xdbff), builtin.Endian.Little);
mem.writeInt(utf16le_as_bytes[2..], u16(0xdfff), builtin.Endian.Little);
- const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le);
+ const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, utf16le);
assert(mem.eql(u8, utf8, "\xf4\x8f\xbf\xbf"));
}
{
mem.writeInt(utf16le_as_bytes[0..], u16(0xdbff), builtin.Endian.Little);
mem.writeInt(utf16le_as_bytes[2..], u16(0xdc00), builtin.Endian.Little);
- const utf8 = try utf16leToUtf8(std.debug.global_allocator, utf16le);
+ const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, utf16le);
assert(mem.eql(u8, utf8, "\xf4\x8f\xb0\x80"));
}
}
+
+/// TODO support codepoints bigger than 16 bits
+/// TODO type for null terminated pointer
+pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![]u16 {
+ var result = std.ArrayList(u16).init(allocator);
+ // optimistically guess that it will not require surrogate pairs
+ try result.ensureCapacity(utf8.len + 1);
+
+ const view = try Utf8View.init(utf8);
+ var it = view.iterator();
+ while (it.nextCodepoint()) |codepoint| {
+ try result.append(@intCast(u16, codepoint)); // TODO surrogate pairs
+ }
+
+ try result.append(0);
+ return result.toOwnedSlice();
+}
+
+/// Returns index of next character. If exact fit, returned index equals output slice length.
+/// If ran out of room, returned index equals output slice length + 1.
+/// TODO support codepoints bigger than 16 bits
+pub fn utf8ToUtf16Le(utf16le: []u16, utf8: []const u8) !usize {
+ const utf16le_as_bytes = @sliceToBytes(utf16le[0..]);
+ var end_index: usize = 0;
+
+ var it = (try Utf8View.init(utf8)).iterator();
+ while (it.nextCodepoint()) |codepoint| {
+ if (end_index == utf16le_as_bytes.len) return (end_index / 2) + 1;
+ // TODO surrogate pairs
+ mem.writeInt(utf16le_as_bytes[end_index..], @intCast(u16, codepoint), builtin.Endian.Little);
+ end_index += 2;
+ }
+ return end_index / 2;
+}
diff --git a/std/zig/ast.zig b/std/zig/ast.zig
index 95e899fb92..0046dff1a2 100644
--- a/std/zig/ast.zig
+++ b/std/zig/ast.zig
@@ -32,6 +32,12 @@ pub const Tree = struct {
return self.source[token.start..token.end];
}
+ pub fn getNodeSource(self: *const Tree, node: *const Node) []const u8 {
+ const first_token = self.tokens.at(node.firstToken());
+ const last_token = self.tokens.at(node.lastToken());
+ return self.source[first_token.start..last_token.end];
+ }
+
pub const Location = struct {
line: usize,
column: usize,
@@ -338,7 +344,7 @@ pub const Node = struct {
unreachable;
}
- pub fn firstToken(base: *Node) TokenIndex {
+ pub fn firstToken(base: *const Node) TokenIndex {
comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) {
@@ -349,7 +355,7 @@ pub const Node = struct {
unreachable;
}
- pub fn lastToken(base: *Node) TokenIndex {
+ pub fn lastToken(base: *const Node) TokenIndex {
comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) {
@@ -473,11 +479,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *Root) TokenIndex {
+ pub fn firstToken(self: *const Root) TokenIndex {
return if (self.decls.len == 0) self.eof_token else (self.decls.at(0).*).firstToken();
}
- pub fn lastToken(self: *Root) TokenIndex {
+ pub fn lastToken(self: *const Root) TokenIndex {
return if (self.decls.len == 0) self.eof_token else (self.decls.at(self.decls.len - 1).*).lastToken();
}
};
@@ -518,7 +524,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *VarDecl) TokenIndex {
+ pub fn firstToken(self: *const VarDecl) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
if (self.comptime_token) |comptime_token| return comptime_token;
if (self.extern_export_token) |extern_export_token| return extern_export_token;
@@ -526,7 +532,7 @@ pub const Node = struct {
return self.mut_token;
}
- pub fn lastToken(self: *VarDecl) TokenIndex {
+ pub fn lastToken(self: *const VarDecl) TokenIndex {
return self.semicolon_token;
}
};
@@ -548,12 +554,12 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *Use) TokenIndex {
+ pub fn firstToken(self: *const Use) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
return self.use_token;
}
- pub fn lastToken(self: *Use) TokenIndex {
+ pub fn lastToken(self: *const Use) TokenIndex {
return self.semicolon_token;
}
};
@@ -575,11 +581,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *ErrorSetDecl) TokenIndex {
+ pub fn firstToken(self: *const ErrorSetDecl) TokenIndex {
return self.error_token;
}
- pub fn lastToken(self: *ErrorSetDecl) TokenIndex {
+ pub fn lastToken(self: *const ErrorSetDecl) TokenIndex {
return self.rbrace_token;
}
};
@@ -618,14 +624,14 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *ContainerDecl) TokenIndex {
+ pub fn firstToken(self: *const ContainerDecl) TokenIndex {
if (self.layout_token) |layout_token| {
return layout_token;
}
return self.kind_token;
}
- pub fn lastToken(self: *ContainerDecl) TokenIndex {
+ pub fn lastToken(self: *const ContainerDecl) TokenIndex {
return self.rbrace_token;
}
};
@@ -646,12 +652,12 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *StructField) TokenIndex {
+ pub fn firstToken(self: *const StructField) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
return self.name_token;
}
- pub fn lastToken(self: *StructField) TokenIndex {
+ pub fn lastToken(self: *const StructField) TokenIndex {
return self.type_expr.lastToken();
}
};
@@ -679,11 +685,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *UnionTag) TokenIndex {
+ pub fn firstToken(self: *const UnionTag) TokenIndex {
return self.name_token;
}
- pub fn lastToken(self: *UnionTag) TokenIndex {
+ pub fn lastToken(self: *const UnionTag) TokenIndex {
if (self.value_expr) |value_expr| {
return value_expr.lastToken();
}
@@ -712,11 +718,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *EnumTag) TokenIndex {
+ pub fn firstToken(self: *const EnumTag) TokenIndex {
return self.name_token;
}
- pub fn lastToken(self: *EnumTag) TokenIndex {
+ pub fn lastToken(self: *const EnumTag) TokenIndex {
if (self.value) |value| {
return value.lastToken();
}
@@ -741,11 +747,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *ErrorTag) TokenIndex {
+ pub fn firstToken(self: *const ErrorTag) TokenIndex {
return self.name_token;
}
- pub fn lastToken(self: *ErrorTag) TokenIndex {
+ pub fn lastToken(self: *const ErrorTag) TokenIndex {
return self.name_token;
}
};
@@ -758,11 +764,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *Identifier) TokenIndex {
+ pub fn firstToken(self: *const Identifier) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *Identifier) TokenIndex {
+ pub fn lastToken(self: *const Identifier) TokenIndex {
return self.token;
}
};
@@ -784,11 +790,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *AsyncAttribute) TokenIndex {
+ pub fn firstToken(self: *const AsyncAttribute) TokenIndex {
return self.async_token;
}
- pub fn lastToken(self: *AsyncAttribute) TokenIndex {
+ pub fn lastToken(self: *const AsyncAttribute) TokenIndex {
if (self.rangle_bracket) |rangle_bracket| {
return rangle_bracket;
}
@@ -856,7 +862,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *FnProto) TokenIndex {
+ pub fn firstToken(self: *const FnProto) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
if (self.async_attr) |async_attr| return async_attr.firstToken();
if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token;
@@ -865,7 +871,7 @@ pub const Node = struct {
return self.fn_token;
}
- pub fn lastToken(self: *FnProto) TokenIndex {
+ pub fn lastToken(self: *const FnProto) TokenIndex {
if (self.body_node) |body_node| return body_node.lastToken();
switch (self.return_type) {
// TODO allow this and next prong to share bodies since the types are the same
@@ -896,11 +902,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *PromiseType) TokenIndex {
+ pub fn firstToken(self: *const PromiseType) TokenIndex {
return self.promise_token;
}
- pub fn lastToken(self: *PromiseType) TokenIndex {
+ pub fn lastToken(self: *const PromiseType) TokenIndex {
if (self.result) |result| return result.return_type.lastToken();
return self.promise_token;
}
@@ -923,14 +929,14 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *ParamDecl) TokenIndex {
+ pub fn firstToken(self: *const ParamDecl) TokenIndex {
if (self.comptime_token) |comptime_token| return comptime_token;
if (self.noalias_token) |noalias_token| return noalias_token;
if (self.name_token) |name_token| return name_token;
return self.type_node.firstToken();
}
- pub fn lastToken(self: *ParamDecl) TokenIndex {
+ pub fn lastToken(self: *const ParamDecl) TokenIndex {
if (self.var_args_token) |var_args_token| return var_args_token;
return self.type_node.lastToken();
}
@@ -954,7 +960,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *Block) TokenIndex {
+ pub fn firstToken(self: *const Block) TokenIndex {
if (self.label) |label| {
return label;
}
@@ -962,7 +968,7 @@ pub const Node = struct {
return self.lbrace;
}
- pub fn lastToken(self: *Block) TokenIndex {
+ pub fn lastToken(self: *const Block) TokenIndex {
return self.rbrace;
}
};
@@ -981,11 +987,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *Defer) TokenIndex {
+ pub fn firstToken(self: *const Defer) TokenIndex {
return self.defer_token;
}
- pub fn lastToken(self: *Defer) TokenIndex {
+ pub fn lastToken(self: *const Defer) TokenIndex {
return self.expr.lastToken();
}
};
@@ -1005,11 +1011,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *Comptime) TokenIndex {
+ pub fn firstToken(self: *const Comptime) TokenIndex {
return self.comptime_token;
}
- pub fn lastToken(self: *Comptime) TokenIndex {
+ pub fn lastToken(self: *const Comptime) TokenIndex {
return self.expr.lastToken();
}
};
@@ -1029,11 +1035,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *Payload) TokenIndex {
+ pub fn firstToken(self: *const Payload) TokenIndex {
return self.lpipe;
}
- pub fn lastToken(self: *Payload) TokenIndex {
+ pub fn lastToken(self: *const Payload) TokenIndex {
return self.rpipe;
}
};
@@ -1054,11 +1060,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *PointerPayload) TokenIndex {
+ pub fn firstToken(self: *const PointerPayload) TokenIndex {
return self.lpipe;
}
- pub fn lastToken(self: *PointerPayload) TokenIndex {
+ pub fn lastToken(self: *const PointerPayload) TokenIndex {
return self.rpipe;
}
};
@@ -1085,11 +1091,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *PointerIndexPayload) TokenIndex {
+ pub fn firstToken(self: *const PointerIndexPayload) TokenIndex {
return self.lpipe;
}
- pub fn lastToken(self: *PointerIndexPayload) TokenIndex {
+ pub fn lastToken(self: *const PointerIndexPayload) TokenIndex {
return self.rpipe;
}
};
@@ -1114,11 +1120,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *Else) TokenIndex {
+ pub fn firstToken(self: *const Else) TokenIndex {
return self.else_token;
}
- pub fn lastToken(self: *Else) TokenIndex {
+ pub fn lastToken(self: *const Else) TokenIndex {
return self.body.lastToken();
}
};
@@ -1146,11 +1152,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *Switch) TokenIndex {
+ pub fn firstToken(self: *const Switch) TokenIndex {
return self.switch_token;
}
- pub fn lastToken(self: *Switch) TokenIndex {
+ pub fn lastToken(self: *const Switch) TokenIndex {
return self.rbrace;
}
};
@@ -1181,11 +1187,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *SwitchCase) TokenIndex {
+ pub fn firstToken(self: *const SwitchCase) TokenIndex {
return (self.items.at(0).*).firstToken();
}
- pub fn lastToken(self: *SwitchCase) TokenIndex {
+ pub fn lastToken(self: *const SwitchCase) TokenIndex {
return self.expr.lastToken();
}
};
@@ -1198,11 +1204,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *SwitchElse) TokenIndex {
+ pub fn firstToken(self: *const SwitchElse) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *SwitchElse) TokenIndex {
+ pub fn lastToken(self: *const SwitchElse) TokenIndex {
return self.token;
}
};
@@ -1245,7 +1251,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *While) TokenIndex {
+ pub fn firstToken(self: *const While) TokenIndex {
if (self.label) |label| {
return label;
}
@@ -1257,7 +1263,7 @@ pub const Node = struct {
return self.while_token;
}
- pub fn lastToken(self: *While) TokenIndex {
+ pub fn lastToken(self: *const While) TokenIndex {
if (self.@"else") |@"else"| {
return @"else".body.lastToken();
}
@@ -1298,7 +1304,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *For) TokenIndex {
+ pub fn firstToken(self: *const For) TokenIndex {
if (self.label) |label| {
return label;
}
@@ -1310,7 +1316,7 @@ pub const Node = struct {
return self.for_token;
}
- pub fn lastToken(self: *For) TokenIndex {
+ pub fn lastToken(self: *const For) TokenIndex {
if (self.@"else") |@"else"| {
return @"else".body.lastToken();
}
@@ -1349,11 +1355,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *If) TokenIndex {
+ pub fn firstToken(self: *const If) TokenIndex {
return self.if_token;
}
- pub fn lastToken(self: *If) TokenIndex {
+ pub fn lastToken(self: *const If) TokenIndex {
if (self.@"else") |@"else"| {
return @"else".body.lastToken();
}
@@ -1480,11 +1486,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *InfixOp) TokenIndex {
+ pub fn firstToken(self: *const InfixOp) TokenIndex {
return self.lhs.firstToken();
}
- pub fn lastToken(self: *InfixOp) TokenIndex {
+ pub fn lastToken(self: *const InfixOp) TokenIndex {
return self.rhs.lastToken();
}
};
@@ -1570,11 +1576,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *PrefixOp) TokenIndex {
+ pub fn firstToken(self: *const PrefixOp) TokenIndex {
return self.op_token;
}
- pub fn lastToken(self: *PrefixOp) TokenIndex {
+ pub fn lastToken(self: *const PrefixOp) TokenIndex {
return self.rhs.lastToken();
}
};
@@ -1594,11 +1600,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *FieldInitializer) TokenIndex {
+ pub fn firstToken(self: *const FieldInitializer) TokenIndex {
return self.period_token;
}
- pub fn lastToken(self: *FieldInitializer) TokenIndex {
+ pub fn lastToken(self: *const FieldInitializer) TokenIndex {
return self.expr.lastToken();
}
};
@@ -1673,7 +1679,7 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *SuffixOp) TokenIndex {
+ pub fn firstToken(self: *const SuffixOp) TokenIndex {
switch (self.op) {
@TagType(Op).Call => |*call_info| if (call_info.async_attr) |async_attr| return async_attr.firstToken(),
else => {},
@@ -1681,7 +1687,7 @@ pub const Node = struct {
return self.lhs.firstToken();
}
- pub fn lastToken(self: *SuffixOp) TokenIndex {
+ pub fn lastToken(self: *const SuffixOp) TokenIndex {
return self.rtoken;
}
};
@@ -1701,11 +1707,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *GroupedExpression) TokenIndex {
+ pub fn firstToken(self: *const GroupedExpression) TokenIndex {
return self.lparen;
}
- pub fn lastToken(self: *GroupedExpression) TokenIndex {
+ pub fn lastToken(self: *const GroupedExpression) TokenIndex {
return self.rparen;
}
};
@@ -1749,11 +1755,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *ControlFlowExpression) TokenIndex {
+ pub fn firstToken(self: *const ControlFlowExpression) TokenIndex {
return self.ltoken;
}
- pub fn lastToken(self: *ControlFlowExpression) TokenIndex {
+ pub fn lastToken(self: *const ControlFlowExpression) TokenIndex {
if (self.rhs) |rhs| {
return rhs.lastToken();
}
@@ -1792,11 +1798,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *Suspend) TokenIndex {
+ pub fn firstToken(self: *const Suspend) TokenIndex {
return self.suspend_token;
}
- pub fn lastToken(self: *Suspend) TokenIndex {
+ pub fn lastToken(self: *const Suspend) TokenIndex {
if (self.body) |body| {
return body.lastToken();
}
@@ -1813,11 +1819,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *IntegerLiteral) TokenIndex {
+ pub fn firstToken(self: *const IntegerLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *IntegerLiteral) TokenIndex {
+ pub fn lastToken(self: *const IntegerLiteral) TokenIndex {
return self.token;
}
};
@@ -1830,11 +1836,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *FloatLiteral) TokenIndex {
+ pub fn firstToken(self: *const FloatLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *FloatLiteral) TokenIndex {
+ pub fn lastToken(self: *const FloatLiteral) TokenIndex {
return self.token;
}
};
@@ -1856,11 +1862,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *BuiltinCall) TokenIndex {
+ pub fn firstToken(self: *const BuiltinCall) TokenIndex {
return self.builtin_token;
}
- pub fn lastToken(self: *BuiltinCall) TokenIndex {
+ pub fn lastToken(self: *const BuiltinCall) TokenIndex {
return self.rparen_token;
}
};
@@ -1873,11 +1879,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *StringLiteral) TokenIndex {
+ pub fn firstToken(self: *const StringLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *StringLiteral) TokenIndex {
+ pub fn lastToken(self: *const StringLiteral) TokenIndex {
return self.token;
}
};
@@ -1892,11 +1898,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *MultilineStringLiteral) TokenIndex {
+ pub fn firstToken(self: *const MultilineStringLiteral) TokenIndex {
return self.lines.at(0).*;
}
- pub fn lastToken(self: *MultilineStringLiteral) TokenIndex {
+ pub fn lastToken(self: *const MultilineStringLiteral) TokenIndex {
return self.lines.at(self.lines.len - 1).*;
}
};
@@ -1909,11 +1915,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *CharLiteral) TokenIndex {
+ pub fn firstToken(self: *const CharLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *CharLiteral) TokenIndex {
+ pub fn lastToken(self: *const CharLiteral) TokenIndex {
return self.token;
}
};
@@ -1926,11 +1932,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *BoolLiteral) TokenIndex {
+ pub fn firstToken(self: *const BoolLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *BoolLiteral) TokenIndex {
+ pub fn lastToken(self: *const BoolLiteral) TokenIndex {
return self.token;
}
};
@@ -1943,11 +1949,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *NullLiteral) TokenIndex {
+ pub fn firstToken(self: *const NullLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *NullLiteral) TokenIndex {
+ pub fn lastToken(self: *const NullLiteral) TokenIndex {
return self.token;
}
};
@@ -1960,11 +1966,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *UndefinedLiteral) TokenIndex {
+ pub fn firstToken(self: *const UndefinedLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *UndefinedLiteral) TokenIndex {
+ pub fn lastToken(self: *const UndefinedLiteral) TokenIndex {
return self.token;
}
};
@@ -1977,11 +1983,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *ThisLiteral) TokenIndex {
+ pub fn firstToken(self: *const ThisLiteral) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *ThisLiteral) TokenIndex {
+ pub fn lastToken(self: *const ThisLiteral) TokenIndex {
return self.token;
}
};
@@ -2022,11 +2028,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *AsmOutput) TokenIndex {
+ pub fn firstToken(self: *const AsmOutput) TokenIndex {
return self.lbracket;
}
- pub fn lastToken(self: *AsmOutput) TokenIndex {
+ pub fn lastToken(self: *const AsmOutput) TokenIndex {
return self.rparen;
}
};
@@ -2054,11 +2060,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *AsmInput) TokenIndex {
+ pub fn firstToken(self: *const AsmInput) TokenIndex {
return self.lbracket;
}
- pub fn lastToken(self: *AsmInput) TokenIndex {
+ pub fn lastToken(self: *const AsmInput) TokenIndex {
return self.rparen;
}
};
@@ -2089,11 +2095,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *Asm) TokenIndex {
+ pub fn firstToken(self: *const Asm) TokenIndex {
return self.asm_token;
}
- pub fn lastToken(self: *Asm) TokenIndex {
+ pub fn lastToken(self: *const Asm) TokenIndex {
return self.rparen;
}
};
@@ -2106,11 +2112,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *Unreachable) TokenIndex {
+ pub fn firstToken(self: *const Unreachable) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *Unreachable) TokenIndex {
+ pub fn lastToken(self: *const Unreachable) TokenIndex {
return self.token;
}
};
@@ -2123,11 +2129,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *ErrorType) TokenIndex {
+ pub fn firstToken(self: *const ErrorType) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *ErrorType) TokenIndex {
+ pub fn lastToken(self: *const ErrorType) TokenIndex {
return self.token;
}
};
@@ -2140,11 +2146,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *VarType) TokenIndex {
+ pub fn firstToken(self: *const VarType) TokenIndex {
return self.token;
}
- pub fn lastToken(self: *VarType) TokenIndex {
+ pub fn lastToken(self: *const VarType) TokenIndex {
return self.token;
}
};
@@ -2159,11 +2165,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *DocComment) TokenIndex {
+ pub fn firstToken(self: *const DocComment) TokenIndex {
return self.lines.at(0).*;
}
- pub fn lastToken(self: *DocComment) TokenIndex {
+ pub fn lastToken(self: *const DocComment) TokenIndex {
return self.lines.at(self.lines.len - 1).*;
}
};
@@ -2184,11 +2190,11 @@ pub const Node = struct {
return null;
}
- pub fn firstToken(self: *TestDecl) TokenIndex {
+ pub fn firstToken(self: *const TestDecl) TokenIndex {
return self.test_token;
}
- pub fn lastToken(self: *TestDecl) TokenIndex {
+ pub fn lastToken(self: *const TestDecl) TokenIndex {
return self.body_node.lastToken();
}
};
diff --git a/test/behavior.zig b/test/behavior.zig
index e993d7e0dc..5a26e206bf 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -10,6 +10,7 @@ comptime {
_ = @import("cases/bool.zig");
_ = @import("cases/bugs/1111.zig");
_ = @import("cases/bugs/1230.zig");
+ _ = @import("cases/bugs/1277.zig");
_ = @import("cases/bugs/394.zig");
_ = @import("cases/bugs/655.zig");
_ = @import("cases/bugs/656.zig");
diff --git a/test/cases/bugs/1277.zig b/test/cases/bugs/1277.zig
new file mode 100644
index 0000000000..a83e7653e2
--- /dev/null
+++ b/test/cases/bugs/1277.zig
@@ -0,0 +1,15 @@
+const std = @import("std");
+
+const S = struct {
+ f: ?fn () i32,
+};
+
+const s = S{ .f = f };
+
+fn f() i32 {
+ return 1234;
+}
+
+test "don't emit an LLVM global for a const function when it's in an optional in a struct" {
+ std.debug.assertOrPanic(s.f.?() == 1234);
+}
diff --git a/test/cases/cast.zig b/test/cases/cast.zig
index 63cc6313e1..df37bd1dd9 100644
--- a/test/cases/cast.zig
+++ b/test/cases/cast.zig
@@ -485,3 +485,14 @@ fn MakeType(comptime T: type) type {
}
};
}
+
+test "implicit cast from *[N]T to ?[*]T" {
+ var x: ?[*]u16 = null;
+ var y: [4]u16 = [4]u16 {0, 1, 2, 3};
+
+ x = &y;
+ assert(std.mem.eql(u16, x.?[0..4], y[0..4]));
+ x.?[0] = 8;
+ y[3] = 6;
+ assert(std.mem.eql(u16, x.?[0..4], y[0..4]));
+}
\ No newline at end of file
diff --git a/test/cases/merge_error_sets.zig b/test/cases/merge_error_sets.zig
index 189bd16a4d..147b580232 100644
--- a/test/cases/merge_error_sets.zig
+++ b/test/cases/merge_error_sets.zig
@@ -1,5 +1,5 @@
const A = error{
- PathNotFound,
+ FileNotFound,
NotDir,
};
const B = error{OutOfMemory};
@@ -15,7 +15,7 @@ test "merge error sets" {
@panic("unexpected");
} else |err| switch (err) {
error.OutOfMemory => @panic("unexpected"),
- error.PathNotFound => @panic("unexpected"),
+ error.FileNotFound => @panic("unexpected"),
error.NotDir => {},
}
}
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 417171d2c2..b31e515aa2 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -1,6 +1,51 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.TranslateCContext) void {
+ cases.add("for loop with var init but empty body",
+ \\void foo(void) {
+ \\ for (int x = 0; x < 10; x++);
+ \\}
+ ,
+ \\pub fn foo() void {
+ \\ {
+ \\ var x: c_int = 0;
+ \\ while (x < 10) : (x += 1) {}
+ \\ }
+ \\}
+ );
+
+ cases.add("do while with empty body",
+ \\void foo(void) {
+ \\ do ; while (1);
+ \\}
+ , // TODO this should be if (1 != 0) break
+ \\pub fn foo() void {
+ \\ while (true) {
+ \\ if (!1) break;
+ \\ }
+ \\}
+ );
+
+ cases.add("for with empty body",
+ \\void foo(void) {
+ \\ for (;;);
+ \\}
+ ,
+ \\pub fn foo() void {
+ \\ while (true) {}
+ \\}
+ );
+
+ cases.add("while with empty body",
+ \\void foo(void) {
+ \\ while (1);
+ \\}
+ ,
+ \\pub fn foo() void {
+ \\ while (1 != 0) {}
+ \\}
+ );
+
cases.add("double define struct",
\\typedef struct Bar Bar;
\\typedef struct Foo Foo;
|