Merge pull request #1294 from ziglang/async-fs

introduce std.event.fs for async file system functions
Andrew Kelley, 2018-08-10 15:51:17 -04:00 (committed by GitHub)
commit c4b9466da7
46 changed files with 4055 additions and 1061 deletions
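
The std.event.fs API introduced here is exercised by Compilation in the diff below: it creates an fs.Watch, registers the root source file with addFile, then blocks on the watch's channel and re-reads changed files with fs.readFile. A minimal sketch of that watch-and-reread pattern follows, using only calls that appear in this diff; the helper name watchAndReload and the []const u8 payload type are hypothetical, the allocator choice is an assumption, and the async<>/await syntax is the 2018-era design used throughout this PR:

const std = @import("std");
const event = std.event;
const fs = event.fs;

const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB, the same cap the compiler uses below

// Hypothetical helper mirroring Compilation.mainLoop: watch one file and
// re-read it every time the OS reports a change.
async fn watchAndReload(loop: *event.Loop, realpath: []const u8) !void {
    // A Watch(V) multiplexes file-change events into an event.Channel
    // with the given buffer size; V is the per-file payload type.
    var watch = try fs.Watch([]const u8).create(loop, 16);
    defer watch.destroy();

    // Register the path with its payload; a non-null result would be the
    // payload previously stored for this path.
    _ = try await (async watch.addFile(realpath, realpath) catch unreachable);

    while (true) {
        // Block until the next change event; ev.data is the payload from addFile.
        const ev = try await (async watch.channel.get() catch unreachable);
        const source = try await (async fs.readFile(loop, ev.data, max_src_size) catch unreachable);
        defer loop.allocator.free(source); // assumes the loop's allocator owns the buffer
        // ... reparse / rebuild from source here, as rebuildFile does below ...
    }
}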

View File

@@ -460,11 +460,14 @@ set(ZIG_STD_FILES
     "empty.zig"
     "event.zig"
     "event/channel.zig"
+    "event/fs.zig"
     "event/future.zig"
     "event/group.zig"
     "event/lock.zig"
     "event/locked.zig"
     "event/loop.zig"
+    "event/rwlock.zig"
+    "event/rwlocked.zig"
     "event/tcp.zig"
     "fmt/errol/enum3.zig"
     "fmt/errol/index.zig"
@@ -553,6 +556,7 @@ set(ZIG_STD_FILES
     "math/tanh.zig"
     "math/trunc.zig"
     "mem.zig"
+    "mutex.zig"
     "net.zig"
     "os/child_process.zig"
     "os/darwin.zig"

View File

@@ -370,9 +370,9 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
                        .n = header_stack_size,
                    },
                });
-               if (try urls.put(urlized, tag_token)) |other_tag_token| {
+               if (try urls.put(urlized, tag_token)) |entry| {
                    parseError(tokenizer, tag_token, "duplicate header url: #{}", urlized) catch {};
-                   parseError(tokenizer, other_tag_token, "other tag here") catch {};
+                   parseError(tokenizer, entry.value, "other tag here") catch {};
                    return error.ParseError;
                }
                if (last_action == Action.Open) {

View File

@@ -19,8 +19,8 @@ pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code)
    var output_path = try await (async comp.createRandomOutputPath(comp.target.objFileExt()) catch unreachable);
    errdefer output_path.deinit();

-   const llvm_handle = try comp.event_loop_local.getAnyLlvmContext();
-   defer llvm_handle.release(comp.event_loop_local);
+   const llvm_handle = try comp.zig_compiler.getAnyLlvmContext();
+   defer llvm_handle.release(comp.zig_compiler);

    const context = llvm_handle.node.data;

View File

@@ -30,9 +30,12 @@ const Package = @import("package.zig").Package;
 const link = @import("link.zig").link;
 const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
 const CInt = @import("c_int.zig").CInt;
+const fs = event.fs;
+
+const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB

 /// Data that is local to the event loop.
-pub const EventLoopLocal = struct {
+pub const ZigCompiler = struct {
     loop: *event.Loop,
     llvm_handle_pool: std.atomic.Stack(llvm.ContextRef),
     lld_lock: event.Lock,
@@ -44,7 +47,7 @@ pub const EventLoopLocal = struct {
     var lazy_init_targets = std.lazyInit(void);

-    fn init(loop: *event.Loop) !EventLoopLocal {
+    fn init(loop: *event.Loop) !ZigCompiler {
         lazy_init_targets.get() orelse {
             Target.initializeAll();
             lazy_init_targets.resolve();
@@ -54,7 +57,7 @@ pub const EventLoopLocal = struct {
         try std.os.getRandomBytes(seed_bytes[0..]);
         const seed = std.mem.readInt(seed_bytes, u64, builtin.Endian.Big);

-        return EventLoopLocal{
+        return ZigCompiler{
             .loop = loop,
             .lld_lock = event.Lock.init(loop),
             .llvm_handle_pool = std.atomic.Stack(llvm.ContextRef).init(),
@@ -64,7 +67,7 @@ pub const EventLoopLocal = struct {
     }

     /// Must be called only after EventLoop.run completes.
-    fn deinit(self: *EventLoopLocal) void {
+    fn deinit(self: *ZigCompiler) void {
         self.lld_lock.deinit();
         while (self.llvm_handle_pool.pop()) |node| {
             c.LLVMContextDispose(node.data);
@@ -74,7 +77,7 @@ pub const EventLoopLocal = struct {
     /// Gets an exclusive handle on any LlvmContext.
     /// Caller must release the handle when done.
-    pub fn getAnyLlvmContext(self: *EventLoopLocal) !LlvmHandle {
+    pub fn getAnyLlvmContext(self: *ZigCompiler) !LlvmHandle {
         if (self.llvm_handle_pool.pop()) |node| return LlvmHandle{ .node = node };

         const context_ref = c.LLVMContextCreate() orelse return error.OutOfMemory;
@@ -89,24 +92,36 @@ pub const EventLoopLocal = struct {
         return LlvmHandle{ .node = node };
     }

-    pub async fn getNativeLibC(self: *EventLoopLocal) !*LibCInstallation {
+    pub async fn getNativeLibC(self: *ZigCompiler) !*LibCInstallation {
         if (await (async self.native_libc.start() catch unreachable)) |ptr| return ptr;
         try await (async self.native_libc.data.findNative(self.loop) catch unreachable);
         self.native_libc.resolve();
         return &self.native_libc.data;
     }
+
+    /// Must be called only once, ever. Sets global state.
+    pub fn setLlvmArgv(allocator: *Allocator, llvm_argv: []const []const u8) !void {
+        if (llvm_argv.len != 0) {
+            var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(allocator, [][]const []const u8{
+                [][]const u8{"zig (LLVM option parsing)"},
+                llvm_argv,
+            });
+            defer c_compatible_args.deinit();
+            c.ZigLLVMParseCommandLineOptions(llvm_argv.len + 1, c_compatible_args.ptr);
+        }
+    }
 };

 pub const LlvmHandle = struct {
     node: *std.atomic.Stack(llvm.ContextRef).Node,

-    pub fn release(self: LlvmHandle, event_loop_local: *EventLoopLocal) void {
-        event_loop_local.llvm_handle_pool.push(self.node);
+    pub fn release(self: LlvmHandle, zig_compiler: *ZigCompiler) void {
+        zig_compiler.llvm_handle_pool.push(self.node);
     }
 };

 pub const Compilation = struct {
-    event_loop_local: *EventLoopLocal,
+    zig_compiler: *ZigCompiler,
     loop: *event.Loop,
     name: Buffer,
     llvm_triple: Buffer,
@@ -134,7 +149,6 @@ pub const Compilation = struct {
     linker_rdynamic: bool,

     clang_argv: []const []const u8,
-    llvm_argv: []const []const u8,
     lib_dirs: []const []const u8,
     rpath_list: []const []const u8,
     assembly_files: []const []const u8,
@@ -214,6 +228,8 @@ pub const Compilation = struct {
     deinit_group: event.Group(void),

     destroy_handle: promise,
+    main_loop_handle: promise,
+    main_loop_future: event.Future(void),

     have_err_ret_tracing: bool,
@@ -227,6 +243,8 @@ pub const Compilation = struct {
     c_int_types: [CInt.list.len]*Type.Int,

+    fs_watch: *fs.Watch(*Scope.Root),
+
     const IntTypeTable = std.HashMap(*const Type.Int.Key, *Type.Int, Type.Int.Key.hash, Type.Int.Key.eql);
     const ArrayTypeTable = std.HashMap(*const Type.Array.Key, *Type.Array, Type.Array.Key.hash, Type.Array.Key.eql);
     const PtrTypeTable = std.HashMap(*const Type.Pointer.Key, *Type.Pointer, Type.Pointer.Key.hash, Type.Pointer.Key.eql);
@@ -282,6 +300,8 @@ pub const Compilation = struct {
         LibCMissingDynamicLinker,
         InvalidDarwinVersionString,
         UnsupportedLinkArchitecture,
+        UserResourceLimitReached,
+        InvalidUtf8,
     };

     pub const Event = union(enum) {
@@ -318,7 +338,7 @@ pub const Compilation = struct {
     };

     pub fn create(
-        event_loop_local: *EventLoopLocal,
+        zig_compiler: *ZigCompiler,
         name: []const u8,
         root_src_path: ?[]const u8,
         target: Target,
@@ -327,11 +347,45 @@
         is_static: bool,
         zig_lib_dir: []const u8,
     ) !*Compilation {
-        const loop = event_loop_local.loop;
-        const comp = try event_loop_local.loop.allocator.create(Compilation{
+        var optional_comp: ?*Compilation = null;
+        const handle = try async<zig_compiler.loop.allocator> createAsync(
+            &optional_comp,
+            zig_compiler,
+            name,
+            root_src_path,
+            target,
+            kind,
+            build_mode,
+            is_static,
+            zig_lib_dir,
+        );
+        return optional_comp orelse if (getAwaitResult(
+            zig_compiler.loop.allocator,
+            handle,
+        )) |_| unreachable else |err| err;
+    }
+
+    async fn createAsync(
+        out_comp: *?*Compilation,
+        zig_compiler: *ZigCompiler,
+        name: []const u8,
+        root_src_path: ?[]const u8,
+        target: Target,
+        kind: Kind,
+        build_mode: builtin.Mode,
+        is_static: bool,
+        zig_lib_dir: []const u8,
+    ) !void {
+        // workaround for https://github.com/ziglang/zig/issues/1194
+        suspend {
+            resume @handle();
+        }
+
+        const loop = zig_compiler.loop;
+        var comp = Compilation{
             .loop = loop,
             .arena_allocator = std.heap.ArenaAllocator.init(loop.allocator),
-            .event_loop_local = event_loop_local,
+            .zig_compiler = zig_compiler,
             .events = undefined,
             .root_src_path = root_src_path,
             .target = target,
@@ -341,6 +395,9 @@ pub const Compilation = struct {
             .zig_lib_dir = zig_lib_dir,
             .zig_std_dir = undefined,
             .tmp_dir = event.Future(BuildError![]u8).init(loop),
+            .destroy_handle = @handle(),
+            .main_loop_handle = undefined,
+            .main_loop_future = event.Future(void).init(loop),

             .name = undefined,
             .llvm_triple = undefined,
@@ -365,7 +422,6 @@ pub const Compilation = struct {
             .is_static = is_static,
             .linker_rdynamic = false,
             .clang_argv = [][]const u8{},
-            .llvm_argv = [][]const u8{},
             .lib_dirs = [][]const u8{},
             .rpath_list = [][]const u8{},
             .assembly_files = [][]const u8{},
@@ -412,25 +468,26 @@ pub const Compilation = struct {
             .std_package = undefined,
             .override_libc = null,
-            .destroy_handle = undefined,
             .have_err_ret_tracing = false,
             .primitive_type_table = undefined,
-        });
-        errdefer {
+            .fs_watch = undefined,
+        };
+        comp.link_libs_list = ArrayList(*LinkLib).init(comp.arena());
+        comp.primitive_type_table = TypeTable.init(comp.arena());
+
+        defer {
             comp.int_type_table.private_data.deinit();
             comp.array_type_table.private_data.deinit();
             comp.ptr_type_table.private_data.deinit();
             comp.fn_type_table.private_data.deinit();
             comp.arena_allocator.deinit();
-            comp.loop.allocator.destroy(comp);
         }

         comp.name = try Buffer.init(comp.arena(), name);
         comp.llvm_triple = try target.getTriple(comp.arena());
         comp.llvm_target = try Target.llvmTargetFromTriple(comp.llvm_triple);
-        comp.link_libs_list = ArrayList(*LinkLib).init(comp.arena());
         comp.zig_std_dir = try std.os.path.join(comp.arena(), zig_lib_dir, "std");
-        comp.primitive_type_table = TypeTable.init(comp.arena());

         const opt_level = switch (build_mode) {
             builtin.Mode.Debug => llvm.CodeGenLevelNone,
@@ -444,8 +501,8 @@ pub const Compilation = struct {
         // As a workaround we do not use target native features on Windows.
         var target_specific_cpu_args: ?[*]u8 = null;
         var target_specific_cpu_features: ?[*]u8 = null;
-        errdefer llvm.DisposeMessage(target_specific_cpu_args);
-        errdefer llvm.DisposeMessage(target_specific_cpu_features);
+        defer llvm.DisposeMessage(target_specific_cpu_args);
+        defer llvm.DisposeMessage(target_specific_cpu_features);
         if (target == Target.Native and !target.isWindows()) {
             target_specific_cpu_args = llvm.GetHostCPUName() orelse return error.OutOfMemory;
             target_specific_cpu_features = llvm.GetNativeFeatures() orelse return error.OutOfMemory;
@@ -460,16 +517,16 @@ pub const Compilation = struct {
             reloc_mode,
             llvm.CodeModelDefault,
         ) orelse return error.OutOfMemory;
-        errdefer llvm.DisposeTargetMachine(comp.target_machine);
+        defer llvm.DisposeTargetMachine(comp.target_machine);

         comp.target_data_ref = llvm.CreateTargetDataLayout(comp.target_machine) orelse return error.OutOfMemory;
-        errdefer llvm.DisposeTargetData(comp.target_data_ref);
+        defer llvm.DisposeTargetData(comp.target_data_ref);

         comp.target_layout_str = llvm.CopyStringRepOfTargetData(comp.target_data_ref) orelse return error.OutOfMemory;
-        errdefer llvm.DisposeMessage(comp.target_layout_str);
+        defer llvm.DisposeMessage(comp.target_layout_str);

         comp.events = try event.Channel(Event).create(comp.loop, 0);
-        errdefer comp.events.destroy();
+        defer comp.events.destroy();

         if (root_src_path) |root_src| {
             const dirname = std.os.path.dirname(root_src) orelse ".";
@@ -482,11 +539,27 @@ pub const Compilation = struct {
             comp.root_package = try Package.create(comp.arena(), ".", "");
         }

+        comp.fs_watch = try fs.Watch(*Scope.Root).create(loop, 16);
+        defer comp.fs_watch.destroy();
+
         try comp.initTypes();
+        defer comp.primitive_type_table.deinit();

-        comp.destroy_handle = try async<loop.allocator> comp.internalDeinit();
+        comp.main_loop_handle = async comp.mainLoop() catch unreachable;

-        return comp;
+        // Set this to indicate that initialization completed successfully.
+        // from here on out we must not return an error.
+        // This must occur before the first suspend/await.
+        out_comp.* = &comp;
+
+        // This suspend is resumed by destroy()
+        suspend;
+
+        // From here on is cleanup.
+        await (async comp.deinit_group.wait() catch unreachable);
+
+        if (comp.tmp_dir.getOrNull()) |tmp_dir_result| if (tmp_dir_result.*) |tmp_dir| {
+            // TODO evented I/O?
+            os.deleteTree(comp.arena(), tmp_dir) catch {};
+        } else |_| {};
     }

     /// it does ref the result because it could be an arbitrary integer size
@@ -672,55 +745,28 @@ pub const Compilation = struct {
         assert((try comp.primitive_type_table.put(comp.u8_type.base.name, &comp.u8_type.base)) == null);
     }

-    /// This function can safely use async/await, because it manages Compilation's lifetime,
-    /// and EventLoopLocal.deinit will not be called until the event.Loop.run() completes.
-    async fn internalDeinit(self: *Compilation) void {
-        suspend;
-
-        await (async self.deinit_group.wait() catch unreachable);
-        if (self.tmp_dir.getOrNull()) |tmp_dir_result| if (tmp_dir_result.*) |tmp_dir| {
-            // TODO evented I/O?
-            os.deleteTree(self.arena(), tmp_dir) catch {};
-        } else |_| {};
-
-        self.events.destroy();
-
-        llvm.DisposeMessage(self.target_layout_str);
-        llvm.DisposeTargetData(self.target_data_ref);
-        llvm.DisposeTargetMachine(self.target_machine);
-
-        self.primitive_type_table.deinit();
-
-        self.arena_allocator.deinit();
-        self.gpa().destroy(self);
-    }
-
     pub fn destroy(self: *Compilation) void {
+        cancel self.main_loop_handle;
         resume self.destroy_handle;
     }

-    pub fn build(self: *Compilation) !void {
-        if (self.llvm_argv.len != 0) {
-            var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.arena(), [][]const []const u8{
-                [][]const u8{"zig (LLVM option parsing)"},
-                self.llvm_argv,
-            });
-            defer c_compatible_args.deinit();
-            // TODO this sets global state
-            c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
-        }
-
-        _ = try async<self.gpa()> self.buildAsync();
-    }
+    fn start(self: *Compilation) void {
+        self.main_loop_future.resolve();
+    }

-    async fn buildAsync(self: *Compilation) void {
+    async fn mainLoop(self: *Compilation) void {
+        // wait until start() is called
+        _ = await (async self.main_loop_future.get() catch unreachable);
+
+        var build_result = await (async self.initialCompile() catch unreachable);
+
         while (true) {
-            // TODO directly awaiting async should guarantee memory allocation elision
-            const build_result = await (async self.compileAndLink() catch unreachable);
+            const link_result = if (build_result) blk: {
+                break :blk await (async self.maybeLink() catch unreachable);
+            } else |err| err;
             // this makes a handy error return trace and stack trace in debug mode
             if (std.debug.runtime_safety) {
-                build_result catch unreachable;
+                link_result catch unreachable;
             }

             const compile_errors = blk: {
@@ -729,7 +775,7 @@ pub const Compilation = struct {
                 break :blk held.value.toOwnedSlice();
             };

-            if (build_result) |_| {
+            if (link_result) |_| {
                 if (compile_errors.len == 0) {
                     await (async self.events.put(Event.Ok) catch unreachable);
                 } else {
@@ -742,25 +788,45 @@ pub const Compilation = struct {
                 await (async self.events.put(Event{ .Error = err }) catch unreachable);
             }

-            // for now we stop after 1
-            return;
+            // First, get an item from the watch channel, waiting on the channel.
+            var group = event.Group(BuildError!void).init(self.loop);
+            {
+                const ev = (await (async self.fs_watch.channel.get() catch unreachable)) catch |err| {
+                    build_result = err;
+                    continue;
+                };
+                const root_scope = ev.data;
+                group.call(rebuildFile, self, root_scope) catch |err| {
+                    build_result = err;
+                    continue;
+                };
+            }
+            // Next, get all the items from the channel that are buffered up.
+            while (await (async self.fs_watch.channel.getOrNull() catch unreachable)) |ev_or_err| {
+                if (ev_or_err) |ev| {
+                    const root_scope = ev.data;
+                    group.call(rebuildFile, self, root_scope) catch |err| {
+                        build_result = err;
+                        continue;
+                    };
+                } else |err| {
+                    build_result = err;
+                    continue;
+                }
+            }
+            build_result = await (async group.wait() catch unreachable);
         }
     }

-    async fn compileAndLink(self: *Compilation) !void {
-        if (self.root_src_path) |root_src_path| {
-            // TODO async/await os.path.real
-            const root_src_real_path = os.path.real(self.gpa(), root_src_path) catch |err| {
-                try printError("unable to get real path '{}': {}", root_src_path, err);
-                return err;
-            };
-            const root_scope = blk: {
-                errdefer self.gpa().free(root_src_real_path);
-
-                // TODO async/await readFileAlloc()
-                const source_code = io.readFileAlloc(self.gpa(), root_src_real_path) catch |err| {
-                    try printError("unable to open '{}': {}", root_src_real_path, err);
-                    return err;
-                };
-                errdefer self.gpa().free(source_code);
+    async fn rebuildFile(self: *Compilation, root_scope: *Scope.Root) !void {
+        const tree_scope = blk: {
+            const source_code = (await (async fs.readFile(
+                self.loop,
+                root_scope.realpath,
+                max_src_size,
+            ) catch unreachable)) catch |err| {
+                try self.addCompileErrorCli(root_scope.realpath, "unable to open: {}", @errorName(err));
+                return;
+            };
+            errdefer self.gpa().free(source_code);
@@ -771,76 +837,146 @@ pub const Compilation = struct {
                 self.gpa().destroy(tree);
             }

-            break :blk try Scope.Root.create(self, tree, root_src_real_path);
+            break :blk try Scope.AstTree.create(self, tree, root_scope);
         };
-        defer root_scope.base.deref(self);
-        const tree = root_scope.tree;
+        defer tree_scope.base.deref(self);

-        var error_it = tree.errors.iterator(0);
+        var error_it = tree_scope.tree.errors.iterator(0);
         while (error_it.next()) |parse_error| {
-            const msg = try Msg.createFromParseErrorAndScope(self, root_scope, parse_error);
+            const msg = try Msg.createFromParseErrorAndScope(self, tree_scope, parse_error);
             errdefer msg.destroy();

             try await (async self.addCompileErrorAsync(msg) catch unreachable);
         }
-        if (tree.errors.len != 0) {
+        if (tree_scope.tree.errors.len != 0) {
             return;
         }

-        const decls = try Scope.Decls.create(self, &root_scope.base);
-        defer decls.base.deref(self);
+        const locked_table = await (async root_scope.decls.table.acquireWrite() catch unreachable);
+        defer locked_table.release();

         var decl_group = event.Group(BuildError!void).init(self.loop);
-        var decl_group_consumed = false;
-        errdefer if (!decl_group_consumed) decl_group.cancelAll();
+        defer decl_group.deinit();

-        var it = tree.root_node.decls.iterator(0);
-        while (it.next()) |decl_ptr| {
+        try await try async self.rebuildChangedDecls(
+            &decl_group,
+            locked_table.value,
+            root_scope.decls,
+            &tree_scope.tree.root_node.decls,
+            tree_scope,
+        );
+
+        try await (async decl_group.wait() catch unreachable);
+    }
+
+    async fn rebuildChangedDecls(
+        self: *Compilation,
+        group: *event.Group(BuildError!void),
+        locked_table: *Decl.Table,
+        decl_scope: *Scope.Decls,
+        ast_decls: *ast.Node.Root.DeclList,
+        tree_scope: *Scope.AstTree,
+    ) !void {
+        var existing_decls = try locked_table.clone();
+        defer existing_decls.deinit();
+
+        var ast_it = ast_decls.iterator(0);
+        while (ast_it.next()) |decl_ptr| {
             const decl = decl_ptr.*;
             switch (decl.id) {
                 ast.Node.Id.Comptime => {
                     const comptime_node = @fieldParentPtr(ast.Node.Comptime, "base", decl);

-                    try self.prelink_group.call(addCompTimeBlock, self, &decls.base, comptime_node);
+                    // TODO connect existing comptime decls to updated source files
+                    try self.prelink_group.call(addCompTimeBlock, self, tree_scope, &decl_scope.base, comptime_node);
                 },
                 ast.Node.Id.VarDecl => @panic("TODO"),
                 ast.Node.Id.FnProto => {
                     const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);

-                    const name = if (fn_proto.name_token) |name_token| tree.tokenSlice(name_token) else {
-                        try self.addCompileError(root_scope, Span{
+                    const name = if (fn_proto.name_token) |name_token| tree_scope.tree.tokenSlice(name_token) else {
+                        try self.addCompileError(tree_scope, Span{
                             .first = fn_proto.fn_token,
                             .last = fn_proto.fn_token + 1,
                         }, "missing function name");
                         continue;
                     };

+                    if (existing_decls.remove(name)) |entry| {
+                        // compare new code to existing
+                        if (entry.value.cast(Decl.Fn)) |existing_fn_decl| {
+                            // Just compare the old bytes to the new bytes of the top level decl.
+                            // Even if the AST is technically the same, we want error messages to display
+                            // from the most recent source.
+                            const old_decl_src = existing_fn_decl.base.tree_scope.tree.getNodeSource(
+                                &existing_fn_decl.fn_proto.base,
+                            );
+                            const new_decl_src = tree_scope.tree.getNodeSource(&fn_proto.base);
+                            if (mem.eql(u8, old_decl_src, new_decl_src)) {
+                                // it's the same, we can skip this decl
+                                continue;
+                            } else {
+                                @panic("TODO decl changed implementation");
+                                // Add the new thing before dereferencing the old thing. This way we don't end
+                                // up pointlessly re-creating things we end up using in the new thing.
+                            }
+                        } else {
+                            @panic("TODO decl changed kind");
+                        }
+                    } else {
+                        // add new decl
                         const fn_decl = try self.gpa().create(Decl.Fn{
                             .base = Decl{
                                 .id = Decl.Id.Fn,
                                 .name = name,
-                                .visib = parseVisibToken(tree, fn_proto.visib_token),
+                                .visib = parseVisibToken(tree_scope.tree, fn_proto.visib_token),
                                 .resolution = event.Future(BuildError!void).init(self.loop),
-                                .parent_scope = &decls.base,
+                                .parent_scope = &decl_scope.base,
+                                .tree_scope = tree_scope,
                             },
                             .value = Decl.Fn.Val{ .Unresolved = {} },
                             .fn_proto = fn_proto,
                         });
+                        tree_scope.base.ref();
                         errdefer self.gpa().destroy(fn_decl);

-                        try decl_group.call(addTopLevelDecl, self, decls, &fn_decl.base);
+                        try group.call(addTopLevelDecl, self, &fn_decl.base, locked_table);
+                    }
                 },
                 ast.Node.Id.TestDecl => @panic("TODO"),
                 else => unreachable,
             }
         }
-        decl_group_consumed = true;
-        try await (async decl_group.wait() catch unreachable);

-        // Now other code can rely on the decls scope having a complete list of names.
-        decls.name_future.resolve();
+        var existing_decl_it = existing_decls.iterator();
+        while (existing_decl_it.next()) |entry| {
+            // this decl was deleted
+            const existing_decl = entry.value;
+            @panic("TODO handle decl deletion");
+        }
     }

+    async fn initialCompile(self: *Compilation) !void {
+        if (self.root_src_path) |root_src_path| {
+            const root_scope = blk: {
+                // TODO async/await os.path.real
+                const root_src_real_path = os.path.real(self.gpa(), root_src_path) catch |err| {
+                    try self.addCompileErrorCli(root_src_path, "unable to open: {}", @errorName(err));
+                    return;
+                };
+                errdefer self.gpa().free(root_src_real_path);
+
+                break :blk try Scope.Root.create(self, root_src_real_path);
+            };
+            defer root_scope.base.deref(self);
+
+            assert((try await try async self.fs_watch.addFile(root_scope.realpath, root_scope)) == null);
+            try await try async self.rebuildFile(root_scope);
+        }
+    }
+
+    async fn maybeLink(self: *Compilation) !void {
         (await (async self.prelink_group.wait() catch unreachable)) catch |err| switch (err) {
             error.SemanticAnalysisFailed => {},
             else => return err,
@@ -861,6 +997,7 @@ pub const Compilation = struct {
     /// caller takes ownership of resulting Code
     async fn genAndAnalyzeCode(
         comp: *Compilation,
+        tree_scope: *Scope.AstTree,
         scope: *Scope,
         node: *ast.Node,
         expected_type: ?*Type,
@@ -868,6 +1005,7 @@ pub const Compilation = struct {
         const unanalyzed_code = try await (async ir.gen(
             comp,
             node,
+            tree_scope,
             scope,
         ) catch unreachable);
         defer unanalyzed_code.destroy(comp.gpa());
@@ -894,6 +1032,7 @@ pub const Compilation = struct {
     async fn addCompTimeBlock(
         comp: *Compilation,
+        tree_scope: *Scope.AstTree,
         scope: *Scope,
         comptime_node: *ast.Node.Comptime,
     ) !void {
@@ -902,6 +1041,7 @@ pub const Compilation = struct {
         const analyzed_code = (await (async genAndAnalyzeCode(
             comp,
+            tree_scope,
             scope,
             comptime_node.expr,
             &void_type.base,
@@ -914,38 +1054,42 @@ pub const Compilation = struct {
         analyzed_code.destroy(comp.gpa());
     }

-    async fn addTopLevelDecl(self: *Compilation, decls: *Scope.Decls, decl: *Decl) !void {
-        const tree = decl.findRootScope().tree;
-        const is_export = decl.isExported(tree);
-
-        var add_to_table_resolved = false;
-        const add_to_table = async self.addDeclToTable(decls, decl) catch unreachable;
-        errdefer if (!add_to_table_resolved) cancel add_to_table;
+    async fn addTopLevelDecl(
+        self: *Compilation,
+        decl: *Decl,
+        locked_table: *Decl.Table,
+    ) !void {
+        const is_export = decl.isExported(decl.tree_scope.tree);
+        // TODO https://github.com/ziglang/zig/issues/1261

         if (is_export) {
             try self.prelink_group.call(verifyUniqueSymbol, self, decl);
             try self.prelink_group.call(resolveDecl, self, decl);
         }

-        add_to_table_resolved = true;
-        try await add_to_table;
-    }
-
-    async fn addDeclToTable(self: *Compilation, decls: *Scope.Decls, decl: *Decl) !void {
-        const held = await (async decls.table.acquire() catch unreachable);
-        defer held.release();
-
-        if (try held.value.put(decl.name, decl)) |other_decl| {
-            try self.addCompileError(decls.base.findRoot(), decl.getSpan(), "redefinition of '{}'", decl.name);
+        const gop = try locked_table.getOrPut(decl.name);
+        if (gop.found_existing) {
+            try self.addCompileError(decl.tree_scope, decl.getSpan(), "redefinition of '{}'", decl.name);
             // TODO note: other definition here
+        } else {
+            gop.kv.value = decl;
         }
     }

-    fn addCompileError(self: *Compilation, root: *Scope.Root, span: Span, comptime fmt: []const u8, args: ...) !void {
+    fn addCompileError(self: *Compilation, tree_scope: *Scope.AstTree, span: Span, comptime fmt: []const u8, args: ...) !void {
         const text = try std.fmt.allocPrint(self.gpa(), fmt, args);
         errdefer self.gpa().free(text);

-        const msg = try Msg.createFromScope(self, root, span, text);
+        const msg = try Msg.createFromScope(self, tree_scope, span, text);
+        errdefer msg.destroy();
+
+        try self.prelink_group.call(addCompileErrorAsync, self, msg);
+    }
+
+    fn addCompileErrorCli(self: *Compilation, realpath: []const u8, comptime fmt: []const u8, args: ...) !void {
+        const text = try std.fmt.allocPrint(self.gpa(), fmt, args);
+        errdefer self.gpa().free(text);
+
+        const msg = try Msg.createFromCli(self, realpath, text);
         errdefer msg.destroy();

         try self.prelink_group.call(addCompileErrorAsync, self, msg);
@@ -969,7 +1113,7 @@ pub const Compilation = struct {
         if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
             try self.addCompileError(
-                decl.findRootScope(),
+                decl.tree_scope,
                 decl.getSpan(),
                 "exported symbol collision: '{}'",
                 decl.name,
@@ -1019,7 +1163,7 @@ pub const Compilation = struct {
     async fn startFindingNativeLibC(self: *Compilation) void {
         await (async self.loop.yield() catch unreachable);
         // we don't care if it fails, we're just trying to kick off the future resolution
-        _ = (await (async self.event_loop_local.getNativeLibC() catch unreachable)) catch return;
+        _ = (await (async self.zig_compiler.getNativeLibC() catch unreachable)) catch return;
     }

     /// General Purpose Allocator. Must free when done.
@@ -1077,7 +1221,7 @@ pub const Compilation = struct {
         var rand_bytes: [9]u8 = undefined;

         {
-            const held = await (async self.event_loop_local.prng.acquire() catch unreachable);
+            const held = await (async self.zig_compiler.prng.acquire() catch unreachable);
             defer held.release();

             held.value.random.bytes(rand_bytes[0..]);
@@ -1093,18 +1237,24 @@ pub const Compilation = struct {
     }

     /// Returns a value which has been ref()'d once
-    async fn analyzeConstValue(comp: *Compilation, scope: *Scope, node: *ast.Node, expected_type: *Type) !*Value {
-        const analyzed_code = try await (async comp.genAndAnalyzeCode(scope, node, expected_type) catch unreachable);
+    async fn analyzeConstValue(
+        comp: *Compilation,
+        tree_scope: *Scope.AstTree,
+        scope: *Scope,
+        node: *ast.Node,
+        expected_type: *Type,
+    ) !*Value {
+        const analyzed_code = try await (async comp.genAndAnalyzeCode(tree_scope, scope, node, expected_type) catch unreachable);
         defer analyzed_code.destroy(comp.gpa());

         return analyzed_code.getCompTimeResult(comp);
     }

-    async fn analyzeTypeExpr(comp: *Compilation, scope: *Scope, node: *ast.Node) !*Type {
+    async fn analyzeTypeExpr(comp: *Compilation, tree_scope: *Scope.AstTree, scope: *Scope, node: *ast.Node) !*Type {
         const meta_type = &Type.MetaType.get(comp).base;
         defer meta_type.base.deref(comp);

-        const result_val = try await (async comp.analyzeConstValue(scope, node, meta_type) catch unreachable);
+        const result_val = try await (async comp.analyzeConstValue(tree_scope, scope, node, meta_type) catch unreachable);
         errdefer result_val.base.deref(comp);

         return result_val.cast(Type).?;
@@ -1120,13 +1270,6 @@ pub const Compilation = struct {
     }
 };

-fn printError(comptime format: []const u8, args: ...) !void {
-    var stderr_file = try std.io.getStdErr();
-    var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
-    const out_stream = &stderr_file_out_stream.stream;
-    try out_stream.print(format, args);
-}
-
 fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib {
     if (optional_token_index) |token_index| {
         const token = tree.tokens.at(token_index);
@@ -1150,12 +1293,14 @@ async fn generateDecl(comp: *Compilation, decl: *Decl) !void {
 }

 async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
+    const tree_scope = fn_decl.base.tree_scope;
+
     const body_node = fn_decl.fn_proto.body_node orelse return await (async generateDeclFnProto(comp, fn_decl) catch unreachable);

     const fndef_scope = try Scope.FnDef.create(comp, fn_decl.base.parent_scope);
     defer fndef_scope.base.deref(comp);

-    const fn_type = try await (async analyzeFnType(comp, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
+    const fn_type = try await (async analyzeFnType(comp, tree_scope, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
     defer fn_type.base.base.deref(comp);

     var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name);
@@ -1168,18 +1313,17 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
     symbol_name_consumed = true;

     // Define local parameter variables
-    const root_scope = fn_decl.base.findRootScope();
     for (fn_type.key.data.Normal.params) |param, i| {
         //AstNode *param_decl_node = get_param_decl_node(fn_table_entry, i);
         const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", fn_decl.fn_proto.params.at(i).*);
         const name_token = param_decl.name_token orelse {
-            try comp.addCompileError(root_scope, Span{
+            try comp.addCompileError(tree_scope, Span{
                 .first = param_decl.firstToken(),
                 .last = param_decl.type_node.firstToken(),
             }, "missing parameter name");
             return error.SemanticAnalysisFailed;
         };
-        const param_name = root_scope.tree.tokenSlice(name_token);
+        const param_name = tree_scope.tree.tokenSlice(name_token);

         // if (is_noalias && get_codegen_ptr_type(param_type) == nullptr) {
         //     add_node_error(g, param_decl_node, buf_sprintf("noalias on non-pointer parameter"));
@@ -1201,6 +1345,7 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
     }

     const analyzed_code = try await (async comp.genAndAnalyzeCode(
+        tree_scope,
         fn_val.child_scope,
         body_node,
         fn_type.key.data.Normal.return_type,
@@ -1231,12 +1376,17 @@ fn getZigDir(allocator: *mem.Allocator) ![]u8 {
     return os.getAppDataDir(allocator, "zig");
 }

-async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.FnProto) !*Type.Fn {
+async fn analyzeFnType(
+    comp: *Compilation,
+    tree_scope: *Scope.AstTree,
+    scope: *Scope,
+    fn_proto: *ast.Node.FnProto,
+) !*Type.Fn {
     const return_type_node = switch (fn_proto.return_type) {
         ast.Node.FnProto.ReturnType.Explicit => |n| n,
         ast.Node.FnProto.ReturnType.InferErrorSet => |n| n,
     };
-    const return_type = try await (async comp.analyzeTypeExpr(scope, return_type_node) catch unreachable);
+    const return_type = try await (async comp.analyzeTypeExpr(tree_scope, scope, return_type_node) catch unreachable);
     return_type.base.deref(comp);

     var params = ArrayList(Type.Fn.Param).init(comp.gpa());
@@ -1252,7 +1402,7 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn
     var it = fn_proto.params.iterator(0);
     while (it.next()) |param_node_ptr| {
         const param_node = param_node_ptr.*.cast(ast.Node.ParamDecl).?;
-        const param_type = try await (async comp.analyzeTypeExpr(scope, param_node.type_node) catch unreachable);
+        const param_type = try await (async comp.analyzeTypeExpr(tree_scope, scope, param_node.type_node) catch unreachable);
         errdefer param_type.base.deref(comp);

         try params.append(Type.Fn.Param{
             .typ = param_type,
@@ -1289,7 +1439,12 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn
 }

 async fn generateDeclFnProto(comp: *Compilation, fn_decl: *Decl.Fn) !void {
-    const fn_type = try await (async analyzeFnType(comp, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
+    const fn_type = try await (async analyzeFnType(
+        comp,
+        fn_decl.base.tree_scope,
+        fn_decl.base.parent_scope,
+        fn_decl.fn_proto,
+    ) catch unreachable);
     defer fn_type.base.base.deref(comp);

     var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name);
@@ -1301,3 +1456,14 @@ async fn generateDeclFnProto(comp: *Compilation, fn_decl: *Decl.Fn) !void {
     fn_decl.value = Decl.Fn.Val{ .FnProto = fn_proto_val };
     symbol_name_consumed = true;
 }
+
+// TODO these are hacks which should probably be solved by the language
+fn getAwaitResult(allocator: *Allocator, handle: var) @typeInfo(@typeOf(handle)).Promise.child.? {
+    var result: ?@typeInfo(@typeOf(handle)).Promise.child.? = null;
+    cancel (async<allocator> getAwaitResultAsync(handle, &result) catch unreachable);
+    return result.?;
+}
+
+async fn getAwaitResultAsync(handle: var, out: *?@typeInfo(@typeOf(handle)).Promise.child.?) void {
+    out.* = await handle;
+}

View File

@@ -17,8 +17,16 @@ pub const Decl = struct {
     resolution: event.Future(Compilation.BuildError!void),
     parent_scope: *Scope,

+    // TODO when we destroy the decl, deref the tree scope
+    tree_scope: *Scope.AstTree,
+
     pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8);

+    pub fn cast(base: *Decl, comptime T: type) ?*T {
+        if (base.id != @field(Id, @typeName(T))) return null;
+        return @fieldParentPtr(T, "base", base);
+    }
+
     pub fn isExported(base: *const Decl, tree: *ast.Tree) bool {
         switch (base.id) {
             Id.Fn => {
@@ -95,4 +103,3 @@ pub const Decl = struct {
         base: Decl,
     };
 };
-

View File

@@ -33,35 +33,48 @@ pub const Span = struct {
 };

 pub const Msg = struct {
-    span: Span,
     text: []u8,
+    realpath: []u8,
     data: Data,

     const Data = union(enum) {
+        Cli: Cli,
         PathAndTree: PathAndTree,
         ScopeAndComp: ScopeAndComp,
     };

     const PathAndTree = struct {
-        realpath: []const u8,
+        span: Span,
         tree: *ast.Tree,
         allocator: *mem.Allocator,
     };

     const ScopeAndComp = struct {
-        root_scope: *Scope.Root,
+        span: Span,
+        tree_scope: *Scope.AstTree,
         compilation: *Compilation,
     };

+    const Cli = struct {
+        allocator: *mem.Allocator,
+    };
+
     pub fn destroy(self: *Msg) void {
         switch (self.data) {
+            Data.Cli => |cli| {
+                cli.allocator.free(self.text);
+                cli.allocator.free(self.realpath);
+                cli.allocator.destroy(self);
+            },
             Data.PathAndTree => |path_and_tree| {
                 path_and_tree.allocator.free(self.text);
+                path_and_tree.allocator.free(self.realpath);
                 path_and_tree.allocator.destroy(self);
             },
             Data.ScopeAndComp => |scope_and_comp| {
-                scope_and_comp.root_scope.base.deref(scope_and_comp.compilation);
+                scope_and_comp.tree_scope.base.deref(scope_and_comp.compilation);
                 scope_and_comp.compilation.gpa().free(self.text);
+                scope_and_comp.compilation.gpa().free(self.realpath);
                 scope_and_comp.compilation.gpa().destroy(self);
             },
         }
@@ -69,6 +82,7 @@ pub const Msg = struct {
     fn getAllocator(self: *const Msg) *mem.Allocator {
         switch (self.data) {
+            Data.Cli => |cli| return cli.allocator,
             Data.PathAndTree => |path_and_tree| {
                 return path_and_tree.allocator;
             },
@@ -78,71 +92,93 @@ pub const Msg = struct {
         }
     }

-    pub fn getRealPath(self: *const Msg) []const u8 {
-        switch (self.data) {
-            Data.PathAndTree => |path_and_tree| {
-                return path_and_tree.realpath;
-            },
-            Data.ScopeAndComp => |scope_and_comp| {
-                return scope_and_comp.root_scope.realpath;
-            },
-        }
-    }
-
     pub fn getTree(self: *const Msg) *ast.Tree {
         switch (self.data) {
+            Data.Cli => unreachable,
             Data.PathAndTree => |path_and_tree| {
                 return path_and_tree.tree;
             },
             Data.ScopeAndComp => |scope_and_comp| {
-                return scope_and_comp.root_scope.tree;
+                return scope_and_comp.tree_scope.tree;
             },
         }
     }

+    pub fn getSpan(self: *const Msg) Span {
+        return switch (self.data) {
+            Data.Cli => unreachable,
+            Data.PathAndTree => |path_and_tree| path_and_tree.span,
+            Data.ScopeAndComp => |scope_and_comp| scope_and_comp.span,
+        };
+    }
+
     /// Takes ownership of text
-    /// References root_scope, and derefs when the msg is freed
-    pub fn createFromScope(comp: *Compilation, root_scope: *Scope.Root, span: Span, text: []u8) !*Msg {
+    /// References tree_scope, and derefs when the msg is freed
+    pub fn createFromScope(comp: *Compilation, tree_scope: *Scope.AstTree, span: Span, text: []u8) !*Msg {
+        const realpath = try mem.dupe(comp.gpa(), u8, tree_scope.root().realpath);
+        errdefer comp.gpa().free(realpath);
+
         const msg = try comp.gpa().create(Msg{
             .text = text,
-            .span = span,
+            .realpath = realpath,
             .data = Data{
                 .ScopeAndComp = ScopeAndComp{
-                    .root_scope = root_scope,
+                    .tree_scope = tree_scope,
                     .compilation = comp,
+                    .span = span,
                 },
             },
         });
-        root_scope.base.ref();
+        tree_scope.base.ref();
+        return msg;
+    }
+
+    /// Caller owns returned Msg and must free with `allocator`
+    /// allocator will additionally be used for printing messages later.
+    pub fn createFromCli(comp: *Compilation, realpath: []const u8, text: []u8) !*Msg {
+        const realpath_copy = try mem.dupe(comp.gpa(), u8, realpath);
+        errdefer comp.gpa().free(realpath_copy);
+
+        const msg = try comp.gpa().create(Msg{
+            .text = text,
+            .realpath = realpath_copy,
+            .data = Data{
+                .Cli = Cli{ .allocator = comp.gpa() },
+            },
+        });
         return msg;
     }

     pub fn createFromParseErrorAndScope(
         comp: *Compilation,
-        root_scope: *Scope.Root,
+        tree_scope: *Scope.AstTree,
         parse_error: *const ast.Error,
     ) !*Msg {
         const loc_token = parse_error.loc();
         var text_buf = try std.Buffer.initSize(comp.gpa(), 0);
         defer text_buf.deinit();

+        const realpath_copy = try mem.dupe(comp.gpa(), u8, tree_scope.root().realpath);
+        errdefer comp.gpa().free(realpath_copy);
+
         var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
-        try parse_error.render(&root_scope.tree.tokens, out_stream);
+        try parse_error.render(&tree_scope.tree.tokens, out_stream);

         const msg = try comp.gpa().create(Msg{
             .text = undefined,
-            .span = Span{
-                .first = loc_token,
-                .last = loc_token,
-            },
+            .realpath = realpath_copy,
             .data = Data{
                 .ScopeAndComp = ScopeAndComp{
-                    .root_scope = root_scope,
+                    .tree_scope = tree_scope,
                     .compilation = comp,
+                    .span = Span{
+                        .first = loc_token,
+                        .last = loc_token,
+                    },
                 },
             },
         });
-        root_scope.base.ref();
+        tree_scope.base.ref();
         msg.text = text_buf.toOwnedSlice();
         return msg;
     }
@@ -161,22 +197,25 @@ pub const Msg = struct {
         var text_buf = try std.Buffer.initSize(allocator, 0);
         defer text_buf.deinit();

+        const realpath_copy = try mem.dupe(allocator, u8, realpath);
+        errdefer allocator.free(realpath_copy);
+
         var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
         try parse_error.render(&tree.tokens, out_stream);

         const msg = try allocator.create(Msg{
             .text = undefined,
+            .realpath = realpath_copy,
             .data = Data{
                 .PathAndTree = PathAndTree{
                     .allocator = allocator,
-                    .realpath = realpath,
                     .tree = tree,
-                },
-            },
-            .span = Span{
-                .first = loc_token,
-                .last = loc_token,
-            },
+                    .span = Span{
+                        .first = loc_token,
+                        .last = loc_token,
+                    },
+                },
+            },
         });
         msg.text = text_buf.toOwnedSlice();
         errdefer allocator.destroy(msg);
@@ -185,20 +224,28 @@ pub const Msg = struct {
     }

     pub fn printToStream(msg: *const Msg, stream: var, color_on: bool) !void {
+        switch (msg.data) {
+            Data.Cli => {
+                try stream.print("{}:-:-: error: {}\n", msg.realpath, msg.text);
+                return;
+            },
+            else => {},
+        }
+
         const allocator = msg.getAllocator();
-        const realpath = msg.getRealPath();
         const tree = msg.getTree();

         const cwd = try os.getCwd(allocator);
         defer allocator.free(cwd);

-        const relpath = try os.path.relative(allocator, cwd, realpath);
+        const relpath = try os.path.relative(allocator, cwd, msg.realpath);
         defer allocator.free(relpath);

-        const path = if (relpath.len < realpath.len) relpath else realpath;
+        const path = if (relpath.len < msg.realpath.len) relpath else msg.realpath;

-        const first_token = tree.tokens.at(msg.span.first);
-        const last_token = tree.tokens.at(msg.span.last);
+        const span = msg.getSpan();
+        const first_token = tree.tokens.at(span.first);
+        const last_token = tree.tokens.at(span.last);
         const start_loc = tree.tokenLocationPtr(0, first_token);
         const end_loc = tree.tokenLocationPtr(first_token.end, last_token);
         if (!color_on) {

View File

@@ -961,6 +961,7 @@ pub const Code = struct {
     basic_block_list: std.ArrayList(*BasicBlock),
     arena: std.heap.ArenaAllocator,
     return_type: ?*Type,
+    tree_scope: *Scope.AstTree,

     /// allocator is comp.gpa()
     pub fn destroy(self: *Code, allocator: *Allocator) void {
@@ -990,14 +991,14 @@ pub const Code = struct {
                 return ret_value.val.KnownValue.getRef();
             }
             try comp.addCompileError(
-                ret_value.scope.findRoot(),
+                self.tree_scope,
                 ret_value.span,
                 "unable to evaluate constant expression",
             );
             return error.SemanticAnalysisFailed;
         } else if (inst.hasSideEffects()) {
             try comp.addCompileError(
-                inst.scope.findRoot(),
+                self.tree_scope,
                 inst.span,
                 "unable to evaluate constant expression",
             );
@@ -1013,25 +1014,24 @@ pub const Builder = struct {
     code: *Code,
     current_basic_block: *BasicBlock,
     next_debug_id: usize,
-    root_scope: *Scope.Root,
     is_comptime: bool,
     is_async: bool,
     begin_scope: ?*Scope,

     pub const Error = Analyze.Error;

-    pub fn init(comp: *Compilation, root_scope: *Scope.Root, begin_scope: ?*Scope) !Builder {
+    pub fn init(comp: *Compilation, tree_scope: *Scope.AstTree, begin_scope: ?*Scope) !Builder {
         const code = try comp.gpa().create(Code{
             .basic_block_list = undefined,
             .arena = std.heap.ArenaAllocator.init(comp.gpa()),
             .return_type = null,
+            .tree_scope = tree_scope,
         });
         code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator);
         errdefer code.destroy(comp.gpa());

         return Builder{
             .comp = comp,
-            .root_scope = root_scope,
             .current_basic_block = undefined,
             .code = code,
             .next_debug_id = 0,
@@ -1292,6 +1292,7 @@ pub const Builder = struct {
             Scope.Id.FnDef => return false,
             Scope.Id.Decls => unreachable,
             Scope.Id.Root => unreachable,
+            Scope.Id.AstTree => unreachable,
             Scope.Id.Block,
             Scope.Id.Defer,
             Scope.Id.DeferExpr,
@@ -1302,7 +1303,7 @@ pub const Builder = struct {
     }

     pub fn genIntLit(irb: *Builder, int_lit: *ast.Node.IntegerLiteral, scope: *Scope) !*Inst {
-        const int_token = irb.root_scope.tree.tokenSlice(int_lit.token);
+        const int_token = irb.code.tree_scope.tree.tokenSlice(int_lit.token);

         var base: u8 = undefined;
         var rest: []const u8 = undefined;
@@ -1341,7 +1342,7 @@ pub const Builder = struct {
     }

     pub async fn genStrLit(irb: *Builder, str_lit: *ast.Node.StringLiteral, scope: *Scope) !*Inst {
-        const str_token = irb.root_scope.tree.tokenSlice(str_lit.token);
+        const str_token = irb.code.tree_scope.tree.tokenSlice(str_lit.token);
         const src_span = Span.token(str_lit.token);

         var bad_index: usize = undefined;
@@ -1349,7 +1350,7 @@ pub const Builder = struct {
             error.OutOfMemory => return error.OutOfMemory,
             error.InvalidCharacter => {
                 try irb.comp.addCompileError(
-                    irb.root_scope,
+                    irb.code.tree_scope,
                     src_span,
                     "invalid character in string literal: '{c}'",
                     str_token[bad_index],
@@ -1427,7 +1428,7 @@ pub const Builder = struct {
         if (statement_node.cast(ast.Node.Defer)) |defer_node| {
             // defer starts a new scope
-            const defer_token = irb.root_scope.tree.tokens.at(defer_node.defer_token);
+            const defer_token = irb.code.tree_scope.tree.tokens.at(defer_node.defer_token);
             const kind = switch (defer_token.id) {
                 Token.Id.Keyword_defer => Scope.Defer.Kind.ScopeExit,
                 Token.Id.Keyword_errdefer => Scope.Defer.Kind.ErrorExit,
@ -1513,7 +1514,7 @@ pub const Builder = struct {
const src_span = Span.token(control_flow_expr.ltoken); const src_span = Span.token(control_flow_expr.ltoken);
if (scope.findFnDef() == null) { if (scope.findFnDef() == null) {
try irb.comp.addCompileError( try irb.comp.addCompileError(
irb.root_scope, irb.code.tree_scope,
src_span, src_span,
"return expression outside function definition", "return expression outside function definition",
); );
@ -1523,7 +1524,7 @@ pub const Builder = struct {
if (scope.findDeferExpr()) |scope_defer_expr| { if (scope.findDeferExpr()) |scope_defer_expr| {
if (!scope_defer_expr.reported_err) { if (!scope_defer_expr.reported_err) {
try irb.comp.addCompileError( try irb.comp.addCompileError(
irb.root_scope, irb.code.tree_scope,
src_span, src_span,
"cannot return from defer expression", "cannot return from defer expression",
); );
@ -1599,7 +1600,7 @@ pub const Builder = struct {
pub async fn genIdentifier(irb: *Builder, identifier: *ast.Node.Identifier, scope: *Scope, lval: LVal) !*Inst { pub async fn genIdentifier(irb: *Builder, identifier: *ast.Node.Identifier, scope: *Scope, lval: LVal) !*Inst {
const src_span = Span.token(identifier.token); const src_span = Span.token(identifier.token);
const name = irb.root_scope.tree.tokenSlice(identifier.token); const name = irb.code.tree_scope.tree.tokenSlice(identifier.token);
//if (buf_eql_str(variable_name, "_") && lval == LValPtr) { //if (buf_eql_str(variable_name, "_") && lval == LValPtr) {
// IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, node); // IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, node);
@ -1622,7 +1623,7 @@ pub const Builder = struct {
} }
} else |err| switch (err) { } else |err| switch (err) {
error.Overflow => { error.Overflow => {
try irb.comp.addCompileError(irb.root_scope, src_span, "integer too large"); try irb.comp.addCompileError(irb.code.tree_scope, src_span, "integer too large");
return error.SemanticAnalysisFailed; return error.SemanticAnalysisFailed;
}, },
error.OutOfMemory => return error.OutOfMemory, error.OutOfMemory => return error.OutOfMemory,
@ -1656,7 +1657,7 @@ pub const Builder = struct {
// TODO put a variable of same name with invalid type in global scope // TODO put a variable of same name with invalid type in global scope
// so that future references to this same name will find a variable with an invalid type // so that future references to this same name will find a variable with an invalid type
try irb.comp.addCompileError(irb.root_scope, src_span, "unknown identifier '{}'", name); try irb.comp.addCompileError(irb.code.tree_scope, src_span, "unknown identifier '{}'", name);
return error.SemanticAnalysisFailed; return error.SemanticAnalysisFailed;
} }
@ -1689,6 +1690,7 @@ pub const Builder = struct {
=> scope = scope.parent orelse break, => scope = scope.parent orelse break,
Scope.Id.DeferExpr => unreachable, Scope.Id.DeferExpr => unreachable,
Scope.Id.AstTree => unreachable,
} }
} }
return result; return result;
@ -1740,6 +1742,7 @@ pub const Builder = struct {
=> scope = scope.parent orelse return is_noreturn, => scope = scope.parent orelse return is_noreturn,
Scope.Id.DeferExpr => unreachable, Scope.Id.DeferExpr => unreachable,
Scope.Id.AstTree => unreachable,
} }
} }
} }
@ -1929,8 +1932,9 @@ pub const Builder = struct {
Scope.Id.Root => return Ident.NotFound, Scope.Id.Root => return Ident.NotFound,
Scope.Id.Decls => { Scope.Id.Decls => {
const decls = @fieldParentPtr(Scope.Decls, "base", s); const decls = @fieldParentPtr(Scope.Decls, "base", s);
const table = await (async decls.getTableReadOnly() catch unreachable); const locked_table = await (async decls.table.acquireRead() catch unreachable);
if (table.get(name)) |entry| { defer locked_table.release();
if (locked_table.value.get(name)) |entry| {
return Ident{ .Decl = entry.value }; return Ident{ .Decl = entry.value };
} }
}, },
@ -1967,8 +1971,8 @@ const Analyze = struct {
OutOfMemory, OutOfMemory,
}; };
pub fn init(comp: *Compilation, root_scope: *Scope.Root, explicit_return_type: ?*Type) !Analyze { pub fn init(comp: *Compilation, tree_scope: *Scope.AstTree, explicit_return_type: ?*Type) !Analyze {
var irb = try Builder.init(comp, root_scope, null); var irb = try Builder.init(comp, tree_scope, null);
errdefer irb.abort(); errdefer irb.abort();
return Analyze{ return Analyze{
@ -2046,7 +2050,7 @@ const Analyze = struct {
} }
fn addCompileError(self: *Analyze, span: Span, comptime fmt: []const u8, args: ...) !void { fn addCompileError(self: *Analyze, span: Span, comptime fmt: []const u8, args: ...) !void {
return self.irb.comp.addCompileError(self.irb.root_scope, span, fmt, args); return self.irb.comp.addCompileError(self.irb.code.tree_scope, span, fmt, args);
} }
fn resolvePeerTypes(self: *Analyze, expected_type: ?*Type, peers: []const *Inst) Analyze.Error!*Type { fn resolvePeerTypes(self: *Analyze, expected_type: ?*Type, peers: []const *Inst) Analyze.Error!*Type {
@ -2534,9 +2538,10 @@ const Analyze = struct {
pub async fn gen( pub async fn gen(
comp: *Compilation, comp: *Compilation,
body_node: *ast.Node, body_node: *ast.Node,
tree_scope: *Scope.AstTree,
scope: *Scope, scope: *Scope,
) !*Code { ) !*Code {
var irb = try Builder.init(comp, scope.findRoot(), scope); var irb = try Builder.init(comp, tree_scope, scope);
errdefer irb.abort(); errdefer irb.abort();
const entry_block = try irb.createBasicBlock(scope, c"Entry"); const entry_block = try irb.createBasicBlock(scope, c"Entry");
@ -2554,9 +2559,8 @@ pub async fn gen(
pub async fn analyze(comp: *Compilation, old_code: *Code, expected_type: ?*Type) !*Code { pub async fn analyze(comp: *Compilation, old_code: *Code, expected_type: ?*Type) !*Code {
const old_entry_bb = old_code.basic_block_list.at(0); const old_entry_bb = old_code.basic_block_list.at(0);
const root_scope = old_entry_bb.scope.findRoot();
var ira = try Analyze.init(comp, root_scope, expected_type); var ira = try Analyze.init(comp, old_code.tree_scope, expected_type);
errdefer ira.abort(); errdefer ira.abort();
const new_entry_bb = try ira.getNewBasicBlock(old_entry_bb, null); const new_entry_bb = try ira.getNewBasicBlock(old_entry_bb, null);

View File

@ -143,7 +143,7 @@ pub const LibCInstallation = struct {
pub async fn findNative(self: *LibCInstallation, loop: *event.Loop) !void { pub async fn findNative(self: *LibCInstallation, loop: *event.Loop) !void {
self.initEmpty(); self.initEmpty();
var group = event.Group(FindError!void).init(loop); var group = event.Group(FindError!void).init(loop);
errdefer group.cancelAll(); errdefer group.deinit();
var windows_sdk: ?*c.ZigWindowsSDK = null; var windows_sdk: ?*c.ZigWindowsSDK = null;
errdefer if (windows_sdk) |sdk| c.zig_free_windows_sdk(@ptrCast(?[*]c.ZigWindowsSDK, sdk)); errdefer if (windows_sdk) |sdk| c.zig_free_windows_sdk(@ptrCast(?[*]c.ZigWindowsSDK, sdk));
@ -313,7 +313,7 @@ pub const LibCInstallation = struct {
}, },
}; };
var group = event.Group(FindError!void).init(loop); var group = event.Group(FindError!void).init(loop);
errdefer group.cancelAll(); errdefer group.deinit();
for (dyn_tests) |*dyn_test| { for (dyn_tests) |*dyn_test| {
try group.call(testNativeDynamicLinker, self, loop, dyn_test); try group.call(testNativeDynamicLinker, self, loop, dyn_test);
} }
@ -341,7 +341,6 @@ pub const LibCInstallation = struct {
} }
} }
async fn findNativeKernel32LibDir(self: *LibCInstallation, loop: *event.Loop, sdk: *c.ZigWindowsSDK) FindError!void { async fn findNativeKernel32LibDir(self: *LibCInstallation, loop: *event.Loop, sdk: *c.ZigWindowsSDK) FindError!void {
var search_buf: [2]Search = undefined; var search_buf: [2]Search = undefined;
const searches = fillSearch(&search_buf, sdk); const searches = fillSearch(&search_buf, sdk);
@ -450,7 +449,6 @@ fn fillSearch(search_buf: *[2]Search, sdk: *c.ZigWindowsSDK) []Search {
return search_buf[0..search_end]; return search_buf[0..search_end];
} }
fn fileExists(allocator: *std.mem.Allocator, path: []const u8) !bool { fn fileExists(allocator: *std.mem.Allocator, path: []const u8) !bool {
if (std.os.File.access(allocator, path)) |_| { if (std.os.File.access(allocator, path)) |_| {
return true; return true;

View File

@ -61,7 +61,7 @@ pub async fn link(comp: *Compilation) !void {
ctx.libc = ctx.comp.override_libc orelse blk: { ctx.libc = ctx.comp.override_libc orelse blk: {
switch (comp.target) { switch (comp.target) {
Target.Native => { Target.Native => {
break :blk (await (async comp.event_loop_local.getNativeLibC() catch unreachable)) catch return error.LibCRequiredButNotProvidedOrFound; break :blk (await (async comp.zig_compiler.getNativeLibC() catch unreachable)) catch return error.LibCRequiredButNotProvidedOrFound;
}, },
else => return error.LibCRequiredButNotProvidedOrFound, else => return error.LibCRequiredButNotProvidedOrFound,
} }
@ -83,7 +83,7 @@ pub async fn link(comp: *Compilation) !void {
{ {
// LLD is not thread-safe, so we grab a global lock. // LLD is not thread-safe, so we grab a global lock.
const held = await (async comp.event_loop_local.lld_lock.acquire() catch unreachable); const held = await (async comp.zig_compiler.lld_lock.acquire() catch unreachable);
defer held.release(); defer held.release();
// Not evented I/O. LLD does its own multithreading internally. // Not evented I/O. LLD does its own multithreading internally.

View File

@ -14,7 +14,7 @@ const c = @import("c.zig");
const introspect = @import("introspect.zig"); const introspect = @import("introspect.zig");
const Args = arg.Args; const Args = arg.Args;
const Flag = arg.Flag; const Flag = arg.Flag;
const EventLoopLocal = @import("compilation.zig").EventLoopLocal; const ZigCompiler = @import("compilation.zig").ZigCompiler;
const Compilation = @import("compilation.zig").Compilation; const Compilation = @import("compilation.zig").Compilation;
const Target = @import("target.zig").Target; const Target = @import("target.zig").Target;
const errmsg = @import("errmsg.zig"); const errmsg = @import("errmsg.zig");
@ -24,6 +24,8 @@ var stderr_file: os.File = undefined;
var stderr: *io.OutStream(io.FileOutStream.Error) = undefined; var stderr: *io.OutStream(io.FileOutStream.Error) = undefined;
var stdout: *io.OutStream(io.FileOutStream.Error) = undefined; var stdout: *io.OutStream(io.FileOutStream.Error) = undefined;
const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB
const usage = const usage =
\\usage: zig [command] [options] \\usage: zig [command] [options]
\\ \\
@ -371,6 +373,16 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
os.exit(1); os.exit(1);
} }
var clang_argv_buf = ArrayList([]const u8).init(allocator);
defer clang_argv_buf.deinit();
const mllvm_flags = flags.many("mllvm");
for (mllvm_flags) |mllvm| {
try clang_argv_buf.append("-mllvm");
try clang_argv_buf.append(mllvm);
}
try ZigCompiler.setLlvmArgv(allocator, mllvm_flags);
const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch os.exit(1); const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch os.exit(1);
defer allocator.free(zig_lib_dir); defer allocator.free(zig_lib_dir);
@ -380,11 +392,11 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
try loop.initMultiThreaded(allocator); try loop.initMultiThreaded(allocator);
defer loop.deinit(); defer loop.deinit();
var event_loop_local = try EventLoopLocal.init(&loop); var zig_compiler = try ZigCompiler.init(&loop);
defer event_loop_local.deinit(); defer zig_compiler.deinit();
var comp = try Compilation.create( var comp = try Compilation.create(
&event_loop_local, &zig_compiler,
root_name, root_name,
root_source_file, root_source_file,
Target.Native, Target.Native,
@ -413,16 +425,6 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
comp.linker_script = flags.single("linker-script"); comp.linker_script = flags.single("linker-script");
comp.each_lib_rpath = flags.present("each-lib-rpath"); comp.each_lib_rpath = flags.present("each-lib-rpath");
var clang_argv_buf = ArrayList([]const u8).init(allocator);
defer clang_argv_buf.deinit();
const mllvm_flags = flags.many("mllvm");
for (mllvm_flags) |mllvm| {
try clang_argv_buf.append("-mllvm");
try clang_argv_buf.append(mllvm);
}
comp.llvm_argv = mllvm_flags;
comp.clang_argv = clang_argv_buf.toSliceConst(); comp.clang_argv = clang_argv_buf.toSliceConst();
comp.strip = flags.present("strip"); comp.strip = flags.present("strip");
@ -465,25 +467,28 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
comp.link_out_file = flags.single("output"); comp.link_out_file = flags.single("output");
comp.link_objects = link_objects; comp.link_objects = link_objects;
try comp.build(); comp.start();
const process_build_events_handle = try async<loop.allocator> processBuildEvents(comp, color); const process_build_events_handle = try async<loop.allocator> processBuildEvents(comp, color);
defer cancel process_build_events_handle; defer cancel process_build_events_handle;
loop.run(); loop.run();
} }
async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void { async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void {
var count: usize = 0;
while (true) {
// TODO directly awaiting async should guarantee memory allocation elision // TODO directly awaiting async should guarantee memory allocation elision
const build_event = await (async comp.events.get() catch unreachable); const build_event = await (async comp.events.get() catch unreachable);
count += 1;
switch (build_event) { switch (build_event) {
Compilation.Event.Ok => { Compilation.Event.Ok => {
return; stderr.print("Build {} succeeded\n", count) catch os.exit(1);
}, },
Compilation.Event.Error => |err| { Compilation.Event.Error => |err| {
std.debug.warn("build failed: {}\n", @errorName(err)); stderr.print("Build {} failed: {}\n", count, @errorName(err)) catch os.exit(1);
os.exit(1);
}, },
Compilation.Event.Fail => |msgs| { Compilation.Event.Fail => |msgs| {
stderr.print("Build {} compile errors:\n", count) catch os.exit(1);
for (msgs) |msg| { for (msgs) |msg| {
defer msg.destroy(); defer msg.destroy();
msg.printToFile(&stderr_file, color) catch os.exit(1); msg.printToFile(&stderr_file, color) catch os.exit(1);
@ -491,6 +496,7 @@ async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void {
}, },
} }
} }
}
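
The driver no longer exits after one successful build: start() kicks off compilation and every result arrives as an event. A condensed recap of the new tail of buildOutputType, taken from the diff above (assumes `comp`, `loop`, and `color` from the surrounding function; not standalone):

    comp.start(); // replaces the old `try comp.build()`; failures now arrive as events

    // consume Ok / Error / Fail events until the watch loop is cancelled
    const process_build_events_handle = try async<loop.allocator> processBuildEvents(comp, color);
    defer cancel process_build_events_handle;
    loop.run();
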
fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void { fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
return buildOutputType(allocator, args, Compilation.Kind.Exe); return buildOutputType(allocator, args, Compilation.Kind.Exe);
@ -528,33 +534,12 @@ const args_fmt_spec = []Flag{
}; };
const Fmt = struct { const Fmt = struct {
seen: std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8), seen: event.Locked(SeenMap),
queue: std.LinkedList([]const u8),
any_error: bool, any_error: bool,
color: errmsg.Color,
loop: *event.Loop,
// file_path must outlive Fmt const SeenMap = std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8);
fn addToQueue(self: *Fmt, file_path: []const u8) !void {
const new_node = try self.seen.allocator.create(std.LinkedList([]const u8).Node{
.prev = undefined,
.next = undefined,
.data = file_path,
});
if (try self.seen.put(file_path, {})) |_| return;
self.queue.append(new_node);
}
fn addDirToQueue(self: *Fmt, file_path: []const u8) !void {
var dir = try std.os.Dir.open(self.seen.allocator, file_path);
defer dir.close();
while (try dir.next()) |entry| {
if (entry.kind == std.os.Dir.Entry.Kind.Directory or mem.endsWith(u8, entry.name, ".zig")) {
const full_path = try os.path.join(self.seen.allocator, file_path, entry.name);
try self.addToQueue(full_path);
}
}
}
}; };
fn parseLibcPaths(allocator: *Allocator, libc: *LibCInstallation, libc_paths_file: []const u8) void { fn parseLibcPaths(allocator: *Allocator, libc: *LibCInstallation, libc_paths_file: []const u8) void {
@ -587,17 +572,17 @@ fn cmdLibC(allocator: *Allocator, args: []const []const u8) !void {
try loop.initMultiThreaded(allocator); try loop.initMultiThreaded(allocator);
defer loop.deinit(); defer loop.deinit();
var event_loop_local = try EventLoopLocal.init(&loop); var zig_compiler = try ZigCompiler.init(&loop);
defer event_loop_local.deinit(); defer zig_compiler.deinit();
const handle = try async<loop.allocator> findLibCAsync(&event_loop_local); const handle = try async<loop.allocator> findLibCAsync(&zig_compiler);
defer cancel handle; defer cancel handle;
loop.run(); loop.run();
} }
async fn findLibCAsync(event_loop_local: *EventLoopLocal) void { async fn findLibCAsync(zig_compiler: *ZigCompiler) void {
const libc = (await (async event_loop_local.getNativeLibC() catch unreachable)) catch |err| { const libc = (await (async zig_compiler.getNativeLibC() catch unreachable)) catch |err| {
stderr.print("unable to find libc: {}\n", @errorName(err)) catch os.exit(1); stderr.print("unable to find libc: {}\n", @errorName(err)) catch os.exit(1);
os.exit(1); os.exit(1);
}; };
@ -636,7 +621,7 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
var stdin_file = try io.getStdIn(); var stdin_file = try io.getStdIn();
var stdin = io.FileInStream.init(&stdin_file); var stdin = io.FileInStream.init(&stdin_file);
const source_code = try stdin.stream.readAllAlloc(allocator, @maxValue(usize)); const source_code = try stdin.stream.readAllAlloc(allocator, max_src_size);
defer allocator.free(source_code); defer allocator.free(source_code);
var tree = std.zig.parse(allocator, source_code) catch |err| { var tree = std.zig.parse(allocator, source_code) catch |err| {
@ -665,69 +650,146 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
os.exit(1); os.exit(1);
} }
var fmt = Fmt{ var loop: event.Loop = undefined;
.seen = std.HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8).init(allocator), try loop.initMultiThreaded(allocator);
.queue = std.LinkedList([]const u8).init(), defer loop.deinit();
.any_error = false,
};
for (flags.positionals.toSliceConst()) |file_path| { var result: FmtError!void = undefined;
try fmt.addToQueue(file_path); const main_handle = try async<allocator> asyncFmtMainChecked(
&result,
&loop,
flags,
color,
);
defer cancel main_handle;
loop.run();
return result;
} }
while (fmt.queue.popFirst()) |node| { async fn asyncFmtMainChecked(
const file_path = node.data; result: *(FmtError!void),
loop: *event.Loop,
flags: *const Args,
color: errmsg.Color,
) void {
result.* = await (async asyncFmtMain(loop, flags, color) catch unreachable);
}
var file = try os.File.openRead(allocator, file_path); const FmtError = error{
defer file.close(); SystemResources,
OperationAborted,
IoPending,
BrokenPipe,
Unexpected,
WouldBlock,
FileClosed,
DestinationAddressRequired,
DiskQuota,
FileTooBig,
InputOutput,
NoSpaceLeft,
AccessDenied,
OutOfMemory,
RenameAcrossMountPoints,
ReadOnlyFileSystem,
LinkQuotaExceeded,
FileBusy,
} || os.File.OpenError;
const source_code = io.readFileAlloc(allocator, file_path) catch |err| switch (err) { async fn asyncFmtMain(
loop: *event.Loop,
flags: *const Args,
color: errmsg.Color,
) FmtError!void {
suspend {
resume @handle();
}
var fmt = Fmt{
.seen = event.Locked(Fmt.SeenMap).init(loop, Fmt.SeenMap.init(loop.allocator)),
.any_error = false,
.color = color,
.loop = loop,
};
var group = event.Group(FmtError!void).init(loop);
for (flags.positionals.toSliceConst()) |file_path| {
try group.call(fmtPath, &fmt, file_path);
}
try await (async group.wait() catch unreachable);
if (fmt.any_error) {
os.exit(1);
}
}
async fn fmtPath(fmt: *Fmt, file_path_ref: []const u8) FmtError!void {
const file_path = try std.mem.dupe(fmt.loop.allocator, u8, file_path_ref);
defer fmt.loop.allocator.free(file_path);
{
const held = await (async fmt.seen.acquire() catch unreachable);
defer held.release();
if (try held.value.put(file_path, {})) |_| return;
}
const source_code = (await try async event.fs.readFile(
fmt.loop,
file_path,
max_src_size,
)) catch |err| switch (err) {
error.IsDir => { error.IsDir => {
try fmt.addDirToQueue(file_path); // TODO make event based (and dir.next())
continue; var dir = try std.os.Dir.open(fmt.loop.allocator, file_path);
defer dir.close();
var group = event.Group(FmtError!void).init(fmt.loop);
while (try dir.next()) |entry| {
if (entry.kind == std.os.Dir.Entry.Kind.Directory or mem.endsWith(u8, entry.name, ".zig")) {
const full_path = try os.path.join(fmt.loop.allocator, file_path, entry.name);
try group.call(fmtPath, fmt, full_path);
}
}
return await (async group.wait() catch unreachable);
}, },
else => { else => {
// TODO lock stderr printing
try stderr.print("unable to open '{}': {}\n", file_path, err); try stderr.print("unable to open '{}': {}\n", file_path, err);
fmt.any_error = true; fmt.any_error = true;
continue; return;
}, },
}; };
defer allocator.free(source_code); defer fmt.loop.allocator.free(source_code);
var tree = std.zig.parse(allocator, source_code) catch |err| { var tree = std.zig.parse(fmt.loop.allocator, source_code) catch |err| {
try stderr.print("error parsing file '{}': {}\n", file_path, err); try stderr.print("error parsing file '{}': {}\n", file_path, err);
fmt.any_error = true; fmt.any_error = true;
continue; return;
}; };
defer tree.deinit(); defer tree.deinit();
var error_it = tree.errors.iterator(0); var error_it = tree.errors.iterator(0);
while (error_it.next()) |parse_error| { while (error_it.next()) |parse_error| {
const msg = try errmsg.Msg.createFromParseError(allocator, parse_error, &tree, file_path); const msg = try errmsg.Msg.createFromParseError(fmt.loop.allocator, parse_error, &tree, file_path);
defer msg.destroy(); defer fmt.loop.allocator.destroy(msg);
try msg.printToFile(&stderr_file, color); try msg.printToFile(&stderr_file, fmt.color);
} }
if (tree.errors.len != 0) { if (tree.errors.len != 0) {
fmt.any_error = true; fmt.any_error = true;
continue; return;
} }
const baf = try io.BufferedAtomicFile.create(allocator, file_path); // TODO make this evented
const baf = try io.BufferedAtomicFile.create(fmt.loop.allocator, file_path);
defer baf.destroy(); defer baf.destroy();
const anything_changed = try std.zig.render(allocator, baf.stream(), &tree); const anything_changed = try std.zig.render(fmt.loop.allocator, baf.stream(), &tree);
if (anything_changed) { if (anything_changed) {
try stderr.print("{}\n", file_path); try stderr.print("{}\n", file_path);
try baf.finish(); try baf.finish();
} }
} }
if (fmt.any_error) {
os.exit(1);
}
}
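
The read in fmtPath goes through the new std.event.fs API. A minimal sketch of that call pattern (hypothetical helper; assumes main.zig's `event` import and `max_src_size` constant, and that the returned buffer is heap-allocated and owned by the caller, as the defer in fmtPath shows):

    async fn printFileSize(loop: *event.Loop, path: []const u8) !void {
        // `try async` allocates the coroutine frame; `await` yields the read result
        const contents = try (await try async event.fs.readFile(loop, path, max_src_size));
        defer loop.allocator.free(contents);
        std.debug.warn("read {} bytes\n", contents.len);
    }
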
// cmd:targets ///////////////////////////////////////////////////////////////////////////////////// // cmd:targets /////////////////////////////////////////////////////////////////////////////////////
fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void { fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {

View File

@ -36,6 +36,7 @@ pub const Scope = struct {
Id.Defer => @fieldParentPtr(Defer, "base", base).destroy(comp), Id.Defer => @fieldParentPtr(Defer, "base", base).destroy(comp),
Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(comp), Id.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(comp),
Id.Var => @fieldParentPtr(Var, "base", base).destroy(comp), Id.Var => @fieldParentPtr(Var, "base", base).destroy(comp),
Id.AstTree => @fieldParentPtr(AstTree, "base", base).destroy(comp),
} }
} }
} }
@ -62,6 +63,8 @@ pub const Scope = struct {
Id.CompTime, Id.CompTime,
Id.Var, Id.Var,
=> scope = scope.parent.?, => scope = scope.parent.?,
Id.AstTree => unreachable,
} }
} }
} }
@ -82,6 +85,8 @@ pub const Scope = struct {
Id.Root, Id.Root,
Id.Var, Id.Var,
=> scope = scope.parent orelse return null, => scope = scope.parent orelse return null,
Id.AstTree => unreachable,
} }
} }
} }
@ -97,6 +102,7 @@ pub const Scope = struct {
pub const Id = enum { pub const Id = enum {
Root, Root,
AstTree,
Decls, Decls,
Block, Block,
FnDef, FnDef,
@ -108,13 +114,12 @@ pub const Scope = struct {
pub const Root = struct { pub const Root = struct {
base: Scope, base: Scope,
tree: *ast.Tree,
realpath: []const u8, realpath: []const u8,
decls: *Decls,
/// Creates a Root scope with 1 reference /// Creates a Root scope with 1 reference
/// Takes ownership of realpath /// Takes ownership of realpath
/// Takes ownership of tree, will deinit and destroy when done. pub fn create(comp: *Compilation, realpath: []u8) !*Root {
pub fn create(comp: *Compilation, tree: *ast.Tree, realpath: []u8) !*Root {
const self = try comp.gpa().createOne(Root); const self = try comp.gpa().createOne(Root);
self.* = Root{ self.* = Root{
.base = Scope{ .base = Scope{
@ -122,41 +127,65 @@ pub const Scope = struct {
.parent = null, .parent = null,
.ref_count = std.atomic.Int(usize).init(1), .ref_count = std.atomic.Int(usize).init(1),
}, },
.tree = tree,
.realpath = realpath, .realpath = realpath,
.decls = undefined,
}; };
errdefer comp.gpa().destroy(self);
self.decls = try Decls.create(comp, &self.base);
return self; return self;
} }
pub fn destroy(self: *Root, comp: *Compilation) void { pub fn destroy(self: *Root, comp: *Compilation) void {
// TODO comp.fs_watch.removeFile(self.realpath);
self.decls.base.deref(comp);
comp.gpa().free(self.realpath);
comp.gpa().destroy(self);
}
};
pub const AstTree = struct {
base: Scope,
tree: *ast.Tree,
/// Creates a scope with 1 reference
/// Takes ownership of tree, will deinit and destroy when done.
pub fn create(comp: *Compilation, tree: *ast.Tree, root_scope: *Root) !*AstTree {
const self = try comp.gpa().createOne(AstTree);
self.* = AstTree{
.base = undefined,
.tree = tree,
};
self.base.init(Id.AstTree, &root_scope.base);
return self;
}
pub fn destroy(self: *AstTree, comp: *Compilation) void {
comp.gpa().free(self.tree.source); comp.gpa().free(self.tree.source);
self.tree.deinit(); self.tree.deinit();
comp.gpa().destroy(self.tree); comp.gpa().destroy(self.tree);
comp.gpa().free(self.realpath);
comp.gpa().destroy(self); comp.gpa().destroy(self);
} }
pub fn root(self: *AstTree) *Root {
return self.base.findRoot();
}
}; };
pub const Decls = struct { pub const Decls = struct {
base: Scope, base: Scope,
/// The lock must be respected for writing. However once name_future resolves, /// This table remains Write Locked when the names are incomplete or possibly outdated.
/// readers can freely access it. /// So if a reader manages to grab a lock, it can be sure that the set of names is complete
table: event.Locked(Decl.Table), /// and correct.
table: event.RwLocked(Decl.Table),
/// Once this future is resolved, the table is complete and available for unlocked
/// read-only access. It does not mean all the decls are resolved; it means only that
/// the table has all the names. Each decl in the table has its own resolution state.
name_future: event.Future(void),
/// Creates a Decls scope with 1 reference /// Creates a Decls scope with 1 reference
pub fn create(comp: *Compilation, parent: *Scope) !*Decls { pub fn create(comp: *Compilation, parent: *Scope) !*Decls {
const self = try comp.gpa().createOne(Decls); const self = try comp.gpa().createOne(Decls);
self.* = Decls{ self.* = Decls{
.base = undefined, .base = undefined,
.table = event.Locked(Decl.Table).init(comp.loop, Decl.Table.init(comp.gpa())), .table = event.RwLocked(Decl.Table).init(comp.loop, Decl.Table.init(comp.gpa())),
.name_future = event.Future(void).init(comp.loop),
}; };
self.base.init(Id.Decls, parent); self.base.init(Id.Decls, parent);
return self; return self;
@ -166,11 +195,6 @@ pub const Scope = struct {
self.table.deinit(); self.table.deinit();
comp.gpa().destroy(self); comp.gpa().destroy(self);
} }
pub async fn getTableReadOnly(self: *Decls) *Decl.Table {
_ = await (async self.name_future.get() catch unreachable);
return &self.table.private_data;
}
}; };
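
The reader side of the RwLocked table is the pattern the ir.zig lookup earlier in this diff uses; a minimal sketch (hypothetical helper, assuming a `decls: *Scope.Decls` and the surrounding scope.zig imports):

    async fn lookupDecl(decls: *Scope.Decls, name: []const u8) ?*Decl {
        const locked_table = await (async decls.table.acquireRead() catch unreachable);
        defer locked_table.release();
        // the table stays write-locked while the name set is incomplete, so
        // holding a read lock guarantees the names are complete and correct
        if (locked_table.value.get(name)) |entry| return entry.value;
        return null;
    }
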
pub const Block = struct { pub const Block = struct {

View File

@ -6,7 +6,7 @@ const Compilation = @import("compilation.zig").Compilation;
const introspect = @import("introspect.zig"); const introspect = @import("introspect.zig");
const assertOrPanic = std.debug.assertOrPanic; const assertOrPanic = std.debug.assertOrPanic;
const errmsg = @import("errmsg.zig"); const errmsg = @import("errmsg.zig");
const EventLoopLocal = @import("compilation.zig").EventLoopLocal; const ZigCompiler = @import("compilation.zig").ZigCompiler;
var ctx: TestContext = undefined; var ctx: TestContext = undefined;
@ -25,7 +25,7 @@ const allocator = std.heap.c_allocator;
pub const TestContext = struct { pub const TestContext = struct {
loop: std.event.Loop, loop: std.event.Loop,
event_loop_local: EventLoopLocal, zig_compiler: ZigCompiler,
zig_lib_dir: []u8, zig_lib_dir: []u8,
file_index: std.atomic.Int(usize), file_index: std.atomic.Int(usize),
group: std.event.Group(error!void), group: std.event.Group(error!void),
@ -37,20 +37,20 @@ pub const TestContext = struct {
self.* = TestContext{ self.* = TestContext{
.any_err = {}, .any_err = {},
.loop = undefined, .loop = undefined,
.event_loop_local = undefined, .zig_compiler = undefined,
.zig_lib_dir = undefined, .zig_lib_dir = undefined,
.group = undefined, .group = undefined,
.file_index = std.atomic.Int(usize).init(0), .file_index = std.atomic.Int(usize).init(0),
}; };
try self.loop.initMultiThreaded(allocator); try self.loop.initSingleThreaded(allocator);
errdefer self.loop.deinit(); errdefer self.loop.deinit();
self.event_loop_local = try EventLoopLocal.init(&self.loop); self.zig_compiler = try ZigCompiler.init(&self.loop);
errdefer self.event_loop_local.deinit(); errdefer self.zig_compiler.deinit();
self.group = std.event.Group(error!void).init(&self.loop); self.group = std.event.Group(error!void).init(&self.loop);
errdefer self.group.cancelAll(); errdefer self.group.deinit();
self.zig_lib_dir = try introspect.resolveZigLibDir(allocator); self.zig_lib_dir = try introspect.resolveZigLibDir(allocator);
errdefer allocator.free(self.zig_lib_dir); errdefer allocator.free(self.zig_lib_dir);
@ -62,7 +62,7 @@ pub const TestContext = struct {
fn deinit(self: *TestContext) void { fn deinit(self: *TestContext) void {
std.os.deleteTree(allocator, tmp_dir_name) catch {}; std.os.deleteTree(allocator, tmp_dir_name) catch {};
allocator.free(self.zig_lib_dir); allocator.free(self.zig_lib_dir);
self.event_loop_local.deinit(); self.zig_compiler.deinit();
self.loop.deinit(); self.loop.deinit();
} }
@ -97,7 +97,7 @@ pub const TestContext = struct {
try std.io.writeFile(allocator, file1_path, source); try std.io.writeFile(allocator, file1_path, source);
var comp = try Compilation.create( var comp = try Compilation.create(
&self.event_loop_local, &self.zig_compiler,
"test", "test",
file1_path, file1_path,
Target.Native, Target.Native,
@ -108,7 +108,7 @@ pub const TestContext = struct {
); );
errdefer comp.destroy(); errdefer comp.destroy();
try comp.build(); comp.start();
try self.group.call(getModuleEvent, comp, source, path, line, column, msg); try self.group.call(getModuleEvent, comp, source, path, line, column, msg);
} }
@ -131,7 +131,7 @@ pub const TestContext = struct {
try std.io.writeFile(allocator, file1_path, source); try std.io.writeFile(allocator, file1_path, source);
var comp = try Compilation.create( var comp = try Compilation.create(
&self.event_loop_local, &self.zig_compiler,
"test", "test",
file1_path, file1_path,
Target.Native, Target.Native,
@ -144,7 +144,7 @@ pub const TestContext = struct {
_ = try comp.addLinkLib("c", true); _ = try comp.addLinkLib("c", true);
comp.link_out_file = output_file; comp.link_out_file = output_file;
try comp.build(); comp.start();
try self.group.call(getModuleEventSuccess, comp, output_file, expected_output); try self.group.call(getModuleEventSuccess, comp, output_file, expected_output);
} }
@ -212,9 +212,10 @@ pub const TestContext = struct {
Compilation.Event.Fail => |msgs| { Compilation.Event.Fail => |msgs| {
assertOrPanic(msgs.len != 0); assertOrPanic(msgs.len != 0);
for (msgs) |msg| { for (msgs) |msg| {
if (mem.endsWith(u8, msg.getRealPath(), path) and mem.eql(u8, msg.text, text)) { if (mem.endsWith(u8, msg.realpath, path) and mem.eql(u8, msg.text, text)) {
const first_token = msg.getTree().tokens.at(msg.span.first); const span = msg.getSpan();
const last_token = msg.getTree().tokens.at(msg.span.first); const first_token = msg.getTree().tokens.at(span.first);
const last_token = msg.getTree().tokens.at(span.last);
const start_loc = msg.getTree().tokenLocationPtr(0, first_token); const start_loc = msg.getTree().tokenLocationPtr(0, first_token);
if (start_loc.line + 1 == line and start_loc.column + 1 == column) { if (start_loc.line + 1 == line and start_loc.column + 1 == column) {
return; return;

View File

@ -184,8 +184,8 @@ pub const Type = struct {
if (await (async base.abi_alignment.start() catch unreachable)) |ptr| return ptr.*; if (await (async base.abi_alignment.start() catch unreachable)) |ptr| return ptr.*;
{ {
const held = try comp.event_loop_local.getAnyLlvmContext(); const held = try comp.zig_compiler.getAnyLlvmContext();
defer held.release(comp.event_loop_local); defer held.release(comp.zig_compiler);
const llvm_context = held.node.data; const llvm_context = held.node.data;

View File

@ -1,40 +1,38 @@
const std = @import("../index.zig");
const builtin = @import("builtin"); const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder; const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp; const AtomicRmwOp = builtin.AtomicRmwOp;
const assert = std.debug.assert;
/// Many producer, many consumer, non-allocating, thread-safe. /// Many producer, many consumer, non-allocating, thread-safe.
/// Uses a spinlock to protect get() and put(). /// Uses a mutex to protect access.
pub fn Queue(comptime T: type) type { pub fn Queue(comptime T: type) type {
return struct { return struct {
head: ?*Node, head: ?*Node,
tail: ?*Node, tail: ?*Node,
lock: u8, mutex: std.Mutex,
pub const Self = this; pub const Self = this;
pub const Node = std.LinkedList(T).Node;
pub const Node = struct {
next: ?*Node,
data: T,
};
pub fn init() Self { pub fn init() Self {
return Self{ return Self{
.head = null, .head = null,
.tail = null, .tail = null,
.lock = 0, .mutex = std.Mutex.init(),
}; };
} }
pub fn put(self: *Self, node: *Node) void { pub fn put(self: *Self, node: *Node) void {
node.next = null; node.next = null;
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} const held = self.mutex.acquire();
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); defer held.release();
const opt_tail = self.tail; node.prev = self.tail;
self.tail = node; self.tail = node;
if (opt_tail) |tail| { if (node.prev) |prev_tail| {
tail.next = node; prev_tail.next = node;
} else { } else {
assert(self.head == null); assert(self.head == null);
self.head = node; self.head = node;
@ -42,18 +40,27 @@ pub fn Queue(comptime T: type) type {
} }
pub fn get(self: *Self) ?*Node { pub fn get(self: *Self) ?*Node {
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} const held = self.mutex.acquire();
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); defer held.release();
const head = self.head orelse return null; const head = self.head orelse return null;
self.head = head.next; self.head = head.next;
if (head.next == null) self.tail = null; if (head.next) |new_head| {
new_head.prev = null;
} else {
self.tail = null;
}
// This way, a get() and a remove() are thread-safe with each other.
head.prev = null;
head.next = null;
return head; return head;
} }
pub fn unget(self: *Self, node: *Node) void { pub fn unget(self: *Self, node: *Node) void {
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} node.prev = null;
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
const held = self.mutex.acquire();
defer held.release();
const opt_head = self.head; const opt_head = self.head;
self.head = node; self.head = node;
@ -65,13 +72,39 @@ pub fn Queue(comptime T: type) type {
} }
} }
/// Thread-safe with get() and remove(). Returns whether node was actually removed.
pub fn remove(self: *Self, node: *Node) bool {
const held = self.mutex.acquire();
defer held.release();
if (node.prev == null and node.next == null and self.head != node) {
return false;
}
if (node.prev) |prev| {
prev.next = node.next;
} else {
self.head = node.next;
}
if (node.next) |next| {
next.prev = node.prev;
} else {
self.tail = node.prev;
}
node.prev = null;
node.next = null;
return true;
}
pub fn isEmpty(self: *Self) bool { pub fn isEmpty(self: *Self) bool {
return @atomicLoad(?*Node, &self.head, builtin.AtomicOrder.SeqCst) != null; const held = self.mutex.acquire();
defer held.release();
return self.head == null;
} }
pub fn dump(self: *Self) void { pub fn dump(self: *Self) void {
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} const held = self.mutex.acquire();
defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); defer held.release();
std.debug.warn("head: "); std.debug.warn("head: ");
dumpRecursive(self.head, 0); dumpRecursive(self.head, 0);
@ -93,9 +126,6 @@ pub fn Queue(comptime T: type) type {
}; };
} }
const std = @import("../index.zig");
const assert = std.debug.assert;
const Context = struct { const Context = struct {
allocator: *std.mem.Allocator, allocator: *std.mem.Allocator,
queue: *Queue(i32), queue: *Queue(i32),
@ -169,6 +199,7 @@ fn startPuts(ctx: *Context) u8 {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32)); const x = @bitCast(i32, r.random.scalar(u32));
const node = ctx.allocator.create(Queue(i32).Node{ const node = ctx.allocator.create(Queue(i32).Node{
.prev = undefined,
.next = undefined, .next = undefined,
.data = x, .data = x,
}) catch unreachable; }) catch unreachable;
@ -198,12 +229,14 @@ test "std.atomic.Queue single-threaded" {
var node_0 = Queue(i32).Node{ var node_0 = Queue(i32).Node{
.data = 0, .data = 0,
.next = undefined, .next = undefined,
.prev = undefined,
}; };
queue.put(&node_0); queue.put(&node_0);
var node_1 = Queue(i32).Node{ var node_1 = Queue(i32).Node{
.data = 1, .data = 1,
.next = undefined, .next = undefined,
.prev = undefined,
}; };
queue.put(&node_1); queue.put(&node_1);
@ -212,12 +245,14 @@ test "std.atomic.Queue single-threaded" {
var node_2 = Queue(i32).Node{ var node_2 = Queue(i32).Node{
.data = 2, .data = 2,
.next = undefined, .next = undefined,
.prev = undefined,
}; };
queue.put(&node_2); queue.put(&node_2);
var node_3 = Queue(i32).Node{ var node_3 = Queue(i32).Node{
.data = 3, .data = 3,
.next = undefined, .next = undefined,
.prev = undefined,
}; };
queue.put(&node_3); queue.put(&node_3);
@ -228,6 +263,7 @@ test "std.atomic.Queue single-threaded" {
var node_4 = Queue(i32).Node{ var node_4 = Queue(i32).Node{
.data = 4, .data = 4,
.next = undefined, .next = undefined,
.prev = undefined,
}; };
queue.put(&node_4); queue.put(&node_4);
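
With prev pointers on every node, remove() can be mixed with get() safely: get() clears both links, so a node that was already handed out is reported as not removed. A self-contained sketch building on the tests above (same file scope, so `Queue` and `assert` are available):

    test "std.atomic.Queue remove" {
        var queue = Queue(i32).init();

        var node = Queue(i32).Node{
            .data = 5,
            .next = undefined,
            .prev = undefined,
        };
        queue.put(&node);

        // get() detaches the node and resets prev/next ...
        assert(queue.get().? == &node);
        // ... so remove() sees it is no longer in the queue
        assert(queue.remove(&node) == false);

        queue.put(&node);
        // removing a node that is still queued succeeds and empties the queue
        assert(queue.remove(&node) == true);
        assert(queue.get() == null);
    }
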

View File

@ -424,14 +424,19 @@ pub const Builder = struct {
return mode; return mode;
} }
pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) bool { pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) !bool {
if (self.user_input_options.put(name, UserInputOption{ const gop = try self.user_input_options.getOrPut(name);
if (!gop.found_existing) {
gop.kv.value = UserInputOption{
.name = name, .name = name,
.value = UserValue{ .Scalar = value }, .value = UserValue{ .Scalar = value },
.used = false, .used = false,
}) catch unreachable) |*prev_value| { };
return false;
}
// option already exists // option already exists
switch (prev_value.value) { switch (gop.kv.value.value) {
UserValue.Scalar => |s| { UserValue.Scalar => |s| {
// turn it into a list // turn it into a list
var list = ArrayList([]const u8).init(self.allocator); var list = ArrayList([]const u8).init(self.allocator);
@ -457,17 +462,22 @@ pub const Builder = struct {
return true; return true;
}, },
} }
}
return false; return false;
} }
pub fn addUserInputFlag(self: *Builder, name: []const u8) bool { pub fn addUserInputFlag(self: *Builder, name: []const u8) !bool {
if (self.user_input_options.put(name, UserInputOption{ const gop = try self.user_input_options.getOrPut(name);
if (!gop.found_existing) {
gop.kv.value = UserInputOption{
.name = name, .name = name,
.value = UserValue{ .Flag = {} }, .value = UserValue{ .Flag = {} },
.used = false, .used = false,
}) catch unreachable) |*prev_value| { };
switch (prev_value.value) { return false;
}
// option already exists
switch (gop.kv.value.value) {
UserValue.Scalar => |s| { UserValue.Scalar => |s| {
warn("Flag '-D{}' conflicts with option '-D{}={}'.\n", name, name, s); warn("Flag '-D{}' conflicts with option '-D{}={}'.\n", name, name, s);
return true; return true;
@ -478,7 +488,6 @@ pub const Builder = struct {
}, },
UserValue.Flag => {}, UserValue.Flag => {},
} }
}
return false; return false;
} }
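
The rewrite replaces put()'s clobber-and-inspect dance with getOrPut, which hands back a pointer to the (possibly fresh) slot plus a found_existing flag. A self-contained sketch of the API, assuming the era's string-keyed HashMap helpers and test allocator:

    const std = @import("std");
    const mem = std.mem;

    test "getOrPut inserts only when missing" {
        var map = std.HashMap([]const u8, usize, mem.hash_slice_u8, mem.eql_slice_u8).init(std.debug.global_allocator);
        defer map.deinit();

        const gop = try map.getOrPut("native");
        if (!gop.found_existing) {
            gop.kv.value = 0; // first sighting: initialize the slot in place
        }
        gop.kv.value += 1;
        std.debug.assert(map.get("native").?.value == 1);
    }
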
@ -603,10 +612,10 @@ pub const Builder = struct {
} }
fn copyFile(self: *Builder, source_path: []const u8, dest_path: []const u8) !void { fn copyFile(self: *Builder, source_path: []const u8, dest_path: []const u8) !void {
return self.copyFileMode(source_path, dest_path, os.default_file_mode); return self.copyFileMode(source_path, dest_path, os.File.default_mode);
} }
fn copyFileMode(self: *Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void { fn copyFileMode(self: *Builder, source_path: []const u8, dest_path: []const u8, mode: os.File.Mode) !void {
if (self.verbose) { if (self.verbose) {
warn("cp {} {}\n", source_path, dest_path); warn("cp {} {}\n", source_path, dest_path);
} }

View File

@ -30,10 +30,36 @@ pub extern "c" fn sysctl(name: [*]c_int, namelen: c_uint, oldp: ?*c_void, oldlen
pub extern "c" fn sysctlbyname(name: [*]const u8, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) c_int; pub extern "c" fn sysctlbyname(name: [*]const u8, oldp: ?*c_void, oldlenp: ?*usize, newp: ?*c_void, newlen: usize) c_int;
pub extern "c" fn sysctlnametomib(name: [*]const u8, mibp: ?*c_int, sizep: ?*usize) c_int; pub extern "c" fn sysctlnametomib(name: [*]const u8, mibp: ?*c_int, sizep: ?*usize) c_int;
pub extern "c" fn bind(socket: c_int, address: ?*const sockaddr, address_len: socklen_t) c_int;
pub extern "c" fn socket(domain: c_int, type: c_int, protocol: c_int) c_int;
pub use @import("../os/darwin/errno.zig"); pub use @import("../os/darwin/errno.zig");
pub const _errno = __error; pub const _errno = __error;
pub const in_port_t = u16;
pub const sa_family_t = u8;
pub const socklen_t = u32;
pub const sockaddr = extern union {
in: sockaddr_in,
in6: sockaddr_in6,
};
pub const sockaddr_in = extern struct {
len: u8,
family: sa_family_t,
port: in_port_t,
addr: u32,
zero: [8]u8,
};
pub const sockaddr_in6 = extern struct {
len: u8,
family: sa_family_t,
port: in_port_t,
flowinfo: u32,
addr: [16]u8,
scope_id: u32,
};
pub const timeval = extern struct { pub const timeval = extern struct {
tv_sec: isize, tv_sec: isize,
tv_usec: isize, tv_usec: isize,
@ -98,14 +124,6 @@ pub const dirent = extern struct {
d_name: u8, // field address is address of first byte of name d_name: u8, // field address is address of first byte of name
}; };
pub const sockaddr = extern struct {
sa_len: u8,
sa_family: sa_family_t,
sa_data: [14]u8,
};
pub const sa_family_t = u8;
pub const pthread_attr_t = extern struct { pub const pthread_attr_t = extern struct {
__sig: c_long, __sig: c_long,
__opaque: [56]u8, __opaque: [56]u8,

View File

@ -21,8 +21,10 @@ pub extern "c" fn lseek(fd: c_int, offset: isize, whence: c_int) isize;
pub extern "c" fn open(path: [*]const u8, oflag: c_int, ...) c_int; pub extern "c" fn open(path: [*]const u8, oflag: c_int, ...) c_int;
pub extern "c" fn raise(sig: c_int) c_int; pub extern "c" fn raise(sig: c_int) c_int;
pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize; pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize;
pub extern "c" fn pread(fd: c_int, buf: *c_void, nbyte: usize, offset: u64) isize;
pub extern "c" fn stat(noalias path: [*]const u8, noalias buf: *Stat) c_int; pub extern "c" fn stat(noalias path: [*]const u8, noalias buf: *Stat) c_int;
pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize; pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize;
pub extern "c" fn pwrite(fd: c_int, buf: *const c_void, nbyte: usize, offset: u64) isize;
pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void; pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void;
pub extern "c" fn munmap(addr: *c_void, len: usize) c_int; pub extern "c" fn munmap(addr: *c_void, len: usize) c_int;
pub extern "c" fn unlink(path: [*]const u8) c_int; pub extern "c" fn unlink(path: [*]const u8) c_int;

View File

@ -23,7 +23,10 @@ pub const runtime_safety = switch (builtin.mode) {
var stderr_file: os.File = undefined; var stderr_file: os.File = undefined;
var stderr_file_out_stream: io.FileOutStream = undefined; var stderr_file_out_stream: io.FileOutStream = undefined;
var stderr_stream: ?*io.OutStream(io.FileOutStream.Error) = null; var stderr_stream: ?*io.OutStream(io.FileOutStream.Error) = null;
var stderr_mutex = std.Mutex.init();
pub fn warn(comptime fmt: []const u8, args: ...) void { pub fn warn(comptime fmt: []const u8, args: ...) void {
const held = stderr_mutex.acquire();
defer held.release();
const stderr = getStderrStream() catch return; const stderr = getStderrStream() catch return;
stderr.print(fmt, args) catch return; stderr.print(fmt, args) catch return;
} }
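
std.Mutex (added in this commit) hands back a held token whose release pairs naturally with defer; the same pattern as stderr_mutex above, sketched on its own with a hypothetical counter:

    const std = @import("std");

    var counter_mutex = std.Mutex.init();
    var counter: usize = 0;

    fn bump() void {
        const held = counter_mutex.acquire();
        defer held.release(); // released on every return path
        counter += 1;
    }
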
@ -672,14 +675,10 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, comptime T: type
const ParseFormValueError = error{ const ParseFormValueError = error{
EndOfStream, EndOfStream,
Io,
BadFd,
Unexpected,
InvalidDebugInfo, InvalidDebugInfo,
EndOfFile, EndOfFile,
IsDir,
OutOfMemory, OutOfMemory,
}; } || std.os.File.ReadError;
fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue { fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue {
return switch (form_id) { return switch (form_id) {

View File

@ -1,17 +1,23 @@
pub const Locked = @import("event/locked.zig").Locked;
pub const Loop = @import("event/loop.zig").Loop;
pub const Lock = @import("event/lock.zig").Lock;
pub const tcp = @import("event/tcp.zig");
pub const Channel = @import("event/channel.zig").Channel; pub const Channel = @import("event/channel.zig").Channel;
pub const Group = @import("event/group.zig").Group;
pub const Future = @import("event/future.zig").Future; pub const Future = @import("event/future.zig").Future;
pub const Group = @import("event/group.zig").Group;
pub const Lock = @import("event/lock.zig").Lock;
pub const Locked = @import("event/locked.zig").Locked;
pub const RwLock = @import("event/rwlock.zig").RwLock;
pub const RwLocked = @import("event/rwlocked.zig").RwLocked;
pub const Loop = @import("event/loop.zig").Loop;
pub const fs = @import("event/fs.zig");
pub const tcp = @import("event/tcp.zig");
test "import event tests" { test "import event tests" {
_ = @import("event/locked.zig");
_ = @import("event/loop.zig");
_ = @import("event/lock.zig");
_ = @import("event/tcp.zig");
_ = @import("event/channel.zig"); _ = @import("event/channel.zig");
_ = @import("event/group.zig"); _ = @import("event/fs.zig");
_ = @import("event/future.zig"); _ = @import("event/future.zig");
_ = @import("event/group.zig");
_ = @import("event/lock.zig");
_ = @import("event/locked.zig");
_ = @import("event/rwlock.zig");
_ = @import("event/rwlocked.zig");
_ = @import("event/loop.zig");
_ = @import("event/tcp.zig");
} }

View File

@ -5,7 +5,7 @@ const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder; const AtomicOrder = builtin.AtomicOrder;
const Loop = std.event.Loop; const Loop = std.event.Loop;
/// many producer, many consumer, thread-safe, lock-free, runtime configurable buffer size /// many producer, many consumer, thread-safe, runtime configurable buffer size
/// when buffer is empty, consumers suspend and are resumed by producers /// when buffer is empty, consumers suspend and are resumed by producers
/// when buffer is full, producers suspend and are resumed by consumers /// when buffer is full, producers suspend and are resumed by consumers
pub fn Channel(comptime T: type) type { pub fn Channel(comptime T: type) type {
@ -13,6 +13,7 @@ pub fn Channel(comptime T: type) type {
loop: *Loop, loop: *Loop,
getters: std.atomic.Queue(GetNode), getters: std.atomic.Queue(GetNode),
or_null_queue: std.atomic.Queue(*std.atomic.Queue(GetNode).Node),
putters: std.atomic.Queue(PutNode), putters: std.atomic.Queue(PutNode),
get_count: usize, get_count: usize,
put_count: usize, put_count: usize,
@ -26,8 +27,22 @@ pub fn Channel(comptime T: type) type {
const SelfChannel = this; const SelfChannel = this;
const GetNode = struct { const GetNode = struct {
ptr: *T,
tick_node: *Loop.NextTickNode, tick_node: *Loop.NextTickNode,
data: Data,
const Data = union(enum) {
Normal: Normal,
OrNull: OrNull,
};
const Normal = struct {
ptr: *T,
};
const OrNull = struct {
ptr: *?T,
or_null: *std.atomic.Queue(*std.atomic.Queue(GetNode).Node).Node,
};
}; };
const PutNode = struct { const PutNode = struct {
data: T, data: T,
@ -48,6 +63,7 @@ pub fn Channel(comptime T: type) type {
.need_dispatch = 0, .need_dispatch = 0,
.getters = std.atomic.Queue(GetNode).init(), .getters = std.atomic.Queue(GetNode).init(),
.putters = std.atomic.Queue(PutNode).init(), .putters = std.atomic.Queue(PutNode).init(),
.or_null_queue = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).init(),
.get_count = 0, .get_count = 0,
.put_count = 0, .put_count = 0,
}); });
@ -71,18 +87,29 @@ pub fn Channel(comptime T: type) type {
/// puts a data item in the channel. The promise completes when the value has been added to the /// puts a data item in the channel. The promise completes when the value has been added to the
/// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter. /// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter.
pub async fn put(self: *SelfChannel, data: T) void { pub async fn put(self: *SelfChannel, data: T) void {
// TODO fix this workaround
suspend { suspend {
var my_tick_node = Loop.NextTickNode{ resume @handle();
.next = undefined, }
.data = @handle(),
}; var my_tick_node = Loop.NextTickNode.init(@handle());
var queue_node = std.atomic.Queue(PutNode).Node{ var queue_node = std.atomic.Queue(PutNode).Node.init(PutNode{
.data = PutNode{
.tick_node = &my_tick_node, .tick_node = &my_tick_node,
.data = data, .data = data,
}, });
.next = undefined,
}; // TODO test canceling a put()
errdefer {
_ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
const need_dispatch = !self.putters.remove(&queue_node);
self.loop.cancelOnNextTick(&my_tick_node);
if (need_dispatch) {
// oops we made the put_count incorrect for a period of time. fix by dispatching.
_ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
self.dispatch();
}
}
suspend {
self.putters.put(&queue_node); self.putters.put(&queue_node);
_ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
@ -93,21 +120,35 @@ pub fn Channel(comptime T: type) type {
/// await this function to get an item from the channel. If the buffer is empty, the promise will /// await this function to get an item from the channel. If the buffer is empty, the promise will
/// complete when the next item is put in the channel. /// complete when the next item is put in the channel.
pub async fn get(self: *SelfChannel) T { pub async fn get(self: *SelfChannel) T {
// TODO fix this workaround
suspend {
resume @handle();
}
// TODO integrate this function with named return values // TODO integrate this function with named return values
// so we can get rid of this extra result copy // so we can get rid of this extra result copy
var result: T = undefined; var result: T = undefined;
suspend { var my_tick_node = Loop.NextTickNode.init(@handle());
var my_tick_node = Loop.NextTickNode{ var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode{
.next = undefined,
.data = @handle(),
};
var queue_node = std.atomic.Queue(GetNode).Node{
.data = GetNode{
.ptr = &result,
.tick_node = &my_tick_node, .tick_node = &my_tick_node,
.data = GetNode.Data{
.Normal = GetNode.Normal{ .ptr = &result },
}, },
.next = undefined, });
};
// TODO test canceling a get()
errdefer {
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
const need_dispatch = !self.getters.remove(&queue_node);
self.loop.cancelOnNextTick(&my_tick_node);
if (need_dispatch) {
// oops we made the get_count incorrect for a period of time. fix by dispatching.
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
self.dispatch();
}
}
suspend {
self.getters.put(&queue_node); self.getters.put(&queue_node);
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
@ -116,6 +157,64 @@ pub fn Channel(comptime T: type) type {
return result; return result;
} }
//pub async fn select(comptime EnumUnion: type, channels: ...) EnumUnion {
// assert(@memberCount(EnumUnion) == channels.len); // enum union and channels mismatch
// assert(channels.len != 0); // enum unions cannot have 0 fields
// if (channels.len == 1) {
// const result = await (async channels[0].get() catch unreachable);
// return @unionInit(EnumUnion, @memberName(EnumUnion, 0), result);
// }
//}
/// Await this function to get an item from the channel. If the buffer is empty and there are no
/// puts waiting, this returns null.
/// Await is necessary for locking purposes. The function will be resumed after checking the channel
/// for data and will not wait for data to be available.
pub async fn getOrNull(self: *SelfChannel) ?T {
// TODO fix this workaround
suspend {
resume @handle();
}
// TODO integrate this function with named return values
// so we can get rid of this extra result copy
var result: ?T = null;
var my_tick_node = Loop.NextTickNode.init(@handle());
var or_null_node = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).Node.init(undefined);
var queue_node = std.atomic.Queue(GetNode).Node.init(GetNode{
.tick_node = &my_tick_node,
.data = GetNode.Data{
.OrNull = GetNode.OrNull{
.ptr = &result,
.or_null = &or_null_node,
},
},
});
or_null_node.data = &queue_node;
// TODO test canceling getOrNull
errdefer {
_ = self.or_null_queue.remove(&or_null_node);
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
const need_dispatch = !self.getters.remove(&queue_node);
self.loop.cancelOnNextTick(&my_tick_node);
if (need_dispatch) {
// oops we made the get_count incorrect for a period of time. fix by dispatching.
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
self.dispatch();
}
}
suspend {
self.getters.put(&queue_node);
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
self.or_null_queue.put(&or_null_node);
self.dispatch();
}
return result;
}
fn dispatch(self: *SelfChannel) void { fn dispatch(self: *SelfChannel) void {
// set the "need dispatch" flag // set the "need dispatch" flag
_ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst); _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
@ -139,7 +238,15 @@ pub fn Channel(comptime T: type) type {
if (get_count == 0) break :one_dispatch; if (get_count == 0) break :one_dispatch;
const get_node = &self.getters.get().?.data; const get_node = &self.getters.get().?.data;
get_node.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len]; switch (get_node.data) {
GetNode.Data.Normal => |info| {
info.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
},
GetNode.Data.OrNull => |info| {
_ = self.or_null_queue.remove(info.or_null);
info.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
},
}
self.loop.onNextTick(get_node.tick_node); self.loop.onNextTick(get_node.tick_node);
self.buffer_len -= 1; self.buffer_len -= 1;
@ -151,7 +258,15 @@ pub fn Channel(comptime T: type) type {
const get_node = &self.getters.get().?.data; const get_node = &self.getters.get().?.data;
const put_node = &self.putters.get().?.data; const put_node = &self.putters.get().?.data;
get_node.ptr.* = put_node.data; switch (get_node.data) {
GetNode.Data.Normal => |info| {
info.ptr.* = put_node.data;
},
GetNode.Data.OrNull => |info| {
_ = self.or_null_queue.remove(info.or_null);
info.ptr.* = put_node.data;
},
}
self.loop.onNextTick(get_node.tick_node);
self.loop.onNextTick(put_node.tick_node);
@@ -176,6 +291,16 @@ pub fn Channel(comptime T: type) type {
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
_ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
// All the "get or null" functions should resume now.
var remove_count: usize = 0;
while (self.or_null_queue.get()) |or_null_node| {
remove_count += @boolToInt(self.getters.remove(or_null_node.data));
self.loop.onNextTick(or_null_node.data.data.tick_node);
}
if (remove_count != 0) {
_ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, remove_count, AtomicOrder.SeqCst);
}
// clear need-dispatch flag
const need_dispatch = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
if (need_dispatch != 0) continue;
@@ -226,6 +351,15 @@ async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void {
const value2_promise = try async channel.get();
const value2 = await value2_promise;
assert(value2 == 4567);
const value3_promise = try async channel.getOrNull();
const value3 = await value3_promise;
assert(value3 == null);
const last_put = try async testPut(channel, 4444);
const value4 = await try async channel.getOrNull();
assert(value4.? == 4444);
await last_put;
}
async fn testChannelPutter(channel: *Channel(i32)) void {
@@ -233,3 +367,6 @@ async fn testChannelPutter(channel: *Channel(i32)) void {
await (async channel.put(4567) catch @panic("out of memory"));
}
async fn testPut(channel: *Channel(i32), value: i32) void {
await (async channel.put(value) catch @panic("out of memory"));
}

std/event/fs.zig (new file, 1362 lines; diff suppressed because it is too large)

std/event/group.zig
@@ -29,6 +29,17 @@ pub fn Group(comptime ReturnType: type) type {
};
}
/// Cancel all the outstanding promises. Can be called even if wait was already called.
pub fn deinit(self: *Self) void {
while (self.coro_stack.pop()) |node| {
cancel node.data;
}
while (self.alloc_stack.pop()) |node| {
cancel node.data;
self.lock.loop.allocator.destroy(node);
}
}
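A sketch of the intended call pattern (the worker coroutine is hypothetical, and the Group(void).init(loop) construction with an initialized *Loop is assumed):

var group = Group(void).init(loop);
errdefer group.deinit(); // safe now even if wait() already ran
try group.add(async worker() catch @panic("out of memory"));
await (async group.wait() catch @panic("out of memory"));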
/// Add a promise to the group. Thread-safe.
pub fn add(self: *Self, handle: promise->ReturnType) (error{OutOfMemory}!void) {
const node = try self.lock.loop.allocator.create(Stack.Node{
@@ -88,7 +99,7 @@ pub fn Group(comptime ReturnType: type) type {
await node.data;
} else {
(await node.data) catch |err| {
-self.cancelAll();
self.deinit();
return err;
};
}
@@ -100,25 +111,12 @@ pub fn Group(comptime ReturnType: type) type {
await handle;
} else {
(await handle) catch |err| {
-self.cancelAll();
self.deinit();
return err;
};
}
}
}
-/// Cancel all the outstanding promises. May only be called if wait was never called.
-/// TODO These should be `cancelasync` not `cancel`.
-/// See https://github.com/ziglang/zig/issues/1261
-pub fn cancelAll(self: *Self) void {
-while (self.coro_stack.pop()) |node| {
-cancel node.data;
-}
-while (self.alloc_stack.pop()) |node| {
-cancel node.data;
-self.lock.loop.allocator.destroy(node);
-}
-}
};
}

std/event/lock.zig
@@ -9,6 +9,7 @@ const Loop = std.event.Loop;
/// Thread-safe async/await lock.
/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
/// are resumed when the lock is released, in order.
/// Allows only one actor to hold the lock.
pub const Lock = struct {
loop: *Loop,
shared_bit: u8, // TODO make this a bool
@@ -90,13 +91,14 @@ pub const Lock = struct {
}
pub async fn acquire(self: *Lock) Held {
-suspend {
-// TODO explicitly put this memory in the coroutine frame #1194
-var my_tick_node = Loop.NextTickNode{
-.data = @handle(),
-.next = undefined,
-};
-errdefer _ = self.queue.remove(&my_tick_node);
// TODO explicitly put this memory in the coroutine frame #1194
suspend {
resume @handle();
}
var my_tick_node = Loop.NextTickNode.init(@handle());
// TODO test canceling an acquire
suspend {
self.queue.put(&my_tick_node);
// At this point, we are in the queue, so we might have already been resumed and this coroutine
@@ -146,6 +148,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void {
}
const handle1 = async lockRunner(lock) catch @panic("out of memory");
var tick_node1 = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
.data = handle1,
};
@@ -153,6 +156,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void {
const handle2 = async lockRunner(lock) catch @panic("out of memory");
var tick_node2 = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
.data = handle2,
};
@@ -160,6 +164,7 @@ async fn testLock(loop: *Loop, lock: *Lock) void {
const handle3 = async lockRunner(lock) catch @panic("out of memory");
var tick_node3 = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
.data = handle3,
};

std/event/loop.zig
@@ -2,10 +2,12 @@ const std = @import("../index.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
-const posix = std.os.posix;
-const windows = std.os.windows;
const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
const fs = std.event.fs;
const os = std.os;
const posix = os.posix;
const windows = os.windows;
pub const Loop = struct {
allocator: *mem.Allocator,
@@ -13,7 +15,7 @@ pub const Loop = struct {
os_data: OsData,
final_resume_node: ResumeNode,
pending_event_count: usize,
-extra_threads: []*std.os.Thread,
extra_threads: []*os.Thread,
// pre-allocated eventfds. all permanently active.
// this is how we send promises to be resumed on other threads.
@@ -50,6 +52,22 @@ pub const Loop = struct {
base: ResumeNode,
kevent: posix.Kevent,
};
pub const Basic = switch (builtin.os) {
builtin.Os.macosx => MacOsBasic,
builtin.Os.linux => struct {
base: ResumeNode,
},
builtin.Os.windows => struct {
base: ResumeNode,
},
else => @compileError("unsupported OS"),
};
const MacOsBasic = struct {
base: ResumeNode,
kev: posix.Kevent,
};
};
/// After initialization, call run().
@@ -65,7 +83,7 @@
/// TODO copy elision / named return values so that the threads referencing *Loop
/// have the correct pointer value.
pub fn initMultiThreaded(self: *Loop, allocator: *mem.Allocator) !void {
-const core_count = try std.os.cpuCount(allocator);
const core_count = try os.cpuCount(allocator);
return self.initInternal(allocator, core_count);
}
@@ -92,7 +110,7 @@
);
errdefer self.allocator.free(self.eventfd_resume_nodes);
-self.extra_threads = try self.allocator.alloc(*std.os.Thread, extra_thread_count);
self.extra_threads = try self.allocator.alloc(*os.Thread, extra_thread_count);
errdefer self.allocator.free(self.extra_threads);
try self.initOsData(extra_thread_count);
@@ -104,17 +122,30 @@
self.allocator.free(self.extra_threads);
}
-const InitOsDataError = std.os.LinuxEpollCreateError || mem.Allocator.Error || std.os.LinuxEventFdError ||
-std.os.SpawnThreadError || std.os.LinuxEpollCtlError || std.os.BsdKEventError ||
-std.os.WindowsCreateIoCompletionPortError;
const InitOsDataError = os.LinuxEpollCreateError || mem.Allocator.Error || os.LinuxEventFdError ||
os.SpawnThreadError || os.LinuxEpollCtlError || os.BsdKEventError ||
os.WindowsCreateIoCompletionPortError;
const wakeup_bytes = []u8{0x1} ** 8;
fn initOsData(self: *Loop, extra_thread_count: usize) InitOsDataError!void {
switch (builtin.os) {
builtin.Os.linux => {
self.os_data.fs_queue = std.atomic.Queue(fs.Request).init();
self.os_data.fs_queue_item = 0;
// we need another thread for the file system because Linux does not have an async
// file system I/O API.
self.os_data.fs_end_request = fs.RequestNode{
.prev = undefined,
.next = undefined,
.data = fs.Request{
.msg = fs.Request.Msg.End,
.finish = fs.Request.Finish.NoAction,
},
};
errdefer {
-while (self.available_eventfd_resume_nodes.pop()) |node| std.os.close(node.data.eventfd);
while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
}
for (self.eventfd_resume_nodes) |*eventfd_node| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
@@ -123,7 +154,7 @@
.id = ResumeNode.Id.EventFd,
.handle = undefined,
},
-.eventfd = try std.os.linuxEventFd(1, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK),
.eventfd = try os.linuxEventFd(1, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK),
.epoll_op = posix.EPOLL_CTL_ADD,
},
.next = undefined,
@@ -131,44 +162,62 @@
self.available_eventfd_resume_nodes.push(eventfd_node);
}
-self.os_data.epollfd = try std.os.linuxEpollCreate(posix.EPOLL_CLOEXEC);
-errdefer std.os.close(self.os_data.epollfd);
-self.os_data.final_eventfd = try std.os.linuxEventFd(0, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK);
-errdefer std.os.close(self.os_data.final_eventfd);
self.os_data.epollfd = try os.linuxEpollCreate(posix.EPOLL_CLOEXEC);
errdefer os.close(self.os_data.epollfd);
self.os_data.final_eventfd = try os.linuxEventFd(0, posix.EFD_CLOEXEC | posix.EFD_NONBLOCK);
errdefer os.close(self.os_data.final_eventfd);
self.os_data.final_eventfd_event = posix.epoll_event{
.events = posix.EPOLLIN,
.data = posix.epoll_data{ .ptr = @ptrToInt(&self.final_resume_node) },
};
-try std.os.linuxEpollCtl(
try os.linuxEpollCtl(
self.os_data.epollfd,
posix.EPOLL_CTL_ADD,
self.os_data.final_eventfd,
&self.os_data.final_eventfd_event,
);
self.os_data.fs_thread = try os.spawnThread(self, posixFsRun);
errdefer {
self.posixFsRequest(&self.os_data.fs_end_request);
self.os_data.fs_thread.wait();
}
var extra_thread_index: usize = 0;
errdefer {
// writing 8 bytes to an eventfd cannot fail
-std.os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
while (extra_thread_index != 0) {
extra_thread_index -= 1;
self.extra_threads[extra_thread_index].wait();
}
}
while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
-self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun);
self.extra_threads[extra_thread_index] = try os.spawnThread(self, workerRun);
}
},
builtin.Os.macosx => {
-self.os_data.kqfd = try std.os.bsdKQueue();
-errdefer std.os.close(self.os_data.kqfd);
-self.os_data.kevents = try self.allocator.alloc(posix.Kevent, extra_thread_count);
-errdefer self.allocator.free(self.os_data.kevents);
-const eventlist = ([*]posix.Kevent)(undefined)[0..0];
self.os_data.kqfd = try os.bsdKQueue();
errdefer os.close(self.os_data.kqfd);
self.os_data.fs_kqfd = try os.bsdKQueue();
errdefer os.close(self.os_data.fs_kqfd);
self.os_data.fs_queue = std.atomic.Queue(fs.Request).init();
// we need another thread for the file system because Darwin does not have an async
// file system I/O API.
self.os_data.fs_end_request = fs.RequestNode{
.prev = undefined,
.next = undefined,
.data = fs.Request{
.msg = fs.Request.Msg.End,
.finish = fs.Request.Finish.NoAction,
},
};
const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
for (self.eventfd_resume_nodes) |*eventfd_node, i| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
@@ -191,18 +240,9 @@
};
self.available_eventfd_resume_nodes.push(eventfd_node);
const kevent_array = (*[1]posix.Kevent)(&eventfd_node.data.kevent);
-_ = try std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null);
_ = try os.bsdKEvent(self.os_data.kqfd, kevent_array, empty_kevs, null);
eventfd_node.data.kevent.flags = posix.EV_CLEAR | posix.EV_ENABLE;
eventfd_node.data.kevent.fflags = posix.NOTE_TRIGGER;
-// this one is for waiting for events
-self.os_data.kevents[i] = posix.Kevent{
-.ident = i,
-.filter = posix.EVFILT_USER,
-.flags = 0,
-.fflags = 0,
-.data = 0,
-.udata = @ptrToInt(&eventfd_node.data.base),
-};
}
// Pre-add so that we cannot get error.SystemResources
@@ -215,31 +255,55 @@
.data = 0,
.udata = @ptrToInt(&self.final_resume_node),
};
-const kevent_array = (*[1]posix.Kevent)(&self.os_data.final_kevent);
-_ = try std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null);
const final_kev_arr = (*[1]posix.Kevent)(&self.os_data.final_kevent);
_ = try os.bsdKEvent(self.os_data.kqfd, final_kev_arr, empty_kevs, null);
self.os_data.final_kevent.flags = posix.EV_ENABLE;
self.os_data.final_kevent.fflags = posix.NOTE_TRIGGER;
self.os_data.fs_kevent_wake = posix.Kevent{
.ident = 0,
.filter = posix.EVFILT_USER,
.flags = posix.EV_ADD | posix.EV_ENABLE,
.fflags = posix.NOTE_TRIGGER,
.data = 0,
.udata = undefined,
};
self.os_data.fs_kevent_wait = posix.Kevent{
.ident = 0,
.filter = posix.EVFILT_USER,
.flags = posix.EV_ADD | posix.EV_CLEAR,
.fflags = 0,
.data = 0,
.udata = undefined,
};
self.os_data.fs_thread = try os.spawnThread(self, posixFsRun);
errdefer {
self.posixFsRequest(&self.os_data.fs_end_request);
self.os_data.fs_thread.wait();
}
var extra_thread_index: usize = 0;
errdefer {
-_ = std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null) catch unreachable;
_ = os.bsdKEvent(self.os_data.kqfd, final_kev_arr, empty_kevs, null) catch unreachable;
while (extra_thread_index != 0) {
extra_thread_index -= 1;
self.extra_threads[extra_thread_index].wait();
}
}
while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
-self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun);
self.extra_threads[extra_thread_index] = try os.spawnThread(self, workerRun);
}
},
builtin.Os.windows => {
-self.os_data.io_port = try std.os.windowsCreateIoCompletionPort(
self.os_data.io_port = try os.windowsCreateIoCompletionPort(
windows.INVALID_HANDLE_VALUE,
null,
undefined,
-undefined,
@maxValue(windows.DWORD),
);
-errdefer std.os.close(self.os_data.io_port);
errdefer os.close(self.os_data.io_port);
for (self.eventfd_resume_nodes) |*eventfd_node, i| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
@@ -262,7 +326,7 @@
while (i < extra_thread_index) : (i += 1) {
while (true) {
const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1);
-std.os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue;
os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue;
break;
}
}
@@ -272,7 +336,7 @@
}
}
while (extra_thread_index < extra_thread_count) : (extra_thread_index += 1) {
-self.extra_threads[extra_thread_index] = try std.os.spawnThread(self, workerRun);
self.extra_threads[extra_thread_index] = try os.spawnThread(self, workerRun);
}
},
else => {},
@@ -282,65 +346,115 @@
fn deinitOsData(self: *Loop) void {
switch (builtin.os) {
builtin.Os.linux => {
-std.os.close(self.os_data.final_eventfd);
-while (self.available_eventfd_resume_nodes.pop()) |node| std.os.close(node.data.eventfd);
-std.os.close(self.os_data.epollfd);
os.close(self.os_data.final_eventfd);
while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
os.close(self.os_data.epollfd);
self.allocator.free(self.eventfd_resume_nodes);
},
builtin.Os.macosx => {
-self.allocator.free(self.os_data.kevents);
-std.os.close(self.os_data.kqfd);
os.close(self.os_data.kqfd);
os.close(self.os_data.fs_kqfd);
},
builtin.Os.windows => {
-std.os.close(self.os_data.io_port);
os.close(self.os_data.io_port);
},
else => {},
}
}
/// resume_node must live longer than the promise that it holds a reference to.
/// flags must contain EPOLLET
-pub fn addFd(self: *Loop, fd: i32, resume_node: *ResumeNode) !void {
-_ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
-errdefer {
-self.finishOneEvent();
-}
-try self.modFd(
pub fn linuxAddFd(self: *Loop, fd: i32, resume_node: *ResumeNode, flags: u32) !void {
assert(flags & posix.EPOLLET == posix.EPOLLET);
self.beginOneEvent();
errdefer self.finishOneEvent();
try self.linuxModFd(
fd,
posix.EPOLL_CTL_ADD,
-std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | std.os.linux.EPOLLET,
flags,
resume_node,
);
}
-pub fn modFd(self: *Loop, fd: i32, op: u32, events: u32, resume_node: *ResumeNode) !void {
-var ev = std.os.linux.epoll_event{
-.events = events,
-.data = std.os.linux.epoll_data{ .ptr = @ptrToInt(resume_node) },
pub fn linuxModFd(self: *Loop, fd: i32, op: u32, flags: u32, resume_node: *ResumeNode) !void {
assert(flags & posix.EPOLLET == posix.EPOLLET);
var ev = os.linux.epoll_event{
.events = flags,
.data = os.linux.epoll_data{ .ptr = @ptrToInt(resume_node) },
};
-try std.os.linuxEpollCtl(self.os_data.epollfd, op, fd, &ev);
try os.linuxEpollCtl(self.os_data.epollfd, op, fd, &ev);
}
-pub fn removeFd(self: *Loop, fd: i32) void {
-self.removeFdNoCounter(fd);
-self.finishOneEvent();
-}
-fn removeFdNoCounter(self: *Loop, fd: i32) void {
-std.os.linuxEpollCtl(self.os_data.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
-}
-pub async fn waitFd(self: *Loop, fd: i32) !void {
-defer self.removeFd(fd);
pub fn linuxRemoveFd(self: *Loop, fd: i32) void {
os.linuxEpollCtl(self.os_data.epollfd, os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
self.finishOneEvent();
}
pub async fn linuxWaitFd(self: *Loop, fd: i32, flags: u32) !void {
defer self.linuxRemoveFd(fd);
suspend {
// TODO explicitly put this memory in the coroutine frame #1194
-var resume_node = ResumeNode{
var resume_node = ResumeNode.Basic{
.base = ResumeNode{
.id = ResumeNode.Id.Basic,
.handle = @handle(),
},
};
-try self.addFd(fd, &resume_node);
try self.linuxAddFd(fd, &resume_node.base, flags);
}
}
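A sketch of the caller side (the std.event.tcp connect code later in this commit uses exactly this call; the flags must include EPOLLET because linuxAddFd asserts edge-triggered mode):

// suspend until fd is ready, then deregister it via the defer above
try await try async loop.linuxWaitFd(fd, posix.EPOLLIN | posix.EPOLLOUT | posix.EPOLLET);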
pub async fn bsdWaitKev(self: *Loop, ident: usize, filter: i16, fflags: u32) !posix.Kevent {
// TODO #1194
suspend {
resume @handle();
}
var resume_node = ResumeNode.Basic{
.base = ResumeNode{
.id = ResumeNode.Id.Basic,
.handle = @handle(),
},
.kev = undefined,
};
defer self.bsdRemoveKev(ident, filter);
suspend {
try self.bsdAddKev(&resume_node, ident, filter, fflags);
}
return resume_node.kev;
}
/// resume_node must live longer than the promise that it holds a reference to.
pub fn bsdAddKev(self: *Loop, resume_node: *ResumeNode.Basic, ident: usize, filter: i16, fflags: u32) !void {
self.beginOneEvent();
errdefer self.finishOneEvent();
var kev = posix.Kevent{
.ident = ident,
.filter = filter,
.flags = posix.EV_ADD | posix.EV_ENABLE | posix.EV_CLEAR,
.fflags = fflags,
.data = 0,
.udata = @ptrToInt(&resume_node.base),
};
const kevent_array = (*[1]posix.Kevent)(&kev);
const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
_ = try os.bsdKEvent(self.os_data.kqfd, kevent_array, empty_kevs, null);
}
pub fn bsdRemoveKev(self: *Loop, ident: usize, filter: i16) void {
var kev = posix.Kevent{
.ident = ident,
.filter = filter,
.flags = posix.EV_DELETE,
.fflags = 0,
.data = 0,
.udata = 0,
};
const kevent_array = (*[1]posix.Kevent)(&kev);
const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
_ = os.bsdKEvent(self.os_data.kqfd, kevent_array, empty_kevs, null) catch undefined;
self.finishOneEvent();
}
fn dispatch(self: *Loop) void {
while (self.available_eventfd_resume_nodes.pop()) |resume_stack_node| {
const next_tick_node = self.next_tick_queue.get() orelse {
@@ -352,8 +466,8 @@
switch (builtin.os) {
builtin.Os.macosx => {
const kevent_array = (*[1]posix.Kevent)(&eventfd_node.kevent);
-const eventlist = ([*]posix.Kevent)(undefined)[0..0];
-_ = std.os.bsdKEvent(self.os_data.kqfd, kevent_array, eventlist, null) catch {
const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
_ = os.bsdKEvent(self.os_data.kqfd, kevent_array, empty_kevs, null) catch {
self.next_tick_queue.unget(next_tick_node);
self.available_eventfd_resume_nodes.push(resume_stack_node);
return;
@@ -361,9 +475,9 @@
},
builtin.Os.linux => {
// the pending count is already accounted for
-const epoll_events = posix.EPOLLONESHOT | std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT |
-std.os.linux.EPOLLET;
-self.modFd(
const epoll_events = posix.EPOLLONESHOT | os.linux.EPOLLIN | os.linux.EPOLLOUT |
os.linux.EPOLLET;
self.linuxModFd(
eventfd_node.eventfd,
eventfd_node.epoll_op,
epoll_events,
@@ -379,7 +493,7 @@
// the consumer code can decide whether to read the completion key.
// it has to do this for normal I/O, so we match that behavior here.
const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1);
-std.os.windowsPostQueuedCompletionStatus(
os.windowsPostQueuedCompletionStatus(
self.os_data.io_port,
undefined,
eventfd_node.completion_key,
@@ -397,15 +511,29 @@
/// Bring your own linked list node. This means it can't fail.
pub fn onNextTick(self: *Loop, node: *NextTickNode) void {
-_ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
self.beginOneEvent(); // finished in dispatch()
self.next_tick_queue.put(node);
self.dispatch();
}
pub fn cancelOnNextTick(self: *Loop, node: *NextTickNode) void {
if (self.next_tick_queue.remove(node)) {
self.finishOneEvent();
}
}
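For example, rescheduling the current coroutine onto the next tick with a stack-allocated node looks like this (the same pattern yield() uses below):

suspend {
    var my_tick_node = Loop.NextTickNode{
        .prev = undefined,
        .next = undefined,
        .data = @handle(),
    };
    loop.onNextTick(&my_tick_node);
}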
pub fn run(self: *Loop) void {
self.finishOneEvent(); // the reference we start with
self.workerRun();
switch (builtin.os) {
builtin.Os.linux,
builtin.Os.macosx,
=> self.os_data.fs_thread.wait(),
else => {},
}
for (self.extra_threads) |extra_thread| {
extra_thread.wait();
}
@@ -420,6 +548,7 @@
suspend {
handle.* = @handle();
var my_tick_node = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
.data = @handle(),
};
@@ -441,6 +570,7 @@
pub async fn yield(self: *Loop) void {
suspend {
var my_tick_node = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
.data = @handle(),
};
@@ -448,20 +578,28 @@
}
}
-fn finishOneEvent(self: *Loop) void {
-if (@atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst) == 1) {
/// call finishOneEvent when done
pub fn beginOneEvent(self: *Loop) void {
_ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
}
pub fn finishOneEvent(self: *Loop) void {
const prev = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
if (prev == 1) {
// cause all the threads to stop
switch (builtin.os) {
builtin.Os.linux => {
self.posixFsRequest(&self.os_data.fs_end_request);
// writing 8 bytes to an eventfd cannot fail
-std.os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
os.posixWrite(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
return;
},
builtin.Os.macosx => {
self.posixFsRequest(&self.os_data.fs_end_request);
const final_kevent = (*[1]posix.Kevent)(&self.os_data.final_kevent);
-const eventlist = ([*]posix.Kevent)(undefined)[0..0];
const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
// cannot fail because we already added it and this just enables it
-_ = std.os.bsdKEvent(self.os_data.kqfd, final_kevent, eventlist, null) catch unreachable;
_ = os.bsdKEvent(self.os_data.kqfd, final_kevent, empty_kevs, null) catch unreachable;
return;
},
builtin.Os.windows => {
@@ -469,7 +607,7 @@
while (i < self.extra_threads.len + 1) : (i += 1) {
while (true) {
const overlapped = @intToPtr(?*windows.OVERLAPPED, 0x1);
-std.os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue;
os.windowsPostQueuedCompletionStatus(self.os_data.io_port, undefined, @ptrToInt(&self.final_resume_node), overlapped) catch continue;
break;
}
}
@@ -492,8 +630,8 @@
switch (builtin.os) {
builtin.Os.linux => {
// only process 1 event so we don't steal from other threads
-var events: [1]std.os.linux.epoll_event = undefined;
-const count = std.os.linuxEpollWait(self.os_data.epollfd, events[0..], -1);
var events: [1]os.linux.epoll_event = undefined;
const count = os.linuxEpollWait(self.os_data.epollfd, events[0..], -1);
for (events[0..count]) |ev| {
const resume_node = @intToPtr(*ResumeNode, ev.data.ptr);
const handle = resume_node.handle;
@@ -516,13 +654,17 @@
},
builtin.Os.macosx => {
var eventlist: [1]posix.Kevent = undefined;
-const count = std.os.bsdKEvent(self.os_data.kqfd, self.os_data.kevents, eventlist[0..], null) catch unreachable;
const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
const count = os.bsdKEvent(self.os_data.kqfd, empty_kevs, eventlist[0..], null) catch unreachable;
for (eventlist[0..count]) |ev| {
const resume_node = @intToPtr(*ResumeNode, ev.udata);
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
-ResumeNode.Id.Basic => {},
ResumeNode.Id.Basic => {
const basic_node = @fieldParentPtr(ResumeNode.Basic, "base", resume_node);
basic_node.kev = ev;
},
ResumeNode.Id.Stop => return,
ResumeNode.Id.EventFd => {
const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
@@ -541,9 +683,10 @@
while (true) {
var nbytes: windows.DWORD = undefined;
var overlapped: ?*windows.OVERLAPPED = undefined;
-switch (std.os.windowsGetQueuedCompletionStatus(self.os_data.io_port, &nbytes, &completion_key, &overlapped, windows.INFINITE)) {
-std.os.WindowsWaitResult.Aborted => return,
-std.os.WindowsWaitResult.Normal => {},
switch (os.windowsGetQueuedCompletionStatus(self.os_data.io_port, &nbytes, &completion_key, &overlapped, windows.INFINITE)) {
os.WindowsWaitResult.Aborted => return,
os.WindowsWaitResult.Normal => {},
os.WindowsWaitResult.Cancelled => continue,
}
if (overlapped != null) break;
}
@@ -560,21 +703,101 @@
},
}
resume handle;
if (resume_node_id == ResumeNode.Id.EventFd) {
self.finishOneEvent();
}
},
else => @compileError("unsupported OS"),
}
}
}
-const OsData = switch (builtin.os) {
-builtin.Os.linux => struct {
-epollfd: i32,
-final_eventfd: i32,
-final_eventfd_event: std.os.linux.epoll_event,
-},
fn posixFsRequest(self: *Loop, request_node: *fs.RequestNode) void {
self.beginOneEvent(); // finished in posixFsRun after processing the msg
self.os_data.fs_queue.put(request_node);
switch (builtin.os) {
builtin.Os.macosx => {
const fs_kevs = (*[1]posix.Kevent)(&self.os_data.fs_kevent_wake);
const empty_kevs = ([*]posix.Kevent)(undefined)[0..0];
_ = os.bsdKEvent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable;
},
builtin.Os.linux => {
_ = @atomicRmw(u8, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
const rc = os.linux.futex_wake(@ptrToInt(&self.os_data.fs_queue_item), os.linux.FUTEX_WAKE, 1);
switch (os.linux.getErrno(rc)) {
0 => {},
posix.EINVAL => unreachable,
else => unreachable,
}
},
else => @compileError("Unsupported OS"),
}
}
fn posixFsCancel(self: *Loop, request_node: *fs.RequestNode) void {
if (self.os_data.fs_queue.remove(request_node)) {
self.finishOneEvent();
}
}
fn posixFsRun(self: *Loop) void {
while (true) {
if (builtin.os == builtin.Os.linux) {
_ = @atomicRmw(u8, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
}
while (self.os_data.fs_queue.get()) |node| {
switch (node.data.msg) {
@TagType(fs.Request.Msg).End => return,
@TagType(fs.Request.Msg).PWriteV => |*msg| {
msg.result = os.posix_pwritev(msg.fd, msg.iov.ptr, msg.iov.len, msg.offset);
},
@TagType(fs.Request.Msg).PReadV => |*msg| {
msg.result = os.posix_preadv(msg.fd, msg.iov.ptr, msg.iov.len, msg.offset);
},
@TagType(fs.Request.Msg).Open => |*msg| {
msg.result = os.posixOpenC(msg.path.ptr, msg.flags, msg.mode);
},
@TagType(fs.Request.Msg).Close => |*msg| os.close(msg.fd),
@TagType(fs.Request.Msg).WriteFile => |*msg| blk: {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT |
posix.O_CLOEXEC | posix.O_TRUNC;
const fd = os.posixOpenC(msg.path.ptr, flags, msg.mode) catch |err| {
msg.result = err;
break :blk;
};
defer os.close(fd);
msg.result = os.posixWrite(fd, msg.contents);
},
}
switch (node.data.finish) {
@TagType(fs.Request.Finish).TickNode => |*tick_node| self.onNextTick(tick_node),
@TagType(fs.Request.Finish).DeallocCloseOperation => |close_op| {
self.allocator.destroy(close_op);
},
@TagType(fs.Request.Finish).NoAction => {},
}
self.finishOneEvent();
}
switch (builtin.os) {
builtin.Os.linux => {
const rc = os.linux.futex_wait(@ptrToInt(&self.os_data.fs_queue_item), os.linux.FUTEX_WAIT, 0, null);
switch (os.linux.getErrno(rc)) {
0 => continue,
posix.EINTR => continue,
posix.EAGAIN => continue,
else => unreachable,
}
},
builtin.Os.macosx => {
const fs_kevs = (*[1]posix.Kevent)(&self.os_data.fs_kevent_wait);
var out_kevs: [1]posix.Kevent = undefined;
_ = os.bsdKEvent(self.os_data.fs_kqfd, fs_kevs, out_kevs[0..], null) catch unreachable;
},
else => @compileError("Unsupported OS"),
}
}
}
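The shutdown path reuses this same queue: posting the preallocated end request wakes the fs thread, which returns as soon as it dequeues Msg.End. This is the sequence the errdefer blocks in initOsData perform:

self.posixFsRequest(&self.os_data.fs_end_request);
self.os_data.fs_thread.wait();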
const OsData = switch (builtin.os) {
builtin.Os.linux => LinuxOsData,
builtin.Os.macosx => MacOsData,
builtin.Os.windows => struct {
io_port: windows.HANDLE,
@@ -586,7 +809,22 @@
const MacOsData = struct {
kqfd: i32,
final_kevent: posix.Kevent,
-kevents: []posix.Kevent,
fs_kevent_wake: posix.Kevent,
fs_kevent_wait: posix.Kevent,
fs_thread: *os.Thread,
fs_kqfd: i32,
fs_queue: std.atomic.Queue(fs.Request),
fs_end_request: fs.RequestNode,
};
const LinuxOsData = struct {
epollfd: i32,
final_eventfd: i32,
final_eventfd_event: os.linux.epoll_event,
fs_thread: *os.Thread,
fs_queue_item: u8,
fs_queue: std.atomic.Queue(fs.Request),
fs_end_request: fs.RequestNode,
};
};

std/event/rwlock.zig (new file, 296 lines)
@@ -0,0 +1,296 @@
const std = @import("../index.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
const Loop = std.event.Loop;
/// Thread-safe async/await lock.
/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
/// are resumed when the lock is released, in order.
/// Many readers can hold the lock at the same time; however locking for writing is exclusive.
/// When a read lock is held, it will not be released until the reader queue is empty.
/// When a write lock is held, it will not be released until the writer queue is empty.
pub const RwLock = struct {
loop: *Loop,
shared_state: u8, // TODO make this an enum
writer_queue: Queue,
reader_queue: Queue,
writer_queue_empty_bit: u8, // TODO make this a bool
reader_queue_empty_bit: u8, // TODO make this a bool
reader_lock_count: usize,
const State = struct {
const Unlocked = 0;
const WriteLock = 1;
const ReadLock = 2;
};
const Queue = std.atomic.Queue(promise);
pub const HeldRead = struct {
lock: *RwLock,
pub fn release(self: HeldRead) void {
// If other readers still hold the lock, we're done.
if (@atomicRmw(usize, &self.lock.reader_lock_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst) != 1) {
return;
}
_ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
if (@cmpxchgStrong(u8, &self.lock.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
}
self.lock.commonPostUnlock();
}
};
pub const HeldWrite = struct {
lock: *RwLock,
pub fn release(self: HeldWrite) void {
// See if we can leave it locked for writing, and pass the lock to the next writer
// in the queue to grab the lock.
if (self.lock.writer_queue.get()) |node| {
self.lock.loop.onNextTick(node);
return;
}
// We need to release the write lock. Check if any readers are waiting to grab the lock.
if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
// Switch to a read lock.
_ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.ReadLock, AtomicOrder.SeqCst);
while (self.lock.reader_queue.get()) |node| {
self.lock.loop.onNextTick(node);
}
return;
}
_ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
_ = @atomicRmw(u8, &self.lock.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst);
self.lock.commonPostUnlock();
}
};
pub fn init(loop: *Loop) RwLock {
return RwLock{
.loop = loop,
.shared_state = State.Unlocked,
.writer_queue = Queue.init(),
.writer_queue_empty_bit = 1,
.reader_queue = Queue.init(),
.reader_queue_empty_bit = 1,
.reader_lock_count = 0,
};
}
/// Must be called when not locked. Not thread safe.
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *RwLock) void {
assert(self.shared_state == State.Unlocked);
while (self.writer_queue.get()) |node| cancel node.data;
while (self.reader_queue.get()) |node| cancel node.data;
}
pub async fn acquireRead(self: *RwLock) HeldRead {
_ = @atomicRmw(usize, &self.reader_lock_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
suspend {
// TODO explicitly put this memory in the coroutine frame #1194
var my_tick_node = Loop.NextTickNode{
.data = @handle(),
.prev = undefined,
.next = undefined,
};
self.reader_queue.put(&my_tick_node);
// At this point, we are in the reader_queue, so we might have already been resumed and this coroutine
// frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame.
// We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1,
// some actor will attempt to grab the lock.
_ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
// Here we don't care if we are the one to do the locking or if it was already locked for reading.
const have_read_lock = if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |old_state| old_state == State.ReadLock else true;
if (have_read_lock) {
// Give out all the read locks.
if (self.reader_queue.get()) |first_node| {
while (self.reader_queue.get()) |node| {
self.loop.onNextTick(node);
}
resume first_node.data;
}
}
}
return HeldRead{ .lock = self };
}
pub async fn acquireWrite(self: *RwLock) HeldWrite {
suspend {
// TODO explicitly put this memory in the coroutine frame #1194
var my_tick_node = Loop.NextTickNode{
.data = @handle(),
.prev = undefined,
.next = undefined,
};
self.writer_queue.put(&my_tick_node);
// At this point, we are in the writer_queue, so we might have already been resumed and this coroutine
// frame might be destroyed. For the rest of the suspend block we cannot access the coroutine frame.
// We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1,
// some actor will attempt to grab the lock.
_ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
// Here we must be the one to acquire the write lock. It cannot already be locked.
if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null) {
// We now have a write lock.
if (self.writer_queue.get()) |node| {
// Whether this node is us or someone else, we tail resume it.
resume node.data;
}
}
}
return HeldWrite{ .lock = self };
}
fn commonPostUnlock(self: *RwLock) void {
while (true) {
// There might be a writer_queue item or a reader_queue item
// If we check and both are empty, we can be done, because the other actors will try to
// obtain the lock.
// But if there's a writer_queue item or a reader_queue item,
// we are the actor which must loop and attempt to grab the lock again.
if (@atomicLoad(u8, &self.writer_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
}
// If there's an item in the writer queue, give them the lock, and we're done.
if (self.writer_queue.get()) |node| {
self.loop.onNextTick(node);
return;
}
// Release the lock again.
_ = @atomicRmw(u8, &self.writer_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
_ = @atomicRmw(u8, &self.shared_state, AtomicRmwOp.Xchg, State.Unlocked, AtomicOrder.SeqCst);
continue;
}
if (@atomicLoad(u8, &self.reader_queue_empty_bit, AtomicOrder.SeqCst) == 0) {
if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
}
// If there are any items in the reader queue, give out all the reader locks, and we're done.
if (self.reader_queue.get()) |first_node| {
self.loop.onNextTick(first_node);
while (self.reader_queue.get()) |node| {
self.loop.onNextTick(node);
}
return;
}
// Release the lock again.
_ = @atomicRmw(u8, &self.reader_queue_empty_bit, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
if (@cmpxchgStrong(u8, &self.shared_state, State.ReadLock, State.Unlocked, AtomicOrder.SeqCst, AtomicOrder.SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
}
continue;
}
return;
}
}
};
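A usage sketch inside a coroutine (readers may hold the lock concurrently; a writer excludes everyone):

async fn bumpShared(lock: *RwLock, data: *i32) void {
    const held = await (async lock.acquireWrite() catch unreachable);
    defer held.release();
    data.* += 1;
}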
test "std.event.RwLock" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
const allocator = &da.allocator;
var loop: Loop = undefined;
try loop.initMultiThreaded(allocator);
defer loop.deinit();
var lock = RwLock.init(&loop);
defer lock.deinit();
const handle = try async<allocator> testLock(&loop, &lock);
defer cancel handle;
loop.run();
const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
assert(mem.eql(i32, shared_test_data, expected_result));
}
async fn testLock(loop: *Loop, lock: *RwLock) void {
// TODO explicitly put next tick node memory in the coroutine frame #1194
suspend {
resume @handle();
}
var read_nodes: [100]Loop.NextTickNode = undefined;
for (read_nodes) |*read_node| {
read_node.data = async readRunner(lock) catch @panic("out of memory");
loop.onNextTick(read_node);
}
var write_nodes: [shared_it_count]Loop.NextTickNode = undefined;
for (write_nodes) |*write_node| {
write_node.data = async writeRunner(lock) catch @panic("out of memory");
loop.onNextTick(write_node);
}
for (write_nodes) |*write_node| {
await @ptrCast(promise->void, write_node.data);
}
for (read_nodes) |*read_node| {
await @ptrCast(promise->void, read_node.data);
}
}
const shared_it_count = 10;
var shared_test_data = [1]i32{0} ** 10;
var shared_test_index: usize = 0;
var shared_count: usize = 0;
async fn writeRunner(lock: *RwLock) void {
suspend; // resumed by onNextTick
var i: usize = 0;
while (i < shared_test_data.len) : (i += 1) {
std.os.time.sleep(0, 100000);
const lock_promise = async lock.acquireWrite() catch @panic("out of memory");
const handle = await lock_promise;
defer handle.release();
shared_count += 1;
while (shared_test_index < shared_test_data.len) : (shared_test_index += 1) {
shared_test_data[shared_test_index] = shared_test_data[shared_test_index] + 1;
}
shared_test_index = 0;
}
}
async fn readRunner(lock: *RwLock) void {
suspend; // resumed by onNextTick
std.os.time.sleep(0, 1);
var i: usize = 0;
while (i < shared_test_data.len) : (i += 1) {
const lock_promise = async lock.acquireRead() catch @panic("out of memory");
const handle = await lock_promise;
defer handle.release();
assert(shared_test_index == 0);
assert(shared_test_data[i] == @intCast(i32, shared_count));
}
}

std/event/rwlocked.zig (new file, 58 lines)
@@ -0,0 +1,58 @@
const std = @import("../index.zig");
const RwLock = std.event.RwLock;
const Loop = std.event.Loop;
/// Thread-safe async/await RW lock that protects one piece of data.
/// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
/// are resumed when the lock is released, in order.
pub fn RwLocked(comptime T: type) type {
return struct {
lock: RwLock,
locked_data: T,
const Self = this;
pub const HeldReadLock = struct {
value: *const T,
held: RwLock.HeldRead,
pub fn release(self: HeldReadLock) void {
self.held.release();
}
};
pub const HeldWriteLock = struct {
value: *T,
held: RwLock.HeldWrite,
pub fn release(self: HeldWriteLock) void {
self.held.release();
}
};
pub fn init(loop: *Loop, data: T) Self {
return Self{
.lock = RwLock.init(loop),
.locked_data = data,
};
}
pub fn deinit(self: *Self) void {
self.lock.deinit();
}
pub async fn acquireRead(self: *Self) HeldReadLock {
return HeldReadLock{
.held = await (async self.lock.acquireRead() catch unreachable),
.value = &self.locked_data,
};
}
pub async fn acquireWrite(self: *Self) HeldWriteLock {
return HeldWriteLock{
.held = await (async self.lock.acquireWrite() catch unreachable),
.value = &self.locked_data,
};
}
};
}
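A sketch of the wrapper in use inside a coroutine (assuming an initialized *Loop named loop):

var shared = RwLocked(i32).init(loop, 0);
defer shared.deinit();
const held = await (async shared.acquireWrite() catch unreachable);
defer held.release();
held.value.* += 1;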
std/event/tcp.zig
@@ -55,13 +55,13 @@ pub const Server = struct {
errdefer cancel self.accept_coro.?;
self.listen_resume_node.handle = self.accept_coro.?;
-try self.loop.addFd(sockfd, &self.listen_resume_node);
try self.loop.linuxAddFd(sockfd, &self.listen_resume_node, posix.EPOLLIN | posix.EPOLLOUT | posix.EPOLLET);
errdefer self.loop.removeFd(sockfd);
}
/// Stop listening
pub fn close(self: *Server) void {
-self.loop.removeFd(self.sockfd.?);
self.loop.linuxRemoveFd(self.sockfd.?);
std.os.close(self.sockfd.?);
}
@@ -116,7 +116,7 @@ pub async fn connect(loop: *Loop, _address: *const std.net.Address) !std.os.File
errdefer std.os.close(sockfd);
try std.os.posixConnectAsync(sockfd, &address.os_addr);
-try await try async loop.waitFd(sockfd);
try await try async loop.linuxWaitFd(sockfd, posix.EPOLLIN | posix.EPOLLOUT | posix.EPOLLET);
try std.os.posixGetSockOptConnectError(sockfd);
return std.os.File.openHandle(sockfd);
@@ -181,4 +181,3 @@ async fn doAsyncTest(loop: *Loop, address: *const std.net.Address, server: *Serv
assert(mem.eql(u8, msg, "hello from server\n"));
server.close();
}

std/hash_map.zig
@@ -9,6 +9,10 @@ const builtin = @import("builtin");
const want_modification_safety = builtin.mode != builtin.Mode.ReleaseFast;
const debug_u32 = if (want_modification_safety) u32 else void;
pub fn AutoHashMap(comptime K: type, comptime V: type) type {
return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K));
}
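A sketch of the convenience wrapper (assuming allocator is any *mem.Allocator; the key type selects the hash and eql functions automatically):

var map = AutoHashMap(i32, []const u8).init(allocator);
defer map.deinit();
_ = try map.put(1, "one");
if (map.get(1)) |kv| assert(mem.eql(u8, kv.value, "one"));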
pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u32, comptime eql: fn (a: K, b: K) bool) type {
return struct {
entries: []Entry,
@@ -20,13 +24,22 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
const Self = this;
-pub const Entry = struct {
-used: bool,
-distance_from_start_index: usize,
pub const KV = struct {
key: K,
value: V,
};
const Entry = struct {
used: bool,
distance_from_start_index: usize,
kv: KV,
};
pub const GetOrPutResult = struct {
kv: *KV,
found_existing: bool,
};
pub const Iterator = struct {
hm: *const Self,
// how many items have we returned
@@ -36,7 +49,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
// used to detect concurrent modification
initial_modification_count: debug_u32,
-pub fn next(it: *Iterator) ?*Entry {
pub fn next(it: *Iterator) ?*KV {
if (want_modification_safety) {
assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
}
@@ -46,7 +59,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
if (entry.used) {
it.index += 1;
it.count += 1;
-return entry;
return &entry.kv;
}
}
unreachable; // no next item
@@ -71,7 +84,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
}
-pub fn deinit(hm: *const Self) void {
pub fn deinit(hm: Self) void {
hm.allocator.free(hm.entries);
}
@@ -84,34 +97,65 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
hm.incrementModificationCount();
}
-pub fn count(hm: *const Self) usize {
-return hm.size;
-}
pub fn count(self: Self) usize {
return self.size;
}
-/// Returns the value that was already there.
-pub fn put(hm: *Self, key: K, value: *const V) !?V {
-if (hm.entries.len == 0) {
-try hm.initCapacity(16);
-}
/// If key exists this function cannot fail.
/// If there is an existing item with `key`, then the result
/// kv pointer points to it, and found_existing is true.
/// Otherwise, puts a new item with undefined value, and
/// the kv pointer points to it. Caller should then initialize
/// the data.
pub fn getOrPut(self: *Self, key: K) !GetOrPutResult {
// TODO this implementation can be improved - we should only
// have to hash once and find the entry once.
if (self.get(key)) |kv| {
return GetOrPutResult{
.kv = kv,
.found_existing = true,
};
}
self.incrementModificationCount();
try self.ensureCapacity();
const put_result = self.internalPut(key);
assert(put_result.old_kv == null);
return GetOrPutResult{
.kv = &put_result.new_entry.kv,
.found_existing = false,
};
}
fn ensureCapacity(self: *Self) !void {
if (self.entries.len == 0) {
return self.initCapacity(16);
}
-hm.incrementModificationCount();
// if we get too full (60%), double the capacity
-if (hm.size * 5 >= hm.entries.len * 3) {
-const old_entries = hm.entries;
-try hm.initCapacity(hm.entries.len * 2);
if (self.size * 5 >= self.entries.len * 3) {
const old_entries = self.entries;
try self.initCapacity(self.entries.len * 2);
// dump all of the old elements into the new table
for (old_entries) |*old_entry| {
if (old_entry.used) {
-_ = hm.internalPut(old_entry.key, old_entry.value);
self.internalPut(old_entry.kv.key).new_entry.kv.value = old_entry.kv.value;
}
}
-hm.allocator.free(old_entries);
self.allocator.free(old_entries);
}
}
-return hm.internalPut(key, value);
-}
/// Returns the kv pair that was already there.
pub fn put(self: *Self, key: K, value: V) !?KV {
self.incrementModificationCount();
try self.ensureCapacity();
const put_result = self.internalPut(key);
put_result.new_entry.kv.value = value;
return put_result.old_kv;
}
-pub fn get(hm: *const Self, key: K) ?*Entry {
pub fn get(hm: *const Self, key: K) ?*KV {
if (hm.entries.len == 0) {
return null;
}
@@ -122,7 +166,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return hm.get(key) != null;
}
-pub fn remove(hm: *Self, key: K) ?*Entry {
pub fn remove(hm: *Self, key: K) ?*KV {
if (hm.entries.len == 0) return null;
hm.incrementModificationCount();
const start_index = hm.keyToIndex(key);
@@ -134,7 +178,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
if (!entry.used) return null;
-if (!eql(entry.key, key)) continue;
if (!eql(entry.kv.key, key)) continue;
while (roll_over < hm.entries.len) : (roll_over += 1) {
const next_index = (start_index + roll_over + 1) % hm.entries.len;
@@ -142,7 +186,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
if (!next_entry.used or next_entry.distance_from_start_index == 0) {
entry.used = false;
hm.size -= 1;
-return entry;
return &entry.kv;
}
entry.* = next_entry.*;
entry.distance_from_start_index -= 1;
@@ -163,6 +207,16 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
}
pub fn clone(self: Self) !Self {
var other = Self.init(self.allocator);
try other.initCapacity(self.entries.len);
var it = self.iterator();
while (it.next()) |entry| {
assert((try other.put(entry.key, entry.value)) == null);
}
return other;
}
fn initCapacity(hm: *Self, capacity: usize) !void {
hm.entries = try hm.allocator.alloc(Entry, capacity);
hm.size = 0;
@ -178,60 +232,81 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
}
const InternalPutResult = struct {
new_entry: *Entry,
old_kv: ?KV,
};
/// Returns a pointer to the new entry.
/// Asserts that there is enough space for the new item.
fn internalPut(self: *Self, orig_key: K) InternalPutResult {
var key = orig_key;
var value: V = undefined;
const start_index = self.keyToIndex(key);
var roll_over: usize = 0;
var distance_from_start_index: usize = 0;
var got_result_entry = false;
var result = InternalPutResult{
.new_entry = undefined,
.old_kv = null,
};
while (roll_over < self.entries.len) : ({
roll_over += 1;
distance_from_start_index += 1;
}) {
const index = (start_index + roll_over) % self.entries.len;
const entry = &self.entries[index];
if (entry.used and !eql(entry.kv.key, key)) {
if (entry.distance_from_start_index < distance_from_start_index) {
// robin hood to the rescue
const tmp = entry.*;
self.max_distance_from_start_index = math.max(self.max_distance_from_start_index, distance_from_start_index);
if (!got_result_entry) {
got_result_entry = true;
result.new_entry = entry;
}
entry.* = Entry{
.used = true,
.distance_from_start_index = distance_from_start_index,
.kv = KV{
.key = key,
.value = value,
},
};
key = tmp.kv.key;
value = tmp.kv.value;
distance_from_start_index = tmp.distance_from_start_index;
}
continue;
}
if (entry.used) {
result.old_kv = entry.kv;
} else {
// adding an entry. otherwise overwriting old value with
// same key
self.size += 1;
}
self.max_distance_from_start_index = math.max(distance_from_start_index, self.max_distance_from_start_index);
if (!got_result_entry) {
result.new_entry = entry;
}
entry.* = Entry{
.used = true,
.distance_from_start_index = distance_from_start_index,
.kv = KV{
.key = key,
.value = value,
},
};
return result;
}
unreachable; // put into a full map
}
fn internalGet(hm: Self, key: K) ?*KV {
const start_index = hm.keyToIndex(key);
{
var roll_over: usize = 0;
@ -240,13 +315,13 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
const entry = &hm.entries[index];
if (!entry.used) return null;
if (eql(entry.kv.key, key)) return &entry.kv;
}
}
return null;
}
fn keyToIndex(hm: Self, key: K) usize {
return usize(hash(key)) % hm.entries.len;
}
};
@ -256,7 +331,7 @@ test "basic hash map usage" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
var map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
defer map.deinit();
assert((try map.put(1, 11)) == null);
@ -265,8 +340,19 @@ test "basic hash map usage" {
assert((try map.put(4, 44)) == null);
assert((try map.put(5, 55)) == null);
assert((try map.put(5, 66)).?.value == 55);
assert((try map.put(5, 55)).?.value == 66);
const gop1 = try map.getOrPut(5);
assert(gop1.found_existing == true);
assert(gop1.kv.value == 55);
gop1.kv.value = 77;
assert(map.get(5).?.value == 77);
const gop2 = try map.getOrPut(99);
assert(gop2.found_existing == false);
gop2.kv.value = 42;
assert(map.get(99).?.value == 42);
assert(map.contains(2));
assert(map.get(2).?.value == 22);
@ -279,7 +365,7 @@ test "iterator hash map" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
var reset_map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
defer reset_map.deinit();
assert((try reset_map.put(1, 11)) == null);
@ -287,14 +373,14 @@ test "iterator hash map" {
assert((try reset_map.put(3, 33)) == null);
var keys = []i32{
3,
2,
1,
};
var values = []i32{
33,
22,
11,
};
var it = reset_map.iterator();
@ -322,10 +408,124 @@ test "iterator hash map" {
assert(entry.value == values[0]);
}
pub fn getAutoHashFn(comptime K: type) (fn (K) u32) {
return struct {
fn hash(key: K) u32 {
comptime var rng = comptime std.rand.DefaultPrng.init(0);
return autoHash(key, &rng.random, u32);
}
}.hash;
}
pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) {
return struct {
fn eql(a: K, b: K) bool {
return autoEql(a, b);
}
}.eql;
}
// TODO improve these hash functions
pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type) HashInt {
switch (@typeInfo(@typeOf(key))) {
builtin.TypeId.NoReturn,
builtin.TypeId.Opaque,
builtin.TypeId.Undefined,
builtin.TypeId.ArgTuple,
=> @compileError("cannot hash this type"),
builtin.TypeId.Void,
builtin.TypeId.Null,
=> return 0,
builtin.TypeId.Int => |info| {
const unsigned_x = @bitCast(@IntType(false, info.bits), key);
if (info.bits <= HashInt.bit_count) {
return HashInt(unsigned_x) ^ comptime rng.scalar(HashInt);
} else {
return @truncate(HashInt, unsigned_x ^ comptime rng.scalar(@typeOf(unsigned_x)));
}
},
builtin.TypeId.Float => |info| {
return autoHash(@bitCast(@IntType(false, info.bits), key), rng, HashInt);
},
builtin.TypeId.Bool => return autoHash(@boolToInt(key), rng, HashInt),
builtin.TypeId.Enum => return autoHash(@enumToInt(key), rng, HashInt),
builtin.TypeId.ErrorSet => return autoHash(@errorToInt(key), rng, HashInt),
builtin.TypeId.Promise, builtin.TypeId.Fn => return autoHash(@ptrToInt(key), rng, HashInt),
builtin.TypeId.Namespace,
builtin.TypeId.Block,
builtin.TypeId.BoundFn,
builtin.TypeId.ComptimeFloat,
builtin.TypeId.ComptimeInt,
builtin.TypeId.Type,
=> return 0,
builtin.TypeId.Pointer => |info| switch (info.size) {
builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto hash for single item pointers"),
builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto hash for many item pointers"),
builtin.TypeInfo.Pointer.Size.Slice => {
const interval = std.math.max(1, key.len / 256);
var i: usize = 0;
var h = comptime rng.scalar(HashInt);
while (i < key.len) : (i += interval) {
h ^= autoHash(key[i], rng, HashInt);
}
return h;
},
},
builtin.TypeId.Optional => @compileError("TODO auto hash for optionals"),
builtin.TypeId.Array => @compileError("TODO auto hash for arrays"),
builtin.TypeId.Struct => @compileError("TODO auto hash for structs"),
builtin.TypeId.Union => @compileError("TODO auto hash for unions"),
builtin.TypeId.ErrorUnion => @compileError("TODO auto hash for error unions"),
}
}
pub fn autoEql(a: var, b: @typeOf(a)) bool {
switch (@typeInfo(@typeOf(a))) {
builtin.TypeId.NoReturn,
builtin.TypeId.Opaque,
builtin.TypeId.Undefined,
builtin.TypeId.ArgTuple,
=> @compileError("cannot test equality of this type"),
builtin.TypeId.Void,
builtin.TypeId.Null,
=> return true,
builtin.TypeId.Bool,
builtin.TypeId.Int,
builtin.TypeId.Float,
builtin.TypeId.ComptimeFloat,
builtin.TypeId.ComptimeInt,
builtin.TypeId.Namespace,
builtin.TypeId.Block,
builtin.TypeId.Promise,
builtin.TypeId.Enum,
builtin.TypeId.BoundFn,
builtin.TypeId.Fn,
builtin.TypeId.ErrorSet,
builtin.TypeId.Type,
=> return a == b,
builtin.TypeId.Pointer => |info| switch (info.size) {
builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto eql for single item pointers"),
builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto eql for many item pointers"),
builtin.TypeInfo.Pointer.Size.Slice => {
if (a.len != b.len) return false;
for (a) |a_item, i| {
if (!autoEql(a_item, b[i])) return false;
}
return true;
},
},
builtin.TypeId.Optional => @compileError("TODO auto eql for optionals"),
builtin.TypeId.Array => @compileError("TODO auto eql for arrays"),
builtin.TypeId.Struct => @compileError("TODO auto eql for structs"),
builtin.TypeId.Union => @compileError("TODO auto eql for unions"),
builtin.TypeId.ErrorUnion => @compileError("TODO auto eql for error unions"),
}
}
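
These two helpers are presumably what backs the AutoHashMap alias exercised in the tests above; a sketch of such a wrapper, assuming that is how the commit wires it up:

pub fn AutoHashMap(comptime K: type, comptime V: type) type {
    return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K));
}
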

View File

@ -5,10 +5,11 @@ pub const BufSet = @import("buf_set.zig").BufSet;
pub const Buffer = @import("buffer.zig").Buffer;
pub const BufferOutStream = @import("buffer.zig").BufferOutStream;
pub const HashMap = @import("hash_map.zig").HashMap;
pub const AutoHashMap = @import("hash_map.zig").AutoHashMap;
pub const LinkedList = @import("linked_list.zig").LinkedList;
pub const IntrusiveLinkedList = @import("linked_list.zig").IntrusiveLinkedList;
pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
pub const DynLib = @import("dynamic_library.zig").DynLib;
pub const Mutex = @import("mutex.zig").Mutex;
pub const atomic = @import("atomic/index.zig");
pub const base64 = @import("base64.zig");
@ -49,6 +50,7 @@ test "std" {
_ = @import("hash_map.zig"); _ = @import("hash_map.zig");
_ = @import("linked_list.zig"); _ = @import("linked_list.zig");
_ = @import("segmented_list.zig"); _ = @import("segmented_list.zig");
_ = @import("mutex.zig");
_ = @import("base64.zig"); _ = @import("base64.zig");
_ = @import("build.zig"); _ = @import("build.zig");

View File

@ -415,7 +415,6 @@ pub fn PeekStream(comptime buffer_size: usize, comptime InStreamError: type) typ
self.at_end = (read < left);
return pos + read;
}
};
}
@ -481,8 +480,7 @@ pub const SliceOutStream = struct {
assert(self.pos <= self.slice.len);
const n = if (self.pos + bytes.len <= self.slice.len)
bytes.len
else
self.slice.len - self.pos;
@ -586,7 +584,7 @@ pub const BufferedAtomicFile = struct {
});
errdefer allocator.destroy(self);
self.atomic_file = try os.AtomicFile.init(allocator, dest_path, os.File.default_mode);
errdefer self.atomic_file.deinit();
self.file_stream = FileOutStream.init(&self.atomic_file.file);

View File

@ -1318,7 +1318,7 @@ pub const Parser = struct {
_ = p.stack.pop();
var object = &p.stack.items[p.stack.len - 1].Object;
_ = try object.put(key, value.*);
p.state = State.ObjectKey;
},
// Array Parent -> [ ..., <array>, value ]

View File

@ -4,18 +4,8 @@ const assert = debug.assert;
const mem = std.mem;
const Allocator = mem.Allocator;
/// Generic non-intrusive doubly linked list.
pub fn LinkedList(comptime T: type) type {
return BaseLinkedList(T, void, "");
}
/// Generic intrusive doubly linked list.
pub fn IntrusiveLinkedList(comptime ParentType: type, comptime field_name: []const u8) type {
return BaseLinkedList(void, ParentType, field_name);
}
/// Generic doubly linked list.
pub fn LinkedList(comptime T: type) type {
return struct {
const Self = this;
@ -25,23 +15,13 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
next: ?*Node,
data: T,
pub fn init(data: T) Node {
return Node{
.prev = null,
.next = null,
.data = data,
};
}
pub fn initIntrusive() Node {
// TODO: when #678 is solved this can become `init`.
return Node.init({});
}
pub fn toData(node: *Node) *ParentType {
comptime assert(isIntrusive());
return @fieldParentPtr(ParentType, field_name, node);
}
};
first: ?*Node,
@ -60,10 +40,6 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
};
}
fn isIntrusive() bool {
return ParentType != void or field_name.len != 0;
}
/// Insert a new node after an existing one.
///
/// Arguments:
@ -192,7 +168,6 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Returns:
/// A pointer to the new node.
pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
return allocator.create(Node(undefined));
}
@ -202,7 +177,6 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// node: Pointer to the node to deallocate.
/// allocator: Dynamic memory allocator.
pub fn destroyNode(list: *Self, node: *Node, allocator: *Allocator) void {
allocator.destroy(node);
}
@ -214,8 +188,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the new node.
pub fn createNode(list: *Self, data: T, allocator: *Allocator) !*Node {
var node = try list.allocateNode(allocator);
node.* = Node.init(data);
return node;
@ -274,69 +247,3 @@ test "basic linked list test" {
assert(list.last.?.data == 4);
assert(list.len == 2);
}
const ElementList = IntrusiveLinkedList(Element, "link");
const Element = struct {
value: u32,
link: IntrusiveLinkedList(Element, "link").Node,
};
test "basic intrusive linked list test" {
const allocator = debug.global_allocator;
var list = ElementList.init();
var one = Element{
.value = 1,
.link = ElementList.Node.initIntrusive(),
};
var two = Element{
.value = 2,
.link = ElementList.Node.initIntrusive(),
};
var three = Element{
.value = 3,
.link = ElementList.Node.initIntrusive(),
};
var four = Element{
.value = 4,
.link = ElementList.Node.initIntrusive(),
};
var five = Element{
.value = 5,
.link = ElementList.Node.initIntrusive(),
};
list.append(&two.link); // {2}
list.append(&five.link); // {2, 5}
list.prepend(&one.link); // {1, 2, 5}
list.insertBefore(&five.link, &four.link); // {1, 2, 4, 5}
list.insertAfter(&two.link, &three.link); // {1, 2, 3, 4, 5}
// Traverse forwards.
{
var it = list.first;
var index: u32 = 1;
while (it) |node| : (it = node.next) {
assert(node.toData().value == index);
index += 1;
}
}
// Traverse backwards.
{
var it = list.last;
var index: u32 = 1;
while (it) |node| : (it = node.prev) {
assert(node.toData().value == (6 - index));
index += 1;
}
}
var first = list.popFirst(); // {2, 3, 4, 5}
var last = list.pop(); // {2, 3, 4}
list.remove(&three.link); // {2, 4}
assert(list.first.?.toData().value == 2);
assert(list.last.?.toData().value == 4);
assert(list.len == 2);
}

View File

@ -577,7 +577,7 @@ pub fn join(allocator: *Allocator, sep: u8, strings: ...) ![]u8 {
}
}
return allocator.shrink(u8, buf, buf_index);
}
test "mem.join" {

27
std/mutex.zig Normal file
View File

@ -0,0 +1,27 @@
const std = @import("index.zig");
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
const assert = std.debug.assert;
/// TODO use syscalls instead of a spinlock
pub const Mutex = struct {
lock: u8, // TODO use a bool
pub const Held = struct {
mutex: *Mutex,
pub fn release(self: Held) void {
assert(@atomicRmw(u8, &self.mutex.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
}
};
pub fn init() Mutex {
return Mutex{ .lock = 0 };
}
pub fn acquire(self: *Mutex) Held {
while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
return Held{ .mutex = self };
}
};
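
A short usage sketch (not part of the file): acquire returns a Held token, so the unlock is tied to a value and composes with defer:

test "mutex usage sketch" {
    var mutex = Mutex.init();
    var counter: usize = 0;
    {
        const held = mutex.acquire();
        defer held.release();
        counter += 1; // critical section
    }
    assert(counter == 1);
}
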

View File

@ -482,91 +482,98 @@ pub const NOTE_MACH_CONTINUOUS_TIME = 0x00000080;
/// data is mach absolute time units
pub const NOTE_MACHTIME = 0x00000100;
pub const AF_UNSPEC = 0;
pub const AF_LOCAL = 1;
pub const AF_UNIX = AF_LOCAL;
pub const AF_INET = 2;
pub const AF_SYS_CONTROL = 2;
pub const AF_IMPLINK = 3;
pub const AF_PUP = 4;
pub const AF_CHAOS = 5;
pub const AF_NS = 6;
pub const AF_ISO = 7;
pub const AF_OSI = AF_ISO;
pub const AF_ECMA = 8;
pub const AF_DATAKIT = 9;
pub const AF_CCITT = 10;
pub const AF_SNA = 11;
pub const AF_DECnet = 12;
pub const AF_DLI = 13;
pub const AF_LAT = 14;
pub const AF_HYLINK = 15;
pub const AF_APPLETALK = 16;
pub const AF_ROUTE = 17;
pub const AF_LINK = 18;
pub const AF_XTP = 19;
pub const AF_COIP = 20;
pub const AF_CNT = 21;
pub const AF_RTIP = 22;
pub const AF_IPX = 23;
pub const AF_SIP = 24;
pub const AF_PIP = 25;
pub const AF_ISDN = 28;
pub const AF_E164 = AF_ISDN;
pub const AF_KEY = 29;
pub const AF_INET6 = 30;
pub const AF_NATM = 31;
pub const AF_SYSTEM = 32;
pub const AF_NETBIOS = 33;
pub const AF_PPP = 34;
pub const AF_MAX = 40;
pub const PF_UNSPEC = AF_UNSPEC;
pub const PF_LOCAL = AF_LOCAL;
pub const PF_UNIX = PF_LOCAL;
pub const PF_INET = AF_INET;
pub const PF_IMPLINK = AF_IMPLINK;
pub const PF_PUP = AF_PUP;
pub const PF_CHAOS = AF_CHAOS;
pub const PF_NS = AF_NS;
pub const PF_ISO = AF_ISO;
pub const PF_OSI = AF_ISO;
pub const PF_ECMA = AF_ECMA;
pub const PF_DATAKIT = AF_DATAKIT;
pub const PF_CCITT = AF_CCITT;
pub const PF_SNA = AF_SNA;
pub const PF_DECnet = AF_DECnet;
pub const PF_DLI = AF_DLI;
pub const PF_LAT = AF_LAT;
pub const PF_HYLINK = AF_HYLINK;
pub const PF_APPLETALK = AF_APPLETALK;
pub const PF_ROUTE = AF_ROUTE;
pub const PF_LINK = AF_LINK;
pub const PF_XTP = AF_XTP;
pub const PF_COIP = AF_COIP;
pub const PF_CNT = AF_CNT;
pub const PF_SIP = AF_SIP;
pub const PF_IPX = AF_IPX;
pub const PF_RTIP = AF_RTIP;
pub const PF_PIP = AF_PIP;
pub const PF_ISDN = AF_ISDN;
pub const PF_KEY = AF_KEY;
pub const PF_INET6 = AF_INET6;
pub const PF_NATM = AF_NATM;
pub const PF_SYSTEM = AF_SYSTEM;
pub const PF_NETBIOS = AF_NETBIOS;
pub const PF_PPP = AF_PPP;
pub const PF_MAX = AF_MAX;
pub const SYSPROTO_EVENT = 1;
pub const SYSPROTO_CONTROL = 2;
pub const SOCK_STREAM = 1;
pub const SOCK_DGRAM = 2;
pub const SOCK_RAW = 3;
pub const SOCK_RDM = 4;
pub const SOCK_SEQPACKET = 5;
pub const SOCK_MAXADDRLEN = 255;
pub const IPPROTO_ICMP = 1;
pub const IPPROTO_ICMPV6 = 58;
pub const IPPROTO_TCP = 6;
pub const IPPROTO_UDP = 17;
pub const IPPROTO_IP = 0;
pub const IPPROTO_IPV6 = 41;
fn wstatus(x: i32) i32 {
return x & 0o177;
@ -605,6 +612,11 @@ pub fn abort() noreturn {
c.abort();
}
// bind(int socket, const struct sockaddr *address, socklen_t address_len)
pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
return errnoWrap(c.bind(@bitCast(c_int, fd), addr, len));
}
pub fn exit(code: i32) noreturn {
c.exit(code);
}
@ -634,6 +646,10 @@ pub fn read(fd: i32, buf: [*]u8, nbyte: usize) usize {
return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte));
}
pub fn pread(fd: i32, buf: [*]u8, nbyte: usize, offset: u64) usize {
return errnoWrap(c.pread(fd, @ptrCast(*c_void, buf), nbyte, offset));
}
pub fn stat(noalias path: [*]const u8, noalias buf: *stat) usize {
return errnoWrap(c.stat(path, buf));
}
@ -642,6 +658,10 @@ pub fn write(fd: i32, buf: [*]const u8, nbyte: usize) usize {
return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte));
}
pub fn pwrite(fd: i32, buf: [*]const u8, nbyte: usize, offset: u64) usize {
return errnoWrap(c.pwrite(fd, @ptrCast(*const c_void, buf), nbyte, offset));
}
pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
const ptr_result = c.mmap(
@ptrCast(*c_void, address),
@ -805,6 +825,20 @@ pub fn sigaction(sig: u5, noalias act: *const Sigaction, noalias oact: ?*Sigacti
return result;
}
pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
return errnoWrap(c.socket(@bitCast(c_int, domain), @bitCast(c_int, socket_type), @bitCast(c_int, protocol)));
}
pub const iovec = extern struct {
iov_base: [*]u8,
iov_len: usize,
};
pub const iovec_const = extern struct {
iov_base: [*]const u8,
iov_len: usize,
};
pub const sigset_t = c.sigset_t;
pub const empty_sigset = sigset_t(0);
@ -812,8 +846,13 @@ pub const timespec = c.timespec;
pub const Stat = c.Stat;
pub const dirent = c.dirent;
pub const in_port_t = c.in_port_t;
pub const sa_family_t = c.sa_family_t;
pub const socklen_t = c.socklen_t;
pub const sockaddr = c.sockaddr;
pub const sockaddr_in = c.sockaddr_in;
pub const sockaddr_in6 = c.sockaddr_in6;
/// Renamed from `kevent` to `Kevent` to avoid conflict with the syscall.
pub const Kevent = c.Kevent;

View File

@ -15,6 +15,16 @@ pub const File = struct {
/// The OS-specific file descriptor or file handle.
handle: os.FileHandle,
pub const Mode = switch (builtin.os) {
Os.windows => void,
else => u32,
};
pub const default_mode = switch (builtin.os) {
Os.windows => {},
else => 0o666,
};
pub const OpenError = os.WindowsOpenError || os.PosixOpenError;
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
@ -39,16 +49,16 @@ pub const File = struct {
}
}
/// Calls `openWriteMode` with os.File.default_mode for the mode.
pub fn openWrite(allocator: *mem.Allocator, path: []const u8) OpenError!File {
return openWriteMode(allocator, path, os.File.default_mode);
}
/// If the path does not exist it will be created.
/// If a file already exists in the destination it will be truncated.
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
pub fn openWriteMode(allocator: *mem.Allocator, path: []const u8, file_mode: Mode) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_TRUNC;
const fd = try os.posixOpen(allocator, path, flags, file_mode);
@ -72,7 +82,7 @@ pub const File = struct {
/// If a file already exists in the destination this returns OpenError.PathAlreadyExists
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
pub fn openWriteNoClobber(allocator: *mem.Allocator, path: []const u8, file_mode: Mode) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_EXCL;
const fd = try os.posixOpen(allocator, path, flags, file_mode);
@ -282,7 +292,7 @@ pub const File = struct {
Unexpected,
};
pub fn mode(self: *File) ModeError!Mode {
if (is_posix) {
var stat: posix.Stat = undefined;
const err = posix.getErrno(posix.fstat(self.handle, &stat));
@ -296,7 +306,7 @@ pub const File = struct {
// TODO: we should be able to cast u16 to ModeError!u32, making this
// explicit cast not necessary
return Mode(stat.mode);
} else if (is_windows) {
return {};
} else {
@ -305,9 +315,11 @@ pub const File = struct {
}
pub const ReadError = error{
FileClosed,
InputOutput,
IsDir,
WouldBlock,
SystemResources,
Unexpected,
};
@ -323,9 +335,12 @@ pub const File = struct {
posix.EINTR => continue,
posix.EINVAL => unreachable,
posix.EFAULT => unreachable,
posix.EAGAIN => return error.WouldBlock,
posix.EBADF => return error.FileClosed,
posix.EIO => return error.InputOutput,
posix.EISDIR => return error.IsDir,
posix.ENOBUFS => return error.SystemResources,
posix.ENOMEM => return error.SystemResources,
else => return os.unexpectedErrorPosix(read_err),
}
}
@ -338,7 +353,7 @@ pub const File = struct {
while (index < buffer.len) {
const want_read_count = @intCast(windows.DWORD, math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index));
var amt_read: windows.DWORD = undefined;
if (windows.ReadFile(self.handle, buffer.ptr + index, want_read_count, &amt_read, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.OPERATION_ABORTED => continue,

View File

@ -38,16 +38,6 @@ pub const path = @import("path.zig");
pub const File = @import("file.zig").File; pub const File = @import("file.zig").File;
pub const time = @import("time.zig"); pub const time = @import("time.zig");
pub const FileMode = switch (builtin.os) {
Os.windows => void,
else => u32,
};
pub const default_file_mode = switch (builtin.os) {
Os.windows => {},
else => 0o666,
};
pub const page_size = 4 * 1024;
pub const UserInfo = @import("get_user_id.zig").UserInfo;
@ -256,6 +246,67 @@ pub fn posixRead(fd: i32, buf: []u8) !void {
}
}
/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
pub fn posix_preadv(fd: i32, iov: [*]const posix.iovec, count: usize, offset: u64) !usize {
switch (builtin.os) {
builtin.Os.macosx => {
// Darwin does not have preadv but it does have pread.
var off: usize = 0;
var iov_i: usize = 0;
var inner_off: usize = 0;
while (true) {
const v = iov[iov_i];
const rc = darwin.pread(fd, v.iov_base + inner_off, v.iov_len - inner_off, offset + off);
const err = darwin.getErrno(rc);
switch (err) {
0 => {
off += rc;
inner_off += rc;
if (inner_off == v.iov_len) {
iov_i += 1;
inner_off = 0;
if (iov_i == count) {
return off;
}
}
if (rc == 0) return off; // EOF
continue;
},
posix.EINTR => continue,
posix.EINVAL => unreachable,
posix.EFAULT => unreachable,
posix.ESPIPE => unreachable, // fd is not seekable
posix.EAGAIN => return error.WouldBlock,
posix.EBADF => return error.FileClosed,
posix.EIO => return error.InputOutput,
posix.EISDIR => return error.IsDir,
posix.ENOBUFS => return error.SystemResources,
posix.ENOMEM => return error.SystemResources,
else => return unexpectedErrorPosix(err),
}
}
},
builtin.Os.linux, builtin.Os.freebsd => while (true) {
const rc = posix.preadv(fd, iov, count, offset);
const err = posix.getErrno(rc);
switch (err) {
0 => return rc,
posix.EINTR => continue,
posix.EINVAL => unreachable,
posix.EFAULT => unreachable,
posix.EAGAIN => return error.WouldBlock,
posix.EBADF => return error.FileClosed,
posix.EIO => return error.InputOutput,
posix.EISDIR => return error.IsDir,
posix.ENOBUFS => return error.SystemResources,
posix.ENOMEM => return error.SystemResources,
else => return unexpectedErrorPosix(err),
}
},
else => @compileError("Unsupported OS"),
}
}
pub const PosixWriteError = error{
WouldBlock,
FileClosed,
@ -300,6 +351,71 @@ pub fn posixWrite(fd: i32, bytes: []const u8) !void {
}
}
pub fn posix_pwritev(fd: i32, iov: [*]const posix.iovec_const, count: usize, offset: u64) PosixWriteError!void {
switch (builtin.os) {
builtin.Os.macosx => {
// Darwin does not have pwritev but it does have pwrite.
var off: usize = 0;
var iov_i: usize = 0;
var inner_off: usize = 0;
while (true) {
const v = iov[iov_i];
const rc = darwin.pwrite(fd, v.iov_base + inner_off, v.iov_len - inner_off, offset + off);
const err = darwin.getErrno(rc);
switch (err) {
0 => {
off += rc;
inner_off += rc;
if (inner_off == v.iov_len) {
iov_i += 1;
inner_off = 0;
if (iov_i == count) {
return;
}
}
continue;
},
posix.EINTR => continue,
posix.ESPIPE => unreachable, // fd is not seekable
posix.EINVAL => unreachable,
posix.EFAULT => unreachable,
posix.EAGAIN => return PosixWriteError.WouldBlock,
posix.EBADF => return PosixWriteError.FileClosed,
posix.EDESTADDRREQ => return PosixWriteError.DestinationAddressRequired,
posix.EDQUOT => return PosixWriteError.DiskQuota,
posix.EFBIG => return PosixWriteError.FileTooBig,
posix.EIO => return PosixWriteError.InputOutput,
posix.ENOSPC => return PosixWriteError.NoSpaceLeft,
posix.EPERM => return PosixWriteError.AccessDenied,
posix.EPIPE => return PosixWriteError.BrokenPipe,
else => return unexpectedErrorPosix(err),
}
}
},
builtin.Os.linux => while (true) {
const rc = posix.pwritev(fd, iov, count, offset);
const err = posix.getErrno(rc);
switch (err) {
0 => return,
posix.EINTR => continue,
posix.EINVAL => unreachable,
posix.EFAULT => unreachable,
posix.EAGAIN => return PosixWriteError.WouldBlock,
posix.EBADF => return PosixWriteError.FileClosed,
posix.EDESTADDRREQ => return PosixWriteError.DestinationAddressRequired,
posix.EDQUOT => return PosixWriteError.DiskQuota,
posix.EFBIG => return PosixWriteError.FileTooBig,
posix.EIO => return PosixWriteError.InputOutput,
posix.ENOSPC => return PosixWriteError.NoSpaceLeft,
posix.EPERM => return PosixWriteError.AccessDenied,
posix.EPIPE => return PosixWriteError.BrokenPipe,
else => return unexpectedErrorPosix(err),
}
},
else => @compileError("Unsupported OS"),
}
}
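
A hypothetical caller of posix_pwritev (`fd` and the string data are assumptions): two iovecs gather "helloworld" into a single positioned write at byte offset 100, without moving the file cursor:

const part1 = "hello"[0..];
const part2 = "world"[0..];
var iovecs = []posix.iovec_const{
    posix.iovec_const{ .iov_base = part1.ptr, .iov_len = part1.len },
    posix.iovec_const{ .iov_base = part2.ptr, .iov_len = part2.len },
};
const iov_slice = iovecs[0..];
try posix_pwritev(fd, iov_slice.ptr, iov_slice.len, 100);
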
pub const PosixOpenError = error{
OutOfMemory,
AccessDenied,
@ -853,7 +969,7 @@ pub fn copyFile(allocator: *Allocator, source_path: []const u8, dest_path: []con
/// Guaranteed to be atomic. However until https://patchwork.kernel.org/patch/9636735/ is
/// merged and readily available,
/// there is a possibility of power loss or application termination leaving temporary files present
pub fn copyFileMode(allocator: *Allocator, source_path: []const u8, dest_path: []const u8, mode: File.Mode) !void {
var in_file = try os.File.openRead(allocator, source_path);
defer in_file.close();
@ -879,7 +995,7 @@ pub const AtomicFile = struct {
/// dest_path must remain valid for the lifetime of AtomicFile
/// call finish to atomically replace dest_path with contents
pub fn init(allocator: *Allocator, dest_path: []const u8, mode: File.Mode) !AtomicFile {
const dirname = os.path.dirname(dest_path);
var rand_buf: [12]u8 = undefined;
@ -2943,3 +3059,44 @@ pub fn bsdKEvent(
}
}
}
pub fn linuxINotifyInit1(flags: u32) !i32 {
const rc = linux.inotify_init1(flags);
const err = posix.getErrno(rc);
switch (err) {
0 => return @intCast(i32, rc),
posix.EINVAL => unreachable,
posix.EMFILE => return error.ProcessFdQuotaExceeded,
posix.ENFILE => return error.SystemFdQuotaExceeded,
posix.ENOMEM => return error.SystemResources,
else => return unexpectedErrorPosix(err),
}
}
pub fn linuxINotifyAddWatchC(inotify_fd: i32, pathname: [*]const u8, mask: u32) !i32 {
const rc = linux.inotify_add_watch(inotify_fd, pathname, mask);
const err = posix.getErrno(rc);
switch (err) {
0 => return @intCast(i32, rc),
posix.EACCES => return error.AccessDenied,
posix.EBADF => unreachable,
posix.EFAULT => unreachable,
posix.EINVAL => unreachable,
posix.ENAMETOOLONG => return error.NameTooLong,
posix.ENOENT => return error.FileNotFound,
posix.ENOMEM => return error.SystemResources,
posix.ENOSPC => return error.UserResourceLimitReached,
else => return unexpectedErrorPosix(err),
}
}
pub fn linuxINotifyRmWatch(inotify_fd: i32, wd: i32) !void {
const rc = linux.inotify_rm_watch(inotify_fd, wd);
const err = posix.getErrno(rc);
switch (err) {
0 => return,
posix.EBADF => unreachable,
posix.EINVAL => unreachable,
else => unreachable,
}
}
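
Taken together these wrappers cover the usual inotify life cycle; a hedged sketch (the path and event mask are illustrative, and the read loop is elided):

const inotify_fd = try linuxINotifyInit1(linux.IN_NONBLOCK | linux.IN_CLOEXEC);
defer close(inotify_fd);
const wd = try linuxINotifyAddWatchC(inotify_fd, c"/some/dir", linux.IN_CLOSE_WRITE);
// ... read inotify_event records from inotify_fd ...
try linuxINotifyRmWatch(inotify_fd, wd);
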

View File

@ -567,6 +567,37 @@ pub const MNT_DETACH = 2;
pub const MNT_EXPIRE = 4;
pub const UMOUNT_NOFOLLOW = 8;
pub const IN_CLOEXEC = O_CLOEXEC;
pub const IN_NONBLOCK = O_NONBLOCK;
pub const IN_ACCESS = 0x00000001;
pub const IN_MODIFY = 0x00000002;
pub const IN_ATTRIB = 0x00000004;
pub const IN_CLOSE_WRITE = 0x00000008;
pub const IN_CLOSE_NOWRITE = 0x00000010;
pub const IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE;
pub const IN_OPEN = 0x00000020;
pub const IN_MOVED_FROM = 0x00000040;
pub const IN_MOVED_TO = 0x00000080;
pub const IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO;
pub const IN_CREATE = 0x00000100;
pub const IN_DELETE = 0x00000200;
pub const IN_DELETE_SELF = 0x00000400;
pub const IN_MOVE_SELF = 0x00000800;
pub const IN_ALL_EVENTS = 0x00000fff;
pub const IN_UNMOUNT = 0x00002000;
pub const IN_Q_OVERFLOW = 0x00004000;
pub const IN_IGNORED = 0x00008000;
pub const IN_ONLYDIR = 0x01000000;
pub const IN_DONT_FOLLOW = 0x02000000;
pub const IN_EXCL_UNLINK = 0x04000000;
pub const IN_MASK_ADD = 0x20000000;
pub const IN_ISDIR = 0x40000000;
pub const IN_ONESHOT = 0x80000000;
pub const S_IFMT = 0o170000;
pub const S_IFDIR = 0o040000;
@ -692,6 +723,10 @@ pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?*timespec) us
return syscall4(SYS_futex, uaddr, futex_op, @bitCast(u32, val), @ptrToInt(timeout));
}
pub fn futex_wake(uaddr: usize, futex_op: u32, val: i32) usize {
return syscall3(SYS_futex, uaddr, futex_op, @bitCast(u32, val));
}
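
A hedged sketch of the wait/wake pairing (assuming the FUTEX_WAIT and FUTEX_WAKE constants defined elsewhere in this file; return values and memory ordering are glossed over):

var word: i32 = 0;
// waiter: blocks only while the word still equals 0
_ = futex_wait(@ptrToInt(&word), FUTEX_WAIT, 0, null);
// waker, on another thread: publish the change, then wake one waiter
word = 1; // a real implementation would use an atomic store here
_ = futex_wake(@ptrToInt(&word), FUTEX_WAKE, 1);
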
pub fn getcwd(buf: [*]u8, size: usize) usize {
return syscall2(SYS_getcwd, @ptrToInt(buf), size);
}
@ -700,6 +735,18 @@ pub fn getdents(fd: i32, dirp: [*]u8, count: usize) usize {
return syscall3(SYS_getdents, @intCast(usize, fd), @ptrToInt(dirp), count);
}
pub fn inotify_init1(flags: u32) usize {
return syscall1(SYS_inotify_init1, flags);
}
pub fn inotify_add_watch(fd: i32, pathname: [*]const u8, mask: u32) usize {
return syscall3(SYS_inotify_add_watch, @intCast(usize, fd), @ptrToInt(pathname), mask);
}
pub fn inotify_rm_watch(fd: i32, wd: i32) usize {
return syscall2(SYS_inotify_rm_watch, @intCast(usize, fd), @intCast(usize, wd));
}
pub fn isatty(fd: i32) bool {
var wsz: winsize = undefined;
return syscall3(SYS_ioctl, @intCast(usize, fd), TIOCGWINSZ, @ptrToInt(&wsz)) == 0;
@ -742,6 +789,14 @@ pub fn read(fd: i32, buf: [*]u8, count: usize) usize {
return syscall3(SYS_read, @intCast(usize, fd), @ptrToInt(buf), count);
}
pub fn preadv(fd: i32, iov: [*]const iovec, count: usize, offset: u64) usize {
return syscall4(SYS_preadv, @intCast(usize, fd), @ptrToInt(iov), count, offset);
}
pub fn pwritev(fd: i32, iov: [*]const iovec_const, count: usize, offset: u64) usize {
return syscall4(SYS_pwritev, @intCast(usize, fd), @ptrToInt(iov), count, offset);
}
// TODO https://github.com/ziglang/zig/issues/265
pub fn rmdir(path: [*]const u8) usize {
return syscall1(SYS_rmdir, @ptrToInt(path));
@ -1064,6 +1119,11 @@ pub const iovec = extern struct {
iov_len: usize,
};
pub const iovec_const = extern struct {
iov_base: [*]const u8,
iov_len: usize,
};
pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return syscall3(SYS_getsockname, @intCast(usize, fd), @ptrToInt(addr), @ptrToInt(len));
}
@ -1372,6 +1432,14 @@ pub fn capset(hdrp: *cap_user_header_t, datap: *const cap_user_data_t) usize {
return syscall2(SYS_capset, @ptrToInt(hdrp), @ptrToInt(datap));
}
pub const inotify_event = extern struct {
wd: i32,
mask: u32,
cookie: u32,
len: u32,
//name: [?]u8,
};
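
The commented-out name field is deliberate: ev.len bytes of null-padded file name follow each fixed-size header, so a read buffer must be walked record by record. A hedged sketch, where `buf` is assumed to hold bytes returned by read on an inotify fd:

var i: usize = 0;
while (i < buf.len) {
    const ev = @ptrCast(*const inotify_event, @alignCast(@alignOf(inotify_event), &buf[i]));
    // the (possibly empty) name occupies ev.len bytes after the header
    i += @sizeOf(inotify_event) + ev.len;
}
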
test "import" { test "import" {
if (builtin.os == builtin.Os.linux) { if (builtin.os == builtin.Os.linux) {
_ = @import("test.zig"); _ = @import("test.zig");

View File

@ -506,7 +506,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
result_index += 1;
}
return allocator.shrink(u8, result, result_index);
}
/// This function is like a series of `cd` statements executed one after another.

View File

@ -67,8 +67,9 @@ pub const INVALID_FILE_ATTRIBUTES = DWORD(@maxValue(DWORD));
pub const OVERLAPPED = extern struct {
Internal: ULONG_PTR,
InternalHigh: ULONG_PTR,
Offset: DWORD,
OffsetHigh: DWORD,
hEvent: ?HANDLE,
};
pub const LPOVERLAPPED = *OVERLAPPED;
@ -350,3 +351,15 @@ pub const E_ACCESSDENIED = @bitCast(c_long, c_ulong(0x80070005));
pub const E_HANDLE = @bitCast(c_long, c_ulong(0x80070006));
pub const E_OUTOFMEMORY = @bitCast(c_long, c_ulong(0x8007000E));
pub const E_INVALIDARG = @bitCast(c_long, c_ulong(0x80070057));
pub const FILE_FLAG_BACKUP_SEMANTICS = 0x02000000;
pub const FILE_FLAG_DELETE_ON_CLOSE = 0x04000000;
pub const FILE_FLAG_NO_BUFFERING = 0x20000000;
pub const FILE_FLAG_OPEN_NO_RECALL = 0x00100000;
pub const FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000;
pub const FILE_FLAG_OVERLAPPED = 0x40000000;
pub const FILE_FLAG_POSIX_SEMANTICS = 0x01000000;
pub const FILE_FLAG_RANDOM_ACCESS = 0x10000000;
pub const FILE_FLAG_SESSION_AWARE = 0x00800000;
pub const FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000;
pub const FILE_FLAG_WRITE_THROUGH = 0x80000000;

View File

@ -1,5 +1,8 @@
use @import("index.zig"); use @import("index.zig");
pub extern "kernel32" stdcallcc fn CancelIoEx(hFile: HANDLE, lpOverlapped: LPOVERLAPPED) BOOL;
pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL; pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL;
pub extern "kernel32" stdcallcc fn CreateDirectoryA( pub extern "kernel32" stdcallcc fn CreateDirectoryA(
@ -8,7 +11,17 @@ pub extern "kernel32" stdcallcc fn CreateDirectoryA(
) BOOL;
pub extern "kernel32" stdcallcc fn CreateFileA(
lpFileName: [*]const u8, // TODO null terminated pointer type
dwDesiredAccess: DWORD,
dwShareMode: DWORD,
lpSecurityAttributes: ?LPSECURITY_ATTRIBUTES,
dwCreationDisposition: DWORD,
dwFlagsAndAttributes: DWORD,
hTemplateFile: ?HANDLE,
) HANDLE;
pub extern "kernel32" stdcallcc fn CreateFileW(
lpFileName: [*]const u16, // TODO null terminated pointer type
dwDesiredAccess: DWORD,
dwShareMode: DWORD,
lpSecurityAttributes: ?LPSECURITY_ATTRIBUTES,
@ -94,6 +107,9 @@ pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA(
dwFlags: DWORD,
) DWORD;
pub extern "kernel32" stdcallcc fn GetOverlappedResult(hFile: HANDLE, lpOverlapped: *OVERLAPPED, lpNumberOfBytesTransferred: *DWORD, bWait: BOOL) BOOL;
pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE; pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE;
pub extern "kernel32" stdcallcc fn GetQueuedCompletionStatus(CompletionPort: HANDLE, lpNumberOfBytesTransferred: LPDWORD, lpCompletionKey: *ULONG_PTR, lpOverlapped: *?*OVERLAPPED, dwMilliseconds: DWORD) BOOL; pub extern "kernel32" stdcallcc fn GetQueuedCompletionStatus(CompletionPort: HANDLE, lpNumberOfBytesTransferred: LPDWORD, lpCompletionKey: *ULONG_PTR, lpOverlapped: *?*OVERLAPPED, dwMilliseconds: DWORD) BOOL;
@ -104,7 +120,6 @@ pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: S
pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL; pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL;
pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void; pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void;
pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T; pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T; pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL; pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL;
@ -114,6 +129,8 @@ pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBy
pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL; pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: ?*const c_void) BOOL;
pub extern "kernel32" stdcallcc fn MoveFileExA( pub extern "kernel32" stdcallcc fn MoveFileExA(
lpExistingFileName: LPCSTR, lpExistingFileName: LPCSTR,
lpNewFileName: LPCSTR, lpNewFileName: LPCSTR,
@ -126,11 +143,22 @@ pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *
pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL; pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn ReadDirectoryChangesW(
hDirectory: HANDLE,
lpBuffer: [*]align(@alignOf(FILE_NOTIFY_INFORMATION)) u8,
nBufferLength: DWORD,
bWatchSubtree: BOOL,
dwNotifyFilter: DWORD,
lpBytesReturned: ?*DWORD,
lpOverlapped: ?*OVERLAPPED,
lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE,
) BOOL;
pub extern "kernel32" stdcallcc fn ReadFile( pub extern "kernel32" stdcallcc fn ReadFile(
in_hFile: HANDLE, in_hFile: HANDLE,
out_lpBuffer: *c_void, out_lpBuffer: [*]u8,
in_nNumberOfBytesToRead: DWORD, in_nNumberOfBytesToRead: DWORD,
out_lpNumberOfBytesRead: *DWORD, out_lpNumberOfBytesRead: ?*DWORD,
in_out_lpOverlapped: ?*OVERLAPPED, in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL; ) BOOL;
@ -153,13 +181,42 @@ pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMillis
pub extern "kernel32" stdcallcc fn WriteFile( pub extern "kernel32" stdcallcc fn WriteFile(
in_hFile: HANDLE, in_hFile: HANDLE,
in_lpBuffer: *const c_void, in_lpBuffer: [*]const u8,
in_nNumberOfBytesToWrite: DWORD, in_nNumberOfBytesToWrite: DWORD,
out_lpNumberOfBytesWritten: ?*DWORD, out_lpNumberOfBytesWritten: ?*DWORD,
in_out_lpOverlapped: ?*OVERLAPPED, in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL; ) BOOL;
pub extern "kernel32" stdcallcc fn WriteFileEx(hFile: HANDLE, lpBuffer: [*]const u8, nNumberOfBytesToWrite: DWORD, lpOverlapped: LPOVERLAPPED, lpCompletionRoutine: LPOVERLAPPED_COMPLETION_ROUTINE) BOOL;
//TODO: call unicode versions instead of relying on ANSI code page
pub extern "kernel32" stdcallcc fn LoadLibraryA(lpLibFileName: LPCSTR) ?HMODULE;
pub extern "kernel32" stdcallcc fn FreeLibrary(hModule: HMODULE) BOOL;
pub const FILE_NOTIFY_INFORMATION = extern struct {
NextEntryOffset: DWORD,
Action: DWORD,
FileNameLength: DWORD,
FileName: [1]WCHAR,
};
pub const FILE_ACTION_ADDED = 0x00000001;
pub const FILE_ACTION_REMOVED = 0x00000002;
pub const FILE_ACTION_MODIFIED = 0x00000003;
pub const FILE_ACTION_RENAMED_OLD_NAME = 0x00000004;
pub const FILE_ACTION_RENAMED_NEW_NAME = 0x00000005;
pub const LPOVERLAPPED_COMPLETION_ROUTINE = ?extern fn(DWORD, DWORD, *OVERLAPPED) void;
pub const FILE_LIST_DIRECTORY = 1;
pub const FILE_NOTIFY_CHANGE_CREATION = 64;
pub const FILE_NOTIFY_CHANGE_SIZE = 8;
pub const FILE_NOTIFY_CHANGE_SECURITY = 256;
pub const FILE_NOTIFY_CHANGE_LAST_ACCESS = 32;
pub const FILE_NOTIFY_CHANGE_LAST_WRITE = 16;
pub const FILE_NOTIFY_CHANGE_DIR_NAME = 2;
pub const FILE_NOTIFY_CHANGE_FILE_NAME = 1;
pub const FILE_NOTIFY_CHANGE_ATTRIBUTES = 4;
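
A hedged sketch of how these declarations combine; the directory handle is assumed to come from CreateFileW with FILE_LIST_DIRECTORY access and FILE_FLAG_OVERLAPPED, which is not shown here:

fn queueDirWatch(dir_handle: HANDLE, buf: []align(@alignOf(FILE_NOTIFY_INFORMATION)) u8, overlapped: *OVERLAPPED) bool {
    return ReadDirectoryChangesW(
        dir_handle,
        buf.ptr,
        @intCast(DWORD, buf.len),
        FALSE, // watch this directory only; TRUE would include the subtree
        FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE,
        null, // bytes-returned pointer is unused in overlapped mode
        overlapped,
        null, // completion is expected through an I/O completion port instead
    ) != 0;
}
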

View File

@ -36,20 +36,19 @@ pub fn windowsClose(handle: windows.HANDLE) void {
pub const WriteError = error{
SystemResources,
OperationAborted,
BrokenPipe,
Unexpected,
};
pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void {
if (windows.WriteFile(handle, bytes.ptr, @intCast(u32, bytes.len), null, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources,
windows.ERROR.NOT_ENOUGH_MEMORY => WriteError.SystemResources,
windows.ERROR.OPERATION_ABORTED => WriteError.OperationAborted,
windows.ERROR.NOT_ENOUGH_QUOTA => WriteError.SystemResources,
windows.ERROR.IO_PENDING => unreachable,
windows.ERROR.BROKEN_PIPE => WriteError.BrokenPipe,
else => os.unexpectedErrorWindows(err),
};
@ -221,6 +220,7 @@ pub fn windowsCreateIoCompletionPort(file_handle: windows.HANDLE, existing_compl
const handle = windows.CreateIoCompletionPort(file_handle, existing_completion_port, completion_key, concurrent_thread_count) orelse {
const err = windows.GetLastError();
switch (err) {
windows.ERROR.INVALID_PARAMETER => unreachable,
else => return os.unexpectedErrorWindows(err),
}
};
@ -238,21 +238,24 @@ pub fn windowsPostQueuedCompletionStatus(completion_port: windows.HANDLE, bytes_
} }
} }
pub const WindowsWaitResult = error{ pub const WindowsWaitResult = enum{
Normal, Normal,
Aborted, Aborted,
Cancelled,
}; };
pub fn windowsGetQueuedCompletionStatus(completion_port: windows.HANDLE, bytes_transferred_count: *windows.DWORD, lpCompletionKey: *usize, lpOverlapped: *?*windows.OVERLAPPED, dwMilliseconds: windows.DWORD) WindowsWaitResult { pub fn windowsGetQueuedCompletionStatus(completion_port: windows.HANDLE, bytes_transferred_count: *windows.DWORD, lpCompletionKey: *usize, lpOverlapped: *?*windows.OVERLAPPED, dwMilliseconds: windows.DWORD) WindowsWaitResult {
if (windows.GetQueuedCompletionStatus(completion_port, bytes_transferred_count, lpCompletionKey, lpOverlapped, dwMilliseconds) == windows.FALSE) { if (windows.GetQueuedCompletionStatus(completion_port, bytes_transferred_count, lpCompletionKey, lpOverlapped, dwMilliseconds) == windows.FALSE) {
if (std.debug.runtime_safety) {
const err = windows.GetLastError(); const err = windows.GetLastError();
if (err != windows.ERROR.ABANDONED_WAIT_0) { switch (err) {
std.debug.warn("err: {}\n", err); windows.ERROR.ABANDONED_WAIT_0 => return WindowsWaitResult.Aborted,
windows.ERROR.OPERATION_ABORTED => return WindowsWaitResult.Cancelled,
else => {
if (std.debug.runtime_safety) {
std.debug.panic("unexpected error: {}\n", err);
}
} }
assert(err == windows.ERROR.ABANDONED_WAIT_0);
} }
return WindowsWaitResult.Aborted;
} }
return WindowsWaitResult.Normal; return WindowsWaitResult.Normal;
} }
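
The result type is now a true enum, and ERROR.OPERATION_ABORTED (a cancelled in-flight operation) is reported as a distinct Cancelled state instead of being conflated with the port shutting down. A hedged wait-loop sketch; `port` is hypothetical and `windows.INFINITE` is assumed to be defined alongside the other constants:

    var bytes: windows.DWORD = undefined;
    var key: usize = undefined;
    var ovl: ?*windows.OVERLAPPED = undefined;
    switch (windowsGetQueuedCompletionStatus(port, &bytes, &key, &ovl, windows.INFINITE)) {
        WindowsWaitResult.Normal => {}, // inspect key/ovl and dispatch
        WindowsWaitResult.Aborted => return, // the port was closed; tear down
        WindowsWaitResult.Cancelled => {}, // a pending op was cancelled; keep waiting
    }
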

View File

@ -2,7 +2,7 @@ const std = @import("index.zig");
const assert = std.debug.assert; const assert = std.debug.assert;
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
// Imagine that `fn at(self: &Self, index: usize) &T` is a customer asking for a box // Imagine that `fn at(self: *Self, index: usize) &T` is a customer asking for a box
// from a warehouse, based on a flat array, boxes ordered from 0 to N - 1. // from a warehouse, based on a flat array, boxes ordered from 0 to N - 1.
// But the warehouse actually stores boxes in shelves of increasing powers of 2 sizes. // But the warehouse actually stores boxes in shelves of increasing powers of 2 sizes.
// So when the customer requests a box index, we have to translate it to shelf index // So when the customer requests a box index, we have to translate it to shelf index
@ -93,6 +93,14 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
pub const prealloc_count = prealloc_item_count; pub const prealloc_count = prealloc_item_count;
fn AtType(comptime SelfType: type) type {
if (@typeInfo(SelfType).Pointer.is_const) {
return *const T;
} else {
return *T;
}
}
/// Deinitialize with `deinit` /// Deinitialize with `deinit`
pub fn init(allocator: *Allocator) Self { pub fn init(allocator: *Allocator) Self {
return Self{ return Self{
@ -109,7 +117,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
self.* = undefined; self.* = undefined;
} }
pub fn at(self: *Self, i: usize) *T { pub fn at(self: var, i: usize) AtType(@typeOf(self)) {
assert(i < self.len); assert(i < self.len);
return self.uncheckedAt(i); return self.uncheckedAt(i);
} }
@ -133,7 +141,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
if (self.len == 0) return null; if (self.len == 0) return null;
const index = self.len - 1; const index = self.len - 1;
const result = self.uncheckedAt(index).*; const result = uncheckedAt(self, index).*;
self.len = index; self.len = index;
return result; return result;
} }
@ -141,7 +149,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
pub fn addOne(self: *Self) !*T { pub fn addOne(self: *Self) !*T {
const new_length = self.len + 1; const new_length = self.len + 1;
try self.growCapacity(new_length); try self.growCapacity(new_length);
const result = self.uncheckedAt(self.len); const result = uncheckedAt(self, self.len);
self.len = new_length; self.len = new_length;
return result; return result;
} }
@ -193,7 +201,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
self.dynamic_segments = self.allocator.shrink([*]T, self.dynamic_segments, new_cap_shelf_count); self.dynamic_segments = self.allocator.shrink([*]T, self.dynamic_segments, new_cap_shelf_count);
} }
pub fn uncheckedAt(self: *Self, index: usize) *T { pub fn uncheckedAt(self: var, index: usize) AtType(@typeOf(self)) {
if (index < prealloc_item_count) { if (index < prealloc_item_count) {
return &self.prealloc_segment[index]; return &self.prealloc_segment[index];
} }
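
The AtType helper makes the accessors const-correct: calling `at`/`uncheckedAt` through a `*const Self` yields `*const T`, while a `*Self` still yields a mutable `*T`. A hedged sketch of the difference, assuming SegmentedList's `push` and the test allocator of the time:

    test "const-correct at" {
        var list = SegmentedList(i32, 4).init(std.debug.global_allocator);
        defer list.deinit();
        try list.push(42);
        list.at(0).* += 1; // via *Self: result type is *i32
        const view: *const SegmentedList(i32, 4) = &list;
        const item: *const i32 = view.at(0); // via *const Self: *const i32
        assert(item.* == 43);
    }
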

View File

@ -72,10 +72,10 @@ pub fn main() !void {
if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| { if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| {
const option_name = option_contents[0..name_end]; const option_name = option_contents[0..name_end];
const option_value = option_contents[name_end + 1 ..]; const option_value = option_contents[name_end + 1 ..];
if (builder.addUserInputOption(option_name, option_value)) if (try builder.addUserInputOption(option_name, option_value))
return usageAndErr(&builder, false, try stderr_stream); return usageAndErr(&builder, false, try stderr_stream);
} else { } else {
if (builder.addUserInputFlag(option_contents)) if (try builder.addUserInputFlag(option_contents))
return usageAndErr(&builder, false, try stderr_stream); return usageAndErr(&builder, false, try stderr_stream);
} }
} else if (mem.startsWith(u8, arg, "-")) { } else if (mem.startsWith(u8, arg, "-")) {
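
addUserInputOption and addUserInputFlag can now fail (e.g. on allocation), so the build runner propagates with `try`. On the build-script side the values still surface through the usual option API; a hedged sketch with a hypothetical option name, assuming the Builder.option API of this era:

    const std = @import("std");
    const Builder = std.build.Builder;

    pub fn build(b: *Builder) void {
        // `zig build -Drelease=true` arrives via addUserInputOption above;
        // a bare `-Dfoo` goes through addUserInputFlag instead.
        const release = b.option(bool, "release", "build optimized") orelse false;
        _ = release;
    }
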

View File

@ -188,6 +188,7 @@ pub const Utf8View = struct {
return Utf8View{ .bytes = s }; return Utf8View{ .bytes = s };
} }
/// TODO: https://github.com/ziglang/zig/issues/425
pub fn initComptime(comptime s: []const u8) Utf8View { pub fn initComptime(comptime s: []const u8) Utf8View {
if (comptime init(s)) |r| { if (comptime init(s)) |r| {
return r; return r;
@ -199,7 +200,7 @@ pub const Utf8View = struct {
} }
} }
pub fn iterator(s: *const Utf8View) Utf8Iterator { pub fn iterator(s: Utf8View) Utf8Iterator {
return Utf8Iterator{ return Utf8Iterator{
.bytes = s.bytes, .bytes = s.bytes,
.i = 0, .i = 0,
@ -530,3 +531,20 @@ test "utf16leToUtf8" {
assert(mem.eql(u8, utf8, "\xf4\x8f\xb0\x80")); assert(mem.eql(u8, utf8, "\xf4\x8f\xb0\x80"));
} }
} }
/// TODO support codepoints bigger than 16 bits
/// TODO type for null terminated pointer
pub fn utf8ToUtf16LeWithNull(allocator: *mem.Allocator, utf8: []const u8) ![]u16 {
var result = std.ArrayList(u16).init(allocator);
// optimistically guess that it will not require surrogate pairs
try result.ensureCapacity(utf8.len + 1);
const view = try Utf8View.init(utf8);
var it = view.iterator();
while (it.nextCodepoint()) |codepoint| {
try result.append(@intCast(u16, codepoint)); // TODO surrogate pairs
}
try result.append(0);
return result.toOwnedSlice();
}
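
As the TODOs note, only BMP codepoints are handled so far (no surrogate pairs), and the terminating 0 is appended inside the returned slice. A hedged usage sketch, assuming an `allocator` in scope:

    const utf16 = try utf8ToUtf16LeWithNull(allocator, "zig-cache");
    defer allocator.free(utf16);
    // The slice ends in 0, so utf16.ptr is directly usable where a
    // null-terminated pointer is expected, e.g. by *W Win32 calls.
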

View File

@ -32,6 +32,12 @@ pub const Tree = struct {
return self.source[token.start..token.end]; return self.source[token.start..token.end];
} }
pub fn getNodeSource(self: *const Tree, node: *const Node) []const u8 {
const first_token = self.tokens.at(node.firstToken());
const last_token = self.tokens.at(node.lastToken());
return self.source[first_token.start..last_token.end];
}
pub const Location = struct { pub const Location = struct {
line: usize, line: usize,
column: usize, column: usize,
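
getNodeSource recovers the exact source slice a node spans, which is what the const-qualified firstToken/lastToken methods below make possible. A hedged sketch, assuming a parsed `tree` and some `node: *const Node` within it:

    const src = tree.getNodeSource(node);
    std.debug.warn("node spans: {}\n", src);
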
@ -338,7 +344,7 @@ pub const Node = struct {
unreachable; unreachable;
} }
pub fn firstToken(base: *Node) TokenIndex { pub fn firstToken(base: *const Node) TokenIndex {
comptime var i = 0; comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) { inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) { if (base.id == @field(Id, @memberName(Id, i))) {
@ -349,7 +355,7 @@ pub const Node = struct {
unreachable; unreachable;
} }
pub fn lastToken(base: *Node) TokenIndex { pub fn lastToken(base: *const Node) TokenIndex {
comptime var i = 0; comptime var i = 0;
inline while (i < @memberCount(Id)) : (i += 1) { inline while (i < @memberCount(Id)) : (i += 1) {
if (base.id == @field(Id, @memberName(Id, i))) { if (base.id == @field(Id, @memberName(Id, i))) {
@ -473,11 +479,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Root) TokenIndex { pub fn firstToken(self: *const Root) TokenIndex {
return if (self.decls.len == 0) self.eof_token else (self.decls.at(0).*).firstToken(); return if (self.decls.len == 0) self.eof_token else (self.decls.at(0).*).firstToken();
} }
pub fn lastToken(self: *Root) TokenIndex { pub fn lastToken(self: *const Root) TokenIndex {
return if (self.decls.len == 0) self.eof_token else (self.decls.at(self.decls.len - 1).*).lastToken(); return if (self.decls.len == 0) self.eof_token else (self.decls.at(self.decls.len - 1).*).lastToken();
} }
}; };
@ -518,7 +524,7 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *VarDecl) TokenIndex { pub fn firstToken(self: *const VarDecl) TokenIndex {
if (self.visib_token) |visib_token| return visib_token; if (self.visib_token) |visib_token| return visib_token;
if (self.comptime_token) |comptime_token| return comptime_token; if (self.comptime_token) |comptime_token| return comptime_token;
if (self.extern_export_token) |extern_export_token| return extern_export_token; if (self.extern_export_token) |extern_export_token| return extern_export_token;
@ -526,7 +532,7 @@ pub const Node = struct {
return self.mut_token; return self.mut_token;
} }
pub fn lastToken(self: *VarDecl) TokenIndex { pub fn lastToken(self: *const VarDecl) TokenIndex {
return self.semicolon_token; return self.semicolon_token;
} }
}; };
@ -548,12 +554,12 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Use) TokenIndex { pub fn firstToken(self: *const Use) TokenIndex {
if (self.visib_token) |visib_token| return visib_token; if (self.visib_token) |visib_token| return visib_token;
return self.use_token; return self.use_token;
} }
pub fn lastToken(self: *Use) TokenIndex { pub fn lastToken(self: *const Use) TokenIndex {
return self.semicolon_token; return self.semicolon_token;
} }
}; };
@ -575,11 +581,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ErrorSetDecl) TokenIndex { pub fn firstToken(self: *const ErrorSetDecl) TokenIndex {
return self.error_token; return self.error_token;
} }
pub fn lastToken(self: *ErrorSetDecl) TokenIndex { pub fn lastToken(self: *const ErrorSetDecl) TokenIndex {
return self.rbrace_token; return self.rbrace_token;
} }
}; };
@ -618,14 +624,14 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ContainerDecl) TokenIndex { pub fn firstToken(self: *const ContainerDecl) TokenIndex {
if (self.layout_token) |layout_token| { if (self.layout_token) |layout_token| {
return layout_token; return layout_token;
} }
return self.kind_token; return self.kind_token;
} }
pub fn lastToken(self: *ContainerDecl) TokenIndex { pub fn lastToken(self: *const ContainerDecl) TokenIndex {
return self.rbrace_token; return self.rbrace_token;
} }
}; };
@ -646,12 +652,12 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *StructField) TokenIndex { pub fn firstToken(self: *const StructField) TokenIndex {
if (self.visib_token) |visib_token| return visib_token; if (self.visib_token) |visib_token| return visib_token;
return self.name_token; return self.name_token;
} }
pub fn lastToken(self: *StructField) TokenIndex { pub fn lastToken(self: *const StructField) TokenIndex {
return self.type_expr.lastToken(); return self.type_expr.lastToken();
} }
}; };
@ -679,11 +685,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *UnionTag) TokenIndex { pub fn firstToken(self: *const UnionTag) TokenIndex {
return self.name_token; return self.name_token;
} }
pub fn lastToken(self: *UnionTag) TokenIndex { pub fn lastToken(self: *const UnionTag) TokenIndex {
if (self.value_expr) |value_expr| { if (self.value_expr) |value_expr| {
return value_expr.lastToken(); return value_expr.lastToken();
} }
@ -712,11 +718,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *EnumTag) TokenIndex { pub fn firstToken(self: *const EnumTag) TokenIndex {
return self.name_token; return self.name_token;
} }
pub fn lastToken(self: *EnumTag) TokenIndex { pub fn lastToken(self: *const EnumTag) TokenIndex {
if (self.value) |value| { if (self.value) |value| {
return value.lastToken(); return value.lastToken();
} }
@ -741,11 +747,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ErrorTag) TokenIndex { pub fn firstToken(self: *const ErrorTag) TokenIndex {
return self.name_token; return self.name_token;
} }
pub fn lastToken(self: *ErrorTag) TokenIndex { pub fn lastToken(self: *const ErrorTag) TokenIndex {
return self.name_token; return self.name_token;
} }
}; };
@ -758,11 +764,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Identifier) TokenIndex { pub fn firstToken(self: *const Identifier) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *Identifier) TokenIndex { pub fn lastToken(self: *const Identifier) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -784,11 +790,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *AsyncAttribute) TokenIndex { pub fn firstToken(self: *const AsyncAttribute) TokenIndex {
return self.async_token; return self.async_token;
} }
pub fn lastToken(self: *AsyncAttribute) TokenIndex { pub fn lastToken(self: *const AsyncAttribute) TokenIndex {
if (self.rangle_bracket) |rangle_bracket| { if (self.rangle_bracket) |rangle_bracket| {
return rangle_bracket; return rangle_bracket;
} }
@ -856,7 +862,7 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *FnProto) TokenIndex { pub fn firstToken(self: *const FnProto) TokenIndex {
if (self.visib_token) |visib_token| return visib_token; if (self.visib_token) |visib_token| return visib_token;
if (self.async_attr) |async_attr| return async_attr.firstToken(); if (self.async_attr) |async_attr| return async_attr.firstToken();
if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token; if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token;
@ -865,7 +871,7 @@ pub const Node = struct {
return self.fn_token; return self.fn_token;
} }
pub fn lastToken(self: *FnProto) TokenIndex { pub fn lastToken(self: *const FnProto) TokenIndex {
if (self.body_node) |body_node| return body_node.lastToken(); if (self.body_node) |body_node| return body_node.lastToken();
switch (self.return_type) { switch (self.return_type) {
// TODO allow this and next prong to share bodies since the types are the same // TODO allow this and next prong to share bodies since the types are the same
@ -896,11 +902,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *PromiseType) TokenIndex { pub fn firstToken(self: *const PromiseType) TokenIndex {
return self.promise_token; return self.promise_token;
} }
pub fn lastToken(self: *PromiseType) TokenIndex { pub fn lastToken(self: *const PromiseType) TokenIndex {
if (self.result) |result| return result.return_type.lastToken(); if (self.result) |result| return result.return_type.lastToken();
return self.promise_token; return self.promise_token;
} }
@ -923,14 +929,14 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ParamDecl) TokenIndex { pub fn firstToken(self: *const ParamDecl) TokenIndex {
if (self.comptime_token) |comptime_token| return comptime_token; if (self.comptime_token) |comptime_token| return comptime_token;
if (self.noalias_token) |noalias_token| return noalias_token; if (self.noalias_token) |noalias_token| return noalias_token;
if (self.name_token) |name_token| return name_token; if (self.name_token) |name_token| return name_token;
return self.type_node.firstToken(); return self.type_node.firstToken();
} }
pub fn lastToken(self: *ParamDecl) TokenIndex { pub fn lastToken(self: *const ParamDecl) TokenIndex {
if (self.var_args_token) |var_args_token| return var_args_token; if (self.var_args_token) |var_args_token| return var_args_token;
return self.type_node.lastToken(); return self.type_node.lastToken();
} }
@ -954,7 +960,7 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Block) TokenIndex { pub fn firstToken(self: *const Block) TokenIndex {
if (self.label) |label| { if (self.label) |label| {
return label; return label;
} }
@ -962,7 +968,7 @@ pub const Node = struct {
return self.lbrace; return self.lbrace;
} }
pub fn lastToken(self: *Block) TokenIndex { pub fn lastToken(self: *const Block) TokenIndex {
return self.rbrace; return self.rbrace;
} }
}; };
@ -981,11 +987,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Defer) TokenIndex { pub fn firstToken(self: *const Defer) TokenIndex {
return self.defer_token; return self.defer_token;
} }
pub fn lastToken(self: *Defer) TokenIndex { pub fn lastToken(self: *const Defer) TokenIndex {
return self.expr.lastToken(); return self.expr.lastToken();
} }
}; };
@ -1005,11 +1011,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Comptime) TokenIndex { pub fn firstToken(self: *const Comptime) TokenIndex {
return self.comptime_token; return self.comptime_token;
} }
pub fn lastToken(self: *Comptime) TokenIndex { pub fn lastToken(self: *const Comptime) TokenIndex {
return self.expr.lastToken(); return self.expr.lastToken();
} }
}; };
@ -1029,11 +1035,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Payload) TokenIndex { pub fn firstToken(self: *const Payload) TokenIndex {
return self.lpipe; return self.lpipe;
} }
pub fn lastToken(self: *Payload) TokenIndex { pub fn lastToken(self: *const Payload) TokenIndex {
return self.rpipe; return self.rpipe;
} }
}; };
@ -1054,11 +1060,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *PointerPayload) TokenIndex { pub fn firstToken(self: *const PointerPayload) TokenIndex {
return self.lpipe; return self.lpipe;
} }
pub fn lastToken(self: *PointerPayload) TokenIndex { pub fn lastToken(self: *const PointerPayload) TokenIndex {
return self.rpipe; return self.rpipe;
} }
}; };
@ -1085,11 +1091,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *PointerIndexPayload) TokenIndex { pub fn firstToken(self: *const PointerIndexPayload) TokenIndex {
return self.lpipe; return self.lpipe;
} }
pub fn lastToken(self: *PointerIndexPayload) TokenIndex { pub fn lastToken(self: *const PointerIndexPayload) TokenIndex {
return self.rpipe; return self.rpipe;
} }
}; };
@ -1114,11 +1120,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Else) TokenIndex { pub fn firstToken(self: *const Else) TokenIndex {
return self.else_token; return self.else_token;
} }
pub fn lastToken(self: *Else) TokenIndex { pub fn lastToken(self: *const Else) TokenIndex {
return self.body.lastToken(); return self.body.lastToken();
} }
}; };
@ -1146,11 +1152,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Switch) TokenIndex { pub fn firstToken(self: *const Switch) TokenIndex {
return self.switch_token; return self.switch_token;
} }
pub fn lastToken(self: *Switch) TokenIndex { pub fn lastToken(self: *const Switch) TokenIndex {
return self.rbrace; return self.rbrace;
} }
}; };
@ -1181,11 +1187,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *SwitchCase) TokenIndex { pub fn firstToken(self: *const SwitchCase) TokenIndex {
return (self.items.at(0).*).firstToken(); return (self.items.at(0).*).firstToken();
} }
pub fn lastToken(self: *SwitchCase) TokenIndex { pub fn lastToken(self: *const SwitchCase) TokenIndex {
return self.expr.lastToken(); return self.expr.lastToken();
} }
}; };
@ -1198,11 +1204,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *SwitchElse) TokenIndex { pub fn firstToken(self: *const SwitchElse) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *SwitchElse) TokenIndex { pub fn lastToken(self: *const SwitchElse) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1245,7 +1251,7 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *While) TokenIndex { pub fn firstToken(self: *const While) TokenIndex {
if (self.label) |label| { if (self.label) |label| {
return label; return label;
} }
@ -1257,7 +1263,7 @@ pub const Node = struct {
return self.while_token; return self.while_token;
} }
pub fn lastToken(self: *While) TokenIndex { pub fn lastToken(self: *const While) TokenIndex {
if (self.@"else") |@"else"| { if (self.@"else") |@"else"| {
return @"else".body.lastToken(); return @"else".body.lastToken();
} }
@ -1298,7 +1304,7 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *For) TokenIndex { pub fn firstToken(self: *const For) TokenIndex {
if (self.label) |label| { if (self.label) |label| {
return label; return label;
} }
@ -1310,7 +1316,7 @@ pub const Node = struct {
return self.for_token; return self.for_token;
} }
pub fn lastToken(self: *For) TokenIndex { pub fn lastToken(self: *const For) TokenIndex {
if (self.@"else") |@"else"| { if (self.@"else") |@"else"| {
return @"else".body.lastToken(); return @"else".body.lastToken();
} }
@ -1349,11 +1355,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *If) TokenIndex { pub fn firstToken(self: *const If) TokenIndex {
return self.if_token; return self.if_token;
} }
pub fn lastToken(self: *If) TokenIndex { pub fn lastToken(self: *const If) TokenIndex {
if (self.@"else") |@"else"| { if (self.@"else") |@"else"| {
return @"else".body.lastToken(); return @"else".body.lastToken();
} }
@ -1480,11 +1486,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *InfixOp) TokenIndex { pub fn firstToken(self: *const InfixOp) TokenIndex {
return self.lhs.firstToken(); return self.lhs.firstToken();
} }
pub fn lastToken(self: *InfixOp) TokenIndex { pub fn lastToken(self: *const InfixOp) TokenIndex {
return self.rhs.lastToken(); return self.rhs.lastToken();
} }
}; };
@ -1570,11 +1576,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *PrefixOp) TokenIndex { pub fn firstToken(self: *const PrefixOp) TokenIndex {
return self.op_token; return self.op_token;
} }
pub fn lastToken(self: *PrefixOp) TokenIndex { pub fn lastToken(self: *const PrefixOp) TokenIndex {
return self.rhs.lastToken(); return self.rhs.lastToken();
} }
}; };
@ -1594,11 +1600,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *FieldInitializer) TokenIndex { pub fn firstToken(self: *const FieldInitializer) TokenIndex {
return self.period_token; return self.period_token;
} }
pub fn lastToken(self: *FieldInitializer) TokenIndex { pub fn lastToken(self: *const FieldInitializer) TokenIndex {
return self.expr.lastToken(); return self.expr.lastToken();
} }
}; };
@ -1673,7 +1679,7 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *SuffixOp) TokenIndex { pub fn firstToken(self: *const SuffixOp) TokenIndex {
switch (self.op) { switch (self.op) {
@TagType(Op).Call => |*call_info| if (call_info.async_attr) |async_attr| return async_attr.firstToken(), @TagType(Op).Call => |*call_info| if (call_info.async_attr) |async_attr| return async_attr.firstToken(),
else => {}, else => {},
@ -1681,7 +1687,7 @@ pub const Node = struct {
return self.lhs.firstToken(); return self.lhs.firstToken();
} }
pub fn lastToken(self: *SuffixOp) TokenIndex { pub fn lastToken(self: *const SuffixOp) TokenIndex {
return self.rtoken; return self.rtoken;
} }
}; };
@ -1701,11 +1707,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *GroupedExpression) TokenIndex { pub fn firstToken(self: *const GroupedExpression) TokenIndex {
return self.lparen; return self.lparen;
} }
pub fn lastToken(self: *GroupedExpression) TokenIndex { pub fn lastToken(self: *const GroupedExpression) TokenIndex {
return self.rparen; return self.rparen;
} }
}; };
@ -1749,11 +1755,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ControlFlowExpression) TokenIndex { pub fn firstToken(self: *const ControlFlowExpression) TokenIndex {
return self.ltoken; return self.ltoken;
} }
pub fn lastToken(self: *ControlFlowExpression) TokenIndex { pub fn lastToken(self: *const ControlFlowExpression) TokenIndex {
if (self.rhs) |rhs| { if (self.rhs) |rhs| {
return rhs.lastToken(); return rhs.lastToken();
} }
@ -1792,11 +1798,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Suspend) TokenIndex { pub fn firstToken(self: *const Suspend) TokenIndex {
return self.suspend_token; return self.suspend_token;
} }
pub fn lastToken(self: *Suspend) TokenIndex { pub fn lastToken(self: *const Suspend) TokenIndex {
if (self.body) |body| { if (self.body) |body| {
return body.lastToken(); return body.lastToken();
} }
@ -1813,11 +1819,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *IntegerLiteral) TokenIndex { pub fn firstToken(self: *const IntegerLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *IntegerLiteral) TokenIndex { pub fn lastToken(self: *const IntegerLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1830,11 +1836,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *FloatLiteral) TokenIndex { pub fn firstToken(self: *const FloatLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *FloatLiteral) TokenIndex { pub fn lastToken(self: *const FloatLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1856,11 +1862,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *BuiltinCall) TokenIndex { pub fn firstToken(self: *const BuiltinCall) TokenIndex {
return self.builtin_token; return self.builtin_token;
} }
pub fn lastToken(self: *BuiltinCall) TokenIndex { pub fn lastToken(self: *const BuiltinCall) TokenIndex {
return self.rparen_token; return self.rparen_token;
} }
}; };
@ -1873,11 +1879,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *StringLiteral) TokenIndex { pub fn firstToken(self: *const StringLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *StringLiteral) TokenIndex { pub fn lastToken(self: *const StringLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1892,11 +1898,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *MultilineStringLiteral) TokenIndex { pub fn firstToken(self: *const MultilineStringLiteral) TokenIndex {
return self.lines.at(0).*; return self.lines.at(0).*;
} }
pub fn lastToken(self: *MultilineStringLiteral) TokenIndex { pub fn lastToken(self: *const MultilineStringLiteral) TokenIndex {
return self.lines.at(self.lines.len - 1).*; return self.lines.at(self.lines.len - 1).*;
} }
}; };
@ -1909,11 +1915,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *CharLiteral) TokenIndex { pub fn firstToken(self: *const CharLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *CharLiteral) TokenIndex { pub fn lastToken(self: *const CharLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1926,11 +1932,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *BoolLiteral) TokenIndex { pub fn firstToken(self: *const BoolLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *BoolLiteral) TokenIndex { pub fn lastToken(self: *const BoolLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1943,11 +1949,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *NullLiteral) TokenIndex { pub fn firstToken(self: *const NullLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *NullLiteral) TokenIndex { pub fn lastToken(self: *const NullLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1960,11 +1966,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *UndefinedLiteral) TokenIndex { pub fn firstToken(self: *const UndefinedLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *UndefinedLiteral) TokenIndex { pub fn lastToken(self: *const UndefinedLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -1977,11 +1983,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ThisLiteral) TokenIndex { pub fn firstToken(self: *const ThisLiteral) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *ThisLiteral) TokenIndex { pub fn lastToken(self: *const ThisLiteral) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -2022,11 +2028,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *AsmOutput) TokenIndex { pub fn firstToken(self: *const AsmOutput) TokenIndex {
return self.lbracket; return self.lbracket;
} }
pub fn lastToken(self: *AsmOutput) TokenIndex { pub fn lastToken(self: *const AsmOutput) TokenIndex {
return self.rparen; return self.rparen;
} }
}; };
@ -2054,11 +2060,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *AsmInput) TokenIndex { pub fn firstToken(self: *const AsmInput) TokenIndex {
return self.lbracket; return self.lbracket;
} }
pub fn lastToken(self: *AsmInput) TokenIndex { pub fn lastToken(self: *const AsmInput) TokenIndex {
return self.rparen; return self.rparen;
} }
}; };
@ -2089,11 +2095,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Asm) TokenIndex { pub fn firstToken(self: *const Asm) TokenIndex {
return self.asm_token; return self.asm_token;
} }
pub fn lastToken(self: *Asm) TokenIndex { pub fn lastToken(self: *const Asm) TokenIndex {
return self.rparen; return self.rparen;
} }
}; };
@ -2106,11 +2112,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *Unreachable) TokenIndex { pub fn firstToken(self: *const Unreachable) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *Unreachable) TokenIndex { pub fn lastToken(self: *const Unreachable) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -2123,11 +2129,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *ErrorType) TokenIndex { pub fn firstToken(self: *const ErrorType) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *ErrorType) TokenIndex { pub fn lastToken(self: *const ErrorType) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -2140,11 +2146,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *VarType) TokenIndex { pub fn firstToken(self: *const VarType) TokenIndex {
return self.token; return self.token;
} }
pub fn lastToken(self: *VarType) TokenIndex { pub fn lastToken(self: *const VarType) TokenIndex {
return self.token; return self.token;
} }
}; };
@ -2159,11 +2165,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *DocComment) TokenIndex { pub fn firstToken(self: *const DocComment) TokenIndex {
return self.lines.at(0).*; return self.lines.at(0).*;
} }
pub fn lastToken(self: *DocComment) TokenIndex { pub fn lastToken(self: *const DocComment) TokenIndex {
return self.lines.at(self.lines.len - 1).*; return self.lines.at(self.lines.len - 1).*;
} }
}; };
@ -2184,11 +2190,11 @@ pub const Node = struct {
return null; return null;
} }
pub fn firstToken(self: *TestDecl) TokenIndex { pub fn firstToken(self: *const TestDecl) TokenIndex {
return self.test_token; return self.test_token;
} }
pub fn lastToken(self: *TestDecl) TokenIndex { pub fn lastToken(self: *const TestDecl) TokenIndex {
return self.body_node.lastToken(); return self.body_node.lastToken();
} }
}; };
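
The sweep above is mechanical: every firstToken/lastToken now takes `*const`, so token-range queries work through const pointers without pretending to mutate the node. Together with the const-aware SegmentedList.at earlier in this diff, that enables helpers like the following hedged sketch, which mirrors getNodeSource:

    fn nodeText(tree: *const Tree, node: *const Node) []const u8 {
        const first = tree.tokens.at(node.firstToken());
        const last = tree.tokens.at(node.lastToken());
        return tree.source[first.start..last.end];
    }
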