fix segfault with var args

Andrew Kelley 2019-08-08 15:13:05 -04:00
parent bfa1d12fba
commit cfe84423c9
3 changed files with 143 additions and 148 deletions


@ -15746,7 +15746,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
size_t impl_param_count = impl_fn_type_id->param_count;
if (call_instruction->is_async) {
IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry,
nullptr, casted_args, call_param_count, casted_new_stack);
nullptr, casted_args, impl_param_count, casted_new_stack);
return ir_finish_anal(ira, result);
}
@ -15756,7 +15756,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
IrInstructionCallGen *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base,
impl_fn, nullptr, impl_param_count, casted_args, fn_inline,
call_instruction->is_async, casted_new_stack, result_loc,
false, casted_new_stack, result_loc,
impl_fn_type_id->return_type);
parent_fn_entry->call_list.append(new_call_instruction);
@ -15799,7 +15799,9 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *c
casted_args[next_arg_index] = casted_arg;
next_arg_index += 1;
}
for (size_t call_i = 0; call_i < call_instruction->arg_count; call_i += 1) {
size_t iter_count = (call_param_count < call_instruction->arg_count) ?
call_param_count : call_instruction->arg_count;
for (size_t call_i = 0; call_i < iter_count; call_i += 1) {
IrInstruction *old_arg = call_instruction->args[call_i]->child;
if (type_is_invalid(old_arg->value.type))
return ira->codegen->invalid_instruction;

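Three changes in ir_analyze_fn_call fix the crash: the async path now passes impl_param_count rather than call_param_count to ir_analyze_async_call, ir_build_call_gen receives false where it previously received call_instruction->is_async, and the argument-copy loop is clamped to the smaller of call_param_count and call_instruction->arg_count instead of always running to arg_count, so a var-args call can no longer iterate past the parameter count the call was analyzed with. A minimal Zig sketch of that clamp-before-iterate pattern (the copyClamped helper and its slices are illustrative, not compiler code):

const std = @import("std");

// Never loop further than the shorter of the two lengths involved, mirroring
// iter_count = min(call_param_count, arg_count) in the hunk above.
fn copyClamped(dest: []i32, src: []const i32) usize {
    const iter_count = if (dest.len < src.len) dest.len else src.len;
    var i: usize = 0;
    while (i < iter_count) : (i += 1) {
        dest[i] = src[i];
    }
    return iter_count;
}

test "copyClamped never indexes past either slice" {
    var dest = [_]i32{ 0, 0, 0 };
    const src = [_]i32{ 1, 2, 3, 4, 5 };
    std.testing.expect(copyClamped(dest[0..], src[0..]) == 3);
    std.testing.expect(dest[2] == 3);
}

The standard library hunks follow: first the event-based file-system wrappers, then the event loop itself.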

@ -83,10 +83,10 @@ pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: us
resume @handle();
}
switch (builtin.os) {
builtin.Os.macosx,
builtin.Os.linux,
builtin.Os.freebsd,
builtin.Os.netbsd,
.macosx,
.linux,
.freebsd,
.netbsd,
=> {
const iovecs = try loop.allocator.alloc(os.iovec_const, data.len);
defer loop.allocator.free(iovecs);
@ -100,7 +100,7 @@ pub async fn pwritev(loop: *Loop, fd: fd_t, data: []const []const u8, offset: us
return await (async pwritevPosix(loop, fd, iovecs, offset) catch unreachable);
},
builtin.Os.windows => {
.windows => {
const data_copy = try std.mem.dupe(loop.allocator, []const u8, data);
defer loop.allocator.free(data_copy);
return await (async pwritevWindows(loop, fd, data, offset) catch unreachable);
@ -220,10 +220,10 @@ pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PR
assert(data.len != 0);
switch (builtin.os) {
builtin.Os.macosx,
builtin.Os.linux,
builtin.Os.freebsd,
builtin.Os.netbsd,
.macosx,
.linux,
.freebsd,
.netbsd,
=> {
const iovecs = try loop.allocator.alloc(os.iovec, data.len);
defer loop.allocator.free(iovecs);
@ -237,7 +237,7 @@ pub async fn preadv(loop: *Loop, fd: fd_t, data: []const []u8, offset: usize) PR
return await (async preadvPosix(loop, fd, iovecs, offset) catch unreachable);
},
builtin.Os.windows => {
.windows => {
const data_copy = try std.mem.dupe(loop.allocator, []u8, data);
defer loop.allocator.free(data_copy);
return await (async preadvWindows(loop, fd, data_copy, offset) catch unreachable);
@ -403,12 +403,12 @@ pub async fn openPosix(
pub async fn openRead(loop: *Loop, path: []const u8) File.OpenError!fd_t {
switch (builtin.os) {
builtin.Os.macosx, builtin.Os.linux, builtin.Os.freebsd, builtin.Os.netbsd => {
.macosx, .linux, .freebsd, .netbsd => {
const flags = os.O_LARGEFILE | os.O_RDONLY | os.O_CLOEXEC;
return await (async openPosix(loop, path, flags, File.default_mode) catch unreachable);
},
builtin.Os.windows => return windows.CreateFile(
.windows => return windows.CreateFile(
path,
windows.GENERIC_READ,
windows.FILE_SHARE_READ,
@ -431,15 +431,15 @@ pub async fn openWrite(loop: *Loop, path: []const u8) File.OpenError!fd_t {
/// Creates if does not exist. Truncates the file if it exists.
pub async fn openWriteMode(loop: *Loop, path: []const u8, mode: File.Mode) File.OpenError!fd_t {
switch (builtin.os) {
builtin.Os.macosx,
builtin.Os.linux,
builtin.Os.freebsd,
builtin.Os.netbsd,
.macosx,
.linux,
.freebsd,
.netbsd,
=> {
const flags = os.O_LARGEFILE | os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC | os.O_TRUNC;
return await (async openPosix(loop, path, flags, File.default_mode) catch unreachable);
},
builtin.Os.windows => return windows.CreateFile(
.windows => return windows.CreateFile(
path,
windows.GENERIC_WRITE,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
@ -459,12 +459,12 @@ pub async fn openReadWrite(
mode: File.Mode,
) File.OpenError!fd_t {
switch (builtin.os) {
builtin.Os.macosx, builtin.Os.linux, builtin.Os.freebsd, builtin.Os.netbsd => {
.macosx, .linux, .freebsd, .netbsd => {
const flags = os.O_LARGEFILE | os.O_RDWR | os.O_CREAT | os.O_CLOEXEC;
return await (async openPosix(loop, path, flags, mode) catch unreachable);
},
builtin.Os.windows => return windows.CreateFile(
.windows => return windows.CreateFile(
path,
windows.GENERIC_WRITE | windows.GENERIC_READ,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
@ -489,9 +489,9 @@ pub const CloseOperation = struct {
os_data: OsData,
const OsData = switch (builtin.os) {
builtin.Os.linux, builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => OsDataPosix,
.linux, .macosx, .freebsd, .netbsd => OsDataPosix,
builtin.Os.windows => struct {
.windows => struct {
handle: ?fd_t,
},
@ -508,8 +508,8 @@ pub const CloseOperation = struct {
self.* = CloseOperation{
.loop = loop,
.os_data = switch (builtin.os) {
builtin.Os.linux, builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => initOsDataPosix(self),
builtin.Os.windows => OsData{ .handle = null },
.linux, .macosx, .freebsd, .netbsd => initOsDataPosix(self),
.windows => OsData{ .handle = null },
else => @compileError("Unsupported OS"),
},
};
@ -535,10 +535,10 @@ pub const CloseOperation = struct {
/// Defer this after creating.
pub fn finish(self: *CloseOperation) void {
switch (builtin.os) {
builtin.Os.linux,
builtin.Os.macosx,
builtin.Os.freebsd,
builtin.Os.netbsd,
.linux,
.macosx,
.freebsd,
.netbsd,
=> {
if (self.os_data.have_fd) {
self.loop.posixFsRequest(&self.os_data.close_req_node);
@ -546,7 +546,7 @@ pub const CloseOperation = struct {
self.loop.allocator.destroy(self);
}
},
builtin.Os.windows => {
.windows => {
if (self.os_data.handle) |handle| {
os.close(handle);
}
@ -558,15 +558,15 @@ pub const CloseOperation = struct {
pub fn setHandle(self: *CloseOperation, handle: fd_t) void {
switch (builtin.os) {
builtin.Os.linux,
builtin.Os.macosx,
builtin.Os.freebsd,
builtin.Os.netbsd,
.linux,
.macosx,
.freebsd,
.netbsd,
=> {
self.os_data.close_req_node.data.msg.Close.fd = handle;
self.os_data.have_fd = true;
},
builtin.Os.windows => {
.windows => {
self.os_data.handle = handle;
},
else => @compileError("Unsupported OS"),
@ -576,14 +576,14 @@ pub const CloseOperation = struct {
/// Undo a `setHandle`.
pub fn clearHandle(self: *CloseOperation) void {
switch (builtin.os) {
builtin.Os.linux,
builtin.Os.macosx,
builtin.Os.freebsd,
builtin.Os.netbsd,
.linux,
.macosx,
.freebsd,
.netbsd,
=> {
self.os_data.have_fd = false;
},
builtin.Os.windows => {
.windows => {
self.os_data.handle = null;
},
else => @compileError("Unsupported OS"),
@ -592,15 +592,15 @@ pub const CloseOperation = struct {
pub fn getHandle(self: *CloseOperation) fd_t {
switch (builtin.os) {
builtin.Os.linux,
builtin.Os.macosx,
builtin.Os.freebsd,
builtin.Os.netbsd,
.linux,
.macosx,
.freebsd,
.netbsd,
=> {
assert(self.os_data.have_fd);
return self.os_data.close_req_node.data.msg.Close.fd;
},
builtin.Os.windows => {
.windows => {
return self.os_data.handle.?;
},
else => @compileError("Unsupported OS"),
@ -617,12 +617,12 @@ pub async fn writeFile(loop: *Loop, path: []const u8, contents: []const u8) !voi
/// contents must remain alive until writeFile completes.
pub async fn writeFileMode(loop: *Loop, path: []const u8, contents: []const u8, mode: File.Mode) !void {
switch (builtin.os) {
builtin.Os.linux,
builtin.Os.macosx,
builtin.Os.freebsd,
builtin.Os.netbsd,
.linux,
.macosx,
.freebsd,
.netbsd,
=> return await (async writeFileModeThread(loop, path, contents, mode) catch unreachable),
builtin.Os.windows => return await (async writeFileWindows(loop, path, contents) catch unreachable),
.windows => return await (async writeFileWindows(loop, path, contents) catch unreachable),
else => @compileError("Unsupported OS"),
}
}
@ -728,7 +728,7 @@ pub fn Watch(comptime V: type) type {
os_data: OsData,
const OsData = switch (builtin.os) {
builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => struct {
.macosx, .freebsd, .netbsd => struct {
file_table: FileTable,
table_lock: event.Lock,
@ -739,8 +739,8 @@ pub fn Watch(comptime V: type) type {
};
},
builtin.Os.linux => LinuxOsData,
builtin.Os.windows => WindowsOsData,
.linux => LinuxOsData,
.windows => WindowsOsData,
else => @compileError("Unsupported OS"),
};
@ -793,7 +793,7 @@ pub fn Watch(comptime V: type) type {
errdefer channel.destroy();
switch (builtin.os) {
builtin.Os.linux => {
.linux => {
const inotify_fd = try os.inotify_init1(os.linux.IN_NONBLOCK | os.linux.IN_CLOEXEC);
errdefer os.close(inotify_fd);
@ -802,7 +802,7 @@ pub fn Watch(comptime V: type) type {
return result;
},
builtin.Os.windows => {
.windows => {
const self = try loop.allocator.create(Self);
errdefer loop.allocator.destroy(self);
self.* = Self{
@ -817,7 +817,7 @@ pub fn Watch(comptime V: type) type {
return self;
},
builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => {
.macosx, .freebsd, .netbsd => {
const self = try loop.allocator.create(Self);
errdefer loop.allocator.destroy(self);
@ -837,7 +837,7 @@ pub fn Watch(comptime V: type) type {
/// All addFile calls and removeFile calls must have completed.
pub fn destroy(self: *Self) void {
switch (builtin.os) {
builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => {
.macosx, .freebsd, .netbsd => {
// TODO we need to cancel the coroutines before destroying the lock
self.os_data.table_lock.deinit();
var it = self.os_data.file_table.iterator();
@ -847,8 +847,8 @@ pub fn Watch(comptime V: type) type {
}
self.channel.destroy();
},
builtin.Os.linux => cancel self.os_data.putter,
builtin.Os.windows => {
.linux => cancel self.os_data.putter,
.windows => {
while (self.os_data.all_putters.get()) |putter_node| {
cancel putter_node.data;
}
@ -879,9 +879,9 @@ pub fn Watch(comptime V: type) type {
pub async fn addFile(self: *Self, file_path: []const u8, value: V) !?V {
switch (builtin.os) {
builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => return await (async addFileKEvent(self, file_path, value) catch unreachable),
builtin.Os.linux => return await (async addFileLinux(self, file_path, value) catch unreachable),
builtin.Os.windows => return await (async addFileWindows(self, file_path, value) catch unreachable),
.macosx, .freebsd, .netbsd => return await (async addFileKEvent(self, file_path, value) catch unreachable),
.linux => return await (async addFileLinux(self, file_path, value) catch unreachable),
.windows => return await (async addFileWindows(self, file_path, value) catch unreachable),
else => @compileError("Unsupported OS"),
}
}

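Every change in the file-system wrapper hunks above is the same mechanical edit: switch prongs on builtin.os drop the fully qualified builtin.Os. prefix in favor of inferred enum literals, with no behavioral difference. A minimal sketch of the two spellings (the Os enum here is a stand-in, not the real builtin.Os):

const std = @import("std");

const Os = enum { linux, macosx, windows };

fn isPosix(os: Os) bool {
    return switch (os) {
        // Old spelling: Os.linux, Os.macosx => true,
        // The enum literal form lets the compiler infer the enum type from
        // the switch operand, which is all the hunks above rely on.
        .linux, .macosx => true,
        .windows => false,
    };
}

test "enum literal prongs" {
    std.testing.expect(isPosix(Os.linux));
    std.testing.expect(!isPosix(Os.windows));
}

The event loop file below carries the substantive changes: promise becomes anyframe, the blocking fs thread is stubbed out with TODOs, and call and yield are reworked.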

@ -13,7 +13,7 @@ const Thread = std.Thread;
pub const Loop = struct {
allocator: *mem.Allocator,
next_tick_queue: std.atomic.Queue(promise),
next_tick_queue: std.atomic.Queue(anyframe),
os_data: OsData,
final_resume_node: ResumeNode,
pending_event_count: usize,
@ -24,11 +24,11 @@ pub const Loop = struct {
available_eventfd_resume_nodes: std.atomic.Stack(ResumeNode.EventFd),
eventfd_resume_nodes: []std.atomic.Stack(ResumeNode.EventFd).Node,
pub const NextTickNode = std.atomic.Queue(promise).Node;
pub const NextTickNode = std.atomic.Queue(anyframe).Node;
pub const ResumeNode = struct {
id: Id,
handle: promise,
handle: anyframe,
overlapped: Overlapped,
pub const overlapped_init = switch (builtin.os) {
@ -110,7 +110,7 @@ pub const Loop = struct {
.pending_event_count = 1,
.allocator = allocator,
.os_data = undefined,
.next_tick_queue = std.atomic.Queue(promise).init(),
.next_tick_queue = std.atomic.Queue(anyframe).init(),
.extra_threads = undefined,
.available_eventfd_resume_nodes = std.atomic.Stack(ResumeNode.EventFd).init(),
.eventfd_resume_nodes = undefined,
@ -148,18 +148,18 @@ pub const Loop = struct {
fn initOsData(self: *Loop, extra_thread_count: usize) InitOsDataError!void {
switch (builtin.os) {
.linux => {
self.os_data.fs_queue = std.atomic.Queue(fs.Request).init();
self.os_data.fs_queue_item = 0;
// we need another thread for the file system because Linux does not have an async
// file system I/O API.
self.os_data.fs_end_request = fs.RequestNode{
.prev = undefined,
.next = undefined,
.data = fs.Request{
.msg = fs.Request.Msg.End,
.finish = fs.Request.Finish.NoAction,
},
};
// TODO self.os_data.fs_queue = std.atomic.Queue(fs.Request).init();
// TODO self.os_data.fs_queue_item = 0;
// TODO // we need another thread for the file system because Linux does not have an async
// TODO // file system I/O API.
// TODO self.os_data.fs_end_request = fs.RequestNode{
// TODO .prev = undefined,
// TODO .next = undefined,
// TODO .data = fs.Request{
// TODO .msg = fs.Request.Msg.End,
// TODO .finish = fs.Request.Finish.NoAction,
// TODO },
// TODO };
errdefer {
while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
@ -197,10 +197,10 @@ pub const Loop = struct {
&self.os_data.final_eventfd_event,
);
self.os_data.fs_thread = try Thread.spawn(self, posixFsRun);
// TODO self.os_data.fs_thread = try Thread.spawn(self, posixFsRun);
errdefer {
self.posixFsRequest(&self.os_data.fs_end_request);
self.os_data.fs_thread.wait();
// TODO self.posixFsRequest(&self.os_data.fs_end_request);
// TODO self.os_data.fs_thread.wait();
}
if (builtin.single_threaded) {
@ -302,10 +302,10 @@ pub const Loop = struct {
.udata = undefined,
};
self.os_data.fs_thread = try Thread.spawn(self, posixFsRun);
// TODO self.os_data.fs_thread = try Thread.spawn(self, posixFsRun);
errdefer {
self.posixFsRequest(&self.os_data.fs_end_request);
self.os_data.fs_thread.wait();
// TODO self.posixFsRequest(&self.os_data.fs_end_request);
// TODO self.os_data.fs_thread.wait();
}
if (builtin.single_threaded) {
@ -397,7 +397,7 @@ pub const Loop = struct {
}
}
/// resume_node must live longer than the promise that it holds a reference to.
/// resume_node must live longer than the anyframe that it holds a reference to.
/// flags must contain EPOLLET
pub fn linuxAddFd(self: *Loop, fd: i32, resume_node: *ResumeNode, flags: u32) !void {
assert(flags & os.EPOLLET == os.EPOLLET);
@ -460,7 +460,7 @@ pub const Loop = struct {
return resume_node.kev;
}
/// resume_node must live longer than the promise that it holds a reference to.
/// resume_node must live longer than the anyframe that it holds a reference to.
pub fn bsdAddKev(self: *Loop, resume_node: *ResumeNode.Basic, ident: usize, filter: i16, fflags: u32) !void {
self.beginOneEvent();
errdefer self.finishOneEvent();
@ -561,11 +561,11 @@ pub const Loop = struct {
self.workerRun();
switch (builtin.os) {
builtin.Os.linux,
builtin.Os.macosx,
builtin.Os.freebsd,
builtin.Os.netbsd,
=> self.os_data.fs_thread.wait(),
.linux,
.macosx,
.freebsd,
.netbsd,
=> {}, // TODO self.os_data.fs_thread.wait(),
else => {},
}
@ -574,45 +574,39 @@ pub const Loop = struct {
}
}
/// This is equivalent to an async call, except instead of beginning execution of the async function,
/// it immediately returns to the caller, and the async function is queued in the event loop. It still
/// returns a promise to be awaited.
pub fn call(self: *Loop, comptime func: var, args: ...) !(promise->@typeOf(func).ReturnType) {
const S = struct {
async fn asyncFunc(loop: *Loop, handle: *promise->@typeOf(func).ReturnType, args2: ...) @typeOf(func).ReturnType {
suspend {
handle.* = @handle();
var my_tick_node = Loop.NextTickNode{
.prev = undefined,
.next = undefined,
.data = @handle(),
};
loop.onNextTick(&my_tick_node);
}
// TODO guaranteed allocation elision for await in same func as async
return await (async func(args2) catch unreachable);
}
};
var handle: promise->@typeOf(func).ReturnType = undefined;
return async<self.allocator> S.asyncFunc(self, &handle, args);
/// This is equivalent to function call, except it calls `startCpuBoundOperation` first.
pub fn call(comptime func: var, args: ...) @typeOf(func).ReturnType {
startCpuBoundOperation();
return func(args);
}
/// Awaiting a yield lets the event loop run, starting any unstarted async operations.
/// Yielding lets the event loop run, starting any unstarted async operations.
/// Note that async operations automatically start when a function yields for any other reason,
/// for example, when async I/O is performed. This function is intended to be used only when
/// CPU bound tasks would be waiting in the event loop but never get started because no async I/O
/// is performed.
pub async fn yield(self: *Loop) void {
pub fn yield(self: *Loop) void {
suspend {
var my_tick_node = Loop.NextTickNode{
var my_tick_node = NextTickNode{
.prev = undefined,
.next = undefined,
.data = @handle(),
.data = @frame(),
};
self.onNextTick(&my_tick_node);
}
}
/// If the build is multi-threaded and there is an event loop, then it calls `yield`. Otherwise,
/// does nothing.
pub fn startCpuBoundOperation() void {
if (builtin.is_single_threaded) {
return;
} else if (instance) |event_loop| {
event_loop.yield();
}
}
/// call finishOneEvent when done
pub fn beginOneEvent(self: *Loop) void {
_ = @atomicRmw(usize, &self.pending_event_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
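call is no longer an async method that schedules the function through the loop's allocator; it is now a plain wrapper that runs startCpuBoundOperation and then invokes func directly, while yield keeps the suspend-and-reschedule behavior using the new @frame() builtin (replacing @handle()) and anyframe handles. A standalone sketch of that suspend-now, resume-later pattern with no event loop involved (the saved global and park helper are illustrative):

const std = @import("std");

var saved: anyframe = undefined;

fn park(done: *bool) void {
    suspend {
        // Record a handle to this frame so other code can resume it later,
        // just as yield stores @frame() in a NextTickNode.
        saved = @frame();
    }
    done.* = true;
}

test "suspend now, resume later" {
    var done = false;
    var frame = async park(&done);
    std.testing.expect(!done);
    resume saved;
    std.testing.expect(done);
}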
@ -624,7 +618,7 @@ pub const Loop = struct {
// cause all the threads to stop
switch (builtin.os) {
.linux => {
self.posixFsRequest(&self.os_data.fs_end_request);
// TODO self.posixFsRequest(&self.os_data.fs_end_request);
// writing 8 bytes to an eventfd cannot fail
os.write(self.os_data.final_eventfd, wakeup_bytes) catch unreachable;
return;
@ -672,9 +666,9 @@ pub const Loop = struct {
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
ResumeNode.Id.Basic => {},
ResumeNode.Id.Stop => return,
ResumeNode.Id.EventFd => {
.Basic => {},
.Stop => return,
.EventFd => {
const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
event_fd_node.epoll_op = os.EPOLL_CTL_MOD;
const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
@ -696,12 +690,12 @@ pub const Loop = struct {
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
ResumeNode.Id.Basic => {
.Basic => {
const basic_node = @fieldParentPtr(ResumeNode.Basic, "base", resume_node);
basic_node.kev = ev;
},
ResumeNode.Id.Stop => return,
ResumeNode.Id.EventFd => {
.Stop => return,
.EventFd => {
const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
self.available_eventfd_resume_nodes.push(stack_node);
@ -730,9 +724,9 @@ pub const Loop = struct {
const handle = resume_node.handle;
const resume_node_id = resume_node.id;
switch (resume_node_id) {
ResumeNode.Id.Basic => {},
ResumeNode.Id.Stop => return,
ResumeNode.Id.EventFd => {
.Basic => {},
.Stop => return,
.EventFd => {
const event_fd_node = @fieldParentPtr(ResumeNode.EventFd, "base", resume_node);
const stack_node = @fieldParentPtr(std.atomic.Stack(ResumeNode.EventFd).Node, "data", event_fd_node);
self.available_eventfd_resume_nodes.push(stack_node);
@ -750,12 +744,12 @@ pub const Loop = struct {
self.beginOneEvent(); // finished in posixFsRun after processing the msg
self.os_data.fs_queue.put(request_node);
switch (builtin.os) {
builtin.Os.macosx, builtin.Os.freebsd, builtin.Os.netbsd => {
.macosx, .freebsd, .netbsd => {
const fs_kevs = (*const [1]os.Kevent)(&self.os_data.fs_kevent_wake);
const empty_kevs = ([*]os.Kevent)(undefined)[0..0];
_ = os.kevent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable;
},
builtin.Os.linux => {
.linux => {
_ = @atomicRmw(i32, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
const rc = os.linux.futex_wake(&self.os_data.fs_queue_item, os.linux.FUTEX_WAKE, 1);
switch (os.linux.getErrno(rc)) {
@ -781,18 +775,18 @@ pub const Loop = struct {
}
while (self.os_data.fs_queue.get()) |node| {
switch (node.data.msg) {
@TagType(fs.Request.Msg).End => return,
@TagType(fs.Request.Msg).PWriteV => |*msg| {
.End => return,
.PWriteV => |*msg| {
msg.result = os.pwritev(msg.fd, msg.iov, msg.offset);
},
@TagType(fs.Request.Msg).PReadV => |*msg| {
.PReadV => |*msg| {
msg.result = os.preadv(msg.fd, msg.iov, msg.offset);
},
@TagType(fs.Request.Msg).Open => |*msg| {
.Open => |*msg| {
msg.result = os.openC(msg.path.ptr, msg.flags, msg.mode);
},
@TagType(fs.Request.Msg).Close => |*msg| os.close(msg.fd),
@TagType(fs.Request.Msg).WriteFile => |*msg| blk: {
.Close => |*msg| os.close(msg.fd),
.WriteFile => |*msg| blk: {
const flags = os.O_LARGEFILE | os.O_WRONLY | os.O_CREAT |
os.O_CLOEXEC | os.O_TRUNC;
const fd = os.openC(msg.path.ptr, flags, msg.mode) catch |err| {
@ -804,11 +798,11 @@ pub const Loop = struct {
},
}
switch (node.data.finish) {
@TagType(fs.Request.Finish).TickNode => |*tick_node| self.onNextTick(tick_node),
@TagType(fs.Request.Finish).DeallocCloseOperation => |close_op| {
.TickNode => |*tick_node| self.onNextTick(tick_node),
.DeallocCloseOperation => |close_op| {
self.allocator.destroy(close_op);
},
@TagType(fs.Request.Finish).NoAction => {},
.NoAction => {},
}
self.finishOneEvent();
}
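The posixFsRun dispatch above makes the same prong simplification for a tagged union: @TagType(fs.Request.Msg).PWriteV becomes the inferred literal .PWriteV, with the payload captures unchanged. A minimal sketch of switching on a tagged union that way (the Msg union is illustrative):

const std = @import("std");

const Msg = union(enum) {
    End: void,
    Open: i32,
};

fn describe(msg: Msg) i32 {
    return switch (msg) {
        // Prongs name the tag with an inferred literal rather than the old
        // @TagType(Msg).Open spelling, and still capture the payload.
        .End => 0,
        .Open => |fd| fd,
    };
}

test "inferred tag literals on a tagged union" {
    std.testing.expect(describe(Msg{ .Open = 42 }) == 42);
    std.testing.expect(describe(Msg{ .End = {} }) == 0);
}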
@ -855,16 +849,16 @@ pub const Loop = struct {
epollfd: i32,
final_eventfd: i32,
final_eventfd_event: os.linux.epoll_event,
fs_thread: *Thread,
fs_queue_item: i32,
fs_queue: std.atomic.Queue(fs.Request),
fs_end_request: fs.RequestNode,
// TODO fs_thread: *Thread,
// TODO fs_queue_item: i32,
// TODO fs_queue: std.atomic.Queue(fs.Request),
// TODO fs_end_request: fs.RequestNode,
};
};
test "std.event.Loop - basic" {
// https://github.com/ziglang/zig/issues/1908
if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest;
if (builtin.single_threaded) return error.SkipZigTest;
const allocator = std.heap.direct_allocator;
@ -877,7 +871,7 @@ test "std.event.Loop - basic" {
test "std.event.Loop - call" {
// https://github.com/ziglang/zig/issues/1908
if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest;
if (builtin.single_threaded) return error.SkipZigTest;
const allocator = std.heap.direct_allocator;
@ -886,9 +880,8 @@ test "std.event.Loop - call" {
defer loop.deinit();
var did_it = false;
const handle = try loop.call(testEventLoop);
const handle2 = try loop.call(testEventLoop2, handle, &did_it);
defer cancel handle2;
const handle = async Loop.call(testEventLoop);
const handle2 = async Loop.call(testEventLoop2, handle, &did_it);
loop.run();
@ -899,7 +892,7 @@ async fn testEventLoop() i32 {
return 1234;
}
async fn testEventLoop2(h: promise->i32, did_it: *bool) void {
async fn testEventLoop2(h: anyframe->i32, did_it: *bool) void {
const value = await h;
testing.expect(value == 1234);
did_it.* = true;
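testEventLoop2 now takes anyframe->i32 instead of promise->i32: a type-erased handle to any frame whose function returns i32, which can be awaited exactly once. A minimal sketch of passing and awaiting such a handle outside the event loop (the produce and consume names are illustrative):

const std = @import("std");
const testing = std.testing;

async fn produce() i32 {
    return 1234;
}

// A *@Frame(produce) coerces to anyframe->i32, similar to how the updated
// test passes the frame of testEventLoop to testEventLoop2.
async fn consume(h: anyframe->i32, out: *i32) void {
    out.* = await h;
}

test "await through an anyframe handle" {
    var result: i32 = 0;
    var producer = async produce();
    var consumer = async consume(&producer, &result);
    testing.expect(result == 1234);
}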