Merge pull request #4404 from ziglang/async-std

a big step towards std lib integration with async I/O
This commit is contained in:
Andrew Kelley 2020-02-10 00:22:59 -05:00 committed by GitHub
commit 014f66e6de
42 changed files with 1538 additions and 1181 deletions

View File

@ -72,14 +72,13 @@ pub fn build(b: *Builder) !void {
const skip_release_safe = b.option(bool, "skip-release-safe", "Main test suite skips release-safe builds") orelse skip_release;
const skip_non_native = b.option(bool, "skip-non-native", "Main test suite skips non-native builds") orelse false;
const skip_libc = b.option(bool, "skip-libc", "Main test suite skips tests that link libc") orelse false;
const skip_self_hosted = b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") orelse false;
if (!skip_self_hosted and builtin.os == .linux) {
// TODO evented I/O other OS's
const skip_self_hosted = (b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") orelse false) or true; // TODO evented I/O good enough that this passes everywhere
if (!skip_self_hosted) {
test_step.dependOn(&exe.step);
}
const only_install_lib_files = b.option(bool, "lib-files-only", "Only install library files") orelse false;
if (!only_install_lib_files) {
if (!only_install_lib_files and !skip_self_hosted) {
b.default_step.dependOn(&exe.step);
exe.install();
}

View File

@ -34,10 +34,10 @@ pub fn main() !void {
const out_file_name = try (args_it.next(allocator) orelse @panic("expected output arg"));
defer allocator.free(out_file_name);
var in_file = try fs.File.openRead(in_file_name);
var in_file = try fs.cwd().openFile(in_file_name, .{ .read = true });
defer in_file.close();
var out_file = try fs.File.openWrite(out_file_name);
var out_file = try fs.cwd().createFile(out_file_name, .{});
defer out_file.close();
var file_in_stream = in_file.inStream();

View File

@ -113,11 +113,20 @@ pub fn Queue(comptime T: type) type {
pub fn dumpToStream(self: *Self, comptime Error: type, stream: *std.io.OutStream(Error)) Error!void {
const S = struct {
fn dumpRecursive(s: *std.io.OutStream(Error), optional_node: ?*Node, indent: usize) Error!void {
fn dumpRecursive(
s: *std.io.OutStream(Error),
optional_node: ?*Node,
indent: usize,
comptime depth: comptime_int,
) Error!void {
try s.writeByteNTimes(' ', indent);
if (optional_node) |node| {
try s.print("0x{x}={}\n", .{ @ptrToInt(node), node.data });
try dumpRecursive(s, node.next, indent + 1);
if (depth == 0) {
try s.print("(max depth)\n", .{});
return;
}
try dumpRecursive(s, node.next, indent + 1, depth - 1);
} else {
try s.print("(null)\n", .{});
}
@ -127,9 +136,9 @@ pub fn Queue(comptime T: type) type {
defer held.release();
try stream.print("head: ", .{});
try S.dumpRecursive(stream, self.head, 0);
try S.dumpRecursive(stream, self.head, 0, 4);
try stream.print("tail: ", .{});
try S.dumpRecursive(stream, self.tail, 0);
try S.dumpRecursive(stream, self.tail, 0, 4);
}
};
}

View File

@ -458,6 +458,7 @@ pub const ExportOptions = struct {
pub const TestFn = struct {
name: []const u8,
func: fn () anyerror!void,
async_frame_size: ?usize,
};
/// This function type is used by the Zig language code generation and

View File

@ -121,6 +121,7 @@ pub extern "c" fn sysctlbyname(name: [*:0]const u8, oldp: ?*c_void, oldlenp: ?*u
pub extern "c" fn sysctlnametomib(name: [*:0]const u8, mibp: ?*c_int, sizep: ?*usize) c_int;
pub extern "c" fn tcgetattr(fd: fd_t, termios_p: *termios) c_int;
pub extern "c" fn tcsetattr(fd: fd_t, optional_action: TCSA, termios_p: *const termios) c_int;
pub extern "c" fn fcntl(fd: fd_t, cmd: c_int, ...) c_int;
pub extern "c" fn gethostname(name: [*]u8, len: usize) c_int;
pub extern "c" fn bind(socket: fd_t, address: ?*const sockaddr, address_len: socklen_t) c_int;

View File

@ -329,17 +329,18 @@ pub const ChildProcess = struct {
}
fn spawnPosix(self: *ChildProcess) SpawnError!void {
const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try os.pipe() else undefined;
const pipe_flags = if (io.is_async) os.O_NONBLOCK else 0;
const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try os.pipe2(pipe_flags) else undefined;
errdefer if (self.stdin_behavior == StdIo.Pipe) {
destroyPipe(stdin_pipe);
};
const stdout_pipe = if (self.stdout_behavior == StdIo.Pipe) try os.pipe() else undefined;
const stdout_pipe = if (self.stdout_behavior == StdIo.Pipe) try os.pipe2(pipe_flags) else undefined;
errdefer if (self.stdout_behavior == StdIo.Pipe) {
destroyPipe(stdout_pipe);
};
const stderr_pipe = if (self.stderr_behavior == StdIo.Pipe) try os.pipe() else undefined;
const stderr_pipe = if (self.stderr_behavior == StdIo.Pipe) try os.pipe2(pipe_flags) else undefined;
errdefer if (self.stderr_behavior == StdIo.Pipe) {
destroyPipe(stderr_pipe);
};
@ -426,17 +427,26 @@ pub const ChildProcess = struct {
// we are the parent
const pid = @intCast(i32, pid_result);
if (self.stdin_behavior == StdIo.Pipe) {
self.stdin = File.openHandle(stdin_pipe[1]);
self.stdin = File{
.handle = stdin_pipe[1],
.io_mode = std.io.mode,
};
} else {
self.stdin = null;
}
if (self.stdout_behavior == StdIo.Pipe) {
self.stdout = File.openHandle(stdout_pipe[0]);
self.stdout = File{
.handle = stdout_pipe[0],
.io_mode = std.io.mode,
};
} else {
self.stdout = null;
}
if (self.stderr_behavior == StdIo.Pipe) {
self.stderr = File.openHandle(stderr_pipe[0]);
self.stderr = File{
.handle = stderr_pipe[0],
.io_mode = std.io.mode,
};
} else {
self.stderr = null;
}
@ -661,17 +671,26 @@ pub const ChildProcess = struct {
};
if (g_hChildStd_IN_Wr) |h| {
self.stdin = File.openHandle(h);
self.stdin = File{
.handle = h,
.io_mode = io.mode,
};
} else {
self.stdin = null;
}
if (g_hChildStd_OUT_Rd) |h| {
self.stdout = File.openHandle(h);
self.stdout = File{
.handle = h,
.io_mode = io.mode,
};
} else {
self.stdout = null;
}
if (g_hChildStd_ERR_Rd) |h| {
self.stderr = File.openHandle(h);
self.stderr = File{
.handle = h,
.io_mode = io.mode,
};
} else {
self.stderr = null;
}
@ -693,10 +712,10 @@ pub const ChildProcess = struct {
fn setUpChildIo(stdio: StdIo, pipe_fd: i32, std_fileno: i32, dev_null_fd: i32) !void {
switch (stdio) {
StdIo.Pipe => try os.dup2(pipe_fd, std_fileno),
StdIo.Close => os.close(std_fileno),
StdIo.Inherit => {},
StdIo.Ignore => try os.dup2(dev_null_fd, std_fileno),
.Pipe => try os.dup2(pipe_fd, std_fileno),
.Close => os.close(std_fileno),
.Inherit => {},
.Ignore => try os.dup2(dev_null_fd, std_fileno),
}
}
};
@ -811,12 +830,22 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
const ErrInt = @IntType(false, @sizeOf(anyerror) * 8);
fn writeIntFd(fd: i32, value: ErrInt) !void {
const stream = &File.openHandle(fd).outStream().stream;
const file = File{
.handle = fd,
.io_mode = .blocking,
.async_block_allowed = File.async_block_allowed_yes,
};
const stream = &file.outStream().stream;
stream.writeIntNative(u64, @intCast(u64, value)) catch return error.SystemResources;
}
fn readIntFd(fd: i32) !ErrInt {
const stream = &File.openHandle(fd).inStream().stream;
const file = File{
.handle = fd,
.io_mode = .blocking,
.async_block_allowed = File.async_block_allowed_yes,
};
const stream = &file.inStream().stream;
return @intCast(ErrInt, stream.readIntNative(u64) catch return error.SystemResources);
}

View File

@ -50,7 +50,7 @@ pub fn warn(comptime fmt: []const u8, args: var) void {
const held = stderr_mutex.acquire();
defer held.release();
const stderr = getStderrStream();
stderr.print(fmt, args) catch return;
noasync stderr.print(fmt, args) catch return;
}
pub fn getStderrStream() *io.OutStream(File.WriteError) {
@ -102,15 +102,15 @@ pub fn detectTTYConfig() TTY.Config {
pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
const stderr = getStderrStream();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
noasync stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
}
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", .{@errorName(err)}) catch return;
noasync stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", .{@errorName(err)}) catch return;
return;
};
writeCurrentStackTrace(stderr, debug_info, detectTTYConfig(), start_addr) catch |err| {
stderr.print("Unable to dump stack trace: {}\n", .{@errorName(err)}) catch return;
noasync stderr.print("Unable to dump stack trace: {}\n", .{@errorName(err)}) catch return;
return;
};
}
@ -121,11 +121,11 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
const stderr = getStderrStream();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
noasync stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
}
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", .{@errorName(err)}) catch return;
noasync stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", .{@errorName(err)}) catch return;
return;
};
const tty_config = detectTTYConfig();
@ -189,15 +189,15 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *builtin.StackTrace
pub fn dumpStackTrace(stack_trace: builtin.StackTrace) void {
const stderr = getStderrStream();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
noasync stderr.print("Unable to dump stack trace: debug info stripped\n", .{}) catch return;
return;
}
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", .{@errorName(err)}) catch return;
noasync stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", .{@errorName(err)}) catch return;
return;
};
writeStackTrace(stack_trace, stderr, getDebugInfoAllocator(), debug_info, detectTTYConfig()) catch |err| {
stderr.print("Unable to dump stack trace: {}\n", .{@errorName(err)}) catch return;
noasync stderr.print("Unable to dump stack trace: {}\n", .{@errorName(err)}) catch return;
return;
};
}
@ -238,7 +238,7 @@ pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, c
switch (@atomicRmw(u8, &panicking, .Add, 1, .SeqCst)) {
0 => {
const stderr = getStderrStream();
stderr.print(format ++ "\n", args) catch os.abort();
noasync stderr.print(format ++ "\n", args) catch os.abort();
if (trace) |t| {
dumpStackTrace(t.*);
}
@ -568,12 +568,12 @@ pub const TTY = struct {
switch (conf) {
.no_color => return,
.escape_codes => switch (color) {
.Red => out_stream.write(RED) catch return,
.Green => out_stream.write(GREEN) catch return,
.Cyan => out_stream.write(CYAN) catch return,
.White, .Bold => out_stream.write(WHITE) catch return,
.Dim => out_stream.write(DIM) catch return,
.Reset => out_stream.write(RESET) catch return,
.Red => noasync out_stream.write(RED) catch return,
.Green => noasync out_stream.write(GREEN) catch return,
.Cyan => noasync out_stream.write(CYAN) catch return,
.White, .Bold => noasync out_stream.write(WHITE) catch return,
.Dim => noasync out_stream.write(DIM) catch return,
.Reset => noasync out_stream.write(RESET) catch return,
},
.windows_api => if (builtin.os == .windows) {
const S = struct {
@ -729,17 +729,17 @@ fn printLineInfo(
tty_config.setColor(out_stream, .White);
if (line_info) |*li| {
try out_stream.print("{}:{}:{}", .{ li.file_name, li.line, li.column });
try noasync out_stream.print("{}:{}:{}", .{ li.file_name, li.line, li.column });
} else {
try out_stream.print("???:?:?", .{});
try noasync out_stream.write("???:?:?");
}
tty_config.setColor(out_stream, .Reset);
try out_stream.write(": ");
try noasync out_stream.write(": ");
tty_config.setColor(out_stream, .Dim);
try out_stream.print("0x{x} in {} ({})", .{ address, symbol_name, compile_unit_name });
try noasync out_stream.print("0x{x} in {} ({})", .{ address, symbol_name, compile_unit_name });
tty_config.setColor(out_stream, .Reset);
try out_stream.write("\n");
try noasync out_stream.write("\n");
// Show the matching source code line if possible
if (line_info) |li| {
@ -748,12 +748,12 @@ fn printLineInfo(
// The caret already takes one char
const space_needed = @intCast(usize, li.column - 1);
try out_stream.writeByteNTimes(' ', space_needed);
try noasync out_stream.writeByteNTimes(' ', space_needed);
tty_config.setColor(out_stream, .Green);
try out_stream.write("^");
try noasync out_stream.write("^");
tty_config.setColor(out_stream, .Reset);
}
try out_stream.write("\n");
try noasync out_stream.write("\n");
} else |err| switch (err) {
error.EndOfFile, error.FileNotFound => {},
error.BadPathName => {},

View File

@ -6,11 +6,9 @@ pub const Locked = @import("event/locked.zig").Locked;
pub const RwLock = @import("event/rwlock.zig").RwLock;
pub const RwLocked = @import("event/rwlocked.zig").RwLocked;
pub const Loop = @import("event/loop.zig").Loop;
pub const fs = @import("event/fs.zig");
test "import event tests" {
_ = @import("event/channel.zig");
_ = @import("event/fs.zig");
_ = @import("event/future.zig");
_ = @import("event/group.zig");
_ = @import("event/lock.zig");

View File

@ -267,17 +267,16 @@ pub fn Channel(comptime T: type) type {
}
test "std.event.Channel" {
if (!std.io.is_async) return error.SkipZigTest;
// https://github.com/ziglang/zig/issues/1908
if (builtin.single_threaded) return error.SkipZigTest;
// https://github.com/ziglang/zig/issues/3251
if (builtin.os == .freebsd) return error.SkipZigTest;
// TODO provide a way to run tests in evented I/O mode
if (!std.io.is_async) return error.SkipZigTest;
var channel: Channel(i32) = undefined;
channel.init([0]i32{});
channel.init(&[0]i32{});
defer channel.deinit();
var handle = async testChannelGetter(&channel);

View File

@ -22,7 +22,7 @@ pub fn Group(comptime ReturnType: type) type {
const AllocStack = std.atomic.Stack(Node);
pub const Node = struct {
bytes: []const u8 = [0]u8{},
bytes: []const u8 = &[0]u8{},
handle: anyframe->ReturnType,
};

View File

@ -117,21 +117,21 @@ pub const Lock = struct {
};
test "std.event.Lock" {
if (!std.io.is_async) return error.SkipZigTest;
// TODO https://github.com/ziglang/zig/issues/1908
if (builtin.single_threaded) return error.SkipZigTest;
// TODO https://github.com/ziglang/zig/issues/3251
if (builtin.os == .freebsd) return error.SkipZigTest;
// TODO provide a way to run tests in evented I/O mode
if (!std.io.is_async) return error.SkipZigTest;
var lock = Lock.init();
defer lock.deinit();
_ = async testLock(&lock);
testing.expectEqualSlices(i32, [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len, shared_test_data);
const expected_result = [1]i32{3 * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
testing.expectEqualSlices(i32, &expected_result, &shared_test_data);
}
async fn testLock(lock: *Lock) void {

View File

@ -6,7 +6,6 @@ const testing = std.testing;
const mem = std.mem;
const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
const fs = std.event.fs;
const os = std.os;
const windows = os.windows;
const maxInt = std.math.maxInt;
@ -174,21 +173,19 @@ pub const Loop = struct {
fn initOsData(self: *Loop, extra_thread_count: usize) InitOsDataError!void {
switch (builtin.os) {
.linux => {
self.os_data.fs_queue = std.atomic.Queue(fs.Request).init();
self.os_data.fs_queue = std.atomic.Queue(Request).init();
self.os_data.fs_queue_item = 0;
// we need another thread for the file system because Linux does not have an async
// file system I/O API.
self.os_data.fs_end_request = fs.RequestNode{
.prev = undefined,
.next = undefined,
.data = fs.Request{
.msg = fs.Request.Msg.End,
.finish = fs.Request.Finish.NoAction,
self.os_data.fs_end_request = Request.Node{
.data = Request{
.msg = .end,
.finish = .NoAction,
},
};
errdefer {
while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
while (self.available_eventfd_resume_nodes.pop()) |node| noasync os.close(node.data.eventfd);
}
for (self.eventfd_resume_nodes) |*eventfd_node| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
@ -207,10 +204,10 @@ pub const Loop = struct {
}
self.os_data.epollfd = try os.epoll_create1(os.EPOLL_CLOEXEC);
errdefer os.close(self.os_data.epollfd);
errdefer noasync os.close(self.os_data.epollfd);
self.os_data.final_eventfd = try os.eventfd(0, os.EFD_CLOEXEC | os.EFD_NONBLOCK);
errdefer os.close(self.os_data.final_eventfd);
errdefer noasync os.close(self.os_data.final_eventfd);
self.os_data.final_eventfd_event = os.epoll_event{
.events = os.EPOLLIN,
@ -237,7 +234,7 @@ pub const Loop = struct {
var extra_thread_index: usize = 0;
errdefer {
// writing 8 bytes to an eventfd cannot fail
os.write(self.os_data.final_eventfd, &wakeup_bytes) catch unreachable;
noasync os.write(self.os_data.final_eventfd, &wakeup_bytes) catch unreachable;
while (extra_thread_index != 0) {
extra_thread_index -= 1;
self.extra_threads[extra_thread_index].wait();
@ -249,20 +246,20 @@ pub const Loop = struct {
},
.macosx, .freebsd, .netbsd, .dragonfly => {
self.os_data.kqfd = try os.kqueue();
errdefer os.close(self.os_data.kqfd);
errdefer noasync os.close(self.os_data.kqfd);
self.os_data.fs_kqfd = try os.kqueue();
errdefer os.close(self.os_data.fs_kqfd);
errdefer noasync os.close(self.os_data.fs_kqfd);
self.os_data.fs_queue = std.atomic.Queue(fs.Request).init();
self.os_data.fs_queue = std.atomic.Queue(Request).init();
// we need another thread for the file system because Darwin does not have an async
// file system I/O API.
self.os_data.fs_end_request = fs.RequestNode{
self.os_data.fs_end_request = Request.Node{
.prev = undefined,
.next = undefined,
.data = fs.Request{
.msg = fs.Request.Msg.End,
.finish = fs.Request.Finish.NoAction,
.data = Request{
.msg = .end,
.finish = .NoAction,
},
};
@ -407,14 +404,14 @@ pub const Loop = struct {
fn deinitOsData(self: *Loop) void {
switch (builtin.os) {
.linux => {
os.close(self.os_data.final_eventfd);
while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
os.close(self.os_data.epollfd);
noasync os.close(self.os_data.final_eventfd);
while (self.available_eventfd_resume_nodes.pop()) |node| noasync os.close(node.data.eventfd);
noasync os.close(self.os_data.epollfd);
self.allocator.free(self.eventfd_resume_nodes);
},
.macosx, .freebsd, .netbsd, .dragonfly => {
os.close(self.os_data.kqfd);
os.close(self.os_data.fs_kqfd);
noasync os.close(self.os_data.kqfd);
noasync os.close(self.os_data.fs_kqfd);
},
.windows => {
windows.CloseHandle(self.os_data.io_port);
@ -711,6 +708,190 @@ pub const Loop = struct {
}
}
/// Performs an async `os.open` using a separate thread.
pub fn openZ(self: *Loop, file_path: [*:0]const u8, flags: u32, mode: usize) os.OpenError!os.fd_t {
    // The request node lives in this async frame; that is safe because the
    // frame stays suspended until the fs thread has filled in `result` and
    // scheduled the tick node below.
    var req_node = Request.Node{
        .data = .{
            .msg = .{
                .open = .{
                    .path = file_path,
                    .flags = flags,
                    .mode = mode,
                    .result = undefined, // written by the fs thread (see posixFsRun)
                },
            },
            // Resume this frame on the next event loop tick after completion.
            .finish = .{ .TickNode = .{ .data = @frame() } },
        },
    };
    suspend {
        // Enqueue from inside the suspend block so the fs thread cannot
        // resume this frame before the suspension has taken effect.
        self.posixFsRequest(&req_node);
    }
    return req_node.data.msg.open.result;
}
/// Performs an async `os.openat` using a separate thread.
pub fn openatZ(self: *Loop, fd: os.fd_t, file_path: [*:0]const u8, flags: u32, mode: usize) os.OpenError!os.fd_t {
    // Request node lives in this suspended async frame until the fs thread
    // completes the `openat` and resumes us (see posixFsRun).
    var req_node = Request.Node{
        .data = .{
            .msg = .{
                .openat = .{
                    .fd = fd,
                    .path = file_path,
                    .flags = flags,
                    .mode = mode,
                    .result = undefined, // filled in by the fs thread
                },
            },
            // Resume this frame on the next tick once serviced.
            .finish = .{ .TickNode = .{ .data = @frame() } },
        },
    };
    suspend {
        // Enqueue while suspended so completion cannot race the suspension.
        self.posixFsRequest(&req_node);
    }
    return req_node.data.msg.openat.result;
}
/// Performs an async `os.close` using a separate thread.
pub fn close(self: *Loop, fd: os.fd_t) void {
    // Close has no result to read back; we still suspend until the fs
    // thread has performed the close and resumed this frame, so `fd` is
    // guaranteed closed when this function returns.
    var req_node = Request.Node{
        .data = .{
            .msg = .{ .close = .{ .fd = fd } },
            .finish = .{ .TickNode = .{ .data = @frame() } },
        },
    };
    suspend {
        // Enqueue from inside suspend so resume cannot happen early.
        self.posixFsRequest(&req_node);
    }
}
/// Performs an async `os.read` using a separate thread.
/// `fd` must block and not return EAGAIN.
pub fn read(self: *Loop, fd: os.fd_t, buf: []u8) os.ReadError!usize {
    // Request node lives in this async frame; the frame stays suspended
    // until the fs thread performs the blocking `os.read` and stores the
    // outcome in `result` (see posixFsRun).
    var req_node = Request.Node{
        .data = .{
            .msg = .{
                .read = .{
                    .fd = fd,
                    .buf = buf,
                    .result = undefined, // written by the fs thread
                },
            },
            // Resume this frame on the next tick after completion.
            .finish = .{ .TickNode = .{ .data = @frame() } },
        },
    };
    suspend {
        // Enqueue while suspended so completion cannot race the suspension.
        self.posixFsRequest(&req_node);
    }
    return req_node.data.msg.read.result;
}
/// Performs an async `os.readv` using a separate thread.
/// `fd` must block and not return EAGAIN.
pub fn readv(self: *Loop, fd: os.fd_t, iov: []const os.iovec) os.ReadError!usize {
    // Vectored read serviced on the fs thread; this frame (and therefore
    // `iov` and the request node) stays alive until `result` is written.
    var req_node = Request.Node{
        .data = .{
            .msg = .{
                .readv = .{
                    .fd = fd,
                    .iov = iov,
                    .result = undefined, // written by the fs thread
                },
            },
            // Resume this frame on the next tick after completion.
            .finish = .{ .TickNode = .{ .data = @frame() } },
        },
    };
    suspend {
        // Enqueue while suspended so completion cannot race the suspension.
        self.posixFsRequest(&req_node);
    }
    return req_node.data.msg.readv.result;
}
/// Performs an async `os.preadv` using a separate thread.
/// `fd` must block and not return EAGAIN.
pub fn preadv(self: *Loop, fd: os.fd_t, iov: []const os.iovec, offset: u64) os.ReadError!usize {
    // Positional vectored read serviced on the fs thread; the frame stays
    // suspended until `result` has been written (see posixFsRun).
    var req_node = Request.Node{
        .data = .{
            .msg = .{
                .preadv = .{
                    .fd = fd,
                    .iov = iov,
                    .offset = offset,
                    .result = undefined, // written by the fs thread
                },
            },
            // Resume this frame on the next tick after completion.
            .finish = .{ .TickNode = .{ .data = @frame() } },
        },
    };
    suspend {
        // Enqueue while suspended so completion cannot race the suspension.
        self.posixFsRequest(&req_node);
    }
    return req_node.data.msg.preadv.result;
}
/// Performs an async `os.write` using a separate thread.
/// `fd` must block and not return EAGAIN.
pub fn write(self: *Loop, fd: os.fd_t, bytes: []const u8) os.WriteError!void {
    // Write serviced on the fs thread; `bytes` remains valid because this
    // frame stays suspended until `result` has been written.
    var req_node = Request.Node{
        .data = .{
            .msg = .{
                .write = .{
                    .fd = fd,
                    .bytes = bytes,
                    .result = undefined, // written by the fs thread
                },
            },
            // Resume this frame on the next tick after completion.
            .finish = .{ .TickNode = .{ .data = @frame() } },
        },
    };
    suspend {
        // Enqueue while suspended so completion cannot race the suspension.
        self.posixFsRequest(&req_node);
    }
    return req_node.data.msg.write.result;
}
/// Performs an async `os.writev` using a separate thread.
/// `fd` must block and not return EAGAIN.
pub fn writev(self: *Loop, fd: os.fd_t, iov: []const os.iovec_const) os.WriteError!void {
    // Vectored write serviced on the fs thread; the iovec slice remains
    // valid because this frame stays suspended until completion.
    var req_node = Request.Node{
        .data = .{
            .msg = .{
                .writev = .{
                    .fd = fd,
                    .iov = iov,
                    .result = undefined, // written by the fs thread
                },
            },
            // Resume this frame on the next tick after completion.
            .finish = .{ .TickNode = .{ .data = @frame() } },
        },
    };
    suspend {
        // Enqueue while suspended so completion cannot race the suspension.
        self.posixFsRequest(&req_node);
    }
    return req_node.data.msg.writev.result;
}
/// Performs an async `os.pwritev` using a separate thread.
/// `fd` must block and not return EAGAIN.
pub fn pwritev(self: *Loop, fd: os.fd_t, iov: []const os.iovec_const, offset: u64) os.WriteError!void {
    // Positional vectored write serviced on the fs thread; the frame stays
    // suspended until `result` has been written (see posixFsRun).
    var req_node = Request.Node{
        .data = .{
            .msg = .{
                .pwritev = .{
                    .fd = fd,
                    .iov = iov,
                    .offset = offset,
                    .result = undefined, // written by the fs thread
                },
            },
            // Resume this frame on the next tick after completion.
            .finish = .{ .TickNode = .{ .data = @frame() } },
        },
    };
    suspend {
        // Enqueue while suspended so completion cannot race the suspension.
        self.posixFsRequest(&req_node);
    }
    return req_node.data.msg.pwritev.result;
}
fn workerRun(self: *Loop) void {
while (true) {
while (true) {
@ -804,7 +985,7 @@ pub const Loop = struct {
}
}
fn posixFsRequest(self: *Loop, request_node: *fs.RequestNode) void {
fn posixFsRequest(self: *Loop, request_node: *Request.Node) void {
self.beginOneEvent(); // finished in posixFsRun after processing the msg
self.os_data.fs_queue.put(request_node);
switch (builtin.os) {
@ -826,7 +1007,7 @@ pub const Loop = struct {
}
}
fn posixFsCancel(self: *Loop, request_node: *fs.RequestNode) void {
fn posixFsCancel(self: *Loop, request_node: *Request.Node) void {
if (self.os_data.fs_queue.remove(request_node)) {
self.finishOneEvent();
}
@ -841,37 +1022,32 @@ pub const Loop = struct {
}
while (self.os_data.fs_queue.get()) |node| {
switch (node.data.msg) {
.End => return,
.WriteV => |*msg| {
.end => return,
.read => |*msg| {
msg.result = noasync os.read(msg.fd, msg.buf);
},
.write => |*msg| {
msg.result = noasync os.write(msg.fd, msg.bytes);
},
.writev => |*msg| {
msg.result = noasync os.writev(msg.fd, msg.iov);
},
.PWriteV => |*msg| {
.pwritev => |*msg| {
msg.result = noasync os.pwritev(msg.fd, msg.iov, msg.offset);
},
.PReadV => |*msg| {
.preadv => |*msg| {
msg.result = noasync os.preadv(msg.fd, msg.iov, msg.offset);
},
.Open => |*msg| {
msg.result = noasync os.openC(msg.path.ptr, msg.flags, msg.mode);
.open => |*msg| {
msg.result = noasync os.openC(msg.path, msg.flags, msg.mode);
},
.Close => |*msg| noasync os.close(msg.fd),
.WriteFile => |*msg| blk: {
const O_LARGEFILE = if (@hasDecl(os, "O_LARGEFILE")) os.O_LARGEFILE else 0;
const flags = O_LARGEFILE | os.O_WRONLY | os.O_CREAT |
os.O_CLOEXEC | os.O_TRUNC;
const fd = noasync os.openC(msg.path.ptr, flags, msg.mode) catch |err| {
msg.result = err;
break :blk;
};
defer noasync os.close(fd);
msg.result = noasync os.write(fd, msg.contents);
.openat => |*msg| {
msg.result = noasync os.openatC(msg.fd, msg.path, msg.flags, msg.mode);
},
.close => |*msg| noasync os.close(msg.fd),
}
switch (node.data.finish) {
.TickNode => |*tick_node| self.onNextTick(tick_node),
.DeallocCloseOperation => |close_op| {
self.allocator.destroy(close_op);
},
.NoAction => {},
}
self.finishOneEvent();
@ -911,8 +1087,8 @@ pub const Loop = struct {
fs_kevent_wait: os.Kevent,
fs_thread: *Thread,
fs_kqfd: i32,
fs_queue: std.atomic.Queue(fs.Request),
fs_end_request: fs.RequestNode,
fs_queue: std.atomic.Queue(Request),
fs_end_request: Request.Node,
};
const LinuxOsData = struct {
@ -921,8 +1097,99 @@ pub const Loop = struct {
final_eventfd_event: os.linux.epoll_event,
fs_thread: *Thread,
fs_queue_item: i32,
fs_queue: std.atomic.Queue(fs.Request),
fs_end_request: fs.RequestNode,
fs_queue: std.atomic.Queue(Request),
fs_end_request: Request.Node,
};
/// A single blocking filesystem operation, queued on `os_data.fs_queue` and
/// serviced by the dedicated fs thread (see `posixFsRequest`/`posixFsRun`).
/// The fs thread performs the syscall named by `msg`, stores the outcome in
/// the message's `result` field, then acts on `finish`.
pub const Request = struct {
    /// Which syscall to perform, with its arguments and result slot.
    msg: Msg,
    /// What the fs thread does after servicing the request.
    finish: Finish,

    /// Intrusive queue node type used to link requests into `fs_queue`.
    pub const Node = std.atomic.Queue(Request).Node;

    pub const Finish = union(enum) {
        /// Schedule this tick node, resuming the awaiting async frame.
        TickNode: Loop.NextTickNode,
        /// Do nothing (used by the shutdown sentinel `fs_end_request`).
        NoAction,
    };

    pub const Msg = union(enum) {
        read: Read,
        write: Write,
        writev: WriteV,
        pwritev: PWriteV,
        preadv: PReadV,
        open: Open,
        openat: OpenAt,
        close: Close,

        /// special - means the fs thread should exit
        end,

        /// Payload for `os.read`; `result` is the byte count on success.
        pub const Read = struct {
            fd: os.fd_t,
            buf: []u8,
            result: Error!usize,

            pub const Error = os.ReadError;
        };

        /// Payload for `os.write`.
        pub const Write = struct {
            fd: os.fd_t,
            bytes: []const u8,
            result: Error!void,

            pub const Error = os.WriteError;
        };

        /// Payload for `os.writev`.
        pub const WriteV = struct {
            fd: os.fd_t,
            iov: []const os.iovec_const,
            result: Error!void,

            pub const Error = os.WriteError;
        };

        /// Payload for `os.pwritev` (vectored write at an explicit offset).
        pub const PWriteV = struct {
            fd: os.fd_t,
            iov: []const os.iovec_const,
            offset: usize,
            result: Error!void,

            pub const Error = os.WriteError;
        };

        /// Payload for `os.preadv` (vectored read at an explicit offset);
        /// `result` is the byte count on success.
        pub const PReadV = struct {
            fd: os.fd_t,
            iov: []const os.iovec,
            offset: usize,
            result: Error!usize,

            pub const Error = os.ReadError;
        };

        /// Payload for `os.openC`; `path` must be null-terminated.
        pub const Open = struct {
            path: [*:0]const u8,
            flags: u32,
            mode: os.mode_t,
            result: Error!os.fd_t,

            pub const Error = os.OpenError;
        };

        /// Payload for `os.openatC`; `path` is resolved relative to `fd`.
        pub const OpenAt = struct {
            fd: os.fd_t,
            path: [*:0]const u8,
            flags: u32,
            mode: os.mode_t,
            result: Error!os.fd_t,

            pub const Error = os.OpenError;
        };

        /// Payload for `os.close`; close cannot fail, so there is no result.
        pub const Close = struct {
            fd: os.fd_t,
        };
    };
};
};

View File

@ -78,7 +78,7 @@ fn peekIsAlign(comptime fmt: []const u8) bool {
pub fn format(
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
comptime fmt: []const u8,
args: var,
) Errors!void {
@ -326,7 +326,7 @@ pub fn formatType(
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
max_depth: usize,
) Errors!void {
if (comptime std.mem.eql(u8, fmt, "*")) {
@ -488,7 +488,7 @@ fn formatValue(
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
if (comptime std.mem.eql(u8, fmt, "B")) {
return formatBytes(value, options, 1000, context, Errors, output);
@ -510,7 +510,7 @@ pub fn formatIntValue(
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
comptime var radix = 10;
comptime var uppercase = false;
@ -552,7 +552,7 @@ fn formatFloatValue(
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "e")) {
return formatFloatScientific(value, options, context, Errors, output);
@ -569,7 +569,7 @@ pub fn formatText(
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
if (fmt.len == 0) {
return output(context, bytes);
@ -590,7 +590,7 @@ pub fn formatAsciiChar(
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
return output(context, @as(*const [1]u8, &c)[0..]);
}
@ -600,7 +600,7 @@ pub fn formatBuf(
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
try output(context, buf);
@ -620,7 +620,7 @@ pub fn formatFloatScientific(
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
var x = @floatCast(f64, value);
@ -715,7 +715,7 @@ pub fn formatFloatDecimal(
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
var x = @as(f64, value);
@ -861,7 +861,7 @@ pub fn formatBytes(
comptime radix: usize,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
if (value == 0) {
return output(context, "0B");
@ -902,7 +902,7 @@ pub fn formatInt(
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
const int_value = if (@TypeOf(value) == comptime_int) blk: {
const Int = math.IntFittingRange(value, value);
@ -924,7 +924,7 @@ fn formatIntSigned(
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
const new_options = FormatOptions{
.width = if (options.width) |w| (if (w == 0) 0 else w - 1) else null,
@ -955,7 +955,7 @@ fn formatIntUnsigned(
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
assert(base >= 2);
var buf: [math.max(@TypeOf(value).bit_count, 1)]u8 = undefined;
@ -1419,7 +1419,7 @@ test "custom" {
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "p")) {
return std.fmt.format(context, Errors, output, "({d:.3},{d:.3})", .{ self.x, self.y });
@ -1626,7 +1626,7 @@ test "formatType max_depth" {
options: FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) Errors!void {
if (fmt.len == 0) {
return std.fmt.format(context, Errors, output, "({d:.3},{d:.3})", .{ self.x, self.y });

View File

@ -23,6 +23,8 @@ pub const realpathW = os.realpathW;
pub const getAppDataDir = @import("fs/get_app_data_dir.zig").getAppDataDir;
pub const GetAppDataDirError = @import("fs/get_app_data_dir.zig").GetAppDataDirError;
pub const Watch = @import("fs/watch.zig").Watch;
/// This represents the maximum size of a UTF-8 encoded file path.
/// All file system operations which return a path are guaranteed to
/// fit into a UTF-8 encoded array of this length.
@ -43,6 +45,13 @@ pub const base64_encoder = base64.Base64Encoder.init(
base64.standard_pad_char,
);
/// Whether or not async file system syscalls need a dedicated thread because the operating
/// system does not support non-blocking I/O on the file system.
pub const need_async_thread = std.io.is_async and switch (builtin.os) {
.windows, .other => false,
else => true,
};
/// TODO remove the allocator requirement from this API
pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
if (symLink(existing_path, new_path)) {
@ -688,11 +697,16 @@ pub const Dir = struct {
}
pub fn close(self: *Dir) void {
os.close(self.fd);
if (need_async_thread) {
std.event.Loop.instance.?.close(self.fd);
} else {
os.close(self.fd);
}
self.* = undefined;
}
/// Opens a file for reading or writing, without attempting to create a new file.
/// To create a new file, see `createFile`.
/// Call `File.close` to release the resource.
/// Asserts that the path parameter has no null bytes.
pub fn openFile(self: Dir, sub_path: []const u8, flags: File.OpenFlags) File.OpenError!File {
@ -718,8 +732,11 @@ pub const Dir = struct {
@as(u32, os.O_WRONLY)
else
@as(u32, os.O_RDONLY);
const fd = try os.openatC(self.fd, sub_path, os_flags, 0);
return File{ .handle = fd };
const fd = if (need_async_thread)
try std.event.Loop.instance.?.openatZ(self.fd, sub_path, os_flags, 0)
else
try os.openatC(self.fd, sub_path, os_flags, 0);
return File{ .handle = fd, .io_mode = .blocking };
}
/// Same as `openFile` but Windows-only and the path parameter is
@ -756,8 +773,11 @@ pub const Dir = struct {
(if (flags.truncate) @as(u32, os.O_TRUNC) else 0) |
(if (flags.read) @as(u32, os.O_RDWR) else os.O_WRONLY) |
(if (flags.exclusive) @as(u32, os.O_EXCL) else 0);
const fd = try os.openatC(self.fd, sub_path_c, os_flags, flags.mode);
return File{ .handle = fd };
const fd = if (need_async_thread)
try std.event.Loop.instance.?.openatZ(self.fd, sub_path_c, os_flags, flags.mode)
else
try os.openatC(self.fd, sub_path_c, os_flags, flags.mode);
return File{ .handle = fd, .io_mode = .blocking };
}
/// Same as `createFile` but Windows-only and the path parameter is
@ -798,7 +818,10 @@ pub const Dir = struct {
) File.OpenError!File {
const w = os.windows;
var result = File{ .handle = undefined };
var result = File{
.handle = undefined,
.io_mode = .blocking,
};
const path_len_bytes = math.cast(u16, mem.toSliceConst(u16, sub_path_w).len * 2) catch |err| switch (err) {
error.Overflow => return error.NameTooLong,
@ -919,7 +942,12 @@ pub const Dir = struct {
}
fn openDirFlagsC(self: Dir, sub_path_c: [*:0]const u8, flags: u32) OpenError!Dir {
const fd = os.openatC(self.fd, sub_path_c, flags | os.O_DIRECTORY, 0) catch |err| switch (err) {
const os_flags = flags | os.O_DIRECTORY;
const result = if (need_async_thread)
std.event.Loop.instance.?.openatZ(self.fd, sub_path_c, os_flags, 0)
else
os.openatC(self.fd, sub_path_c, os_flags, 0);
const fd = result catch |err| switch (err) {
error.FileTooBig => unreachable, // can't happen for directories
error.IsDir => unreachable, // we're providing O_DIRECTORY
error.NoSpaceLeft => unreachable, // not providing O_CREAT
@ -1588,4 +1616,5 @@ test "" {
_ = @import("fs/path.zig");
_ = @import("fs/file.zig");
_ = @import("fs/get_app_data_dir.zig");
_ = @import("fs/watch.zig");
}

View File

@ -8,18 +8,29 @@ const assert = std.debug.assert;
const windows = os.windows;
const Os = builtin.Os;
const maxInt = std.math.maxInt;
const need_async_thread = std.fs.need_async_thread;
pub const File = struct {
/// The OS-specific file descriptor or file handle.
handle: os.fd_t,
pub const Mode = switch (builtin.os) {
Os.windows => void,
else => u32,
};
/// On some systems, such as Linux, file system file descriptors are incapable of non-blocking I/O.
/// This forces us to perform asynchronous I/O on a dedicated thread, to achieve non-blocking
/// file-system I/O. To do this, `File` must be aware of whether it is a file system file descriptor,
/// or, more specifically, whether the I/O is blocking.
io_mode: io.Mode,
/// Even when std.io.mode is async, it is still sometimes desirable to perform blocking I/O, although
/// not by default. For example, when printing a stack trace to stderr.
async_block_allowed: @TypeOf(async_block_allowed_no) = async_block_allowed_no,
pub const async_block_allowed_yes = if (io.is_async) true else {};
pub const async_block_allowed_no = if (io.is_async) false else {};
pub const Mode = os.mode_t;
pub const default_mode = switch (builtin.os) {
Os.windows => {},
.windows => 0,
else => 0o666,
};
@ -49,87 +60,27 @@ pub const File = struct {
mode: Mode = default_mode,
};
/// Deprecated; call `std.fs.Dir.openFile` directly.
pub fn openRead(path: []const u8) OpenError!File {
return std.fs.cwd().openFile(path, .{});
}
/// Deprecated; call `std.fs.Dir.openFileC` directly.
pub fn openReadC(path_c: [*:0]const u8) OpenError!File {
return std.fs.cwd().openFileC(path_c, .{});
}
/// Deprecated; call `std.fs.Dir.openFileW` directly.
pub fn openReadW(path_w: [*:0]const u16) OpenError!File {
return std.fs.cwd().openFileW(path_w, .{});
}
/// Deprecated; call `std.fs.Dir.createFile` directly.
pub fn openWrite(path: []const u8) OpenError!File {
return std.fs.cwd().createFile(path, .{});
}
/// Deprecated; call `std.fs.Dir.createFile` directly.
pub fn openWriteMode(path: []const u8, file_mode: Mode) OpenError!File {
return std.fs.cwd().createFile(path, .{ .mode = file_mode });
}
/// Deprecated; call `std.fs.Dir.createFileC` directly.
pub fn openWriteModeC(path_c: [*:0]const u8, file_mode: Mode) OpenError!File {
return std.fs.cwd().createFileC(path_c, .{ .mode = file_mode });
}
/// Deprecated; call `std.fs.Dir.createFileW` directly.
pub fn openWriteModeW(path_w: [*:0]const u16, file_mode: Mode) OpenError!File {
return std.fs.cwd().createFileW(path_w, .{ .mode = file_mode });
}
/// Deprecated; call `std.fs.Dir.createFile` directly.
pub fn openWriteNoClobber(path: []const u8, file_mode: Mode) OpenError!File {
return std.fs.cwd().createFile(path, .{
.mode = file_mode,
.exclusive = true,
});
}
/// Deprecated; call `std.fs.Dir.createFileC` directly.
pub fn openWriteNoClobberC(path_c: [*:0]const u8, file_mode: Mode) OpenError!File {
return std.fs.cwd().createFileC(path_c, .{
.mode = file_mode,
.exclusive = true,
});
}
/// Deprecated; call `std.fs.Dir.createFileW` directly.
pub fn openWriteNoClobberW(path_w: [*:0]const u16, file_mode: Mode) OpenError!File {
return std.fs.cwd().createFileW(path_w, .{
.mode = file_mode,
.exclusive = true,
});
}
pub fn openHandle(handle: os.fd_t) File {
return File{ .handle = handle };
}
/// Test for the existence of `path`.
/// `path` is UTF8-encoded.
/// In general it is recommended to avoid this function. For example,
/// instead of testing if a file exists and then opening it, just
/// open it and handle the error for file not found.
/// TODO: deprecate this and move it to `std.fs.Dir`.
/// TODO: integrate with async I/O
pub fn access(path: []const u8) !void {
return os.access(path, os.F_OK);
}
/// Same as `access` except the parameter is null-terminated.
/// TODO: deprecate this and move it to `std.fs.Dir`.
/// TODO: integrate with async I/O
pub fn accessC(path: [*:0]const u8) !void {
return os.accessC(path, os.F_OK);
}
/// Same as `access` except the parameter is null-terminated UTF16LE-encoded.
/// TODO: deprecate this and move it to `std.fs.Dir`.
/// TODO: integrate with async I/O
pub fn accessW(path: [*:0]const u16) !void {
return os.accessW(path, os.F_OK);
}
@ -137,7 +88,11 @@ pub const File = struct {
/// Upon success, the stream is in an uninitialized state. To continue using it,
/// you must use the open() function.
pub fn close(self: File) void {
return os.close(self.handle);
if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
std.event.Loop.instance.?.close(self.handle);
} else {
return os.close(self.handle);
}
}
/// Test whether the file refers to a terminal.
@ -167,26 +122,31 @@ pub const File = struct {
pub const SeekError = os.SeekError;
/// Repositions read/write file offset relative to the current offset.
/// TODO: integrate with async I/O
pub fn seekBy(self: File, offset: i64) SeekError!void {
return os.lseek_CUR(self.handle, offset);
}
/// Repositions read/write file offset relative to the end.
/// TODO: integrate with async I/O
pub fn seekFromEnd(self: File, offset: i64) SeekError!void {
return os.lseek_END(self.handle, offset);
}
/// Repositions read/write file offset relative to the beginning.
/// TODO: integrate with async I/O
pub fn seekTo(self: File, offset: u64) SeekError!void {
return os.lseek_SET(self.handle, offset);
}
pub const GetPosError = os.SeekError || os.FStatError;
/// TODO: integrate with async I/O
pub fn getPos(self: File) GetPosError!u64 {
return os.lseek_CUR_get(self.handle);
}
/// TODO: integrate with async I/O
pub fn getEndPos(self: File) GetPosError!u64 {
if (builtin.os == .windows) {
return windows.GetFileSizeEx(self.handle);
@ -196,6 +156,7 @@ pub const File = struct {
pub const ModeError = os.FStatError;
/// TODO: integrate with async I/O
pub fn mode(self: File) ModeError!Mode {
if (builtin.os == .windows) {
return {};
@ -219,6 +180,7 @@ pub const File = struct {
pub const StatError = os.FStatError;
/// TODO: integrate with async I/O
pub fn stat(self: File) StatError!Stat {
if (builtin.os == .windows) {
var io_status_block: windows.IO_STATUS_BLOCK = undefined;
@ -233,7 +195,7 @@ pub const File = struct {
}
return Stat{
.size = @bitCast(u64, info.StandardInformation.EndOfFile),
.mode = {},
.mode = 0,
.atime = windows.fromSysTime(info.BasicInformation.LastAccessTime),
.mtime = windows.fromSysTime(info.BasicInformation.LastWriteTime),
.ctime = windows.fromSysTime(info.BasicInformation.CreationTime),
@ -259,6 +221,7 @@ pub const File = struct {
/// and therefore this function cannot guarantee any precision will be stored.
/// Further, the maximum value is limited by the system ABI. When a value is provided
/// that exceeds this range, the value is clamped to the maximum.
/// TODO: integrate with async I/O
pub fn updateTimes(
self: File,
/// access timestamp in nanoseconds
@ -287,21 +250,61 @@ pub const File = struct {
pub const ReadError = os.ReadError;
pub fn read(self: File, buffer: []u8) ReadError!usize {
if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
return std.event.Loop.instance.?.read(self.handle, buffer);
}
return os.read(self.handle, buffer);
}
pub fn pread(self: File, buffer: []u8, offset: u64) ReadError!usize {
if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
return std.event.Loop.instance.?.pread(self.handle, buffer);
}
return os.pread(self.handle, buffer, offset);
}
pub fn readv(self: File, iovecs: []const os.iovec) ReadError!usize {
if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
return std.event.Loop.instance.?.readv(self.handle, iovecs);
}
return os.readv(self.handle, iovecs);
}
pub fn preadv(self: File, iovecs: []const os.iovec, offset: u64) ReadError!usize {
if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
return std.event.Loop.instance.?.preadv(self.handle, iovecs, offset);
}
return os.preadv(self.handle, iovecs, offset);
}
pub const WriteError = os.WriteError;
pub fn write(self: File, bytes: []const u8) WriteError!void {
if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
return std.event.Loop.instance.?.write(self.handle, bytes);
}
return os.write(self.handle, bytes);
}
pub fn writev_iovec(self: File, iovecs: []const os.iovec_const) WriteError!void {
if (std.event.Loop.instance) |loop| {
return std.event.fs.writevPosix(loop, self.handle, iovecs);
} else {
return os.writev(self.handle, iovecs);
pub fn pwrite(self: File, bytes: []const u8, offset: u64) WriteError!void {
if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
return std.event.Loop.instance.?.pwrite(self.handle, bytes, offset);
}
return os.pwrite(self.handle, bytes, offset);
}
pub fn writev(self: File, iovecs: []const os.iovec_const) WriteError!void {
if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
return std.event.Loop.instance.?.writev(self.handle, iovecs);
}
return os.writev(self.handle, iovecs);
}
pub fn pwritev(self: File, iovecs: []const os.iovec_const, offset: usize) WriteError!void {
if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
return std.event.Loop.instance.?.pwritev(self.handle, iovecs);
}
return os.pwritev(self.handle, iovecs);
}
pub fn inStream(file: File) InStream {

View File

@ -1,5 +1,5 @@
const builtin = @import("builtin");
const std = @import("../std.zig");
const builtin = @import("builtin");
const event = std.event;
const assert = std.debug.assert;
const testing = std.testing;
@ -11,702 +11,10 @@ const fd_t = os.fd_t;
const File = std.fs.File;
const Allocator = mem.Allocator;
//! TODO mege this with `std.fs`
const global_event_loop = Loop.instance orelse
@compileError("std.event.fs currently only works with event-based I/O");
@compileError("std.fs.Watch currently only works with event-based I/O");
pub const RequestNode = std.atomic.Queue(Request).Node;
pub const Request = struct {
msg: Msg,
finish: Finish,
pub const Finish = union(enum) {
TickNode: Loop.NextTickNode,
DeallocCloseOperation: *CloseOperation,
NoAction,
};
pub const Msg = union(enum) {
WriteV: WriteV,
PWriteV: PWriteV,
PReadV: PReadV,
Open: Open,
Close: Close,
WriteFile: WriteFile,
End, // special - means the fs thread should exit
pub const WriteV = struct {
fd: fd_t,
iov: []const os.iovec_const,
result: Error!void,
pub const Error = os.WriteError;
};
pub const PWriteV = struct {
fd: fd_t,
iov: []const os.iovec_const,
offset: usize,
result: Error!void,
pub const Error = os.WriteError;
};
pub const PReadV = struct {
fd: fd_t,
iov: []const os.iovec,
offset: usize,
result: Error!usize,
pub const Error = os.ReadError;
};
pub const Open = struct {
path: [:0]const u8,
flags: u32,
mode: File.Mode,
result: Error!fd_t,
pub const Error = File.OpenError;
};
pub const WriteFile = struct {
path: [:0]const u8,
contents: []const u8,
mode: File.Mode,
result: Error!void,
pub const Error = File.OpenError || File.WriteError;
};
pub const Close = struct {
fd: fd_t,
};
};
};
pub const PWriteVError = error{OutOfMemory} || File.WriteError;
/// data - just the inner references - must live until pwritev frame completes.
pub fn pwritev(allocator: *Allocator, fd: fd_t, data: []const []const u8, offset: usize) PWriteVError!void {
switch (builtin.os) {
.macosx,
.linux,
.freebsd,
.netbsd,
.dragonfly,
=> {
const iovecs = try allocator.alloc(os.iovec_const, data.len);
defer allocator.free(iovecs);
for (data) |buf, i| {
iovecs[i] = os.iovec_const{
.iov_base = buf.ptr,
.iov_len = buf.len,
};
}
return pwritevPosix(fd, iovecs, offset);
},
.windows => {
const data_copy = try std.mem.dupe(allocator, []const u8, data);
defer allocator.free(data_copy);
return pwritevWindows(fd, data, offset);
},
else => @compileError("Unsupported OS"),
}
}
/// data must outlive the returned frame
pub fn pwritevWindows(fd: fd_t, data: []const []const u8, offset: usize) os.WindowsWriteError!void {
if (data.len == 0) return;
if (data.len == 1) return pwriteWindows(fd, data[0], offset);
// TODO do these in parallel
var off = offset;
for (data) |buf| {
try pwriteWindows(fd, buf, off);
off += buf.len;
}
}
pub fn pwriteWindows(fd: fd_t, data: []const u8, offset: u64) os.WindowsWriteError!void {
var resume_node = Loop.ResumeNode.Basic{
.base = Loop.ResumeNode{
.id = Loop.ResumeNode.Id.Basic,
.handle = @frame(),
.overlapped = windows.OVERLAPPED{
.Internal = 0,
.InternalHigh = 0,
.Offset = @truncate(u32, offset),
.OffsetHigh = @truncate(u32, offset >> 32),
.hEvent = null,
},
},
};
// TODO only call create io completion port once per fd
_ = windows.CreateIoCompletionPort(fd, global_event_loop.os_data.io_port, undefined, undefined);
global_event_loop.beginOneEvent();
errdefer global_event_loop.finishOneEvent();
errdefer {
_ = windows.kernel32.CancelIoEx(fd, &resume_node.base.overlapped);
}
suspend {
_ = windows.kernel32.WriteFile(fd, data.ptr, @intCast(windows.DWORD, data.len), null, &resume_node.base.overlapped);
}
var bytes_transferred: windows.DWORD = undefined;
if (windows.kernel32.GetOverlappedResult(fd, &resume_node.base.overlapped, &bytes_transferred, windows.FALSE) == 0) {
switch (windows.kernel32.GetLastError()) {
.IO_PENDING => unreachable,
.INVALID_USER_BUFFER => return error.SystemResources,
.NOT_ENOUGH_MEMORY => return error.SystemResources,
.OPERATION_ABORTED => return error.OperationAborted,
.NOT_ENOUGH_QUOTA => return error.SystemResources,
.BROKEN_PIPE => return error.BrokenPipe,
else => |err| return windows.unexpectedError(err),
}
}
}
/// iovecs must live until pwritev frame completes.
pub fn pwritevPosix(fd: fd_t, iovecs: []const os.iovec_const, offset: usize) os.WriteError!void {
var req_node = RequestNode{
.prev = null,
.next = null,
.data = Request{
.msg = Request.Msg{
.PWriteV = Request.Msg.PWriteV{
.fd = fd,
.iov = iovecs,
.offset = offset,
.result = undefined,
},
},
.finish = Request.Finish{
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
.data = @frame(),
},
},
},
};
errdefer global_event_loop.posixFsCancel(&req_node);
suspend {
global_event_loop.posixFsRequest(&req_node);
}
return req_node.data.msg.PWriteV.result;
}
/// iovecs must live until pwritev frame completes.
pub fn writevPosix(fd: fd_t, iovecs: []const os.iovec_const) os.WriteError!void {
var req_node = RequestNode{
.prev = null,
.next = null,
.data = Request{
.msg = Request.Msg{
.WriteV = Request.Msg.WriteV{
.fd = fd,
.iov = iovecs,
.result = undefined,
},
},
.finish = Request.Finish{
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
.data = @frame(),
},
},
},
};
suspend {
global_event_loop.posixFsRequest(&req_node);
}
return req_node.data.msg.WriteV.result;
}
pub const PReadVError = error{OutOfMemory} || File.ReadError;
/// data - just the inner references - must live until preadv frame completes.
pub fn preadv(allocator: *Allocator, fd: fd_t, data: []const []u8, offset: usize) PReadVError!usize {
assert(data.len != 0);
switch (builtin.os) {
.macosx,
.linux,
.freebsd,
.netbsd,
.dragonfly,
=> {
const iovecs = try allocator.alloc(os.iovec, data.len);
defer allocator.free(iovecs);
for (data) |buf, i| {
iovecs[i] = os.iovec{
.iov_base = buf.ptr,
.iov_len = buf.len,
};
}
return preadvPosix(fd, iovecs, offset);
},
.windows => {
const data_copy = try std.mem.dupe(allocator, []u8, data);
defer allocator.free(data_copy);
return preadvWindows(fd, data_copy, offset);
},
else => @compileError("Unsupported OS"),
}
}
/// data must outlive the returned frame
pub fn preadvWindows(fd: fd_t, data: []const []u8, offset: u64) !usize {
assert(data.len != 0);
if (data.len == 1) return preadWindows(fd, data[0], offset);
// TODO do these in parallel?
var off: usize = 0;
var iov_i: usize = 0;
var inner_off: usize = 0;
while (true) {
const v = data[iov_i];
const amt_read = try preadWindows(fd, v[inner_off .. v.len - inner_off], offset + off);
off += amt_read;
inner_off += amt_read;
if (inner_off == v.len) {
iov_i += 1;
inner_off = 0;
if (iov_i == data.len) {
return off;
}
}
if (amt_read == 0) return off; // EOF
}
}
pub fn preadWindows(fd: fd_t, data: []u8, offset: u64) !usize {
var resume_node = Loop.ResumeNode.Basic{
.base = Loop.ResumeNode{
.id = Loop.ResumeNode.Id.Basic,
.handle = @frame(),
.overlapped = windows.OVERLAPPED{
.Internal = 0,
.InternalHigh = 0,
.Offset = @truncate(u32, offset),
.OffsetHigh = @truncate(u32, offset >> 32),
.hEvent = null,
},
},
};
// TODO only call create io completion port once per fd
_ = windows.CreateIoCompletionPort(fd, global_event_loop.os_data.io_port, undefined, undefined) catch undefined;
global_event_loop.beginOneEvent();
errdefer global_event_loop.finishOneEvent();
errdefer {
_ = windows.kernel32.CancelIoEx(fd, &resume_node.base.overlapped);
}
suspend {
_ = windows.kernel32.ReadFile(fd, data.ptr, @intCast(windows.DWORD, data.len), null, &resume_node.base.overlapped);
}
var bytes_transferred: windows.DWORD = undefined;
if (windows.kernel32.GetOverlappedResult(fd, &resume_node.base.overlapped, &bytes_transferred, windows.FALSE) == 0) {
switch (windows.kernel32.GetLastError()) {
.IO_PENDING => unreachable,
.OPERATION_ABORTED => return error.OperationAborted,
.BROKEN_PIPE => return error.BrokenPipe,
.HANDLE_EOF => return @as(usize, bytes_transferred),
else => |err| return windows.unexpectedError(err),
}
}
return @as(usize, bytes_transferred);
}
/// iovecs must live until preadv frame completes
pub fn preadvPosix(fd: fd_t, iovecs: []const os.iovec, offset: usize) os.ReadError!usize {
var req_node = RequestNode{
.prev = null,
.next = null,
.data = Request{
.msg = Request.Msg{
.PReadV = Request.Msg.PReadV{
.fd = fd,
.iov = iovecs,
.offset = offset,
.result = undefined,
},
},
.finish = Request.Finish{
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
.data = @frame(),
},
},
},
};
errdefer global_event_loop.posixFsCancel(&req_node);
suspend {
global_event_loop.posixFsRequest(&req_node);
}
return req_node.data.msg.PReadV.result;
}
pub fn openPosix(path: []const u8, flags: u32, mode: File.Mode) File.OpenError!fd_t {
const path_c = try std.os.toPosixPath(path);
var req_node = RequestNode{
.prev = null,
.next = null,
.data = Request{
.msg = Request.Msg{
.Open = Request.Msg.Open{
.path = path_c[0..path.len],
.flags = flags,
.mode = mode,
.result = undefined,
},
},
.finish = Request.Finish{
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
.data = @frame(),
},
},
},
};
errdefer global_event_loop.posixFsCancel(&req_node);
suspend {
global_event_loop.posixFsRequest(&req_node);
}
return req_node.data.msg.Open.result;
}
pub fn openRead(path: []const u8) File.OpenError!fd_t {
switch (builtin.os) {
.macosx, .linux, .freebsd, .netbsd, .dragonfly => {
const O_LARGEFILE = if (@hasDecl(os, "O_LARGEFILE")) os.O_LARGEFILE else 0;
const flags = O_LARGEFILE | os.O_RDONLY | os.O_CLOEXEC;
return openPosix(path, flags, File.default_mode);
},
.windows => return windows.CreateFile(
path,
windows.GENERIC_READ,
windows.FILE_SHARE_READ,
null,
windows.OPEN_EXISTING,
windows.FILE_ATTRIBUTE_NORMAL | windows.FILE_FLAG_OVERLAPPED,
null,
),
else => @compileError("Unsupported OS"),
}
}
/// Creates if does not exist. Truncates the file if it exists.
/// Uses the default mode.
pub fn openWrite(path: []const u8) File.OpenError!fd_t {
return openWriteMode(path, File.default_mode);
}
/// Creates if does not exist. Truncates the file if it exists.
pub fn openWriteMode(path: []const u8, mode: File.Mode) File.OpenError!fd_t {
switch (builtin.os) {
.macosx,
.linux,
.freebsd,
.netbsd,
.dragonfly,
=> {
const O_LARGEFILE = if (@hasDecl(os, "O_LARGEFILE")) os.O_LARGEFILE else 0;
const flags = O_LARGEFILE | os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC | os.O_TRUNC;
return openPosix(path, flags, File.default_mode);
},
.windows => return windows.CreateFile(
path,
windows.GENERIC_WRITE,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
null,
windows.CREATE_ALWAYS,
windows.FILE_ATTRIBUTE_NORMAL | windows.FILE_FLAG_OVERLAPPED,
null,
),
else => @compileError("Unsupported OS"),
}
}
/// Creates if does not exist. Does not truncate.
pub fn openReadWrite(path: []const u8, mode: File.Mode) File.OpenError!fd_t {
switch (builtin.os) {
.macosx, .linux, .freebsd, .netbsd, .dragonfly => {
const O_LARGEFILE = if (@hasDecl(os, "O_LARGEFILE")) os.O_LARGEFILE else 0;
const flags = O_LARGEFILE | os.O_RDWR | os.O_CREAT | os.O_CLOEXEC;
return openPosix(path, flags, mode);
},
.windows => return windows.CreateFile(
path,
windows.GENERIC_WRITE | windows.GENERIC_READ,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
null,
windows.OPEN_ALWAYS,
windows.FILE_ATTRIBUTE_NORMAL | windows.FILE_FLAG_OVERLAPPED,
null,
),
else => @compileError("Unsupported OS"),
}
}
/// This abstraction helps to close file handles in defer expressions
/// without the possibility of failure and without the use of suspend points.
/// Start a `CloseOperation` before opening a file, so that you can defer
/// `CloseOperation.finish`.
/// If you call `setHandle` then finishing will close the fd; otherwise finishing
/// will deallocate the `CloseOperation`.
pub const CloseOperation = struct {
allocator: *Allocator,
os_data: OsData,
const OsData = switch (builtin.os) {
.linux, .macosx, .freebsd, .netbsd, .dragonfly => OsDataPosix,
.windows => struct {
handle: ?fd_t,
},
else => @compileError("Unsupported OS"),
};
const OsDataPosix = struct {
have_fd: bool,
close_req_node: RequestNode,
};
pub fn start(allocator: *Allocator) (error{OutOfMemory}!*CloseOperation) {
const self = try allocator.create(CloseOperation);
self.* = CloseOperation{
.allocator = allocator,
.os_data = switch (builtin.os) {
.linux, .macosx, .freebsd, .netbsd, .dragonfly => initOsDataPosix(self),
.windows => OsData{ .handle = null },
else => @compileError("Unsupported OS"),
},
};
return self;
}
fn initOsDataPosix(self: *CloseOperation) OsData {
return OsData{
.have_fd = false,
.close_req_node = RequestNode{
.prev = null,
.next = null,
.data = Request{
.msg = Request.Msg{
.Close = Request.Msg.Close{ .fd = undefined },
},
.finish = Request.Finish{ .DeallocCloseOperation = self },
},
},
};
}
/// Defer this after creating.
pub fn finish(self: *CloseOperation) void {
switch (builtin.os) {
.linux,
.macosx,
.freebsd,
.netbsd,
.dragonfly,
=> {
if (self.os_data.have_fd) {
global_event_loop.posixFsRequest(&self.os_data.close_req_node);
} else {
self.allocator.destroy(self);
}
},
.windows => {
if (self.os_data.handle) |handle| {
os.close(handle);
}
self.allocator.destroy(self);
},
else => @compileError("Unsupported OS"),
}
}
pub fn setHandle(self: *CloseOperation, handle: fd_t) void {
switch (builtin.os) {
.linux,
.macosx,
.freebsd,
.netbsd,
.dragonfly,
=> {
self.os_data.close_req_node.data.msg.Close.fd = handle;
self.os_data.have_fd = true;
},
.windows => {
self.os_data.handle = handle;
},
else => @compileError("Unsupported OS"),
}
}
/// Undo a `setHandle`.
pub fn clearHandle(self: *CloseOperation) void {
switch (builtin.os) {
.linux,
.macosx,
.freebsd,
.netbsd,
.dragonfly,
=> {
self.os_data.have_fd = false;
},
.windows => {
self.os_data.handle = null;
},
else => @compileError("Unsupported OS"),
}
}
pub fn getHandle(self: *CloseOperation) fd_t {
switch (builtin.os) {
.linux,
.macosx,
.freebsd,
.netbsd,
.dragonfly,
=> {
assert(self.os_data.have_fd);
return self.os_data.close_req_node.data.msg.Close.fd;
},
.windows => {
return self.os_data.handle.?;
},
else => @compileError("Unsupported OS"),
}
}
};
/// contents must remain alive until writeFile completes.
/// TODO make this atomic or provide writeFileAtomic and rename this one to writeFileTruncate
pub fn writeFile(allocator: *Allocator, path: []const u8, contents: []const u8) !void {
return writeFileMode(allocator, path, contents, File.default_mode);
}
/// contents must remain alive until writeFile completes.
pub fn writeFileMode(allocator: *Allocator, path: []const u8, contents: []const u8, mode: File.Mode) !void {
switch (builtin.os) {
.linux,
.macosx,
.freebsd,
.netbsd,
.dragonfly,
=> return writeFileModeThread(allocator, path, contents, mode),
.windows => return writeFileWindows(path, contents),
else => @compileError("Unsupported OS"),
}
}
fn writeFileWindows(path: []const u8, contents: []const u8) !void {
const handle = try windows.CreateFile(
path,
windows.GENERIC_WRITE,
windows.FILE_SHARE_WRITE | windows.FILE_SHARE_READ | windows.FILE_SHARE_DELETE,
null,
windows.CREATE_ALWAYS,
windows.FILE_ATTRIBUTE_NORMAL | windows.FILE_FLAG_OVERLAPPED,
null,
);
defer os.close(handle);
try pwriteWindows(handle, contents, 0);
}
fn writeFileModeThread(allocator: *Allocator, path: []const u8, contents: []const u8, mode: File.Mode) !void {
const path_with_null = try std.cstr.addNullByte(allocator, path);
defer allocator.free(path_with_null);
var req_node = RequestNode{
.prev = null,
.next = null,
.data = Request{
.msg = Request.Msg{
.WriteFile = Request.Msg.WriteFile{
.path = path_with_null[0..path.len],
.contents = contents,
.mode = mode,
.result = undefined,
},
},
.finish = Request.Finish{
.TickNode = Loop.NextTickNode{
.prev = null,
.next = null,
.data = @frame(),
},
},
},
};
errdefer global_event_loop.posixFsCancel(&req_node);
suspend {
global_event_loop.posixFsRequest(&req_node);
}
return req_node.data.msg.WriteFile.result;
}
/// The frame resumes when the last data has been confirmed written, but before the file handle
/// is closed.
/// Caller owns returned memory.
pub fn readFile(allocator: *Allocator, file_path: []const u8, max_size: usize) ![]u8 {
var close_op = try CloseOperation.start(allocator);
defer close_op.finish();
const fd = try openRead(file_path);
close_op.setHandle(fd);
var list = std.ArrayList(u8).init(allocator);
defer list.deinit();
while (true) {
try list.ensureCapacity(list.len + mem.page_size);
const buf = list.items[list.len..];
const buf_array = [_][]u8{buf};
const amt = try preadv(allocator, fd, &buf_array, list.len);
list.len += amt;
if (list.len > max_size) {
return error.FileTooBig;
}
if (amt < buf.len) {
return list.toOwnedSlice();
}
}
}
pub const WatchEventId = enum {
const WatchEventId = enum {
CloseWrite,
Delete,
};
@ -721,7 +29,7 @@ fn hashString(s: []const u16) u32 {
return @truncate(u32, std.hash.Wyhash.hash(0, @sliceToBytes(s)));
}
pub const WatchEventError = error{
const WatchEventError = error{
UserResourceLimitReached,
SystemResources,
AccessDenied,
@ -1307,12 +615,11 @@ pub fn Watch(comptime V: type) type {
const test_tmp_dir = "std_event_fs_test";
test "write a file, watch it, write it again" {
// TODO provide a way to run tests in evented I/O mode
if (!std.io.is_async) return error.SkipZigTest;
// TODO re-enable this test
if (true) return error.SkipZigTest;
const allocator = std.heap.page_allocator;
// TODO move this into event loop too
try os.makePath(allocator, test_tmp_dir);
defer os.deleteTree(test_tmp_dir) catch {};
@ -1366,53 +673,3 @@ fn testFsWatch(allocator: *Allocator) !void {
// TODO test deleting the file and then re-adding it. we should get events for both
}
pub const OutStream = struct {
fd: fd_t,
stream: Stream,
allocator: *Allocator,
offset: usize,
pub const Error = File.WriteError;
pub const Stream = event.io.OutStream(Error);
pub fn init(allocator: *Allocator, fd: fd_t, offset: usize) OutStream {
return OutStream{
.fd = fd,
.offset = offset,
.stream = Stream{ .writeFn = writeFn },
};
}
fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
const self = @fieldParentPtr(OutStream, "stream", out_stream);
const offset = self.offset;
self.offset += bytes.len;
return pwritev(self.allocator, self.fd, [_][]const u8{bytes}, offset);
}
};
pub const InStream = struct {
fd: fd_t,
stream: Stream,
allocator: *Allocator,
offset: usize,
pub const Error = PReadVError; // TODO make this not have OutOfMemory
pub const Stream = event.io.InStream(Error);
pub fn init(allocator: *Allocator, fd: fd_t, offset: usize) InStream {
return InStream{
.fd = fd,
.offset = offset,
.stream = Stream{ .readFn = readFn },
};
}
fn readFn(in_stream: *Stream, bytes: []u8) Error!usize {
const self = @fieldParentPtr(InStream, "stream", in_stream);
const amt = try preadv(self.allocator, self.fd, [_][]u8{bytes}, self.offset);
self.offset += amt;
return amt;
}
};

View File

@ -47,7 +47,10 @@ fn getStdOutHandle() os.fd_t {
}
pub fn getStdOut() File {
return File.openHandle(getStdOutHandle());
return File{
.handle = getStdOutHandle(),
.io_mode = .blocking,
};
}
fn getStdErrHandle() os.fd_t {
@ -63,7 +66,11 @@ fn getStdErrHandle() os.fd_t {
}
pub fn getStdErr() File {
return File.openHandle(getStdErrHandle());
return File{
.handle = getStdErrHandle(),
.io_mode = .blocking,
.async_block_allowed = File.async_block_allowed_yes,
};
}
fn getStdInHandle() os.fd_t {
@ -79,7 +86,10 @@ fn getStdInHandle() os.fd_t {
}
pub fn getStdIn() File {
return File.openHandle(getStdInHandle());
return File{
.handle = getStdInHandle(),
.io_mode = .blocking,
};
}
pub const SeekableStream = @import("io/seekable_stream.zig").SeekableStream;

View File

@ -9,14 +9,11 @@ pub const stack_size: usize = if (@hasDecl(root, "stack_size_std_io_OutStream"))
else
default_stack_size;
/// TODO this is not integrated with evented I/O yet.
/// https://github.com/ziglang/zig/issues/3557
pub fn OutStream(comptime WriteError: type) type {
return struct {
const Self = @This();
pub const Error = WriteError;
// TODO https://github.com/ziglang/zig/issues/3557
pub const WriteFn = if (std.io.is_async and false)
pub const WriteFn = if (std.io.is_async)
async fn (self: *Self, bytes: []const u8) Error!void
else
fn (self: *Self, bytes: []const u8) Error!void;
@ -24,8 +21,7 @@ pub fn OutStream(comptime WriteError: type) type {
writeFn: WriteFn,
pub fn write(self: *Self, bytes: []const u8) Error!void {
// TODO https://github.com/ziglang/zig/issues/3557
if (std.io.is_async and false) {
if (std.io.is_async) {
// Let's not be writing 0xaa in safe modes for upwards of 4 MiB for every stream write.
@setRuntimeSafety(false);
var stack_frame: [stack_size]u8 align(std.Target.stack_align) = undefined;
@ -36,12 +32,12 @@ pub fn OutStream(comptime WriteError: type) type {
}
pub fn print(self: *Self, comptime format: []const u8, args: var) Error!void {
return std.fmt.format(self, Error, self.writeFn, format, args);
return std.fmt.format(self, Error, write, format, args);
}
pub fn writeByte(self: *Self, byte: u8) Error!void {
const slice = @as(*const [1]u8, &byte)[0..];
return self.writeFn(self, slice);
const array = [1]u8{byte};
return self.write(&array);
}
pub fn writeByteNTimes(self: *Self, byte: u8, n: usize) Error!void {
@ -51,7 +47,7 @@ pub fn OutStream(comptime WriteError: type) type {
var remaining: usize = n;
while (remaining > 0) {
const to_write = std.math.min(remaining, bytes.len);
try self.writeFn(self, bytes[0..to_write]);
try self.write(bytes[0..to_write]);
remaining -= to_write;
}
}
@ -60,32 +56,32 @@ pub fn OutStream(comptime WriteError: type) type {
pub fn writeIntNative(self: *Self, comptime T: type, value: T) Error!void {
var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
mem.writeIntNative(T, &bytes, value);
return self.writeFn(self, &bytes);
return self.write(&bytes);
}
/// Write a foreign-endian integer.
pub fn writeIntForeign(self: *Self, comptime T: type, value: T) Error!void {
var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
mem.writeIntForeign(T, &bytes, value);
return self.writeFn(self, &bytes);
return self.write(&bytes);
}
pub fn writeIntLittle(self: *Self, comptime T: type, value: T) Error!void {
var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
mem.writeIntLittle(T, &bytes, value);
return self.writeFn(self, &bytes);
return self.write(&bytes);
}
pub fn writeIntBig(self: *Self, comptime T: type, value: T) Error!void {
var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
mem.writeIntBig(T, &bytes, value);
return self.writeFn(self, &bytes);
return self.write(&bytes);
}
pub fn writeInt(self: *Self, comptime T: type, value: T, endian: builtin.Endian) Error!void {
var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
mem.writeInt(T, &bytes, value, endian);
return self.writeFn(self, &bytes);
return self.write(&bytes);
}
};
}

View File

@ -18,12 +18,11 @@ pub fn SinglyLinkedList(comptime T: type) type {
/// Node inside the linked list wrapping the actual data.
pub const Node = struct {
next: ?*Node,
next: ?*Node = null,
data: T,
pub fn init(data: T) Node {
return Node{
.next = null,
.data = data,
};
}
@ -196,14 +195,12 @@ pub fn TailQueue(comptime T: type) type {
/// Node inside the linked list wrapping the actual data.
pub const Node = struct {
prev: ?*Node,
next: ?*Node,
prev: ?*Node = null,
next: ?*Node = null,
data: T,
pub fn init(data: T) Node {
return Node{
.prev = null,
.next = null,
.data = data,
};
}

View File

@ -271,7 +271,7 @@ pub const Address = extern union {
options: std.fmt.FormatOptions,
context: var,
comptime Errors: type,
output: fn (@TypeOf(context), []const u8) Errors!void,
comptime output: fn (@TypeOf(context), []const u8) Errors!void,
) !void {
switch (self.any.family) {
os.AF_INET => {
@ -361,7 +361,7 @@ pub const Address = extern union {
};
pub fn connectUnixSocket(path: []const u8) !fs.File {
const opt_non_block = if (std.io.mode == .evented) os.SOCK_NONBLOCK else 0;
const opt_non_block = if (std.io.is_async) os.SOCK_NONBLOCK else 0;
const sockfd = try os.socket(
os.AF_UNIX,
os.SOCK_STREAM | os.SOCK_CLOEXEC | opt_non_block,
@ -377,7 +377,10 @@ pub fn connectUnixSocket(path: []const u8) !fs.File {
addr.getOsSockLen(),
);
return fs.File.openHandle(sockfd);
return fs.File{
.handle = sockfd,
.io_mode = std.io.mode,
};
}
pub const AddressList = struct {
@ -412,7 +415,7 @@ pub fn tcpConnectToAddress(address: Address) !fs.File {
errdefer os.close(sockfd);
try os.connect(sockfd, &address.any, address.getOsSockLen());
return fs.File{ .handle = sockfd };
return fs.File{ .handle = sockfd, .io_mode = std.io.mode };
}
/// Call `AddressList.deinit` on the result.
@ -1379,7 +1382,10 @@ pub const StreamServer = struct {
var adr_len: os.socklen_t = @sizeOf(Address);
if (os.accept4(self.sockfd.?, &accepted_addr.any, &adr_len, accept_flags)) |fd| {
return Connection{
.file = fs.File.openHandle(fd),
.file = fs.File{
.handle = fd,
.io_mode = std.io.mode,
},
.address = accepted_addr,
};
} else |err| switch (err) {

View File

@ -81,17 +81,15 @@ test "resolve DNS" {
}
test "listen on a port, send bytes, receive bytes" {
if (!std.io.is_async) return error.SkipZigTest;
if (std.builtin.os != .linux) {
// TODO build abstractions for other operating systems
return error.SkipZigTest;
}
if (std.io.mode != .evented) {
// TODO add ability to run tests in non-blocking I/O mode
return error.SkipZigTest;
}
// TODO doing this at comptime crashed the compiler
const localhost = net.Address.parseIp("127.0.0.1", 0);
const localhost = try net.Address.parseIp("127.0.0.1", 0);
var server = net.StreamServer.init(net.StreamServer.Options{});
defer server.deinit();

View File

@ -169,7 +169,12 @@ fn getRandomBytesDevURandom(buf: []u8) !void {
return error.NoDevice;
}
const stream = &std.fs.File.openHandle(fd).inStream().stream;
const file = std.fs.File{
.handle = fd,
.io_mode = .blocking,
.async_block_allowed = std.fs.File.async_block_allowed_yes,
};
const stream = &file.inStream().stream;
stream.readNoEof(buf) catch return error.Unexpected;
}
@ -293,7 +298,7 @@ pub const ReadError = error{
/// via the event loop. Otherwise EAGAIN results in error.WouldBlock.
pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
if (builtin.os == .windows) {
return windows.ReadFile(fd, buf);
return windows.ReadFile(fd, buf, null);
}
if (builtin.os == .wasi and !builtin.link_libc) {
@ -335,9 +340,37 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
}
/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
/// If the application has a global event loop enabled, EAGAIN is handled
/// via the event loop. Otherwise EAGAIN results in error.WouldBlock.
///
/// For POSIX systems, if the application has a global event loop enabled, EAGAIN is handled
/// via the event loop. Otherwise EAGAIN results in `error.WouldBlock`.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// This operation is non-atomic on the following systems:
/// * Windows
/// On these systems, the read races with concurrent writes to the same file descriptor.
pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
if (builtin.os == .windows) {
// TODO batch these into parallel requests
var off: usize = 0;
var iov_i: usize = 0;
var inner_off: usize = 0;
while (true) {
const v = iov[iov_i];
const amt_read = try read(fd, v.iov_base[inner_off .. v.iov_len - inner_off]);
off += amt_read;
inner_off += amt_read;
if (inner_off == v.len) {
iov_i += 1;
inner_off = 0;
if (iov_i == iov.len) {
return off;
}
}
if (amt_read == 0) return off; // EOF
} else unreachable; // TODO https://github.com/ziglang/zig/issues/707
}
while (true) {
// TODO handle the case when iov_len is too large and get rid of this @intCast
const rc = system.readv(fd, iov.ptr, @intCast(u32, iov.len));
@ -363,8 +396,56 @@ pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
}
/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
///
/// Retries when interrupted by a signal.
///
/// For POSIX systems, if the application has a global event loop enabled, EAGAIN is handled
/// via the event loop. Otherwise EAGAIN results in `error.WouldBlock`.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
pub fn pread(fd: fd_t, buf: []u8, offset: u64) ReadError!usize {
    if (builtin.os == .windows) {
        return windows.ReadFile(fd, buf, offset);
    }
    while (true) {
        const rc = system.pread(fd, buf.ptr, buf.len, offset);
        switch (errno(rc)) {
            0 => return @intCast(usize, rc),
            EINTR => continue, // Interrupted by a signal; retry.
            EINVAL => unreachable,
            EFAULT => unreachable,
            EAGAIN => if (std.event.Loop.instance) |loop| {
                // Evented mode: park this frame until the fd is readable.
                loop.waitUntilFdReadable(fd);
                continue;
            } else {
                return error.WouldBlock;
            },
            EBADF => unreachable, // Always a race condition.
            EIO => return error.InputOutput,
            EISDIR => return error.IsDir,
            ENOBUFS => return error.SystemResources,
            ENOMEM => return error.SystemResources,
            ECONNRESET => return error.ConnectionResetByPeer,
            else => |err| return unexpectedErrno(err),
        }
    }
    // Fix: removed a stray `return index;` after the loop — `index` is never
    // declared in this function and the loop only exits via `return`.
}
/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
///
/// Retries when interrupted by a signal.
///
/// For POSIX systems, if the application has a global event loop enabled, EAGAIN is handled
/// via the event loop. Otherwise EAGAIN results in `error.WouldBlock`.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
///
/// This operation is non-atomic on the following systems:
/// * Darwin
/// * Windows
/// On these systems, the read races with concurrent writes to the same file descriptor.
pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) ReadError!usize {
if (comptime std.Target.current.isDarwin()) {
// Darwin does not have preadv but it does have pread.
@ -409,6 +490,28 @@ pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) ReadError!usize {
}
}
}
if (builtin.os == .windows) {
// TODO batch these into parallel requests
var off: usize = 0;
var iov_i: usize = 0;
var inner_off: usize = 0;
while (true) {
const v = iov[iov_i];
const amt_read = try pread(fd, v.iov_base[inner_off .. v.iov_len - inner_off], offset + off);
off += amt_read;
inner_off += amt_read;
if (inner_off == v.len) {
iov_i += 1;
inner_off = 0;
if (iov_i == iov.len) {
return off;
}
}
if (amt_read == 0) return off; // EOF
} else unreachable; // TODO https://github.com/ziglang/zig/issues/707
}
while (true) {
// TODO handle the case when iov_len is too large and get rid of this @intCast
const rc = system.preadv(fd, iov.ptr, @intCast(u32, iov.len), offset);
@ -451,11 +554,9 @@ pub const WriteError = error{
/// Write to a file descriptor. Keeps trying if it gets interrupted.
/// If the application has a global event loop enabled, EAGAIN is handled
/// via the event loop. Otherwise EAGAIN results in error.WouldBlock.
/// TODO evented I/O integration is disabled until
/// https://github.com/ziglang/zig/issues/3557 is solved.
pub fn write(fd: fd_t, bytes: []const u8) WriteError!void {
if (builtin.os == .windows) {
return windows.WriteFile(fd, bytes);
return windows.WriteFile(fd, bytes, null);
}
if (builtin.os == .wasi and !builtin.link_libc) {
@ -488,14 +589,12 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!void {
EINTR => continue,
EINVAL => unreachable,
EFAULT => unreachable,
// TODO https://github.com/ziglang/zig/issues/3557
EAGAIN => return error.WouldBlock,
//EAGAIN => if (std.event.Loop.instance) |loop| {
// loop.waitUntilFdWritable(fd);
// continue;
//} else {
// return error.WouldBlock;
//},
EAGAIN => if (std.event.Loop.instance) |loop| {
loop.waitUntilFdWritable(fd);
continue;
} else {
return error.WouldBlock;
},
EBADF => unreachable, // Always a race condition.
EDESTADDRREQ => unreachable, // `connect` was never called.
EDQUOT => return error.DiskQuota,
@ -540,8 +639,57 @@ pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!void {
}
}
/// Write to a file descriptor, with a position offset.
///
/// Retries when interrupted by a signal.
///
/// For POSIX systems, if the application has a global event loop enabled, EAGAIN is handled
/// via the event loop. Otherwise EAGAIN results in `error.WouldBlock`.
/// On Windows, if the application has a global event loop enabled, I/O Completion Ports are
/// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) WriteError!void {
    // Consistency: use the same `builtin.os == .windows` check as `read`,
    // `pread`, and `write` (was `comptime std.Target.current.isWindows()`).
    if (builtin.os == .windows) {
        return windows.WriteFile(fd, bytes, offset);
    }
    while (true) {
        const rc = system.pwrite(fd, bytes.ptr, bytes.len, offset);
        switch (errno(rc)) {
            0 => return,
            EINTR => continue, // Interrupted by a signal; retry.
            EINVAL => unreachable,
            EFAULT => unreachable,
            EAGAIN => if (std.event.Loop.instance) |loop| {
                // Evented mode: park this frame until the fd is writable.
                loop.waitUntilFdWritable(fd);
                continue;
            } else {
                return error.WouldBlock;
            },
            EBADF => unreachable, // Always a race condition.
            EDESTADDRREQ => unreachable, // `connect` was never called.
            EDQUOT => return error.DiskQuota,
            EFBIG => return error.FileTooBig,
            EIO => return error.InputOutput,
            ENOSPC => return error.NoSpaceLeft,
            EPERM => return error.AccessDenied,
            EPIPE => return error.BrokenPipe,
            else => |err| return unexpectedErrno(err),
        }
    }
}
/// Write multiple buffers to a file descriptor, with a position offset.
/// Keeps trying if it gets interrupted.
///
/// Retries when interrupted by a signal.
///
/// If the application has a global event loop enabled, EAGAIN is handled
/// via the event loop. Otherwise EAGAIN results in `error.WouldBlock`.
///
/// This operation is non-atomic on the following systems:
/// * Darwin
/// * Windows
/// On these systems, the write races with concurrent writes to the same file descriptor, and
/// the file can be in a partially written state when an error occurs.
pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) WriteError!void {
if (comptime std.Target.current.isDarwin()) {
// Darwin does not have pwritev but it does have pwrite.
@ -589,6 +737,15 @@ pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) WriteError!void
}
}
if (comptime std.Target.current.isWindows()) {
var off = offset;
for (iov) |item| {
try pwrite(fd, item.iov_base[0..item.iov_len], off);
off += buf.len;
}
return;
}
while (true) {
// TODO handle the case when iov_len is too large and get rid of this @intCast
const rc = system.pwritev(fd, iov.ptr, @intCast(u32, iov.len), offset);
@ -694,7 +851,7 @@ pub fn openC(file_path: [*:0]const u8, flags: u32, perm: usize) OpenError!fd_t {
/// Open and possibly create a file. Keeps trying if it gets interrupted.
/// `file_path` is relative to the open directory handle `dir_fd`.
/// See also `openatC`.
pub fn openat(dir_fd: fd_t, file_path: []const u8, flags: u32, mode: usize) OpenError!fd_t {
pub fn openat(dir_fd: fd_t, file_path: []const u8, flags: u32, mode: mode_t) OpenError!fd_t {
const file_path_c = try toPosixPath(file_path);
return openatC(dir_fd, &file_path_c, flags, mode);
}
@ -702,7 +859,7 @@ pub fn openat(dir_fd: fd_t, file_path: []const u8, flags: u32, mode: usize) Open
/// Open and possibly create a file. Keeps trying if it gets interrupted.
/// `file_path` is relative to the open directory handle `dir_fd`.
/// See also `openat`.
pub fn openatC(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: usize) OpenError!fd_t {
pub fn openatC(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t) OpenError!fd_t {
while (true) {
const rc = system.openat(dir_fd, file_path, flags, mode);
switch (errno(rc)) {
@ -2372,6 +2529,22 @@ pub fn pipe() PipeError![2]fd_t {
}
pub fn pipe2(flags: u32) PipeError![2]fd_t {
if (comptime std.Target.current.isDarwin()) {
var fds: [2]fd_t = try pipe();
if (flags == 0) return fds;
errdefer {
close(fds[0]);
close(fds[1]);
}
for (fds) |fd| switch (errno(system.fcntl(fd, F_SETFL, flags))) {
0 => {},
EINVAL => unreachable, // Invalid flags
EBADF => unreachable, // Always a race condition
else => |err| return unexpectedErrno(err),
};
return fds;
}
var fds: [2]fd_t = undefined;
switch (errno(system.pipe2(&fds, flags))) {
0 => return fds,

View File

@ -4,6 +4,7 @@ const maxInt = std.math.maxInt;
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const mode_t = c_uint;
pub const in_port_t = u16;
pub const sa_family_t = u8;
@ -1223,3 +1224,161 @@ pub const RTLD_NEXT = @intToPtr(*c_void, ~maxInt(usize));
pub const RTLD_DEFAULT = @intToPtr(*c_void, ~maxInt(usize) - 1);
pub const RTLD_SELF = @intToPtr(*c_void, ~maxInt(usize) - 2);
pub const RTLD_MAIN_ONLY = @intToPtr(*c_void, ~maxInt(usize) - 4);
/// duplicate file descriptor
pub const F_DUPFD = 0;
/// get file descriptor flags
pub const F_GETFD = 1;
/// set file descriptor flags
pub const F_SETFD = 2;
/// get file status flags
pub const F_GETFL = 3;
/// set file status flags
pub const F_SETFL = 4;
/// get SIGIO/SIGURG proc/pgrp
pub const F_GETOWN = 5;
/// set SIGIO/SIGURG proc/pgrp
pub const F_SETOWN = 6;
/// get record locking information
pub const F_GETLK = 7;
/// set record locking information
pub const F_SETLK = 8;
/// F_SETLK; wait if blocked
pub const F_SETLKW = 9;
/// F_SETLK; wait if blocked, return on timeout
pub const F_SETLKWTIMEOUT = 10;
/// NOTE(review): undocumented here; presumably flushes cached data — confirm against Apple's <sys/fcntl.h>
pub const F_FLUSH_DATA = 40;
/// Used for regression test
pub const F_CHKCLEAN = 41;
/// Preallocate storage
pub const F_PREALLOCATE = 42;
/// Truncate a file without zeroing space
pub const F_SETSIZE = 43;
/// Issue an advisory read async with no copy to user
pub const F_RDADVISE = 44;
/// turn read ahead off/on for this fd
pub const F_RDAHEAD = 45;
/// turn data caching off/on for this fd
pub const F_NOCACHE = 48;
/// file offset to device offset
pub const F_LOG2PHYS = 49;
/// return the full path of the fd
pub const F_GETPATH = 50;
/// fsync + ask the drive to flush to the media
pub const F_FULLFSYNC = 51;
/// find which component (if any) is a package
pub const F_PATHPKG_CHECK = 52;
/// "freeze" all fs operations
pub const F_FREEZE_FS = 53;
/// "thaw" all fs operations
pub const F_THAW_FS = 54;
/// turn data caching off/on (globally) for this file
pub const F_GLOBAL_NOCACHE = 55;
/// add detached signatures
pub const F_ADDSIGS = 59;
/// add signature from same file (used by dyld for shared libs)
pub const F_ADDFILESIGS = 61;
/// used in conjunction with F_NOCACHE to indicate that DIRECT, synchronous writes
/// should not be used (i.e. it's ok to temporarily create cached pages)
pub const F_NODIRECT = 62;
/// Get the protection class of a file from the EA, returns int
pub const F_GETPROTECTIONCLASS = 63;
/// Set the protection class of a file for the EA, requires int
pub const F_SETPROTECTIONCLASS = 64;
/// file offset to device offset, extended
pub const F_LOG2PHYS_EXT = 65;
/// get record locking information, per-process
pub const F_GETLKPID = 66;
/// Mark the file as being the backing store for another filesystem
pub const F_SETBACKINGSTORE = 70;
/// return the full path of the FD, but error in specific mtmd circumstances
pub const F_GETPATH_MTMINFO = 71;
/// Returns the code directory, with associated hashes, to the caller
pub const F_GETCODEDIR = 72;
/// No SIGPIPE generated on EPIPE
pub const F_SETNOSIGPIPE = 73;
/// Status of SIGPIPE for this fd
pub const F_GETNOSIGPIPE = 74;
/// For some cases, we need to rewrap the key for AKS/MKB
pub const F_TRANSCODEKEY = 75;
/// file being written to by a single writer... if throttling enabled, writes
/// may be broken into smaller chunks with throttling in between
pub const F_SINGLE_WRITER = 76;
/// Get the protection version number for this filesystem
pub const F_GETPROTECTIONLEVEL = 77;
/// Add detached code signatures (used by dyld for shared libs)
pub const F_FINDSIGS = 78;
/// Add signature from same file, only if it is signed by Apple (used by dyld for simulator)
pub const F_ADDFILESIGS_FOR_DYLD_SIM = 83;
/// fsync + issue barrier to drive
pub const F_BARRIERFSYNC = 85;
/// Add signature from same file, return end offset in structure on success
pub const F_ADDFILESIGS_RETURN = 97;
/// Check if Library Validation allows this Mach-O file to be mapped into the calling process
pub const F_CHECK_LV = 98;
/// Deallocate a range of the file
pub const F_PUNCHHOLE = 99;
/// Trim an active file
pub const F_TRIM_ACTIVE_FILE = 100;
/// NOTE(review): per its name, the base value for filesystem-specific fcntl commands — confirm against <sys/fcntl.h>
pub const FCNTL_FS_SPECIFIC_BASE = 0x00010000;
/// mark the dup with FD_CLOEXEC
pub const F_DUPFD_CLOEXEC = 67;
/// close-on-exec flag
pub const FD_CLOEXEC = 1;
/// shared or read lock
pub const F_RDLCK = 1;
/// unlock
pub const F_UNLCK = 2;
/// exclusive or write lock
pub const F_WRLCK = 3;

View File

@ -7,6 +7,7 @@ pub fn S_ISCHR(m: u32) bool {
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const off_t = c_long;
pub const mode_t = c_uint;
pub const ENOTSUP = EOPNOTSUPP;
pub const EWOULDBLOCK = EAGAIN;

View File

@ -3,6 +3,7 @@ const maxInt = std.math.maxInt;
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const mode_t = c_uint;
pub const socklen_t = u32;

View File

@ -12,6 +12,8 @@ const socklen_t = linux.socklen_t;
const iovec = linux.iovec;
const iovec_const = linux.iovec_const;
pub const mode_t = usize;
pub const SYS_read = 0;
pub const SYS_write = 1;
pub const SYS_open = 2;

View File

@ -3,6 +3,7 @@ const maxInt = std.math.maxInt;
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const mode_t = c_uint;
/// Renamed from `kevent` to `Kevent` to avoid conflict with function name.
pub const Kevent = extern struct {

View File

@ -130,6 +130,7 @@ pub const EVENTTYPE_FD_WRITE: eventtype_t = 2;
pub const exitcode_t = u32;
pub const fd_t = u32;
pub const mode_t = u32;
pub const fdflags_t = u16;
pub const FDFLAG_APPEND: fdflags_t = 0x0001;

View File

@ -5,6 +5,7 @@ const ws2_32 = @import("../windows/ws2_32.zig");
pub const fd_t = HANDLE;
pub const pid_t = HANDLE;
pub const mode_t = u0;
pub const PATH_MAX = 260;

View File

@ -344,24 +344,77 @@ pub fn FindClose(hFindFile: HANDLE) void {
assert(kernel32.FindClose(hFindFile) != 0);
}
pub const ReadFileError = error{Unexpected};
/// Errors returned by `ReadFile`.
pub const ReadFileError = error{
    OperationAborted,
    BrokenPipe,
    Unexpected,
};
/// Read from `in_hFile` into `buffer`, optionally at position `offset`,
/// returning the number of bytes read.
///
/// When a global event loop is running, the read is performed asynchronously
/// via an I/O Completion Port (an `offset` is then required).
///
/// If buffer's length exceeds what a Windows DWORD integer can hold, it will be broken into
/// multiple non-atomic reads.
pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64) ReadFileError!usize {
    if (std.event.Loop.instance) |loop| {
        // TODO support async ReadFile with no offset
        const off = offset.?;
        var resume_node = std.event.Loop.ResumeNode.Basic{
            .base = .{
                .id = .Basic,
                .handle = @frame(),
                .overlapped = OVERLAPPED{
                    .Internal = 0,
                    .InternalHigh = 0,
                    .Offset = @truncate(u32, off),
                    .OffsetHigh = @truncate(u32, off >> 32),
                    .hEvent = null,
                },
            },
        };
        // TODO only call create io completion port once per fd
        // Fix: use the `in_hFile` parameter and file-local names; the previous
        // code referenced an undefined `fd` and a spurious `windows.` qualifier.
        _ = CreateIoCompletionPort(in_hFile, loop.os_data.io_port, undefined, undefined) catch undefined;
        loop.beginOneEvent();
        suspend {
            // TODO handle buffer bigger than DWORD can hold
            _ = kernel32.ReadFile(in_hFile, buffer.ptr, @intCast(DWORD, buffer.len), null, &resume_node.base.overlapped);
        }
        var bytes_transferred: DWORD = undefined;
        if (kernel32.GetOverlappedResult(in_hFile, &resume_node.base.overlapped, &bytes_transferred, FALSE) == 0) {
            switch (kernel32.GetLastError()) {
                .IO_PENDING => unreachable,
                .OPERATION_ABORTED => return error.OperationAborted,
                .BROKEN_PIPE => return error.BrokenPipe,
                .HANDLE_EOF => return @as(usize, bytes_transferred),
                else => |err| return unexpectedError(err),
            }
        }
        return @as(usize, bytes_transferred);
    } else {
        // Blocking path: loop because one kernel32.ReadFile call reads at most
        // maxInt(DWORD) bytes.
        var index: usize = 0;
        while (index < buffer.len) {
            const want_read_count = @intCast(DWORD, math.min(@as(DWORD, maxInt(DWORD)), buffer.len - index));
            var amt_read: DWORD = undefined;
            var overlapped_data: OVERLAPPED = undefined;
            const overlapped: ?*OVERLAPPED = if (offset) |off| blk: {
                overlapped_data = .{
                    .Internal = 0,
                    .InternalHigh = 0,
                    .Offset = @truncate(u32, off + index),
                    .OffsetHigh = @truncate(u32, (off + index) >> 32),
                    .hEvent = null,
                };
                break :blk &overlapped_data;
            } else null;
            if (kernel32.ReadFile(in_hFile, buffer.ptr + index, want_read_count, &amt_read, overlapped) == 0) {
                switch (kernel32.GetLastError()) {
                    .OPERATION_ABORTED => continue,
                    .BROKEN_PIPE => return index,
                    else => |err| return unexpectedError(err),
                }
            }
            if (amt_read == 0) return index; // EOF
            index += amt_read;
        }
        return index;
    }
}
pub const WriteFileError = error{
@ -371,20 +424,66 @@ pub const WriteFileError = error{
Unexpected,
};
/// Write `bytes` to `handle`, optionally at position `offset`.
///
/// When a global event loop is running, the write is performed asynchronously
/// via an I/O Completion Port (an `offset` is then required).
pub fn WriteFile(handle: HANDLE, bytes: []const u8, offset: ?u64) WriteFileError!void {
    if (std.event.Loop.instance) |loop| {
        // TODO support async WriteFile with no offset
        const off = offset.?;
        var resume_node = std.event.Loop.ResumeNode.Basic{
            .base = .{
                .id = .Basic,
                .handle = @frame(),
                .overlapped = OVERLAPPED{
                    .Internal = 0,
                    .InternalHigh = 0,
                    .Offset = @truncate(u32, off),
                    .OffsetHigh = @truncate(u32, off >> 32),
                    .hEvent = null,
                },
            },
        };
        // TODO only call create io completion port once per fd
        // Fix: use the `handle` parameter and file-local names; the previous
        // code referenced an undefined `fd` and spurious `windows.` qualifiers.
        // `catch undefined` matches the error handling in `ReadFile` above.
        _ = CreateIoCompletionPort(handle, loop.os_data.io_port, undefined, undefined) catch undefined;
        loop.beginOneEvent();
        suspend {
            // TODO replace this @intCast with a loop that writes all the bytes
            _ = kernel32.WriteFile(handle, bytes.ptr, @intCast(DWORD, bytes.len), null, &resume_node.base.overlapped);
        }
        var bytes_transferred: DWORD = undefined;
        if (kernel32.GetOverlappedResult(handle, &resume_node.base.overlapped, &bytes_transferred, FALSE) == 0) {
            switch (kernel32.GetLastError()) {
                .IO_PENDING => unreachable,
                .INVALID_USER_BUFFER => return error.SystemResources,
                .NOT_ENOUGH_MEMORY => return error.SystemResources,
                .OPERATION_ABORTED => return error.OperationAborted,
                .NOT_ENOUGH_QUOTA => return error.SystemResources,
                .BROKEN_PIPE => return error.BrokenPipe,
                else => |err| return unexpectedError(err),
            }
        }
    } else {
        var bytes_written: DWORD = undefined;
        var overlapped_data: OVERLAPPED = undefined;
        const overlapped: ?*OVERLAPPED = if (offset) |off| blk: {
            overlapped_data = .{
                .Internal = 0,
                .InternalHigh = 0,
                .Offset = @truncate(u32, off),
                .OffsetHigh = @truncate(u32, off >> 32),
                .hEvent = null,
            };
            break :blk &overlapped_data;
        } else null;
        // TODO replace this @intCast with a loop that writes all the bytes
        if (kernel32.WriteFile(handle, bytes.ptr, @intCast(u32, bytes.len), &bytes_written, overlapped) == 0) {
            switch (kernel32.GetLastError()) {
                .INVALID_USER_BUFFER => return error.SystemResources,
                .NOT_ENOUGH_MEMORY => return error.SystemResources,
                .OPERATION_ABORTED => return error.OperationAborted,
                .NOT_ENOUGH_QUOTA => return error.SystemResources,
                .IO_PENDING => unreachable, // this path is for blocking files only
                .BROKEN_PIPE => return error.BrokenPipe,
                else => |err| return unexpectedError(err),
            }
        }
    }
}

View File

@ -2,6 +2,8 @@ const std = @import("std");
const io = std.io;
const builtin = @import("builtin");
pub const io_mode: io.Mode = builtin.test_io_mode;
pub fn main() anyerror!void {
const test_fn_list = builtin.test_functions;
var ok_count: usize = 0;
@ -12,6 +14,11 @@ pub fn main() anyerror!void {
error.TimerUnsupported => @panic("timer unsupported"),
};
var async_frame_buffer: []align(std.Target.stack_align) u8 = undefined;
// TODO this is on the next line (using `undefined` above) because otherwise zig incorrectly
// ignores the alignment of the slice.
async_frame_buffer = &[_]u8{};
for (test_fn_list) |test_fn, i| {
std.testing.base_allocator_instance.reset();
@ -21,7 +28,24 @@ pub fn main() anyerror!void {
if (progress.terminal == null) {
std.debug.warn("{}/{} {}...", .{ i + 1, test_fn_list.len, test_fn.name });
}
if (test_fn.func()) |_| {
const result = if (test_fn.async_frame_size) |size| switch (io_mode) {
.evented => blk: {
if (async_frame_buffer.len < size) {
std.heap.page_allocator.free(async_frame_buffer);
async_frame_buffer = try std.heap.page_allocator.alignedAlloc(u8, std.Target.stack_align, size);
}
const casted_fn = @ptrCast(async fn () anyerror!void, test_fn.func);
break :blk await @asyncCall(async_frame_buffer, {}, casted_fn);
},
.blocking => {
skip_count += 1;
test_node.end();
progress.log("{}...SKIP (async test)\n", .{test_fn.name});
if (progress.terminal == null) std.debug.warn("SKIP (async test)\n", .{});
continue;
},
} else test_fn.func();
if (result) |_| {
ok_count += 1;
test_node.end();
std.testing.allocator_instance.validate() catch |err| switch (err) {

View File

@ -29,7 +29,7 @@ const Package = @import("package.zig").Package;
const link = @import("link.zig").link;
const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
const CInt = @import("c_int.zig").CInt;
const fs = event.fs;
const fs = std.fs;
const util = @import("util.zig");
const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB
@ -442,7 +442,7 @@ pub const Compilation = struct {
comp.name = try Buffer.init(comp.arena(), name);
comp.llvm_triple = try util.getTriple(comp.arena(), target);
comp.llvm_target = try util.llvmTargetFromTriple(comp.llvm_triple);
comp.zig_std_dir = try std.fs.path.join(comp.arena(), &[_][]const u8{ zig_lib_dir, "std" });
comp.zig_std_dir = try fs.path.join(comp.arena(), &[_][]const u8{ zig_lib_dir, "std" });
const opt_level = switch (build_mode) {
.Debug => llvm.CodeGenLevelNone,
@ -488,8 +488,8 @@ pub const Compilation = struct {
defer comp.events.deinit();
if (root_src_path) |root_src| {
const dirname = std.fs.path.dirname(root_src) orelse ".";
const basename = std.fs.path.basename(root_src);
const dirname = fs.path.dirname(root_src) orelse ".";
const basename = fs.path.basename(root_src);
comp.root_package = try Package.create(comp.arena(), dirname, basename);
comp.std_package = try Package.create(comp.arena(), comp.zig_std_dir, "std.zig");
@ -521,7 +521,7 @@ pub const Compilation = struct {
if (comp.tmp_dir.getOrNull()) |tmp_dir_result|
if (tmp_dir_result.*) |tmp_dir| {
// TODO evented I/O?
std.fs.deleteTree(tmp_dir) catch {};
fs.deleteTree(tmp_dir) catch {};
} else |_| {};
}
@ -797,7 +797,7 @@ pub const Compilation = struct {
async fn rebuildFile(self: *Compilation, root_scope: *Scope.Root) BuildError!void {
const tree_scope = blk: {
const source_code = fs.readFile(
const source_code = fs.cwd().readFileAlloc(
self.gpa(),
root_scope.realpath,
max_src_size,
@ -935,8 +935,8 @@ pub const Compilation = struct {
fn initialCompile(self: *Compilation) !void {
if (self.root_src_path) |root_src_path| {
const root_scope = blk: {
// TODO async/await std.fs.realpath
const root_src_real_path = std.fs.realpathAlloc(self.gpa(), root_src_path) catch |err| {
// TODO async/await fs.realpath
const root_src_real_path = fs.realpathAlloc(self.gpa(), root_src_path) catch |err| {
try self.addCompileErrorCli(root_src_path, "unable to open: {}", .{@errorName(err)});
return;
};
@ -1157,7 +1157,7 @@ pub const Compilation = struct {
const file_name = try std.fmt.allocPrint(self.gpa(), "{}{}", .{ file_prefix[0..], suffix });
defer self.gpa().free(file_name);
const full_path = try std.fs.path.join(self.gpa(), &[_][]const u8{ tmp_dir, file_name[0..] });
const full_path = try fs.path.join(self.gpa(), &[_][]const u8{ tmp_dir, file_name[0..] });
errdefer self.gpa().free(full_path);
return Buffer.fromOwnedSlice(self.gpa(), full_path);
@ -1178,8 +1178,8 @@ pub const Compilation = struct {
const zig_dir_path = try getZigDir(self.gpa());
defer self.gpa().free(zig_dir_path);
const tmp_dir = try std.fs.path.join(self.arena(), &[_][]const u8{ zig_dir_path, comp_dir_name[0..] });
try std.fs.makePath(self.gpa(), tmp_dir);
const tmp_dir = try fs.path.join(self.arena(), &[_][]const u8{ zig_dir_path, comp_dir_name[0..] });
try fs.makePath(self.gpa(), tmp_dir);
return tmp_dir;
}
@ -1351,7 +1351,7 @@ async fn addFnToLinkSet(comp: *Compilation, fn_val: *Value.Fn) Compilation.Build
}
fn getZigDir(allocator: *mem.Allocator) ![]u8 {
return std.fs.getAppDataDir(allocator, "zig");
return fs.getAppDataDir(allocator, "zig");
}
fn analyzeFnType(

View File

@ -998,7 +998,8 @@ fn printCharValues(out: var, bytes: []const u8) !void {
fn printUnderstandableChar(out: var, char: u8) !void {
if (!std.ascii.isPrint(char) or char == ' ') {
std.fmt.format(out.context, anyerror, out.output, "\\x{X:2}", .{char}) catch {};
const output = @typeInfo(@TypeOf(out)).Pointer.child.output;
std.fmt.format(out.context, anyerror, output, "\\x{X:2}", .{char}) catch {};
} else {
try out.write("'");
try out.write(&[_]u8{printable_char_tab[char]});
@ -1021,34 +1022,20 @@ comptime {
// output: must be a function that takes a `self` idiom parameter
// and a bytes parameter
// context: must be that self
fn makeOutput(output: var, context: var) Output(@TypeOf(output)) {
return Output(@TypeOf(output)){
.output = output,
fn makeOutput(comptime output: var, context: var) Output(output, @TypeOf(context)) {
return Output(output, @TypeOf(context)){
.context = context,
};
}
fn Output(comptime T: type) type {
const args = switch (@typeInfo(T)) {
.Fn => |f| f.args,
else => @compileError("output parameter is not a function"),
};
if (args.len != 2) {
@compileError("output function must take 2 arguments");
}
const at0 = args[0].arg_type orelse @compileError("output arg[0] does not have a type");
const at1 = args[1].arg_type orelse @compileError("output arg[1] does not have a type");
const arg1p = switch (@typeInfo(at1)) {
.Pointer => |p| p,
else => @compileError("output arg[1] is not a slice"),
};
if (arg1p.child != u8) @compileError("output arg[1] is not a u8 slice");
fn Output(comptime output_func: var, comptime Context: type) type {
return struct {
output: T,
context: at0,
context: Context,
fn write(self: *@This(), bytes: []const u8) !void {
try self.output(self.context, bytes);
pub const output = output_func;
fn write(self: @This(), bytes: []const u8) !void {
try output_func(self.context, bytes);
}
};
}

View File

@ -14,7 +14,7 @@ pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![
const test_index_file = try fs.path.join(allocator, &[_][]const u8{ test_zig_dir, "std", "std.zig" });
defer allocator.free(test_index_file);
var file = try fs.File.openRead(test_index_file);
var file = try fs.cwd().openRead(test_index_file);
file.close();
return test_zig_dir;

View File

@ -724,7 +724,7 @@ async fn fmtPath(fmt: *Fmt, file_path_ref: []const u8, check_mode: bool) FmtErro
if (try held.value.put(file_path, {})) |_| return;
}
const source_code = event.fs.readFile(
const source_code = fs.cwd().readFileAlloc(
fmt.allocator,
file_path,
max_src_size,

View File

@ -2246,6 +2246,7 @@ struct CodeGen {
bool enable_dump_analysis;
bool enable_doc_generation;
bool disable_bin_generation;
bool test_is_evented;
CodeModel code_model;
Buf *mmacosx_version_min;
@ -2491,6 +2492,9 @@ struct ScopeExpr {
size_t children_len;
MemoizedBool need_spill;
// This is a hack. I apologize for this, I need this to work so that I
// can make progress on other fronts. I'll pay off this tech debt eventually.
bool spill_harder;
};
// synchronized with code in define_builtin_compile_vars

View File

@ -6108,11 +6108,14 @@ static void mark_suspension_point(Scope *scope) {
continue;
}
case ScopeIdExpr: {
ScopeExpr *parent_expr_scope = reinterpret_cast<ScopeExpr *>(scope);
if (!looking_for_exprs) {
if (parent_expr_scope->spill_harder) {
parent_expr_scope->need_spill = MemoizedBoolTrue;
}
// Now we're only looking for a block, to see if it's in a loop (see the case ScopeIdBlock)
continue;
}
ScopeExpr *parent_expr_scope = reinterpret_cast<ScopeExpr *>(scope);
if (child_expr_scope != nullptr) {
for (size_t i = 0; parent_expr_scope->children_ptr[i] != child_expr_scope; i += 1) {
assert(i < parent_expr_scope->children_len);
@ -6148,6 +6151,15 @@ static bool scope_needs_spill(Scope *scope) {
zig_unreachable();
}
static ZigType *resolve_type_isf(ZigType *ty) {
if (ty->id != ZigTypeIdPointer) return ty;
InferredStructField *isf = ty->data.pointer.inferred_struct_field;
if (isf == nullptr) return ty;
TypeStructField *field = find_struct_type_field(isf->inferred_struct_type, isf->field_name);
assert(field != nullptr);
return field->type_entry;
}
static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
Error err;
@ -6249,6 +6261,9 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
}
ZigFn *callee = call->fn_entry;
if (callee == nullptr) {
if (call->fn_ref->value->type->data.fn.fn_type_id.cc != CallingConventionAsync) {
continue;
}
add_node_error(g, call->base.base.source_node,
buf_sprintf("function is not comptime-known; @asyncCall required"));
return ErrorSemanticAnalyzeFail;
@ -6356,11 +6371,19 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
IrInstGen *instruction = block->instruction_list.at(instr_i);
if (instruction->id == IrInstGenIdAwait ||
instruction->id == IrInstGenIdVarPtr ||
instruction->id == IrInstGenIdAlloca)
instruction->id == IrInstGenIdAlloca ||
instruction->id == IrInstGenIdSpillBegin ||
instruction->id == IrInstGenIdSpillEnd)
{
// This instruction does its own spilling specially, or otherwise doesn't need it.
continue;
}
if (instruction->id == IrInstGenIdCast &&
reinterpret_cast<IrInstGenCast *>(instruction)->cast_op == CastOpNoop)
{
// The IR instruction exists only to change the type according to Zig. No spill needed.
continue;
}
if (instruction->value->special != ConstValSpecialRuntime)
continue;
if (instruction->base.ref_count == 0)
@ -6406,7 +6429,7 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
} else {
param_name = buf_sprintf("@arg%" ZIG_PRI_usize, arg_i);
}
ZigType *param_type = param_info->type;
ZigType *param_type = resolve_type_isf(param_info->type);
if ((err = type_resolve(g, param_type, ResolveStatusSizeKnown))) {
return err;
}
@ -6425,7 +6448,7 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
instruction->field_index = SIZE_MAX;
ZigType *ptr_type = instruction->base.value->type;
assert(ptr_type->id == ZigTypeIdPointer);
ZigType *child_type = ptr_type->data.pointer.child_type;
ZigType *child_type = resolve_type_isf(ptr_type->data.pointer.child_type);
if (!type_has_bits(child_type))
continue;
if (instruction->base.base.ref_count == 0)
@ -6452,8 +6475,6 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
}
instruction->field_index = fields.length;
src_assert(child_type->id != ZigTypeIdPointer || child_type->data.pointer.inferred_struct_field == nullptr,
instruction->base.base.source_node);
fields.append({name, child_type, instruction->align});
}
@ -8255,6 +8276,8 @@ static void resolve_llvm_types_struct(CodeGen *g, ZigType *struct_type, ResolveS
size_t debug_field_index = 0;
for (size_t i = 0; i < field_count; i += 1) {
TypeStructField *field = struct_type->data.structure.fields[i];
//fprintf(stderr, "%s at gen index %zu\n", buf_ptr(field->name), field->gen_index);
size_t gen_field_index = field->gen_index;
if (gen_field_index == SIZE_MAX) {
continue;

View File

@ -343,33 +343,67 @@ static LLVMLinkage to_llvm_linkage(GlobalLinkageId id) {
zig_unreachable();
}
// label (grep this): [fn_frame_struct_layout]
static uint32_t frame_index_trace_arg(CodeGen *g, ZigType *return_type) {
// [0] *ReturnType (callee's)
// [1] *ReturnType (awaiter's)
// [2] ReturnType
uint32_t return_field_count = type_has_bits(return_type) ? 3 : 0;
return frame_ret_start + return_field_count;
}
struct CalcLLVMFieldIndex {
uint32_t offset;
uint32_t field_index;
};
// label (grep this): [fn_frame_struct_layout]
static uint32_t frame_index_arg(CodeGen *g, ZigType *return_type) {
bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, return_type);
// [0] *StackTrace (callee's)
// [1] *StackTrace (awaiter's)
uint32_t trace_field_count = have_stack_trace ? 2 : 0;
return frame_index_trace_arg(g, return_type) + trace_field_count;
}
// label (grep this): [fn_frame_struct_layout]
static uint32_t frame_index_trace_stack(CodeGen *g, FnTypeId *fn_type_id) {
uint32_t result = frame_index_arg(g, fn_type_id->return_type);
for (size_t i = 0; i < fn_type_id->param_count; i += 1) {
if (type_has_bits(fn_type_id->param_info->type)) {
result += 1;
static void calc_llvm_field_index_add(CodeGen *g, CalcLLVMFieldIndex *calc, ZigType *ty) {
if (!type_has_bits(ty)) return;
uint32_t ty_align = get_abi_alignment(g, ty);
if (calc->offset % ty_align != 0) {
uint32_t llvm_align = LLVMABIAlignmentOfType(g->target_data_ref, get_llvm_type(g, ty));
if (llvm_align >= ty_align) {
ty_align = llvm_align; // llvm's padding is sufficient
} else if (calc->offset) {
calc->field_index += 1; // zig will insert an extra padding field here
}
calc->offset += ty_align - (calc->offset % ty_align); // padding bytes
}
return result;
calc->offset += ty->abi_size;
calc->field_index += 1;
}
// label (grep this): [fn_frame_struct_layout]
static void frame_index_trace_arg_calc(CodeGen *g, CalcLLVMFieldIndex *calc, ZigType *return_type) {
calc_llvm_field_index_add(g, calc, g->builtin_types.entry_usize); // function pointer
calc_llvm_field_index_add(g, calc, g->builtin_types.entry_usize); // resume index
calc_llvm_field_index_add(g, calc, g->builtin_types.entry_usize); // awaiter index
if (type_has_bits(return_type)) {
calc_llvm_field_index_add(g, calc, g->builtin_types.entry_usize); // *ReturnType (callee's)
calc_llvm_field_index_add(g, calc, g->builtin_types.entry_usize); // *ReturnType (awaiter's)
calc_llvm_field_index_add(g, calc, return_type); // ReturnType
}
}
static uint32_t frame_index_trace_arg(CodeGen *g, ZigType *return_type) {
CalcLLVMFieldIndex calc = {0};
frame_index_trace_arg_calc(g, &calc, return_type);
return calc.field_index;
}
// label (grep this): [fn_frame_struct_layout]
static void frame_index_arg_calc(CodeGen *g, CalcLLVMFieldIndex *calc, ZigType *return_type) {
frame_index_trace_arg_calc(g, calc, return_type);
if (codegen_fn_has_err_ret_tracing_arg(g, return_type)) {
calc_llvm_field_index_add(g, calc, g->builtin_types.entry_usize); // *StackTrace (callee's)
calc_llvm_field_index_add(g, calc, g->builtin_types.entry_usize); // *StackTrace (awaiter's)
}
}
// label (grep this): [fn_frame_struct_layout]
static uint32_t frame_index_trace_stack(CodeGen *g, ZigFn *fn) {
size_t field_index = 6;
bool have_stack_trace = codegen_fn_has_err_ret_tracing_arg(g, fn->type_entry->data.fn.fn_type_id.return_type);
if (have_stack_trace) {
field_index += 2;
}
field_index += fn->type_entry->data.fn.fn_type_id.param_count;
ZigType *locals_struct = fn->frame_type->data.frame.locals_struct;
TypeStructField *field = locals_struct->data.structure.fields[field_index];
return field->gen_index;
}
@ -2527,7 +2561,12 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutableGen *executable, Ir
LLVMBuildRet(g->builder, by_val_value);
}
} else if (instruction->operand == nullptr) {
LLVMBuildRetVoid(g->builder);
if (g->cur_ret_ptr == nullptr) {
LLVMBuildRetVoid(g->builder);
} else {
LLVMValueRef by_val_value = gen_load_untyped(g, g->cur_ret_ptr, 0, false, "");
LLVMBuildRet(g->builder, by_val_value);
}
} else {
LLVMValueRef value = ir_llvm_value(g, instruction->operand);
LLVMBuildRet(g->builder, value);
@ -3920,7 +3959,9 @@ static void set_call_instr_sret(CodeGen *g, LLVMValueRef call_instr) {
static void render_async_spills(CodeGen *g) {
ZigType *fn_type = g->cur_fn->type_entry;
ZigType *import = get_scope_import(&g->cur_fn->fndef_scope->base);
uint32_t async_var_index = frame_index_arg(g, fn_type->data.fn.fn_type_id.return_type);
CalcLLVMFieldIndex arg_calc = {0};
frame_index_arg_calc(g, &arg_calc, fn_type->data.fn.fn_type_id.return_type);
for (size_t var_i = 0; var_i < g->cur_fn->variable_list.length; var_i += 1) {
ZigVar *var = g->cur_fn->variable_list.at(var_i);
@ -3941,8 +3982,8 @@ static void render_async_spills(CodeGen *g) {
continue;
}
var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, async_var_index, var->name);
async_var_index += 1;
calc_llvm_field_index_add(g, &arg_calc, var->var_type);
var->value_ref = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr, arg_calc.field_index - 1, var->name);
if (var->decl_node) {
var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope),
var->name, import->data.structure.root_struct->di_file,
@ -4023,6 +4064,8 @@ static void gen_init_stack_trace(CodeGen *g, LLVMValueRef trace_field_ptr, LLVMV
}
static LLVMValueRef ir_render_call(CodeGen *g, IrExecutableGen *executable, IrInstGenCall *instruction) {
Error err;
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMValueRef fn_val;
@ -4053,6 +4096,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutableGen *executable, IrIn
ZigList<ZigType *> gen_param_types = {};
LLVMValueRef result_loc = instruction->result_loc ? ir_llvm_value(g, instruction->result_loc) : nullptr;
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
bool need_frame_ptr_ptr_spill = false;
ZigType *anyframe_type = nullptr;
LLVMValueRef frame_result_loc_uncasted = nullptr;
LLVMValueRef frame_result_loc;
LLVMValueRef awaiter_init_val;
@ -4091,14 +4136,17 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutableGen *executable, IrIn
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
need_frame_ptr_ptr_spill = true;
LLVMValueRef frame_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_ptr_index, "");
LLVMValueRef frame_ptr = LLVMBuildLoad(g->builder, frame_ptr_ptr, "");
if (instruction->fn_entry == nullptr) {
ZigType *anyframe_type = get_any_frame_type(g, src_return_type);
anyframe_type = get_any_frame_type(g, src_return_type);
frame_result_loc = LLVMBuildBitCast(g->builder, frame_ptr, get_llvm_type(g, anyframe_type), "");
} else {
ZigType *ptr_frame_type = get_pointer_to_type(g,
get_fn_frame_type(g, instruction->fn_entry), false);
ZigType *frame_type = get_fn_frame_type(g, instruction->fn_entry);
if ((err = type_resolve(g, frame_type, ResolveStatusLLVMFull)))
codegen_report_errors_and_exit(g);
ZigType *ptr_frame_type = get_pointer_to_type(g, frame_type, false);
frame_result_loc = LLVMBuildBitCast(g->builder, frame_ptr,
get_llvm_type(g, ptr_frame_type), "");
}
@ -4265,17 +4313,35 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutableGen *executable, IrIn
LLVMValueRef result;
if (callee_is_async) {
uint32_t arg_start_i = frame_index_arg(g, fn_type->data.fn.fn_type_id.return_type);
CalcLLVMFieldIndex arg_calc_start = {0};
frame_index_arg_calc(g, &arg_calc_start, fn_type->data.fn.fn_type_id.return_type);
LLVMValueRef casted_frame;
if (instruction->new_stack != nullptr && instruction->fn_entry == nullptr) {
// We need the frame type to be a pointer to a struct that includes the args
size_t field_count = arg_start_i + gen_param_values.length;
// Count ahead to determine how many llvm struct fields we need.
CalcLLVMFieldIndex arg_calc = arg_calc_start;
for (size_t i = 0; i < gen_param_types.length; i += 1) {
calc_llvm_field_index_add(g, &arg_calc, gen_param_types.at(i));
}
size_t field_count = arg_calc.field_index;
LLVMTypeRef *field_types = allocate_nonzero<LLVMTypeRef>(field_count);
LLVMGetStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc)), field_types);
assert(LLVMCountStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc))) == arg_start_i);
assert(LLVMCountStructElementTypes(LLVMGetElementType(LLVMTypeOf(frame_result_loc))) == arg_calc_start.field_index);
arg_calc = arg_calc_start;
for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) {
field_types[arg_start_i + arg_i] = LLVMTypeOf(gen_param_values.at(arg_i));
CalcLLVMFieldIndex prev = arg_calc;
calc_llvm_field_index_add(g, &arg_calc, gen_param_types.at(arg_i));
field_types[arg_calc.field_index - 1] = LLVMTypeOf(gen_param_values.at(arg_i));
if (arg_calc.field_index - prev.field_index > 1) {
// Padding field
uint32_t pad_bytes = arg_calc.offset - prev.offset - gen_param_types.at(arg_i)->abi_size;
LLVMTypeRef pad_llvm_type = LLVMArrayType(LLVMInt8Type(), pad_bytes);
field_types[arg_calc.field_index - 2] = pad_llvm_type;
}
}
LLVMTypeRef frame_with_args_type = LLVMStructType(field_types, field_count, false);
LLVMTypeRef ptr_frame_with_args_type = LLVMPointerType(frame_with_args_type, 0);
@ -4285,8 +4351,10 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutableGen *executable, IrIn
casted_frame = frame_result_loc;
}
CalcLLVMFieldIndex arg_calc = arg_calc_start;
for (size_t arg_i = 0; arg_i < gen_param_values.length; arg_i += 1) {
LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, casted_frame, arg_start_i + arg_i, "");
calc_llvm_field_index_add(g, &arg_calc, gen_param_types.at(arg_i));
LLVMValueRef arg_ptr = LLVMBuildStructGEP(g->builder, casted_frame, arg_calc.field_index - 1, "");
gen_assign_raw(g, arg_ptr, get_pointer_to_type(g, gen_param_types.at(arg_i), true),
gen_param_values.at(arg_i));
}
@ -4349,11 +4417,19 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutableGen *executable, IrIn
}
}
if (frame_result_loc_uncasted != nullptr && instruction->fn_entry != nullptr) {
// Instead of a spill, we do the bitcast again. The uncasted LLVM IR instruction will
// be an Alloca from the entry block, so it does not need to be spilled.
frame_result_loc = LLVMBuildBitCast(g->builder, frame_result_loc_uncasted,
LLVMPointerType(get_llvm_type(g, instruction->fn_entry->frame_type), 0), "");
if (need_frame_ptr_ptr_spill) {
LLVMValueRef frame_slice_ptr = ir_llvm_value(g, instruction->new_stack);
LLVMValueRef frame_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_slice_ptr, slice_ptr_index, "");
frame_result_loc_uncasted = LLVMBuildLoad(g->builder, frame_ptr_ptr, "");
}
if (frame_result_loc_uncasted != nullptr) {
if (instruction->fn_entry != nullptr) {
frame_result_loc = LLVMBuildBitCast(g->builder, frame_result_loc_uncasted,
LLVMPointerType(get_llvm_type(g, instruction->fn_entry->frame_type), 0), "");
} else {
frame_result_loc = LLVMBuildBitCast(g->builder, frame_result_loc_uncasted,
get_llvm_type(g, anyframe_type), "");
}
}
LLVMValueRef result_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
@ -5644,18 +5720,24 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutableGen *ex
bool want_safety = instruction->safety_check_on && ir_want_runtime_safety(g, &instruction->base) &&
g->errors_by_index.length > 1;
bool value_has_bits;
if ((err = type_has_bits2(g, instruction->base.value->type, &value_has_bits)))
codegen_report_errors_and_exit(g);
if (!want_safety && !value_has_bits)
return nullptr;
ZigType *ptr_type = instruction->value->value->type;
assert(ptr_type->id == ZigTypeIdPointer);
ZigType *err_union_type = ptr_type->data.pointer.child_type;
ZigType *payload_type = err_union_type->data.error_union.payload_type;
LLVMValueRef err_union_ptr = ir_llvm_value(g, instruction->value);
LLVMValueRef zero = LLVMConstNull(get_llvm_type(g, g->err_tag_type));
bool value_has_bits;
if ((err = type_has_bits2(g, instruction->base.value->type, &value_has_bits)))
codegen_report_errors_and_exit(g);
if (!want_safety && !value_has_bits) {
if (instruction->initializing) {
gen_store_untyped(g, zero, err_union_ptr, 0, false);
}
return nullptr;
}
LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type);
if (!type_has_bits(err_union_type->data.error_union.err_set_type)) {
@ -5670,7 +5752,6 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutableGen *ex
} else {
err_val = err_union_handle;
}
LLVMValueRef zero = LLVMConstNull(get_llvm_type(g, g->err_tag_type));
LLVMValueRef cond_val = LLVMBuildICmp(g->builder, LLVMIntEQ, err_val, zero, "");
LLVMBasicBlockRef err_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapErrError");
LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapErrOk");
@ -5690,6 +5771,9 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutableGen *ex
}
return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_payload_index, "");
} else {
if (instruction->initializing) {
gen_store_untyped(g, zero, err_union_ptr, 0, false);
}
return nullptr;
}
}
@ -7742,7 +7826,7 @@ static void do_code_gen(CodeGen *g) {
}
uint32_t trace_field_index_stack = UINT32_MAX;
if (codegen_fn_has_err_ret_tracing_stack(g, fn_table_entry, true)) {
trace_field_index_stack = frame_index_trace_stack(g, fn_type_id);
trace_field_index_stack = frame_index_trace_stack(g, fn_table_entry);
g->cur_err_ret_trace_val_stack = LLVMBuildStructGEP(g->builder, g->cur_frame_ptr,
trace_field_index_stack, "");
}
@ -8602,6 +8686,9 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
buf_appendf(contents,
"pub var test_functions: []TestFn = undefined; // overwritten later\n"
);
buf_appendf(contents, "pub const test_io_mode = %s;\n",
g->test_is_evented ? ".evented" : ".blocking");
}
return contents;
@ -8635,6 +8722,7 @@ static Error define_builtin_compile_vars(CodeGen *g) {
cache_bool(&cache_hash, g->is_dynamic);
cache_bool(&cache_hash, g->is_test_build);
cache_bool(&cache_hash, g->is_single_threaded);
cache_bool(&cache_hash, g->test_is_evented);
cache_int(&cache_hash, g->code_model);
cache_int(&cache_hash, g->zig_target->is_native);
cache_int(&cache_hash, g->zig_target->arch);
@ -9392,22 +9480,13 @@ static void update_test_functions_builtin_decl(CodeGen *g) {
for (size_t i = 0; i < g->test_fns.length; i += 1) {
ZigFn *test_fn_entry = g->test_fns.at(i);
if (fn_is_async(test_fn_entry)) {
ErrorMsg *msg = add_node_error(g, test_fn_entry->proto_node,
buf_create_from_str("test functions cannot be async"));
add_error_note(g, msg, test_fn_entry->proto_node,
buf_sprintf("this restriction may be lifted in the future. See https://github.com/ziglang/zig/issues/3117 for more details"));
add_async_error_notes(g, msg, test_fn_entry);
continue;
}
ZigValue *this_val = &test_fn_array->data.x_array.data.s_none.elements[i];
this_val->special = ConstValSpecialStatic;
this_val->type = struct_type;
this_val->parent.id = ConstParentIdArray;
this_val->parent.data.p_array.array_val = test_fn_array;
this_val->parent.data.p_array.elem_index = i;
this_val->data.x_struct.fields = alloc_const_vals_ptrs(2);
this_val->data.x_struct.fields = alloc_const_vals_ptrs(3);
ZigValue *name_field = this_val->data.x_struct.fields[0];
ZigValue *name_array_val = create_const_str_lit(g, &test_fn_entry->symbol_name)->data.x_ptr.data.ref.pointee;
@ -9419,6 +9498,19 @@ static void update_test_functions_builtin_decl(CodeGen *g) {
fn_field->data.x_ptr.special = ConstPtrSpecialFunction;
fn_field->data.x_ptr.mut = ConstPtrMutComptimeConst;
fn_field->data.x_ptr.data.fn.fn_entry = test_fn_entry;
ZigValue *frame_size_field = this_val->data.x_struct.fields[2];
frame_size_field->type = get_optional_type(g, g->builtin_types.entry_usize);
frame_size_field->special = ConstValSpecialStatic;
frame_size_field->data.x_optional = nullptr;
if (fn_is_async(test_fn_entry)) {
frame_size_field->data.x_optional = create_const_vals(1);
frame_size_field->data.x_optional->special = ConstValSpecialStatic;
frame_size_field->data.x_optional->type = g->builtin_types.entry_usize;
bigint_init_unsigned(&frame_size_field->data.x_optional->data.x_bigint,
test_fn_entry->frame_type->abi_size);
}
}
report_errors_and_maybe_exit(g);
@ -10350,6 +10442,7 @@ static Error check_cache(CodeGen *g, Buf *manifest_dir, Buf *digest) {
if (g->is_test_build) {
cache_buf_opt(ch, g->test_filter);
cache_buf_opt(ch, g->test_name_prefix);
cache_bool(ch, g->test_is_evented);
}
cache_bool(ch, g->link_eh_frame_hdr);
cache_bool(ch, g->is_single_threaded);

View File

@ -5252,6 +5252,7 @@ static IrInstSrc *ir_gen_return(IrBuilderSrc *irb, Scope *scope, AstNode *node,
return irb->codegen->invalid_inst_src;
} else {
return_value = ir_build_const_void(irb, scope, node);
ir_build_end_expr(irb, scope, node, return_value, &result_loc_ret->base);
}
ir_mark_gen(ir_build_add_implicit_return_type(irb, scope, node, return_value, result_loc_ret));
@ -5262,7 +5263,7 @@ static IrInstSrc *ir_gen_return(IrBuilderSrc *irb, Scope *scope, AstNode *node,
if (!have_err_defers && !irb->codegen->have_err_ret_tracing) {
// only generate unconditional defers
ir_gen_defers_for_block(irb, scope, outer_scope, false);
IrInstSrc *result = ir_build_return_src(irb, scope, node, return_value);
IrInstSrc *result = ir_build_return_src(irb, scope, node, nullptr);
result_loc_ret->base.source_instruction = result;
return result;
}
@ -5271,10 +5272,6 @@ static IrInstSrc *ir_gen_return(IrBuilderSrc *irb, Scope *scope, AstNode *node,
IrBasicBlockSrc *err_block = ir_create_basic_block(irb, scope, "ErrRetErr");
IrBasicBlockSrc *ok_block = ir_create_basic_block(irb, scope, "ErrRetOk");
if (!have_err_defers) {
ir_gen_defers_for_block(irb, scope, outer_scope, false);
}
IrInstSrc *is_err = ir_build_test_err_src(irb, scope, node, return_value, false, true);
IrInstSrc *is_comptime;
@ -5288,22 +5285,18 @@ static IrInstSrc *ir_gen_return(IrBuilderSrc *irb, Scope *scope, AstNode *node,
IrBasicBlockSrc *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt");
ir_set_cursor_at_end_and_append_block(irb, err_block);
if (have_err_defers) {
ir_gen_defers_for_block(irb, scope, outer_scope, true);
}
ir_gen_defers_for_block(irb, scope, outer_scope, true);
if (irb->codegen->have_err_ret_tracing && !should_inline) {
ir_build_save_err_ret_addr_src(irb, scope, node);
}
ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, ok_block);
if (have_err_defers) {
ir_gen_defers_for_block(irb, scope, outer_scope, false);
}
ir_gen_defers_for_block(irb, scope, outer_scope, false);
ir_build_br(irb, scope, node, ret_stmt_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block);
IrInstSrc *result = ir_build_return_src(irb, scope, node, return_value);
IrInstSrc *result = ir_build_return_src(irb, scope, node, nullptr);
result_loc_ret->base.source_instruction = result;
return result;
}
@ -8874,7 +8867,10 @@ static IrInstSrc *ir_gen_if_optional_expr(IrBuilderSrc *irb, Scope *scope, AstNo
AstNode *else_node = node->data.test_expr.else_node;
bool var_is_ptr = node->data.test_expr.var_is_ptr;
IrInstSrc *maybe_val_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr);
ScopeExpr *spill_scope = create_expr_scope(irb->codegen, expr_node, scope);
spill_scope->spill_harder = true;
IrInstSrc *maybe_val_ptr = ir_gen_node_extra(irb, expr_node, &spill_scope->base, LValPtr, nullptr);
if (maybe_val_ptr == irb->codegen->invalid_inst_src)
return maybe_val_ptr;
@ -8899,7 +8895,7 @@ static IrInstSrc *ir_gen_if_optional_expr(IrBuilderSrc *irb, Scope *scope, AstNo
ir_set_cursor_at_end_and_append_block(irb, then_block);
Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime);
Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, &spill_scope->base, is_comptime);
Scope *var_scope;
if (var_symbol) {
bool is_shadowable = false;
@ -9619,7 +9615,10 @@ static IrInstSrc *ir_gen_catch(IrBuilderSrc *irb, Scope *parent_scope, AstNode *
}
IrInstSrc *err_union_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr, nullptr);
ScopeExpr *spill_scope = create_expr_scope(irb->codegen, op1_node, parent_scope);
spill_scope->spill_harder = true;
IrInstSrc *err_union_ptr = ir_gen_node_extra(irb, op1_node, &spill_scope->base, LValPtr, nullptr);
if (err_union_ptr == irb->codegen->invalid_inst_src)
return irb->codegen->invalid_inst_src;
@ -9641,7 +9640,7 @@ static IrInstSrc *ir_gen_catch(IrBuilderSrc *irb, Scope *parent_scope, AstNode *
is_comptime);
ir_set_cursor_at_end_and_append_block(irb, err_block);
Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, parent_scope, is_comptime);
Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, &spill_scope->base, is_comptime);
Scope *err_scope;
if (var_node) {
assert(var_node->type == NodeTypeSymbol);
@ -15494,6 +15493,12 @@ static IrInstGen *ir_analyze_instruction_add_implicit_return_type(IrAnalyze *ira
}
static IrInstGen *ir_analyze_instruction_return(IrAnalyze *ira, IrInstSrcReturn *instruction) {
if (instruction->operand == nullptr) {
// result location mechanism took care of it.
IrInstGen *result = ir_build_return_gen(ira, &instruction->base.base, nullptr);
return ir_finish_anal(ira, result);
}
IrInstGen *operand = instruction->operand->child;
if (type_is_invalid(operand->value->type))
return ir_unreach_error(ira);
@ -19586,6 +19591,12 @@ static IrInstGen *ir_analyze_fn_call(IrAnalyze *ira, IrInst* source_instr,
if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
return result_loc;
}
IrInstGen *dummy_value = ir_const(ira, source_instr, impl_fn_type_id->return_type);
dummy_value->value->special = ConstValSpecialRuntime;
IrInstGen *dummy_result = ir_implicit_cast2(ira, source_instr,
dummy_value, result_loc->value->type->data.pointer.child_type);
if (type_is_invalid(dummy_result->value->type))
return ira->codegen->invalid_inst_gen;
ZigType *res_child_type = result_loc->value->type->data.pointer.child_type;
if (res_child_type == ira->codegen->builtin_types.entry_var) {
res_child_type = impl_fn_type_id->return_type;
@ -19718,6 +19729,12 @@ static IrInstGen *ir_analyze_fn_call(IrAnalyze *ira, IrInst* source_instr,
if (type_is_invalid(result_loc->value->type) || result_loc->value->type->id == ZigTypeIdUnreachable) {
return result_loc;
}
IrInstGen *dummy_value = ir_const(ira, source_instr, return_type);
dummy_value->value->special = ConstValSpecialRuntime;
IrInstGen *dummy_result = ir_implicit_cast2(ira, source_instr,
dummy_value, result_loc->value->type->data.pointer.child_type);
if (type_is_invalid(dummy_result->value->type))
return ira->codegen->invalid_inst_gen;
ZigType *res_child_type = result_loc->value->type->data.pointer.child_type;
if (res_child_type == ira->codegen->builtin_types.entry_var) {
res_child_type = return_type;
@ -29548,8 +29565,13 @@ static IrInstGen *ir_analyze_instruction_spill_begin(IrAnalyze *ira, IrInstSrcSp
if (!type_has_bits(operand->value->type))
return ir_const_void(ira, &instruction->base.base);
ir_assert(instruction->spill_id == SpillIdRetErrCode, &instruction->base.base);
ira->new_irb.exec->need_err_code_spill = true;
switch (instruction->spill_id) {
case SpillIdInvalid:
zig_unreachable();
case SpillIdRetErrCode:
ira->new_irb.exec->need_err_code_spill = true;
break;
}
return ir_build_spill_begin_gen(ira, &instruction->base.base, operand, instruction->spill_id);
}
@ -29559,8 +29581,12 @@ static IrInstGen *ir_analyze_instruction_spill_end(IrAnalyze *ira, IrInstSrcSpil
if (type_is_invalid(operand->value->type))
return ira->codegen->invalid_inst_gen;
if (ir_should_inline(ira->old_irb.exec, instruction->base.base.scope) || !type_has_bits(operand->value->type))
if (ir_should_inline(ira->old_irb.exec, instruction->base.base.scope) ||
!type_has_bits(operand->value->type) ||
instr_is_comptime(operand))
{
return operand;
}
ir_assert(instruction->begin->base.child->id == IrInstGenIdSpillBegin, &instruction->base.base);
IrInstGenSpillBegin *begin = reinterpret_cast<IrInstGenSpillBegin *>(instruction->begin->base.child);

View File

@ -135,6 +135,7 @@ static int print_full_usage(const char *arg0, FILE *file, int return_code) {
" --test-name-prefix [text] add prefix to all tests\n"
" --test-cmd [arg] specify test execution command one arg at a time\n"
" --test-cmd-bin appends test binary path to test cmd args\n"
" --test-evented-io runs the test in evented I/O mode\n"
, arg0);
return return_code;
}
@ -428,6 +429,7 @@ int main(int argc, char **argv) {
ZigList<CFile *> c_source_files = {0};
const char *test_filter = nullptr;
const char *test_name_prefix = nullptr;
bool test_evented_io = false;
size_t ver_major = 0;
size_t ver_minor = 0;
size_t ver_patch = 0;
@ -709,6 +711,8 @@ int main(int argc, char **argv) {
cur_pkg = cur_pkg->parent;
} else if (strcmp(arg, "-ffunction-sections") == 0) {
function_sections = true;
} else if (strcmp(arg, "--test-evented-io") == 0) {
test_evented_io = true;
} else if (i + 1 >= argc) {
fprintf(stderr, "Expected another argument after %s\n", arg);
return print_error_usage(arg0);
@ -1059,6 +1063,7 @@ int main(int argc, char **argv) {
g->want_stack_check = want_stack_check;
g->want_sanitize_c = want_sanitize_c;
g->want_single_threaded = want_single_threaded;
g->test_is_evented = test_evented_io;
Buf *builtin_source = codegen_generate_builtin_source(g);
if (fwrite(buf_ptr(builtin_source), 1, buf_len(builtin_source), stdout) != buf_len(builtin_source)) {
fprintf(stderr, "unable to write to stdout: %s\n", strerror(ferror(stdout)));
@ -1232,6 +1237,7 @@ int main(int argc, char **argv) {
if (test_filter) {
codegen_set_test_filter(g, buf_create_from_str(test_filter));
}
g->test_is_evented = test_evented_io;
if (test_name_prefix) {
codegen_set_test_name_prefix(g, buf_create_from_str(test_name_prefix));

View File

@ -20,6 +20,30 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:1:20: error: dependency loop detected",
});
cases.add("function call assigned to incorrect type",
\\export fn entry() void {
\\ var arr: [4]f32 = undefined;
\\ arr = concat();
\\}
\\fn concat() [16]f32 {
\\ return [1]f32{0}**16;
\\}
, &[_][]const u8{
"tmp.zig:3:17: error: expected type '[4]f32', found '[16]f32'",
});
cases.add("generic function call assigned to incorrect type",
\\pub export fn entry() void {
\\ var res: []i32 = undefined;
\\ res = myAlloc(i32);
\\}
\\fn myAlloc(comptime arg: type) anyerror!arg{
\\ unreachable;
\\}
, &[_][]const u8{
"tmp.zig:3:18: error: expected type '[]i32', found 'anyerror!i32",
});
cases.addTest("non-exhaustive enums",
\\const A = enum {
\\ a,
@ -5279,25 +5303,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:2:30: error: cannot set section of local variable 'foo'",
});
cases.add("returning address of local variable - simple",
\\export fn foo() *i32 {
\\ var a: i32 = undefined;
\\ return &a;
\\}
, &[_][]const u8{
"tmp.zig:3:13: error: function returns address of local variable",
});
cases.add("returning address of local variable - phi",
\\export fn foo(c: bool) *i32 {
\\ var a: i32 = undefined;
\\ var b: i32 = undefined;
\\ return if (c) &a else &b;
\\}
, &[_][]const u8{
"tmp.zig:4:12: error: function returns address of local variable",
});
cases.add("inner struct member shadowing outer struct member",
\\fn A() type {
\\ return struct {

View File

@ -2,6 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectError = std.testing.expectError;
var global_x: i32 = 1;
@ -1329,3 +1330,154 @@ test "async call with @call" {
};
S.doTheTest();
}
// Regression test: a 0-bit argument (the empty tuple literal `.{}`) passed
// after a non-0-bit argument must not corrupt the async frame layout; `x`
// has to survive the suspend point and be observable after resume.
test "async function passed 0-bit arg after non-0-bit arg" {
    const S = struct {
        var global_frame: anyframe = undefined;
        var global_int: i32 = 0;

        fn foo() void {
            bar(1, .{}) catch unreachable;
        }

        // `args` is a 0-bit value; only `x` occupies frame storage.
        fn bar(x: i32, args: var) anyerror!void {
            global_frame = @frame();
            suspend; // frame captured above; resumed by the test body below
            global_int = x; // x must still be 1 here after resumption
        }
    };
    _ = async S.foo();
    resume S.global_frame;
    expect(S.global_int == 1);
}
// Regression test: an argument with higher alignment (u128, align(16))
// passed after a lower-aligned one (u64, align(8)) must be stored correctly
// in the async frame and still hold its value across a suspend point.
test "async function passed align(16) arg after align(8) arg" {
    const S = struct {
        var global_frame: anyframe = undefined;
        var global_int: u128 = 0;

        fn foo() void {
            var a: u128 = 99;
            bar(10, .{a}) catch unreachable;
        }

        fn bar(x: u64, args: var) anyerror!void {
            expect(x == 10);
            global_frame = @frame();
            suspend; // resumed by the test body below
            global_int = args[0]; // the u128 payload must survive the suspend
        }
    };
    _ = async S.foo();
    resume S.global_frame;
    expect(S.global_int == 99);
}
// Verifies that `@asyncCall` with a comptime-known target function resolves
// that function's frame size, so the caller-provided stack buffer is used
// for the callee's frame and `await` completes correctly after resume.
test "async function call resolves target fn frame, comptime func" {
    const S = struct {
        var global_frame: anyframe = undefined;
        var global_int: i32 = 9;

        fn foo() anyerror!void {
            const stack_size = 1000;
            // Caller-supplied frame storage for the @asyncCall below.
            var stack_frame: [stack_size]u8 align(std.Target.stack_align) = undefined;
            return await @asyncCall(&stack_frame, {}, bar);
        }

        fn bar() anyerror!void {
            global_frame = @frame();
            suspend; // resumed by the test body below
            global_int += 1;
        }
    };
    _ = async S.foo();
    resume S.global_frame;
    expect(S.global_int == 10);
}
// Same as the comptime variant above, but the target is a runtime function
// pointer (`var func: async fn ...`), so the frame resolution for @asyncCall
// cannot rely on a comptime-known callee.
test "async function call resolves target fn frame, runtime func" {
    const S = struct {
        var global_frame: anyframe = undefined;
        var global_int: i32 = 9;

        fn foo() anyerror!void {
            const stack_size = 1000;
            // Caller-supplied frame storage for the @asyncCall below.
            var stack_frame: [stack_size]u8 align(std.Target.stack_align) = undefined;
            // `var` (not const) keeps the function pointer runtime-known.
            var func: async fn () anyerror!void = bar;
            return await @asyncCall(&stack_frame, {}, func);
        }

        fn bar() anyerror!void {
            global_frame = @frame();
            suspend; // resumed by the test body below
            global_int += 1;
        }
    };
    _ = async S.foo();
    resume S.global_frame;
    expect(S.global_int == 10);
}
// Regression test: the payload capture `x` of `if (opt) |x|` must be spilled
// into the async frame across the (implicitly async) call to `bar()`, so it
// is still valid when used after `bar` suspends and is later resumed.
test "properly spill optional payload capture value" {
    const S = struct {
        var global_frame: anyframe = undefined;
        var global_int: usize = 2;

        fn foo() void {
            var opt: ?usize = 1234;
            if (opt) |x| {
                bar(); // suspends inside; x must be spilled across this call
                global_int += x;
            }
        }

        fn bar() void {
            global_frame = @frame();
            suspend; // resumed by the test body below
            global_int += 1;
        }
    };
    _ = async S.foo();
    resume S.global_frame;
    // 2 (initial) + 1 (bar after resume) + 1234 (captured x) == 1237
    expect(S.global_int == 1237);
}
// Regression test: the error return value of `bar()` must be spilled
// correctly even though `foo`'s `defer baz()` — which itself suspends —
// runs between producing the return value and delivering it to the caller.
test "handle defer interfering with return value spill" {
    const S = struct {
        var global_frame1: anyframe = undefined;
        var global_frame2: anyframe = undefined;
        var finished = false;
        var baz_happened = false;

        fn doTheTest() void {
            _ = async testFoo();
            resume global_frame1; // let bar() return error.Bad
            resume global_frame2; // let the deferred baz() finish
            expect(baz_happened);
            expect(finished);
        }

        fn testFoo() void {
            // error.Bad must survive the suspending defer in foo().
            expectError(error.Bad, foo());
            finished = true;
        }

        fn foo() anyerror!void {
            defer baz(); // suspends after bar()'s error is already produced
            return bar() catch |err| return err;
        }

        fn bar() anyerror!void {
            global_frame1 = @frame();
            suspend; // resumed first by doTheTest
            return error.Bad;
        }

        fn baz() void {
            global_frame2 = @frame();
            suspend; // resumed second by doTheTest
            baz_happened = true;
        }
    };
    S.doTheTest();
}