Mirror of https://github.com/ziglang/zig.git
Merge branch 'async-std-lib'
This introduces the concept of "I/O mode", which is configurable by the root source file (the file with `pub fn main`). Applications can put this in their root source file:

```
pub const io_mode = .evented;
```

This populates `std.io.mode` with `std.io.Mode.evented`. When the I/O mode is evented, `std.os.read` handles EAGAIN by suspending until the file descriptor becomes available for reading. Although the std lib event loop supports epoll, kqueue, and Windows I/O Completion Ports, this integration with `std.os.read` currently only works on Linux, and it is only hooked up to `std.os.read`, not, for example, `std.os.write`, child processes, or timers. The fact that we can do this and still have a working master branch is thanks to Zig's lazy analysis, comptime, and inferred async. We can continue to make incremental progress on async std lib features, enabling more and more test cases and coverage.

In addition to `std.io.mode` there is `std.io.is_async`, which is equal to `std.io.mode == .evented`. When the I/O mode is async, `std.io.InStream` notices this and the read function pointer becomes an async function pointer rather than a blocking function pointer. Even in this case, `std.io.InStream` can *still be used as a blocking input stream*. Users of the API control whether it is blocking or async at runtime by whether or not the read function suspends; in the case of file descriptors, for example, this might correspond to whether the file was opened with `O_NONBLOCK`. The `noasync` keyword makes a function call or `await` assert that no suspension happens, and this assertion has runtime safety enabled.

When I/O is async, `std.io.InStream` uses a 4 MiB frame size by default for calling the read function. If this is too large or too small, the application can globally adjust the frame size by declaring `pub const stack_size_std_io_InStream = 1234;` in its root source file. This way `std.io.InStream` is only generated once, avoiding bloat, and as long as this number is configured high enough, everything works. Zig has runtime safety to detect when `@asyncCall` is given too small a buffer for the frame size.

This merge introduces -fstack-report, which can help identify large async function frame sizes and explain what is making them so big. Until #3069 is solved, it is recommended to stick with blocking I/O mode. -fstack-report outputs JSON, which can then be viewed in a GUI that represents the tree structure; as an example, Firefox does a decent job of this.

One feature that is currently missing is detecting that the call stack upper bound is greater than the default for a given target, and passing this upper bound to the linker. As an example, if Zig detects that a 20 MiB stack upper bound is needed - which would be quite reasonable - currently on Linux the application would only be given the default of 16 MiB.

Unrelated miscellaneous change: added std.c.readv.
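As a concrete sketch of the configuration described above (the `main` body and the `warn` call are illustrative additions, not part of this commit; the two public declarations are the ones named in the message):

```zig
const std = @import("std");

// Select evented I/O for the whole application; std.io.mode becomes .evented.
pub const io_mode = .evented;

// Optionally override the frame size std.io.InStream reserves for async read
// calls (the default is 4 MiB); 1234 is the example value from the message.
pub const stack_size_std_io_InStream = 1234;

pub fn main() !void {
    // std.io.is_async is comptime-known and true here because io_mode is .evented.
    std.debug.warn("evented I/O: {}\n", std.io.is_async);
}
```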
Commit e657b73f30
@@ -449,6 +449,7 @@ set(ZIG_SOURCES
    "${CMAKE_SOURCE_DIR}/src/os.cpp"
    "${CMAKE_SOURCE_DIR}/src/parser.cpp"
    "${CMAKE_SOURCE_DIR}/src/range_set.cpp"
    "${CMAKE_SOURCE_DIR}/src/stack_report.cpp"
    "${CMAKE_SOURCE_DIR}/src/target.cpp"
    "${CMAKE_SOURCE_DIR}/src/tokenizer.cpp"
    "${CMAKE_SOURCE_DIR}/src/translate_c.cpp"
@@ -1972,6 +1972,8 @@ struct CodeGen {
    ZigFn *panic_fn;
    TldFn *panic_tld_fn;

    ZigFn *largest_frame_fn;

    WantPIC want_pic;
    WantStackCheck want_stack_check;
    CacheHash cache_hash;
@@ -2004,6 +2006,7 @@ struct CodeGen {
    bool generate_error_name_table;
    bool enable_cache; // mutually exclusive with output_dir
    bool enable_time_report;
    bool enable_stack_report;
    bool system_linker_hack;
    bool reported_bad_link_libc_error;
    bool is_dynamic; // shared library rather than static library. dynamic musl rather than static musl.
@@ -5737,11 +5737,19 @@ static void mark_suspension_point(Scope *scope) {
            return;
        case ScopeIdVarDecl:
        case ScopeIdDefer:
        case ScopeIdBlock:
            looking_for_exprs = false;
            continue;
        case ScopeIdLoop:
        case ScopeIdRuntime:
            continue;
        case ScopeIdLoop: {
            ScopeLoop *loop_scope = reinterpret_cast<ScopeLoop *>(scope);
            if (loop_scope->spill_scope != nullptr) {
                loop_scope->spill_scope->need_spill = MemoizedBoolTrue;
            }
            looking_for_exprs = false;
            continue;
        }
        case ScopeIdExpr: {
            if (!looking_for_exprs) {
                // Now we're only looking for a block, to see if it's in a loop (see the case ScopeIdBlock)
@@ -5758,14 +5766,6 @@ static void mark_suspension_point(Scope *scope) {
            child_expr_scope = parent_expr_scope;
            continue;
        }
        case ScopeIdBlock:
            if (scope->parent->parent->id == ScopeIdLoop) {
                ScopeLoop *loop_scope = reinterpret_cast<ScopeLoop *>(scope->parent->parent);
                if (loop_scope->spill_scope != nullptr) {
                    loop_scope->spill_scope->need_spill = MemoizedBoolTrue;
                }
            }
            return;
        }
    }
}
@@ -6082,6 +6082,11 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
    frame_type->abi_size = frame_type->data.frame.locals_struct->abi_size;
    frame_type->abi_align = frame_type->data.frame.locals_struct->abi_align;
    frame_type->size_in_bits = frame_type->data.frame.locals_struct->size_in_bits;

    if (g->largest_frame_fn == nullptr || frame_type->abi_size > g->largest_frame_fn->frame_type->abi_size) {
        g->largest_frame_fn = fn;
    }

    return ErrorNone;
}
src/main.cpp (12 lines changed)
@@ -16,6 +16,7 @@
#include "libc_installation.hpp"
#include "userland.h"
#include "glibc.hpp"
#include "stack_report.hpp"

#include <stdio.h>

@@ -62,6 +63,7 @@ static int print_full_usage(const char *arg0, FILE *file, int return_code) {
        " -fPIC enable Position Independent Code\n"
        " -fno-PIC disable Position Independent Code\n"
        " -ftime-report print timing diagnostics\n"
        " -fstack-report print stack size diagnostics\n"
        " --libc [file] Provide a file which specifies libc paths\n"
        " --name [name] override output name\n"
        " --output-dir [dir] override output directory (defaults to cwd)\n"
@@ -476,6 +478,7 @@ int main(int argc, char **argv) {
    size_t ver_minor = 0;
    size_t ver_patch = 0;
    bool timing_info = false;
    bool stack_report = false;
    const char *cache_dir = nullptr;
    CliPkg *cur_pkg = allocate<CliPkg>(1);
    BuildMode build_mode = BuildModeDebug;
@@ -664,6 +667,8 @@ int main(int argc, char **argv) {
            each_lib_rpath = true;
        } else if (strcmp(arg, "-ftime-report") == 0) {
            timing_info = true;
        } else if (strcmp(arg, "-fstack-report") == 0) {
            stack_report = true;
        } else if (strcmp(arg, "--enable-valgrind") == 0) {
            valgrind_support = ValgrindSupportEnabled;
        } else if (strcmp(arg, "--disable-valgrind") == 0) {
@@ -1136,6 +1141,7 @@ int main(int argc, char **argv) {
    g->subsystem = subsystem;

    g->enable_time_report = timing_info;
    g->enable_stack_report = stack_report;
    codegen_set_out_name(g, buf_out_name);
    codegen_set_lib_version(g, ver_major, ver_minor, ver_patch);
    g->want_single_threaded = want_single_threaded;
@@ -1223,6 +1229,8 @@ int main(int argc, char **argv) {
        codegen_build_and_link(g);
        if (timing_info)
            codegen_print_timing_report(g, stdout);
        if (stack_report)
            zig_print_stack_report(g, stdout);

        if (cmd == CmdRun) {
            const char *exec_path = buf_ptr(&g->output_file_path);
@@ -1272,6 +1280,10 @@ int main(int argc, char **argv) {
            codegen_print_timing_report(g, stdout);
        }

        if (stack_report) {
            zig_print_stack_report(g, stdout);
        }

        Buf *test_exe_path_unresolved = &g->output_file_path;
        Buf *test_exe_path = buf_alloc();
        *test_exe_path = os_path_resolve(&test_exe_path_unresolved, 1);
src/stack_report.cpp (new file, 121 lines)
@@ -0,0 +1,121 @@
/*
 * Copyright (c) 2019 Andrew Kelley
 *
 * This file is part of zig, which is MIT licensed.
 * See http://opensource.org/licenses/MIT
 */

#include "stack_report.hpp"

static void tree_print(FILE *f, ZigType *ty, size_t indent);

static void pretty_print_bytes(FILE *f, double n) {
    if (n > 1024.0 * 1024.0 * 1024.0) {
        fprintf(f, "%.02f GiB", n / 1024.0 / 1024.0 / 1024.0);
        return;
    }
    if (n > 1024.0 * 1024.0) {
        fprintf(f, "%.02f MiB", n / 1024.0 / 1024.0);
        return;
    }
    if (n > 1024.0) {
        fprintf(f, "%.02f KiB", n / 1024.0);
        return;
    }
    fprintf(f, "%.02f bytes", n);
    return;
}

static int compare_type_abi_sizes_desc(const void *a, const void *b) {
    uint64_t size_a = (*(ZigType * const*)(a))->abi_size;
    uint64_t size_b = (*(ZigType * const*)(b))->abi_size;
    if (size_a > size_b)
        return -1;
    if (size_a < size_b)
        return 1;
    return 0;
}

static void start_child(FILE *f, size_t indent) {
    fprintf(f, "\n");
    for (size_t i = 0; i < indent; i += 1) {
        fprintf(f, " ");
    }
}

static void start_peer(FILE *f, size_t indent) {
    fprintf(f, ",\n");
    for (size_t i = 0; i < indent; i += 1) {
        fprintf(f, " ");
    }
}

static void tree_print_struct(FILE *f, ZigType *struct_type, size_t indent) {
    ZigList<ZigType *> children = {};
    uint64_t sum_from_fields = 0;
    for (size_t i = 0; i < struct_type->data.structure.src_field_count; i += 1) {
        TypeStructField *field = &struct_type->data.structure.fields[i];
        children.append(field->type_entry);
        sum_from_fields += field->type_entry->abi_size;
    }
    qsort(children.items, children.length, sizeof(ZigType *), compare_type_abi_sizes_desc);

    start_peer(f, indent);
    fprintf(f, "\"padding\": \"%" ZIG_PRI_u64 "\"", struct_type->abi_size - sum_from_fields);

    start_peer(f, indent);
    fprintf(f, "\"fields\": [");

    for (size_t i = 0; i < children.length; i += 1) {
        if (i == 0) {
            start_child(f, indent + 1);
        } else {
            start_peer(f, indent + 1);
        }
        fprintf(f, "{");

        ZigType *child_type = children.at(i);
        tree_print(f, child_type, indent + 2);

        start_child(f, indent + 1);
        fprintf(f, "}");
    }

    start_child(f, indent);
    fprintf(f, "]");
}

static void tree_print(FILE *f, ZigType *ty, size_t indent) {
    start_child(f, indent);
    fprintf(f, "\"type\": \"%s\"", buf_ptr(&ty->name));

    start_peer(f, indent);
    fprintf(f, "\"sizef\": \"");
    pretty_print_bytes(f, ty->abi_size);
    fprintf(f, "\"");

    start_peer(f, indent);
    fprintf(f, "\"size\": \"%" ZIG_PRI_u64 "\"", ty->abi_size);

    switch (ty->id) {
        case ZigTypeIdFnFrame:
            return tree_print_struct(f, ty->data.frame.locals_struct, indent);
        case ZigTypeIdStruct:
            return tree_print_struct(f, ty, indent);
        default:
            start_child(f, indent);
            return;
    }
}

void zig_print_stack_report(CodeGen *g, FILE *f) {
    if (g->largest_frame_fn == nullptr) {
        fprintf(f, "{\"error\": \"No async function frames in entire compilation.\"}\n");
        return;
    }
    fprintf(f, "{");
    tree_print(f, g->largest_frame_fn->frame_type, 1);

    start_child(f, 0);
    fprintf(f, "}\n");
}
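For reference, the JSON emitted by `zig_print_stack_report` and `tree_print` above has roughly the following shape. This sample is illustrative only (the type name and the sizes are invented), not actual compiler output:

```
{
 "type": "@Frame(main)",
 "sizef": "4.00 MiB",
 "size": "4194432",
 "padding": "128",
 "fields": [
  {
   "type": "[4194304]u8",
   "sizef": "4.00 MiB",
   "size": "4194304"
  }
 ]
}
```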
src/stack_report.hpp (new file, 16 lines)
@@ -0,0 +1,16 @@
/*
 * Copyright (c) 2019 Andrew Kelley
 *
 * This file is part of zig, which is MIT licensed.
 * See http://opensource.org/licenses/MIT
 */

#ifndef ZIG_STACK_REPORT_HPP
#define ZIG_STACK_REPORT_HPP

#include "all_types.hpp"
#include <stdio.h>

void zig_print_stack_report(CodeGen *g, FILE *f);

#endif
@@ -68,6 +68,7 @@ pub extern "c" fn open(path: [*]const u8, oflag: c_uint, ...) c_int;
pub extern "c" fn openat(fd: c_int, path: [*]const u8, oflag: c_uint, ...) c_int;
pub extern "c" fn raise(sig: c_int) c_int;
pub extern "c" fn read(fd: fd_t, buf: [*]u8, nbyte: usize) isize;
pub extern "c" fn readv(fd: c_int, iov: [*]const iovec, iovcnt: c_uint) isize;
pub extern "c" fn pread(fd: fd_t, buf: [*]u8, nbyte: usize, offset: u64) isize;
pub extern "c" fn preadv(fd: c_int, iov: [*]const iovec, iovcnt: c_uint, offset: usize) isize;
pub extern "c" fn writev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint) isize;
@@ -330,14 +330,16 @@ pub fn writeCurrentStackTraceWindows(
    }
}

/// TODO once https://github.com/ziglang/zig/issues/3157 is fully implemented,
/// make this `noasync fn` and remove the individual noasync calls.
pub fn printSourceAtAddress(debug_info: *DebugInfo, out_stream: var, address: usize, tty_color: bool) !void {
    if (windows.is_the_target) {
        return printSourceAtAddressWindows(debug_info, out_stream, address, tty_color);
        return noasync printSourceAtAddressWindows(debug_info, out_stream, address, tty_color);
    }
    if (os.darwin.is_the_target) {
        return printSourceAtAddressMacOs(debug_info, out_stream, address, tty_color);
        return noasync printSourceAtAddressMacOs(debug_info, out_stream, address, tty_color);
    }
    return printSourceAtAddressPosix(debug_info, out_stream, address, tty_color);
    return noasync printSourceAtAddressPosix(debug_info, out_stream, address, tty_color);
}

fn printSourceAtAddressWindows(di: *DebugInfo, out_stream: var, relocated_address: usize, tty_color: bool) !void {
@@ -793,7 +795,7 @@ fn printLineInfo(
            try out_stream.write(GREEN ++ "^" ++ RESET ++ "\n");
        }
    } else |err| switch (err) {
        error.EndOfFile, error.FileNotFound => {},
        error.EndOfFile, error.FileNotFound => {},
        else => return err,
    }
} else {
@@ -816,16 +818,18 @@ pub const OpenSelfDebugInfoError = error{
    UnsupportedOperatingSystem,
};

/// TODO once https://github.com/ziglang/zig/issues/3157 is fully implemented,
/// make this `noasync fn` and remove the individual noasync calls.
pub fn openSelfDebugInfo(allocator: *mem.Allocator) !DebugInfo {
    if (builtin.strip_debug_info)
        return error.MissingDebugInfo;
    if (windows.is_the_target) {
        return openSelfDebugInfoWindows(allocator);
        return noasync openSelfDebugInfoWindows(allocator);
    }
    if (os.darwin.is_the_target) {
        return openSelfDebugInfoMacOs(allocator);
        return noasync openSelfDebugInfoMacOs(allocator);
    }
    return openSelfDebugInfoPosix(allocator);
    return noasync openSelfDebugInfoPosix(allocator);
}

fn openSelfDebugInfoWindows(allocator: *mem.Allocator) !DebugInfo {
@@ -1508,15 +1512,25 @@ fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: var, size: usize) !
}

fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: bool, comptime size: i32) !FormValue {
    // TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here.
    // `noasync` should be removed from all the function calls once it is fixed.
    return FormValue{
        .Const = Constant{
            .signed = signed,
            .payload = switch (size) {
                1 => try in_stream.readIntLittle(u8),
                2 => try in_stream.readIntLittle(u16),
                4 => try in_stream.readIntLittle(u32),
                8 => try in_stream.readIntLittle(u64),
                -1 => if (signed) @bitCast(u64, try leb.readILEB128(i64, in_stream)) else try leb.readULEB128(u64, in_stream),
                1 => try noasync in_stream.readIntLittle(u8),
                2 => try noasync in_stream.readIntLittle(u16),
                4 => try noasync in_stream.readIntLittle(u32),
                8 => try noasync in_stream.readIntLittle(u64),
                -1 => blk: {
                    if (signed) {
                        const x = try noasync leb.readILEB128(i64, in_stream);
                        break :blk @bitCast(u64, x);
                    } else {
                        const x = try noasync leb.readULEB128(u64, in_stream);
                        break :blk x;
                    }
                },
                else => @compileError("Invalid size"),
            },
        },
@@ -1584,7 +1598,10 @@ fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64
        DW.FORM_strp => FormValue{ .StrPtr = try parseFormValueDwarfOffsetSize(in_stream, is_64) },
        DW.FORM_indirect => {
            const child_form_id = try leb.readULEB128(u64, in_stream);
            return parseFormValue(allocator, in_stream, child_form_id, is_64);
            const F = @typeOf(async parseFormValue(allocator, in_stream, child_form_id, is_64));
            var frame = try allocator.create(F);
            defer allocator.destroy(frame);
            return await @asyncCall(frame, {}, parseFormValue, allocator, in_stream, child_form_id, is_64);
        },
        else => error.InvalidDebugInfo,
    };
@@ -6,7 +6,6 @@ pub const Locked = @import("event/locked.zig").Locked;
pub const RwLock = @import("event/rwlock.zig").RwLock;
pub const RwLocked = @import("event/rwlocked.zig").RwLocked;
pub const Loop = @import("event/loop.zig").Loop;
pub const io = @import("event/io.zig");
pub const fs = @import("event/fs.zig");
pub const net = @import("event/net.zig");

@@ -15,7 +14,6 @@ test "import event tests" {
    _ = @import("event/fs.zig");
    _ = @import("event/future.zig");
    _ = @import("event/group.zig");
    _ = @import("event/io.zig");
    _ = @import("event/lock.zig");
    _ = @import("event/locked.zig");
    _ = @import("event/rwlock.zig");
@@ -1,76 +0,0 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;

pub fn InStream(comptime ReadError: type) type {
    return struct {
        const Self = @This();
        pub const Error = ReadError;

        /// Return the number of bytes read. It may be less than buffer.len.
        /// If the number of bytes read is 0, it means end of stream.
        /// End of stream is not an error condition.
        readFn: async fn (self: *Self, buffer: []u8) Error!usize,

        /// Return the number of bytes read. It may be less than buffer.len.
        /// If the number of bytes read is 0, it means end of stream.
        /// End of stream is not an error condition.
        pub async fn read(self: *Self, buffer: []u8) !usize {
            return self.readFn(self, buffer);
        }

        /// Return the number of bytes read. If it is less than buffer.len
        /// it means end of stream.
        pub async fn readFull(self: *Self, buffer: []u8) !usize {
            var index: usize = 0;
            while (index != buf.len) {
                const amt_read = try self.read(buf[index..]);
                if (amt_read == 0) return index;
                index += amt_read;
            }
            return index;
        }

        /// Same as `readFull` but end of stream returns `error.EndOfStream`.
        pub async fn readNoEof(self: *Self, buf: []u8) !void {
            const amt_read = try self.readFull(buf[index..]);
            if (amt_read < buf.len) return error.EndOfStream;
        }

        pub async fn readIntLittle(self: *Self, comptime T: type) !T {
            var bytes: [@sizeOf(T)]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readIntLittle(T, &bytes);
        }

        pub async fn readIntBe(self: *Self, comptime T: type) !T {
            var bytes: [@sizeOf(T)]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readIntBig(T, &bytes);
        }

        pub async fn readInt(self: *Self, comptime T: type, endian: builtin.Endian) !T {
            var bytes: [@sizeOf(T)]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readInt(T, &bytes, endian);
        }

        pub async fn readStruct(self: *Self, comptime T: type) !T {
            // Only extern and packed structs have defined in-memory layout.
            comptime assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
            var res: [1]T = undefined;
            try self.readNoEof(@sliceToBytes(res[0..]));
            return res[0];
        }
    };
}

pub fn OutStream(comptime WriteError: type) type {
    return struct {
        const Self = @This();
        pub const Error = WriteError;

        writeFn: async fn (self: *Self, buffer: []u8) Error!void,
    };
}
@@ -86,18 +86,10 @@ pub const Loop = struct {
        };
    };

    pub const IoMode = enum {
        blocking,
        evented,
        mixed,
    };
    pub const io_mode: IoMode = if (@hasDecl(root, "io_mode")) root.io_mode else IoMode.blocking;
    var global_instance_state: Loop = undefined;
    threadlocal var per_thread_instance: ?*Loop = null;
    const default_instance: ?*Loop = switch (io_mode) {
    const default_instance: ?*Loop = switch (std.io.mode) {
        .blocking => null,
        .evented => &global_instance_state,
        .mixed => per_thread_instance,
    };
    pub const instance: ?*Loop = if (@hasDecl(root, "event_loop")) root.event_loop else default_instance;

@@ -470,6 +462,10 @@ pub const Loop = struct {
        }
    }

    pub fn waitUntilFdReadable(self: *Loop, fd: os.fd_t) !void {
        return self.linuxWaitFd(fd, os.EPOLLET | os.EPOLLIN);
    }

    pub async fn bsdWaitKev(self: *Loop, ident: usize, filter: i16, fflags: u32) !os.Kevent {
        var resume_node = ResumeNode.Basic{
            .base = ResumeNode{
std/io.zig (188 lines changed)
@@ -1,5 +1,6 @@
const std = @import("std.zig");
const builtin = @import("builtin");
const root = @import("root");
const c = std.c;

const math = std.math;
@@ -15,6 +16,18 @@ const fmt = std.fmt;
const File = std.fs.File;
const testing = std.testing;

pub const Mode = enum {
    blocking,
    evented,
};
pub const mode: Mode = if (@hasDecl(root, "io_mode"))
    root.io_mode
else if (@hasDecl(root, "event_loop"))
    Mode.evented
else
    Mode.blocking;
pub const is_async = mode != .blocking;

pub const GetStdIoError = os.windows.GetStdHandleError;

pub fn getStdOut() GetStdIoError!File {
@@ -44,180 +57,7 @@ pub fn getStdIn() GetStdIoError!File {
pub const SeekableStream = @import("io/seekable_stream.zig").SeekableStream;
pub const SliceSeekableInStream = @import("io/seekable_stream.zig").SliceSeekableInStream;
pub const COutStream = @import("io/c_out_stream.zig").COutStream;

pub fn InStream(comptime ReadError: type) type {
    return struct {
        const Self = @This();
        pub const Error = ReadError;

        /// Return the number of bytes read. If the number read is smaller than buf.len, it
        /// means the stream reached the end. Reaching the end of a stream is not an error
        /// condition.
        readFn: fn (self: *Self, buffer: []u8) Error!usize,

        /// Replaces `buffer` contents by reading from the stream until it is finished.
        /// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and
        /// the contents read from the stream are lost.
        pub fn readAllBuffer(self: *Self, buffer: *Buffer, max_size: usize) !void {
            try buffer.resize(0);

            var actual_buf_len: usize = 0;
            while (true) {
                const dest_slice = buffer.toSlice()[actual_buf_len..];
                const bytes_read = try self.readFull(dest_slice);
                actual_buf_len += bytes_read;

                if (bytes_read != dest_slice.len) {
                    buffer.shrink(actual_buf_len);
                    return;
                }

                const new_buf_size = math.min(max_size, actual_buf_len + mem.page_size);
                if (new_buf_size == actual_buf_len) return error.StreamTooLong;
                try buffer.resize(new_buf_size);
            }
        }

        /// Allocates enough memory to hold all the contents of the stream. If the allocated
        /// memory would be greater than `max_size`, returns `error.StreamTooLong`.
        /// Caller owns returned memory.
        /// If this function returns an error, the contents from the stream read so far are lost.
        pub fn readAllAlloc(self: *Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
            var buf = Buffer.initNull(allocator);
            defer buf.deinit();

            try self.readAllBuffer(&buf, max_size);
            return buf.toOwnedSlice();
        }

        /// Replaces `buffer` contents by reading from the stream until `delimiter` is found.
        /// Does not include the delimiter in the result.
        /// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and the contents
        /// read from the stream so far are lost.
        pub fn readUntilDelimiterBuffer(self: *Self, buffer: *Buffer, delimiter: u8, max_size: usize) !void {
            try buffer.resize(0);

            while (true) {
                var byte: u8 = try self.readByte();

                if (byte == delimiter) {
                    return;
                }

                if (buffer.len() == max_size) {
                    return error.StreamTooLong;
                }

                try buffer.appendByte(byte);
            }
        }

        /// Allocates enough memory to read until `delimiter`. If the allocated
        /// memory would be greater than `max_size`, returns `error.StreamTooLong`.
        /// Caller owns returned memory.
        /// If this function returns an error, the contents from the stream read so far are lost.
        pub fn readUntilDelimiterAlloc(self: *Self, allocator: *mem.Allocator, delimiter: u8, max_size: usize) ![]u8 {
            var buf = Buffer.initNull(allocator);
            defer buf.deinit();

            try self.readUntilDelimiterBuffer(&buf, delimiter, max_size);
            return buf.toOwnedSlice();
        }

        /// Returns the number of bytes read. It may be less than buffer.len.
        /// If the number of bytes read is 0, it means end of stream.
        /// End of stream is not an error condition.
        pub fn read(self: *Self, buffer: []u8) Error!usize {
            return self.readFn(self, buffer);
        }

        /// Returns the number of bytes read. If the number read is smaller than buf.len, it
        /// means the stream reached the end. Reaching the end of a stream is not an error
        /// condition.
        pub fn readFull(self: *Self, buffer: []u8) Error!usize {
            var index: usize = 0;
            while (index != buffer.len) {
                const amt = try self.read(buffer[index..]);
                if (amt == 0) return index;
                index += amt;
            }
            return index;
        }

        /// Same as `readFull` but end of stream returns `error.EndOfStream`.
        pub fn readNoEof(self: *Self, buf: []u8) !void {
            const amt_read = try self.readFull(buf);
            if (amt_read < buf.len) return error.EndOfStream;
        }

        /// Reads 1 byte from the stream or returns `error.EndOfStream`.
        pub fn readByte(self: *Self) !u8 {
            var result: [1]u8 = undefined;
            try self.readNoEof(result[0..]);
            return result[0];
        }

        /// Same as `readByte` except the returned byte is signed.
        pub fn readByteSigned(self: *Self) !i8 {
            return @bitCast(i8, try self.readByte());
        }

        /// Reads a native-endian integer
        pub fn readIntNative(self: *Self, comptime T: type) !T {
            var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readIntNative(T, &bytes);
        }

        /// Reads a foreign-endian integer
        pub fn readIntForeign(self: *Self, comptime T: type) !T {
            var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readIntForeign(T, &bytes);
        }

        pub fn readIntLittle(self: *Self, comptime T: type) !T {
            var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readIntLittle(T, &bytes);
        }

        pub fn readIntBig(self: *Self, comptime T: type) !T {
            var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readIntBig(T, &bytes);
        }

        pub fn readInt(self: *Self, comptime T: type, endian: builtin.Endian) !T {
            var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readInt(T, &bytes, endian);
        }

        pub fn readVarInt(self: *Self, comptime ReturnType: type, endian: builtin.Endian, size: usize) !ReturnType {
            assert(size <= @sizeOf(ReturnType));
            var bytes_buf: [@sizeOf(ReturnType)]u8 = undefined;
            const bytes = bytes_buf[0..size];
            try self.readNoEof(bytes);
            return mem.readVarInt(ReturnType, bytes, endian);
        }

        pub fn skipBytes(self: *Self, num_bytes: u64) !void {
            var i: u64 = 0;
            while (i < num_bytes) : (i += 1) {
                _ = try self.readByte();
            }
        }

        pub fn readStruct(self: *Self, comptime T: type) !T {
            // Only extern and packed structs have defined in-memory layout.
            comptime assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
            var res: [1]T = undefined;
            try self.readNoEof(@sliceToBytes(res[0..]));
            return res[0];
        }
    };
}
pub const InStream = @import("io/in_stream.zig").InStream;

pub fn OutStream(comptime WriteError: type) type {
    return struct {
std/io/in_stream.zig (new file, 200 lines)
@@ -0,0 +1,200 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const root = @import("root");
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
const Buffer = std.Buffer;

pub const default_stack_size = 4 * 1024 * 1024;
pub const stack_size: usize = if (@hasDecl(root, "stack_size_std_io_InStream"))
    root.stack_size_std_io_InStream
else
    default_stack_size;
pub const stack_align = 16;

pub fn InStream(comptime ReadError: type) type {
    return struct {
        const Self = @This();
        pub const Error = ReadError;
        pub const ReadFn = if (std.io.is_async)
            async fn (self: *Self, buffer: []u8) Error!usize
        else
            fn (self: *Self, buffer: []u8) Error!usize;

        /// Returns the number of bytes read. It may be less than buffer.len.
        /// If the number of bytes read is 0, it means end of stream.
        /// End of stream is not an error condition.
        readFn: ReadFn,

        /// Returns the number of bytes read. It may be less than buffer.len.
        /// If the number of bytes read is 0, it means end of stream.
        /// End of stream is not an error condition.
        pub fn read(self: *Self, buffer: []u8) Error!usize {
            if (std.io.is_async) {
                var stack_frame: [stack_size]u8 align(stack_align) = undefined;
                // TODO https://github.com/ziglang/zig/issues/3068
                var result: Error!usize = undefined;
                return await @asyncCall(&stack_frame, &result, self.readFn, self, buffer);
            } else {
                return self.readFn(self, buffer);
            }
        }

        /// Returns the number of bytes read. If the number read is smaller than buf.len, it
        /// means the stream reached the end. Reaching the end of a stream is not an error
        /// condition.
        pub fn readFull(self: *Self, buffer: []u8) Error!usize {
            var index: usize = 0;
            while (index != buffer.len) {
                const amt = try self.read(buffer[index..]);
                if (amt == 0) return index;
                index += amt;
            }
            return index;
        }

        /// Returns the number of bytes read. If the number read would be smaller than buf.len,
        /// error.EndOfStream is returned instead.
        pub fn readNoEof(self: *Self, buf: []u8) !void {
            const amt_read = try self.readFull(buf);
            if (amt_read < buf.len) return error.EndOfStream;
        }

        /// Replaces `buffer` contents by reading from the stream until it is finished.
        /// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and
        /// the contents read from the stream are lost.
        pub fn readAllBuffer(self: *Self, buffer: *Buffer, max_size: usize) !void {
            try buffer.resize(0);

            var actual_buf_len: usize = 0;
            while (true) {
                const dest_slice = buffer.toSlice()[actual_buf_len..];
                const bytes_read = try self.readFull(dest_slice);
                actual_buf_len += bytes_read;

                if (bytes_read != dest_slice.len) {
                    buffer.shrink(actual_buf_len);
                    return;
                }

                const new_buf_size = math.min(max_size, actual_buf_len + mem.page_size);
                if (new_buf_size == actual_buf_len) return error.StreamTooLong;
                try buffer.resize(new_buf_size);
            }
        }

        /// Allocates enough memory to hold all the contents of the stream. If the allocated
        /// memory would be greater than `max_size`, returns `error.StreamTooLong`.
        /// Caller owns returned memory.
        /// If this function returns an error, the contents from the stream read so far are lost.
        pub fn readAllAlloc(self: *Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
            var buf = Buffer.initNull(allocator);
            defer buf.deinit();

            try self.readAllBuffer(&buf, max_size);
            return buf.toOwnedSlice();
        }

        /// Replaces `buffer` contents by reading from the stream until `delimiter` is found.
        /// Does not include the delimiter in the result.
        /// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and the contents
        /// read from the stream so far are lost.
        pub fn readUntilDelimiterBuffer(self: *Self, buffer: *Buffer, delimiter: u8, max_size: usize) !void {
            try buffer.resize(0);

            while (true) {
                var byte: u8 = try self.readByte();

                if (byte == delimiter) {
                    return;
                }

                if (buffer.len() == max_size) {
                    return error.StreamTooLong;
                }

                try buffer.appendByte(byte);
            }
        }

        /// Allocates enough memory to read until `delimiter`. If the allocated
        /// memory would be greater than `max_size`, returns `error.StreamTooLong`.
        /// Caller owns returned memory.
        /// If this function returns an error, the contents from the stream read so far are lost.
        pub fn readUntilDelimiterAlloc(self: *Self, allocator: *mem.Allocator, delimiter: u8, max_size: usize) ![]u8 {
            var buf = Buffer.initNull(allocator);
            defer buf.deinit();

            try self.readUntilDelimiterBuffer(&buf, delimiter, max_size);
            return buf.toOwnedSlice();
        }

        /// Reads 1 byte from the stream or returns `error.EndOfStream`.
        pub fn readByte(self: *Self) !u8 {
            var result: [1]u8 = undefined;
            try self.readNoEof(result[0..]);
            return result[0];
        }

        /// Same as `readByte` except the returned byte is signed.
        pub fn readByteSigned(self: *Self) !i8 {
            return @bitCast(i8, try self.readByte());
        }

        /// Reads a native-endian integer
        pub fn readIntNative(self: *Self, comptime T: type) !T {
            var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readIntNative(T, &bytes);
        }

        /// Reads a foreign-endian integer
        pub fn readIntForeign(self: *Self, comptime T: type) !T {
            var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readIntForeign(T, &bytes);
        }

        pub fn readIntLittle(self: *Self, comptime T: type) !T {
            var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readIntLittle(T, &bytes);
        }

        pub fn readIntBig(self: *Self, comptime T: type) !T {
            var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readIntBig(T, &bytes);
        }

        pub fn readInt(self: *Self, comptime T: type, endian: builtin.Endian) !T {
            var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
            try self.readNoEof(bytes[0..]);
            return mem.readInt(T, &bytes, endian);
        }

        pub fn readVarInt(self: *Self, comptime ReturnType: type, endian: builtin.Endian, size: usize) !ReturnType {
            assert(size <= @sizeOf(ReturnType));
            var bytes_buf: [@sizeOf(ReturnType)]u8 = undefined;
            const bytes = bytes_buf[0..size];
            try self.readNoEof(bytes);
            return mem.readVarInt(ReturnType, bytes, endian);
        }

        pub fn skipBytes(self: *Self, num_bytes: u64) !void {
            var i: u64 = 0;
            while (i < num_bytes) : (i += 1) {
                _ = try self.readByte();
            }
        }

        pub fn readStruct(self: *Self, comptime T: type) !T {
            // Only extern and packed structs have defined in-memory layout.
            comptime assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
            var res: [1]T = undefined;
            try self.readNoEof(@sliceToBytes(res[0..]));
            return res[0];
        }
    };
}
std/os.zig (42 lines changed)
@@ -254,13 +254,18 @@ pub const ReadError = error{
    IsDir,
    OperationAborted,
    BrokenPipe,

    /// This error occurs when no global event loop is configured,
    /// and reading from the file descriptor would block.
    WouldBlock,

    Unexpected,
};

/// Returns the number of bytes that were read, which can be less than
/// buf.len. If 0 bytes were read, that means EOF.
/// This function is for blocking file descriptors only. For non-blocking, see
/// `readAsync`.
/// If the application has a global event loop enabled, EAGAIN is handled
/// via the event loop. Otherwise EAGAIN results in error.WouldBlock.
pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
    if (windows.is_the_target) {
        return windows.ReadFile(fd, buf);
@@ -279,28 +284,19 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
        }
    }

    // Linux can return EINVAL when read amount is > 0x7ffff000
    // See https://github.com/ziglang/zig/pull/743#issuecomment-363158274
    // TODO audit this. Shawn Landden says that this is not actually true.
    // if this logic should stay, move it to std.os.linux
    const max_buf_len = 0x7ffff000;

    var index: usize = 0;
    while (index < buf.len) {
        const want_to_read = math.min(buf.len - index, usize(max_buf_len));
        const rc = system.read(fd, buf.ptr + index, want_to_read);
    while (true) {
        const rc = system.read(fd, buf.ptr, buf.len);
        switch (errno(rc)) {
            0 => {
                const amt_read = @intCast(usize, rc);
                index += amt_read;
                if (amt_read == want_to_read) continue;
                // Read returned less than buf.len.
                return index;
            },
            0 => return @intCast(usize, rc),
            EINTR => continue,
            EINVAL => unreachable,
            EFAULT => unreachable,
            EAGAIN => unreachable, // This function is for blocking reads.
            EAGAIN => if (std.event.Loop.instance) |loop| {
                loop.waitUntilFdReadable(fd) catch return error.WouldBlock;
                continue;
            } else {
                return error.WouldBlock;
            },
            EBADF => unreachable, // Always a race condition.
            EIO => return error.InputOutput,
            EISDIR => return error.IsDir,
@@ -313,8 +309,7 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
}

/// Number of bytes read is returned. Upon reading end-of-file, zero is returned.
/// This function is for blocking file descriptors only. For non-blocking, see
/// `preadvAsync`.
/// This function is for blocking file descriptors only.
pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) ReadError!usize {
    if (darwin.is_the_target) {
        // Darwin does not have preadv but it does have pread.
@@ -386,8 +381,7 @@ pub const WriteError = error{
};

/// Write to a file descriptor. Keeps trying if it gets interrupted.
/// This function is for blocking file descriptors only. For non-blocking, see
/// `writeAsync`.
/// This function is for blocking file descriptors only.
pub fn write(fd: fd_t, bytes: []const u8) WriteError!void {
    if (windows.is_the_target) {
        return windows.WriteFile(fd, bytes);