Merge pull request #25592 from ziglang/init-std.Io

std: Introduce `Io` Interface
Commit a072d821be by Andrew Kelley on 2025-10-29 13:51:37 -07:00, committed by GitHub
143 changed files with 17683 additions and 9737 deletions
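
The pattern that recurs throughout this diff is: construct a concrete `Io` implementation (here `std.Io.Threaded`), obtain the `Io` interface from it, and pass that value explicitly to APIs that previously reached for process-global state. A minimal sketch, assuming the calls exactly as they appear in the hunks below (`std.Io.Threaded.init`, `threaded.io()`, and `std.fs.File.reader` taking an `io` argument):

```zig
const std = @import("std");

pub fn main() !void {
    var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
    defer _ = debug_allocator.deinit();
    const gpa = debug_allocator.allocator();

    // Concrete, thread-based Io implementation, as used by the build
    // runner, resinator, and `zig objcopy` in this commit.
    var threaded: std.Io.Threaded = .init(gpa);
    defer threaded.deinit();
    const io = threaded.io();

    // `io` is then passed explicitly to file readers, target resolution,
    // the cache, timestamps, and networking.
    var buffer: [4096]u8 = undefined;
    var stdin_reader = std.fs.File.stdin().reader(io, &buffer);
    _ = &stdin_reader;
}
```

The individual file diffs below mostly consist of threading this `io` value through constructors and function signatures.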

View File

@ -413,7 +413,6 @@ set(ZIG_STAGE2_SOURCES
lib/std/Thread/Futex.zig
lib/std/Thread/Mutex.zig
lib/std/Thread/Pool.zig
lib/std/Thread/ResetEvent.zig
lib/std/Thread/WaitGroup.zig
lib/std/array_hash_map.zig
lib/std/array_list.zig

View File

@ -76,23 +76,15 @@ This produces a `zig2` executable in the current working directory. This is a
[without LLVM extensions](https://github.com/ziglang/zig/issues/16270), and is
therefore lacking these features:
- Release mode optimizations
- [aarch64 machine code backend](https://github.com/ziglang/zig/issues/21172)
- [@cImport](https://github.com/ziglang/zig/issues/20630)
- [zig translate-c](https://github.com/ziglang/zig/issues/20875)
- [Ability to compile assembly files](https://github.com/ziglang/zig/issues/21169)
- [Some ELF linking features](https://github.com/ziglang/zig/issues/17749)
- [Most COFF/PE linking features](https://github.com/ziglang/zig/issues/17751)
- [Some COFF/PE linking features](https://github.com/ziglang/zig/issues/17751)
- [Some WebAssembly linking features](https://github.com/ziglang/zig/issues/17750)
- [Ability to create import libs from def files](https://github.com/ziglang/zig/issues/17807)
- [Ability to create static archives from object files](https://github.com/ziglang/zig/issues/9828)
- [Ability to compile assembly files](https://github.com/ziglang/zig/issues/21169)
- Ability to compile C, C++, Objective-C, and Objective-C++ files
However, a compiler built this way does provide a C backend, which may be
useful for creating system packages of Zig projects using the system C
toolchain. **In this case, LLVM is not needed!**
Furthermore, a compiler built this way provides an LLVM backend that produces
bitcode files, which may be compiled into object files via a system Clang
Even when built this way, Zig provides an LLVM backend that produces bitcode
files, which may be optimized and compiled into object files via a system Clang
package. This can be used to produce system packages of Zig applications
without the Zig package dependency on LLVM.

View File

@ -95,7 +95,7 @@ Enter-VsDevShell -VsInstallPath "C:\Program Files (x86)\Microsoft Visual Studio\
CheckLastExitCode
Write-Output "Build and run behavior tests with msvc..."
& cl.exe -I..\lib test-x86_64-windows-msvc.c compiler_rt-x86_64-windows-msvc.c /W3 /Z7 -link -nologo -debug -subsystem:console kernel32.lib ntdll.lib libcmt.lib
& cl.exe -I..\lib test-x86_64-windows-msvc.c compiler_rt-x86_64-windows-msvc.c /W3 /Z7 -link -nologo -debug -subsystem:console kernel32.lib ntdll.lib libcmt.lib ws2_32.lib
CheckLastExitCode
& .\test-x86_64-windows-msvc.exe

View File

@ -113,7 +113,7 @@ Enter-VsDevShell -VsInstallPath "C:\Program Files (x86)\Microsoft Visual Studio\
CheckLastExitCode
Write-Output "Build and run behavior tests with msvc..."
& cl.exe -I..\lib test-x86_64-windows-msvc.c compiler_rt-x86_64-windows-msvc.c /W3 /Z7 -link -nologo -debug -subsystem:console kernel32.lib ntdll.lib libcmt.lib
& cl.exe -I..\lib test-x86_64-windows-msvc.c compiler_rt-x86_64-windows-msvc.c /W3 /Z7 -link -nologo -debug -subsystem:console kernel32.lib ntdll.lib libcmt.lib ws2_32.lib
CheckLastExitCode
& .\test-x86_64-windows-msvc.exe

View File

@ -1,4 +1,5 @@
const std = @import("std");
const Io = std.Io;
const assert = std.debug.assert;
const EpochSeconds = std.time.epoch.EpochSeconds;
const mem = std.mem;
@ -113,7 +114,7 @@ pub const Environment = struct {
if (parsed > max_timestamp) return error.InvalidEpoch;
return .{ .provided = parsed };
} else {
const timestamp = std.math.cast(u64, std.time.timestamp()) orelse return error.InvalidEpoch;
const timestamp = std.math.cast(u64, 0) orelse return error.InvalidEpoch;
return .{ .system = std.math.clamp(timestamp, 0, max_timestamp) };
}
}
@ -124,6 +125,7 @@ const Compilation = @This();
gpa: Allocator,
/// Allocations in this arena live all the way until `Compilation.deinit`.
arena: Allocator,
io: Io,
diagnostics: *Diagnostics,
code_gen_options: CodeGenOptions = .default,
@ -157,10 +159,11 @@ type_store: TypeStore = .{},
ms_cwd_source_id: ?Source.Id = null,
cwd: std.fs.Dir,
pub fn init(gpa: Allocator, arena: Allocator, diagnostics: *Diagnostics, cwd: std.fs.Dir) Compilation {
pub fn init(gpa: Allocator, arena: Allocator, io: Io, diagnostics: *Diagnostics, cwd: std.fs.Dir) Compilation {
return .{
.gpa = gpa,
.arena = arena,
.io = io,
.diagnostics = diagnostics,
.cwd = cwd,
};
@ -168,10 +171,11 @@ pub fn init(gpa: Allocator, arena: Allocator, diagnostics: *Diagnostics, cwd: st
/// Initialize Compilation with default environment,
/// pragma handlers and emulation mode set to target.
pub fn initDefault(gpa: Allocator, arena: Allocator, diagnostics: *Diagnostics, cwd: std.fs.Dir) !Compilation {
pub fn initDefault(gpa: Allocator, arena: Allocator, io: Io, diagnostics: *Diagnostics, cwd: std.fs.Dir) !Compilation {
var comp: Compilation = .{
.gpa = gpa,
.arena = arena,
.io = io,
.diagnostics = diagnostics,
.environment = try Environment.loadAll(gpa),
.cwd = cwd,
@ -222,14 +226,14 @@ pub const SystemDefinesMode = enum {
include_system_defines,
};
fn generateSystemDefines(comp: *Compilation, w: *std.Io.Writer) !void {
fn generateSystemDefines(comp: *Compilation, w: *Io.Writer) !void {
const define = struct {
fn define(_w: *std.Io.Writer, name: []const u8) !void {
fn define(_w: *Io.Writer, name: []const u8) !void {
try _w.print("#define {s} 1\n", .{name});
}
}.define;
const defineStd = struct {
fn defineStd(_w: *std.Io.Writer, name: []const u8, is_gnu: bool) !void {
fn defineStd(_w: *Io.Writer, name: []const u8, is_gnu: bool) !void {
if (is_gnu) {
try _w.print("#define {s} 1\n", .{name});
}
@ -956,7 +960,7 @@ fn generateSystemDefines(comp: *Compilation, w: *std.Io.Writer) !void {
pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefinesMode) AddSourceError!Source {
try comp.type_store.initNamedTypes(comp);
var allocating: std.Io.Writer.Allocating = try .initCapacity(comp.gpa, 2 << 13);
var allocating: Io.Writer.Allocating = try .initCapacity(comp.gpa, 2 << 13);
defer allocating.deinit();
comp.writeBuiltinMacros(system_defines_mode, &allocating.writer) catch |err| switch (err) {
@ -970,7 +974,7 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi
return comp.addSourceFromOwnedBuffer("<builtin>", contents, .user);
}
fn writeBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefinesMode, w: *std.Io.Writer) !void {
fn writeBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefinesMode, w: *Io.Writer) !void {
if (system_defines_mode == .include_system_defines) {
try w.writeAll(
\\#define __VERSION__ "Aro
@ -1018,7 +1022,7 @@ fn writeBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefinesMode
}
}
fn generateFloatMacros(w: *std.Io.Writer, prefix: []const u8, semantics: target_util.FPSemantics, ext: []const u8) !void {
fn generateFloatMacros(w: *Io.Writer, prefix: []const u8, semantics: target_util.FPSemantics, ext: []const u8) !void {
const denormMin = semantics.chooseValue(
[]const u8,
.{
@ -1093,7 +1097,7 @@ fn generateFloatMacros(w: *std.Io.Writer, prefix: []const u8, semantics: target_
try w.print("#define __{s}_MIN__ {s}{s}\n", .{ prefix, min, ext });
}
fn generateTypeMacro(comp: *const Compilation, w: *std.Io.Writer, name: []const u8, qt: QualType) !void {
fn generateTypeMacro(comp: *const Compilation, w: *Io.Writer, name: []const u8, qt: QualType) !void {
try w.print("#define {s} ", .{name});
try qt.print(comp, w);
try w.writeByte('\n');
@ -1128,7 +1132,7 @@ fn generateFastOrLeastType(
bits: usize,
kind: enum { least, fast },
signedness: std.builtin.Signedness,
w: *std.Io.Writer,
w: *Io.Writer,
) !void {
const ty = comp.intLeastN(bits, signedness); // defining the fast types as the least types is permitted
@ -1158,7 +1162,7 @@ fn generateFastOrLeastType(
try comp.generateFmt(prefix, w, ty);
}
fn generateFastAndLeastWidthTypes(comp: *Compilation, w: *std.Io.Writer) !void {
fn generateFastAndLeastWidthTypes(comp: *Compilation, w: *Io.Writer) !void {
const sizes = [_]usize{ 8, 16, 32, 64 };
for (sizes) |size| {
try comp.generateFastOrLeastType(size, .least, .signed, w);
@ -1168,7 +1172,7 @@ fn generateFastAndLeastWidthTypes(comp: *Compilation, w: *std.Io.Writer) !void {
}
}
fn generateExactWidthTypes(comp: *Compilation, w: *std.Io.Writer) !void {
fn generateExactWidthTypes(comp: *Compilation, w: *Io.Writer) !void {
try comp.generateExactWidthType(w, .schar);
if (QualType.short.sizeof(comp) > QualType.char.sizeof(comp)) {
@ -1216,7 +1220,7 @@ fn generateExactWidthTypes(comp: *Compilation, w: *std.Io.Writer) !void {
}
}
fn generateFmt(comp: *const Compilation, prefix: []const u8, w: *std.Io.Writer, qt: QualType) !void {
fn generateFmt(comp: *const Compilation, prefix: []const u8, w: *Io.Writer, qt: QualType) !void {
const unsigned = qt.signedness(comp) == .unsigned;
const modifier = qt.formatModifier(comp);
const formats = if (unsigned) "ouxX" else "di";
@ -1225,7 +1229,7 @@ fn generateFmt(comp: *const Compilation, prefix: []const u8, w: *std.Io.Writer,
}
}
fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: *std.Io.Writer, qt: QualType) !void {
fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: *Io.Writer, qt: QualType) !void {
return w.print("#define {s}_C_SUFFIX__ {s}\n", .{ prefix, qt.intValueSuffix(comp) });
}
@ -1233,7 +1237,7 @@ fn generateSuffixMacro(comp: *const Compilation, prefix: []const u8, w: *std.Io.
/// Name macro (e.g. #define __UINT32_TYPE__ unsigned int)
/// Format strings (e.g. #define __UINT32_FMTu__ "u")
/// Suffix macro (e.g. #define __UINT32_C_SUFFIX__ U)
fn generateExactWidthType(comp: *Compilation, w: *std.Io.Writer, original_qt: QualType) !void {
fn generateExactWidthType(comp: *Compilation, w: *Io.Writer, original_qt: QualType) !void {
var qt = original_qt;
const width = qt.sizeof(comp) * 8;
const unsigned = qt.signedness(comp) == .unsigned;
@ -1266,7 +1270,7 @@ pub fn hasHalfPrecisionFloatABI(comp: *const Compilation) bool {
return comp.langopts.allow_half_args_and_returns or target_util.hasHalfPrecisionFloatABI(comp.target);
}
fn generateIntMax(comp: *const Compilation, w: *std.Io.Writer, name: []const u8, qt: QualType) !void {
fn generateIntMax(comp: *const Compilation, w: *Io.Writer, name: []const u8, qt: QualType) !void {
const unsigned = qt.signedness(comp) == .unsigned;
const max: u128 = switch (qt.bitSizeof(comp)) {
8 => if (unsigned) std.math.maxInt(u8) else std.math.maxInt(i8),
@ -1290,7 +1294,7 @@ pub fn wcharMax(comp: *const Compilation) u32 {
};
}
fn generateExactWidthIntMax(comp: *Compilation, w: *std.Io.Writer, original_qt: QualType) !void {
fn generateExactWidthIntMax(comp: *Compilation, w: *Io.Writer, original_qt: QualType) !void {
var qt = original_qt;
const bit_count: u8 = @intCast(qt.sizeof(comp) * 8);
const unsigned = qt.signedness(comp) == .unsigned;
@ -1307,16 +1311,16 @@ fn generateExactWidthIntMax(comp: *Compilation, w: *std.Io.Writer, original_qt:
return comp.generateIntMax(w, name, qt);
}
fn generateIntWidth(comp: *Compilation, w: *std.Io.Writer, name: []const u8, qt: QualType) !void {
fn generateIntWidth(comp: *Compilation, w: *Io.Writer, name: []const u8, qt: QualType) !void {
try w.print("#define __{s}_WIDTH__ {d}\n", .{ name, qt.sizeof(comp) * 8 });
}
fn generateIntMaxAndWidth(comp: *Compilation, w: *std.Io.Writer, name: []const u8, qt: QualType) !void {
fn generateIntMaxAndWidth(comp: *Compilation, w: *Io.Writer, name: []const u8, qt: QualType) !void {
try comp.generateIntMax(w, name, qt);
try comp.generateIntWidth(w, name, qt);
}
fn generateSizeofType(comp: *Compilation, w: *std.Io.Writer, name: []const u8, qt: QualType) !void {
fn generateSizeofType(comp: *Compilation, w: *Io.Writer, name: []const u8, qt: QualType) !void {
try w.print("#define {s} {d}\n", .{ name, qt.sizeof(comp) });
}
@ -1797,7 +1801,7 @@ pub const IncludeType = enum {
angle_brackets,
};
fn getPathContents(comp: *Compilation, path: []const u8, limit: std.Io.Limit) ![]u8 {
fn getPathContents(comp: *Compilation, path: []const u8, limit: Io.Limit) ![]u8 {
if (mem.indexOfScalar(u8, path, 0) != null) {
return error.FileNotFound;
}
@ -1807,11 +1811,12 @@ fn getPathContents(comp: *Compilation, path: []const u8, limit: std.Io.Limit) ![
return comp.getFileContents(file, limit);
}
fn getFileContents(comp: *Compilation, file: std.fs.File, limit: std.Io.Limit) ![]u8 {
fn getFileContents(comp: *Compilation, file: std.fs.File, limit: Io.Limit) ![]u8 {
const io = comp.io;
var file_buf: [4096]u8 = undefined;
var file_reader = file.reader(&file_buf);
var file_reader = file.reader(io, &file_buf);
var allocating: std.Io.Writer.Allocating = .init(comp.gpa);
var allocating: Io.Writer.Allocating = .init(comp.gpa);
defer allocating.deinit();
if (file_reader.getSize()) |size| {
const limited_size = limit.minInt64(size);
@ -1838,7 +1843,7 @@ pub fn findEmbed(
includer_token_source: Source.Id,
/// angle bracket vs quotes
include_type: IncludeType,
limit: std.Io.Limit,
limit: Io.Limit,
opt_dep_file: ?*DepFile,
) !?[]u8 {
if (std.fs.path.isAbsolute(filename)) {
@ -2002,8 +2007,7 @@ pub fn locSlice(comp: *const Compilation, loc: Source.Location) []const u8 {
pub fn getSourceMTimeUncached(comp: *const Compilation, source_id: Source.Id) ?u64 {
const source = comp.getSource(source_id);
if (comp.cwd.statFile(source.path)) |stat| {
const mtime = @divTrunc(stat.mtime, std.time.ns_per_s);
return std.math.cast(u64, mtime);
return std.math.cast(u64, stat.mtime.toSeconds());
} else |_| {
return null;
}
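
For reference, the aro `Compilation` now stores an `io` field and `init` takes the `Io` interface between the arena and the diagnostics. A minimal sketch of the updated call, mirroring the call sites elsewhere in this commit (the `aro.Diagnostics` type name is assumed here):

```zig
const std = @import("std");
const Io = std.Io;
const aro = @import("aro");

fn createCompilation(
    gpa: std.mem.Allocator,
    arena: std.mem.Allocator,
    io: Io,
    diagnostics: *aro.Diagnostics,
) aro.Compilation {
    // `init` gained an `io` parameter; the Compilation keeps it for
    // later file reads (see getFileContents above).
    return aro.Compilation.init(gpa, arena, io, diagnostics, std.fs.cwd());
}
```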

View File

@ -273,6 +273,7 @@ pub fn parseArgs(
macro_buf: *std.ArrayList(u8),
args: []const []const u8,
) (Compilation.Error || std.Io.Writer.Error)!bool {
const io = d.comp.io;
var i: usize = 1;
var comment_arg: []const u8 = "";
var hosted: ?bool = null;
@ -772,7 +773,7 @@ pub fn parseArgs(
opts.arch_os_abi, @errorName(e),
}),
};
d.comp.target = std.zig.system.resolveTargetQuery(query) catch |e| {
d.comp.target = std.zig.system.resolveTargetQuery(io, query) catch |e| {
return d.fatal("unable to resolve target: {s}", .{errorDescription(e)});
};
}
@ -916,8 +917,7 @@ pub fn errorDescription(e: anyerror) []const u8 {
error.NotDir => "is not a directory",
error.NotOpenForReading => "file is not open for reading",
error.NotOpenForWriting => "file is not open for writing",
error.InvalidUtf8 => "path is not valid UTF-8",
error.InvalidWtf8 => "path is not valid WTF-8",
error.BadPathName => "bad path name",
error.FileBusy => "file is busy",
error.NameTooLong => "file name is too long",
error.AccessDenied => "access denied",

View File

@ -1,5 +1,8 @@
const std = @import("std");
const runner = @This();
const builtin = @import("builtin");
const std = @import("std");
const Io = std.Io;
const assert = std.debug.assert;
const fmt = std.fmt;
const mem = std.mem;
@ -11,7 +14,6 @@ const WebServer = std.Build.WebServer;
const Allocator = std.mem.Allocator;
const fatal = std.process.fatal;
const Writer = std.Io.Writer;
const runner = @This();
const tty = std.Io.tty;
pub const root = @import("@build");
@ -38,6 +40,10 @@ pub fn main() !void {
const args = try process.argsAlloc(arena);
var threaded: std.Io.Threaded = .init(gpa);
defer threaded.deinit();
const io = threaded.io();
// skip my own exe name
var arg_idx: usize = 1;
@ -68,8 +74,10 @@ pub fn main() !void {
};
var graph: std.Build.Graph = .{
.io = io,
.arena = arena,
.cache = .{
.io = io,
.gpa = arena,
.manifest_dir = try local_cache_directory.handle.makeOpenPath("h", .{}),
},
@ -79,7 +87,7 @@ pub fn main() !void {
.zig_lib_directory = zig_lib_directory,
.host = .{
.query = .{},
.result = try std.zig.system.resolveTargetQuery(.{}),
.result = try std.zig.system.resolveTargetQuery(io, .{}),
},
.time_report = false,
};
@ -116,7 +124,7 @@ pub fn main() !void {
var watch = false;
var fuzz: ?std.Build.Fuzz.Mode = null;
var debounce_interval_ms: u16 = 50;
var webui_listen: ?std.net.Address = null;
var webui_listen: ?Io.net.IpAddress = null;
if (try std.zig.EnvVar.ZIG_BUILD_ERROR_STYLE.get(arena)) |str| {
if (std.meta.stringToEnum(ErrorStyle, str)) |style| {
@ -283,11 +291,11 @@ pub fn main() !void {
});
};
} else if (mem.eql(u8, arg, "--webui")) {
webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
if (webui_listen == null) webui_listen = .{ .ip6 = .loopback(0) };
} else if (mem.startsWith(u8, arg, "--webui=")) {
const addr_str = arg["--webui=".len..];
if (std.mem.eql(u8, addr_str, "-")) fatal("web interface cannot listen on stdio", .{});
webui_listen = std.net.Address.parseIpAndPort(addr_str) catch |err| {
webui_listen = Io.net.IpAddress.parseLiteral(addr_str) catch |err| {
fatal("invalid web UI address '{s}': {s}", .{ addr_str, @errorName(err) });
};
} else if (mem.eql(u8, arg, "--debug-log")) {
@ -329,14 +337,10 @@ pub fn main() !void {
watch = true;
} else if (mem.eql(u8, arg, "--time-report")) {
graph.time_report = true;
if (webui_listen == null) {
webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
}
if (webui_listen == null) webui_listen = .{ .ip6 = .loopback(0) };
} else if (mem.eql(u8, arg, "--fuzz")) {
fuzz = .{ .forever = undefined };
if (webui_listen == null) {
webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
}
if (webui_listen == null) webui_listen = .{ .ip6 = .loopback(0) };
} else if (mem.startsWith(u8, arg, "--fuzz=")) {
const value = arg["--fuzz=".len..];
if (value.len == 0) fatal("missing argument to --fuzz", .{});
@ -545,13 +549,15 @@ pub fn main() !void {
var w: Watch = w: {
if (!watch) break :w undefined;
if (!Watch.have_impl) fatal("--watch not yet implemented for {s}", .{@tagName(builtin.os.tag)});
if (!Watch.have_impl) fatal("--watch not yet implemented for {t}", .{builtin.os.tag});
break :w try .init();
};
try run.thread_pool.init(thread_pool_options);
defer run.thread_pool.deinit();
const now = Io.Clock.Timestamp.now(io, .awake) catch |err| fatal("failed to collect timestamp: {t}", .{err});
run.web_server = if (webui_listen) |listen_address| ws: {
if (builtin.single_threaded) unreachable; // `fatal` above
break :ws .init(.{
@ -563,11 +569,12 @@ pub fn main() !void {
.root_prog_node = main_progress_node,
.watch = watch,
.listen_address = listen_address,
.base_timestamp = now,
});
} else null;
if (run.web_server) |*ws| {
ws.start() catch |err| fatal("failed to start web server: {s}", .{@errorName(err)});
ws.start() catch |err| fatal("failed to start web server: {t}", .{err});
}
rebuild: while (true) : (if (run.error_style.clearOnUpdate()) {
@ -750,6 +757,7 @@ fn runStepNames(
fuzz: ?std.Build.Fuzz.Mode,
) !void {
const gpa = run.gpa;
const io = b.graph.io;
const step_stack = &run.step_stack;
const thread_pool = &run.thread_pool;
@ -853,6 +861,7 @@ fn runStepNames(
assert(mode == .limit);
var f = std.Build.Fuzz.init(
gpa,
io,
thread_pool,
step_stack.keys(),
parent_prog_node,
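
The build runner's web UI address handling above moves from `std.net.Address` to `Io.net.IpAddress`, the listen default becomes the IPv6 loopback with an ephemeral port, and timestamps go through `Io.Clock.Timestamp.now(io, .awake)`. A hedged sketch of the address selection, using only the calls visible in this hunk:

```zig
const std = @import("std");
const Io = std.Io;

fn resolveListenAddress(addr_arg: ?[]const u8) !Io.net.IpAddress {
    // Explicit address: parse the literal, as `--webui=<addr>` does above.
    if (addr_arg) |s| return Io.net.IpAddress.parseLiteral(s);
    // Default: IPv6 loopback with an ephemeral port, matching `--webui`
    // with no argument.
    return .{ .ip6 = .loopback(0) };
}
```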

View File

@ -29,6 +29,10 @@ pub fn main() !void {
const arena = arena_instance.allocator();
const gpa = arena;
var threaded: std.Io.Threaded = .init(gpa);
defer threaded.deinit();
const io = threaded.io();
const args = try std.process.argsAlloc(arena);
const zig_lib_directory = args[1];
@ -66,7 +70,7 @@ pub fn main() !void {
const target_query = std.zig.parseTargetQueryOrReportFatalError(gpa, .{
.arch_os_abi = target_arch_os_abi,
});
const target = std.zig.resolveTargetQueryOrFatal(target_query);
const target = std.zig.resolveTargetQueryOrFatal(io, target_query);
if (print_includes) {
const libc_installation: ?*LibCInstallation = libc: {

View File

@ -29,7 +29,6 @@ pub fn main() !void {
}
fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
_ = gpa;
var i: usize = 0;
var opt_out_fmt: ?std.Target.ObjectFormat = null;
var opt_input: ?[]const u8 = null;
@ -148,12 +147,16 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
const input = opt_input orelse fatal("expected input parameter", .{});
const output = opt_output orelse fatal("expected output parameter", .{});
var threaded: std.Io.Threaded = .init(gpa);
defer threaded.deinit();
const io = threaded.io();
const input_file = fs.cwd().openFile(input, .{}) catch |err| fatal("failed to open {s}: {t}", .{ input, err });
defer input_file.close();
const stat = input_file.stat() catch |err| fatal("failed to stat {s}: {t}", .{ input, err });
var in: File.Reader = .initSize(input_file, &input_buffer, stat.size);
var in: File.Reader = .initSize(input_file.adaptToNewApi(), io, &input_buffer, stat.size);
const elf_hdr = std.elf.Header.read(&in.interface) catch |err| switch (err) {
error.ReadFailed => fatal("unable to read {s}: {t}", .{ input, in.err.? }),
@ -218,7 +221,7 @@ fn cmdObjCopy(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
try out.end();
if (listen) {
var stdin_reader = fs.File.stdin().reader(&stdin_buffer);
var stdin_reader = fs.File.stdin().reader(io, &stdin_buffer);
var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
var server = try Server.init(.{
.in = &stdin_reader.interface,
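
The reader construction above illustrates the broader change: `std.fs.File` readers now take the `Io` interface alongside their buffer. A minimal sketch of reading a whole file through the new interface, assuming `reader(io, buffer)` and `allocRemaining` exactly as they appear in this commit:

```zig
const std = @import("std");
const Io = std.Io;

fn readAllBytes(gpa: std.mem.Allocator, io: Io, path: []const u8) ![]u8 {
    const file = try std.fs.cwd().openFile(path, .{});
    defer file.close();

    // The buffer backs the reader; the Io interface is passed explicitly.
    var buf: [4096]u8 = undefined;
    var file_reader = file.reader(io, &buf);

    // `interface` exposes the generic std.Io.Reader, as in the hunks above.
    return file_reader.interface.allocRemaining(gpa, .unlimited);
}
```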

View File

@ -1,6 +1,12 @@
const std = @import("std");
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;
const WORD = std.os.windows.WORD;
const DWORD = std.os.windows.DWORD;
const Node = @import("ast.zig").Node;
const lex = @import("lex.zig");
const Parser = @import("parse.zig").Parser;
@ -17,8 +23,6 @@ const res = @import("res.zig");
const ico = @import("ico.zig");
const ani = @import("ani.zig");
const bmp = @import("bmp.zig");
const WORD = std.os.windows.WORD;
const DWORD = std.os.windows.DWORD;
const utils = @import("utils.zig");
const NameOrOrdinal = res.NameOrOrdinal;
const SupportedCodePage = @import("code_pages.zig").SupportedCodePage;
@ -28,7 +32,6 @@ const windows1252 = @import("windows1252.zig");
const lang = @import("lang.zig");
const code_pages = @import("code_pages.zig");
const errors = @import("errors.zig");
const native_endian = builtin.cpu.arch.endian();
pub const CompileOptions = struct {
cwd: std.fs.Dir,
@ -77,7 +80,7 @@ pub const Dependencies = struct {
}
};
pub fn compile(allocator: Allocator, source: []const u8, writer: *std.Io.Writer, options: CompileOptions) !void {
pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io.Writer, options: CompileOptions) !void {
var lexer = lex.Lexer.init(source, .{
.default_code_page = options.default_code_page,
.source_mappings = options.source_mappings,
@ -166,10 +169,11 @@ pub fn compile(allocator: Allocator, source: []const u8, writer: *std.Io.Writer,
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
var compiler = Compiler{
var compiler: Compiler = .{
.source = source,
.arena = arena,
.allocator = allocator,
.io = io,
.cwd = options.cwd,
.diagnostics = options.diagnostics,
.dependencies = options.dependencies,
@ -191,6 +195,7 @@ pub const Compiler = struct {
source: []const u8,
arena: Allocator,
allocator: Allocator,
io: Io,
cwd: std.fs.Dir,
state: State = .{},
diagnostics: *Diagnostics,
@ -409,7 +414,7 @@ pub const Compiler = struct {
}
}
var first_error: ?std.fs.File.OpenError = null;
var first_error: ?(std.fs.File.OpenError || std.fs.File.StatError) = null;
for (self.search_dirs) |search_dir| {
if (utils.openFileNotDir(search_dir.dir, path, .{})) |file| {
errdefer file.close();
@ -496,6 +501,8 @@ pub const Compiler = struct {
}
pub fn writeResourceExternal(self: *Compiler, node: *Node.ResourceExternal, writer: *std.Io.Writer) !void {
const io = self.io;
// Init header with data size zero for now, will need to fill it in later
var header = try self.resourceHeader(node.id, node.type, .{});
defer header.deinit(self.allocator);
@ -582,7 +589,7 @@ pub const Compiler = struct {
};
defer file_handle.close();
var file_buffer: [2048]u8 = undefined;
var file_reader = file_handle.reader(&file_buffer);
var file_reader = file_handle.reader(io, &file_buffer);
if (maybe_predefined_type) |predefined_type| {
switch (predefined_type) {

View File

@ -1,5 +1,7 @@
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;
const res = @import("res.zig");
const NameOrOrdinal = res.NameOrOrdinal;
const MemoryFlags = res.MemoryFlags;
@ -169,8 +171,7 @@ pub fn parseNameOrOrdinal(allocator: Allocator, reader: *std.Io.Reader) !NameOrO
pub const CoffOptions = struct {
target: std.coff.IMAGE.FILE.MACHINE = .AMD64,
/// If true, zeroes will be written to all timestamp fields
reproducible: bool = true,
timestamp: i64 = 0,
/// If true, the MEM_WRITE flag will not be set in the .rsrc section header
read_only: bool = false,
/// If non-null, a symbol with this name and storage class EXTERNAL will be added to the symbol table.
@ -188,7 +189,13 @@ pub const Diagnostics = union {
overflow_resource: usize,
};
pub fn writeCoff(allocator: Allocator, writer: *std.Io.Writer, resources: []const Resource, options: CoffOptions, diagnostics: ?*Diagnostics) !void {
pub fn writeCoff(
allocator: Allocator,
writer: *std.Io.Writer,
resources: []const Resource,
options: CoffOptions,
diagnostics: ?*Diagnostics,
) !void {
var resource_tree = ResourceTree.init(allocator, options);
defer resource_tree.deinit();
@ -215,7 +222,7 @@ pub fn writeCoff(allocator: Allocator, writer: *std.Io.Writer, resources: []cons
const pointer_to_rsrc02_data = pointer_to_relocations + relocations_len;
const pointer_to_symbol_table = pointer_to_rsrc02_data + lengths.rsrc02;
const timestamp: i64 = if (options.reproducible) 0 else std.time.timestamp();
const timestamp: i64 = options.timestamp;
const size_of_optional_header = 0;
const machine_type: std.coff.IMAGE.FILE.MACHINE = options.target;
const flags = std.coff.Header.Flags{
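
Note the `CoffOptions` change above: the `reproducible` flag is replaced by an explicit `timestamp` field defaulting to 0, so embedding wall-clock time becomes the caller's decision. A hedged sketch, assuming the remaining `CoffOptions` fields keep the defaults shown above (the import path is also an assumption):

```zig
const cvtres = @import("cvtres.zig"); // assumed path within resinator

// Reproducible output: the timestamp fields stay zero (the default).
const reproducible: cvtres.CoffOptions = .{};

// Otherwise the caller supplies epoch seconds itself, e.g. derived from
// the Io clock APIs introduced elsewhere in this commit.
fn withTimestamp(epoch_seconds: i64) cvtres.CoffOptions {
    return .{ .timestamp = epoch_seconds };
}
```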

View File

@ -1,5 +1,11 @@
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
const std = @import("std");
const Io = std.Io;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Token = @import("lex.zig").Token;
const SourceMappings = @import("source_mapping.zig").SourceMappings;
const utils = @import("utils.zig");
@ -11,19 +17,19 @@ const parse = @import("parse.zig");
const lang = @import("lang.zig");
const code_pages = @import("code_pages.zig");
const SupportedCodePage = code_pages.SupportedCodePage;
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
pub const Diagnostics = struct {
errors: std.ArrayList(ErrorDetails) = .empty,
/// Append-only, cannot handle removing strings.
/// Expects to own all strings within the list.
strings: std.ArrayList([]const u8) = .empty,
allocator: std.mem.Allocator,
allocator: Allocator,
io: Io,
pub fn init(allocator: std.mem.Allocator) Diagnostics {
pub fn init(allocator: Allocator, io: Io) Diagnostics {
return .{
.allocator = allocator,
.io = io,
};
}
@ -62,10 +68,11 @@ pub const Diagnostics = struct {
}
pub fn renderToStdErr(self: *Diagnostics, cwd: std.fs.Dir, source: []const u8, tty_config: std.Io.tty.Config, source_mappings: ?SourceMappings) void {
const io = self.io;
const stderr = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
for (self.errors.items) |err_details| {
renderErrorMessage(stderr, tty_config, cwd, err_details, source, self.strings.items, source_mappings) catch return;
renderErrorMessage(io, stderr, tty_config, cwd, err_details, source, self.strings.items, source_mappings) catch return;
}
}
@ -167,9 +174,9 @@ pub const ErrorDetails = struct {
filename_string_index: FilenameStringIndex,
pub const FilenameStringIndex = std.meta.Int(.unsigned, 32 - @bitSizeOf(FileOpenErrorEnum));
pub const FileOpenErrorEnum = std.meta.FieldEnum(std.fs.File.OpenError);
pub const FileOpenErrorEnum = std.meta.FieldEnum(std.fs.File.OpenError || std.fs.File.StatError);
pub fn enumFromError(err: std.fs.File.OpenError) FileOpenErrorEnum {
pub fn enumFromError(err: (std.fs.File.OpenError || std.fs.File.StatError)) FileOpenErrorEnum {
return switch (err) {
inline else => |e| @field(ErrorDetails.FileOpenError.FileOpenErrorEnum, @errorName(e)),
};
@ -894,7 +901,16 @@ fn cellCount(code_page: SupportedCodePage, source: []const u8, start_index: usiz
const truncated_str = "<...truncated...>";
pub fn renderErrorMessage(writer: *std.Io.Writer, tty_config: std.Io.tty.Config, cwd: std.fs.Dir, err_details: ErrorDetails, source: []const u8, strings: []const []const u8, source_mappings: ?SourceMappings) !void {
pub fn renderErrorMessage(
io: Io,
writer: *std.Io.Writer,
tty_config: std.Io.tty.Config,
cwd: std.fs.Dir,
err_details: ErrorDetails,
source: []const u8,
strings: []const []const u8,
source_mappings: ?SourceMappings,
) !void {
if (err_details.type == .hint) return;
const source_line_start = err_details.token.getLineStartForErrorDisplay(source);
@ -989,6 +1005,7 @@ pub fn renderErrorMessage(writer: *std.Io.Writer, tty_config: std.Io.tty.Config,
var initial_lines_err: ?anyerror = null;
var file_reader_buf: [max_source_line_bytes * 2]u8 = undefined;
var corresponding_lines: ?CorrespondingLines = CorrespondingLines.init(
io,
cwd,
err_details,
source_line_for_display.line,
@ -1084,6 +1101,7 @@ const CorrespondingLines = struct {
code_page: SupportedCodePage,
pub fn init(
io: Io,
cwd: std.fs.Dir,
err_details: ErrorDetails,
line_for_comparison: []const u8,
@ -1108,7 +1126,7 @@ const CorrespondingLines = struct {
.code_page = err_details.code_page,
.file_reader = undefined,
};
corresponding_lines.file_reader = corresponding_lines.file.reader(file_reader_buf);
corresponding_lines.file_reader = corresponding_lines.file.reader(io, file_reader_buf);
errdefer corresponding_lines.deinit();
try corresponding_lines.writeLineFromStreamVerbatim(
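
The resinator diagnostics now carry an `Io` so that error rendering can re-open and re-read source files. A minimal sketch of the new constructor, matching the `init` shown above (the import path is an assumption):

```zig
const std = @import("std");
const Io = std.Io;
const Diagnostics = @import("errors.zig").Diagnostics; // assumed path

fn makeDiagnostics(gpa: std.mem.Allocator, io: Io) Diagnostics {
    // Both the allocator and the Io interface are stored on the struct;
    // the error and string lists start empty via their field defaults.
    return Diagnostics.init(gpa, io);
}
```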

View File

@ -1,5 +1,9 @@
const std = @import("std");
const builtin = @import("builtin");
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;
const removeComments = @import("comments.zig").removeComments;
const parseAndRemoveLineCommands = @import("source_mapping.zig").parseAndRemoveLineCommands;
const compile = @import("compile.zig").compile;
@ -16,19 +20,18 @@ const aro = @import("aro");
const compiler_util = @import("../util.zig");
pub fn main() !void {
var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
defer std.debug.assert(gpa.deinit() == .ok);
const allocator = gpa.allocator();
var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
defer std.debug.assert(debug_allocator.deinit() == .ok);
const gpa = debug_allocator.allocator();
var arena_state = std.heap.ArenaAllocator.init(allocator);
var arena_state = std.heap.ArenaAllocator.init(gpa);
defer arena_state.deinit();
const arena = arena_state.allocator();
const stderr = std.fs.File.stderr();
const stderr_config = std.Io.tty.detectConfig(stderr);
const args = try std.process.argsAlloc(allocator);
defer std.process.argsFree(allocator, args);
const args = try std.process.argsAlloc(arena);
if (args.len < 2) {
try renderErrorMessage(std.debug.lockStderrWriter(&.{}), stderr_config, .err, "expected zig lib dir as first argument", .{});
@ -59,11 +62,11 @@ pub fn main() !void {
};
var options = options: {
var cli_diagnostics = cli.Diagnostics.init(allocator);
var cli_diagnostics = cli.Diagnostics.init(gpa);
defer cli_diagnostics.deinit();
var options = cli.parse(allocator, cli_args, &cli_diagnostics) catch |err| switch (err) {
var options = cli.parse(gpa, cli_args, &cli_diagnostics) catch |err| switch (err) {
error.ParseError => {
try error_handler.emitCliDiagnostics(allocator, cli_args, &cli_diagnostics);
try error_handler.emitCliDiagnostics(gpa, cli_args, &cli_diagnostics);
std.process.exit(1);
},
else => |e| return e,
@ -84,6 +87,10 @@ pub fn main() !void {
};
defer options.deinit();
var threaded: std.Io.Threaded = .init(gpa);
defer threaded.deinit();
const io = threaded.io();
if (options.print_help_and_exit) {
try cli.writeUsage(stdout, "zig rc");
try stdout.flush();
@ -99,12 +106,13 @@ pub fn main() !void {
try stdout.flush();
}
var dependencies = Dependencies.init(allocator);
var dependencies = Dependencies.init(gpa);
defer dependencies.deinit();
const maybe_dependencies: ?*Dependencies = if (options.depfile_path != null) &dependencies else null;
var include_paths = LazyIncludePaths{
.arena = arena,
.io = io,
.auto_includes_option = options.auto_includes,
.zig_lib_dir = zig_lib_dir,
.target_machine_type = options.coff_options.target,
@ -112,12 +120,12 @@ pub fn main() !void {
const full_input = full_input: {
if (options.input_format == .rc and options.preprocess != .no) {
var preprocessed_buf: std.Io.Writer.Allocating = .init(allocator);
var preprocessed_buf: std.Io.Writer.Allocating = .init(gpa);
errdefer preprocessed_buf.deinit();
// We're going to throw away everything except the final preprocessed output anyway,
// so we can use a scoped arena for everything else.
var aro_arena_state = std.heap.ArenaAllocator.init(allocator);
var aro_arena_state = std.heap.ArenaAllocator.init(gpa);
defer aro_arena_state.deinit();
const aro_arena = aro_arena_state.allocator();
@ -129,12 +137,12 @@ pub fn main() !void {
.color = stderr_config,
} } },
true => .{ .output = .{ .to_list = .{
.arena = .init(allocator),
.arena = .init(gpa),
} } },
};
defer diagnostics.deinit();
var comp = aro.Compilation.init(aro_arena, aro_arena, &diagnostics, std.fs.cwd());
var comp = aro.Compilation.init(aro_arena, aro_arena, io, &diagnostics, std.fs.cwd());
defer comp.deinit();
var argv: std.ArrayList([]const u8) = .empty;
@ -159,20 +167,20 @@ pub fn main() !void {
preprocess.preprocess(&comp, &preprocessed_buf.writer, argv.items, maybe_dependencies) catch |err| switch (err) {
error.GeneratedSourceError => {
try error_handler.emitAroDiagnostics(allocator, "failed during preprocessor setup (this is always a bug)", &comp);
try error_handler.emitAroDiagnostics(gpa, "failed during preprocessor setup (this is always a bug)", &comp);
std.process.exit(1);
},
// ArgError can occur if e.g. the .rc file is not found
error.ArgError, error.PreprocessError => {
try error_handler.emitAroDiagnostics(allocator, "failed during preprocessing", &comp);
try error_handler.emitAroDiagnostics(gpa, "failed during preprocessing", &comp);
std.process.exit(1);
},
error.FileTooBig => {
try error_handler.emitMessage(allocator, .err, "failed during preprocessing: maximum file size exceeded", .{});
try error_handler.emitMessage(gpa, .err, "failed during preprocessing: maximum file size exceeded", .{});
std.process.exit(1);
},
error.WriteFailed => {
try error_handler.emitMessage(allocator, .err, "failed during preprocessing: error writing the preprocessed output", .{});
try error_handler.emitMessage(gpa, .err, "failed during preprocessing: error writing the preprocessed output", .{});
std.process.exit(1);
},
error.OutOfMemory => |e| return e,
@ -182,22 +190,22 @@ pub fn main() !void {
} else {
switch (options.input_source) {
.stdio => |file| {
var file_reader = file.reader(&.{});
break :full_input file_reader.interface.allocRemaining(allocator, .unlimited) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to read input from stdin: {s}", .{@errorName(err)});
var file_reader = file.reader(io, &.{});
break :full_input file_reader.interface.allocRemaining(gpa, .unlimited) catch |err| {
try error_handler.emitMessage(gpa, .err, "unable to read input from stdin: {s}", .{@errorName(err)});
std.process.exit(1);
};
},
.filename => |input_filename| {
break :full_input std.fs.cwd().readFileAlloc(input_filename, allocator, .unlimited) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) });
break :full_input std.fs.cwd().readFileAlloc(input_filename, gpa, .unlimited) catch |err| {
try error_handler.emitMessage(gpa, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) });
std.process.exit(1);
};
},
}
}
};
defer allocator.free(full_input);
defer gpa.free(full_input);
if (options.preprocess == .only) {
switch (options.output_source) {
@ -221,55 +229,55 @@ pub fn main() !void {
}
else if (options.input_format == .res)
IoStream.fromIoSource(options.input_source, .input) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to read res file path '{s}': {s}", .{ options.input_source.filename, @errorName(err) });
try error_handler.emitMessage(gpa, .err, "unable to read res file path '{s}': {s}", .{ options.input_source.filename, @errorName(err) });
std.process.exit(1);
}
else
IoStream.fromIoSource(options.output_source, .output) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) });
try error_handler.emitMessage(gpa, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) });
std.process.exit(1);
};
defer res_stream.deinit(allocator);
defer res_stream.deinit(gpa);
const res_data = res_data: {
if (options.input_format != .res) {
// Note: We still want to run this when no-preprocess is set because:
// 1. We want to print accurate line numbers after removing multiline comments
// 2. We want to be able to handle an already-preprocessed input with #line commands in it
var mapping_results = parseAndRemoveLineCommands(allocator, full_input, full_input, .{ .initial_filename = options.input_source.filename }) catch |err| switch (err) {
var mapping_results = parseAndRemoveLineCommands(gpa, full_input, full_input, .{ .initial_filename = options.input_source.filename }) catch |err| switch (err) {
error.InvalidLineCommand => {
// TODO: Maybe output the invalid line command
try error_handler.emitMessage(allocator, .err, "invalid line command in the preprocessed source", .{});
try error_handler.emitMessage(gpa, .err, "invalid line command in the preprocessed source", .{});
if (options.preprocess == .no) {
try error_handler.emitMessage(allocator, .note, "line commands must be of the format: #line <num> \"<path>\"", .{});
try error_handler.emitMessage(gpa, .note, "line commands must be of the format: #line <num> \"<path>\"", .{});
} else {
try error_handler.emitMessage(allocator, .note, "this is likely to be a bug, please report it", .{});
try error_handler.emitMessage(gpa, .note, "this is likely to be a bug, please report it", .{});
}
std.process.exit(1);
},
error.LineNumberOverflow => {
// TODO: Better error message
try error_handler.emitMessage(allocator, .err, "line number count exceeded maximum of {}", .{std.math.maxInt(usize)});
try error_handler.emitMessage(gpa, .err, "line number count exceeded maximum of {}", .{std.math.maxInt(usize)});
std.process.exit(1);
},
error.OutOfMemory => |e| return e,
};
defer mapping_results.mappings.deinit(allocator);
defer mapping_results.mappings.deinit(gpa);
const default_code_page = options.default_code_page orelse .windows1252;
const has_disjoint_code_page = hasDisjointCodePage(mapping_results.result, &mapping_results.mappings, default_code_page);
const final_input = try removeComments(mapping_results.result, mapping_results.result, &mapping_results.mappings);
var diagnostics = Diagnostics.init(allocator);
var diagnostics = Diagnostics.init(gpa, io);
defer diagnostics.deinit();
var output_buffer: [4096]u8 = undefined;
var res_stream_writer = res_stream.source.writer(allocator, &output_buffer);
var res_stream_writer = res_stream.source.writer(gpa, &output_buffer);
defer res_stream_writer.deinit(&res_stream.source);
const output_buffered_stream = res_stream_writer.interface();
compile(allocator, final_input, output_buffered_stream, .{
compile(gpa, io, final_input, output_buffered_stream, .{
.cwd = std.fs.cwd(),
.diagnostics = &diagnostics,
.source_mappings = &mapping_results.mappings,
@ -287,7 +295,7 @@ pub fn main() !void {
.warn_instead_of_error_on_invalid_code_page = options.warn_instead_of_error_on_invalid_code_page,
}) catch |err| switch (err) {
error.ParseError, error.CompileError => {
try error_handler.emitDiagnostics(allocator, std.fs.cwd(), final_input, &diagnostics, mapping_results.mappings);
try error_handler.emitDiagnostics(gpa, std.fs.cwd(), final_input, &diagnostics, mapping_results.mappings);
// Delete the output file on error
res_stream.cleanupAfterError();
std.process.exit(1);
@ -305,7 +313,7 @@ pub fn main() !void {
// write the depfile
if (options.depfile_path) |depfile_path| {
var depfile = std.fs.cwd().createFile(depfile_path, .{}) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) });
try error_handler.emitMessage(gpa, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) });
std.process.exit(1);
};
defer depfile.close();
@ -332,41 +340,41 @@ pub fn main() !void {
if (options.output_format != .coff) return;
break :res_data res_stream.source.readAll(allocator) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to read res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
break :res_data res_stream.source.readAll(gpa, io) catch |err| {
try error_handler.emitMessage(gpa, .err, "unable to read res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
std.process.exit(1);
};
};
// No need to keep the res_data around after parsing the resources from it
defer res_data.deinit(allocator);
defer res_data.deinit(gpa);
std.debug.assert(options.output_format == .coff);
// TODO: Maybe use a buffered file reader instead of reading file into memory -> fbs
var res_reader: std.Io.Reader = .fixed(res_data.bytes);
break :resources cvtres.parseRes(allocator, &res_reader, .{ .max_size = res_data.bytes.len }) catch |err| {
break :resources cvtres.parseRes(gpa, &res_reader, .{ .max_size = res_data.bytes.len }) catch |err| {
// TODO: Better errors
try error_handler.emitMessage(allocator, .err, "unable to parse res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
try error_handler.emitMessage(gpa, .err, "unable to parse res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
std.process.exit(1);
};
};
defer resources.deinit();
var coff_stream = IoStream.fromIoSource(options.output_source, .output) catch |err| {
try error_handler.emitMessage(allocator, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) });
try error_handler.emitMessage(gpa, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) });
std.process.exit(1);
};
defer coff_stream.deinit(allocator);
defer coff_stream.deinit(gpa);
var coff_output_buffer: [4096]u8 = undefined;
var coff_output_buffered_stream = coff_stream.source.writer(allocator, &coff_output_buffer);
var coff_output_buffered_stream = coff_stream.source.writer(gpa, &coff_output_buffer);
var cvtres_diagnostics: cvtres.Diagnostics = .{ .none = {} };
cvtres.writeCoff(allocator, coff_output_buffered_stream.interface(), resources.list.items, options.coff_options, &cvtres_diagnostics) catch |err| {
cvtres.writeCoff(gpa, coff_output_buffered_stream.interface(), resources.list.items, options.coff_options, &cvtres_diagnostics) catch |err| {
switch (err) {
error.DuplicateResource => {
const duplicate_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
try error_handler.emitMessage(allocator, .err, "duplicate resource [id: {f}, type: {f}, language: {f}]", .{
try error_handler.emitMessage(gpa, .err, "duplicate resource [id: {f}, type: {f}, language: {f}]", .{
duplicate_resource.name_value,
fmtResourceType(duplicate_resource.type_value),
duplicate_resource.language,
@ -374,8 +382,8 @@ pub fn main() !void {
},
error.ResourceDataTooLong => {
const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
try error_handler.emitMessage(allocator, .err, "resource has a data length that is too large to be written into a coff section", .{});
try error_handler.emitMessage(allocator, .note, "the resource with the invalid size is [id: {f}, type: {f}, language: {f}]", .{
try error_handler.emitMessage(gpa, .err, "resource has a data length that is too large to be written into a coff section", .{});
try error_handler.emitMessage(gpa, .note, "the resource with the invalid size is [id: {f}, type: {f}, language: {f}]", .{
overflow_resource.name_value,
fmtResourceType(overflow_resource.type_value),
overflow_resource.language,
@ -383,15 +391,15 @@ pub fn main() !void {
},
error.TotalResourceDataTooLong => {
const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
try error_handler.emitMessage(allocator, .err, "total resource data exceeds the maximum of the coff 'size of raw data' field", .{});
try error_handler.emitMessage(allocator, .note, "size overflow occurred when attempting to write this resource: [id: {f}, type: {f}, language: {f}]", .{
try error_handler.emitMessage(gpa, .err, "total resource data exceeds the maximum of the coff 'size of raw data' field", .{});
try error_handler.emitMessage(gpa, .note, "size overflow occurred when attempting to write this resource: [id: {f}, type: {f}, language: {f}]", .{
overflow_resource.name_value,
fmtResourceType(overflow_resource.type_value),
overflow_resource.language,
});
},
else => {
try error_handler.emitMessage(allocator, .err, "unable to write coff output file '{s}': {s}", .{ coff_stream.name, @errorName(err) });
try error_handler.emitMessage(gpa, .err, "unable to write coff output file '{s}': {s}", .{ coff_stream.name, @errorName(err) });
},
}
// Delete the output file on error
@ -423,7 +431,7 @@ const IoStream = struct {
};
}
pub fn deinit(self: *IoStream, allocator: std.mem.Allocator) void {
pub fn deinit(self: *IoStream, allocator: Allocator) void {
self.source.deinit(allocator);
}
@ -458,7 +466,7 @@ const IoStream = struct {
}
}
pub fn deinit(self: *Source, allocator: std.mem.Allocator) void {
pub fn deinit(self: *Source, allocator: Allocator) void {
switch (self.*) {
.file => |file| file.close(),
.stdio => {},
@ -471,18 +479,18 @@ const IoStream = struct {
bytes: []const u8,
needs_free: bool,
pub fn deinit(self: Data, allocator: std.mem.Allocator) void {
pub fn deinit(self: Data, allocator: Allocator) void {
if (self.needs_free) {
allocator.free(self.bytes);
}
}
};
pub fn readAll(self: Source, allocator: std.mem.Allocator) !Data {
pub fn readAll(self: Source, allocator: Allocator, io: Io) !Data {
return switch (self) {
inline .file, .stdio => |file| .{
.bytes = b: {
var file_reader = file.reader(&.{});
var file_reader = file.reader(io, &.{});
break :b try file_reader.interface.allocRemaining(allocator, .unlimited);
},
.needs_free = true,
@ -496,7 +504,7 @@ const IoStream = struct {
file: std.fs.File.Writer,
allocating: std.Io.Writer.Allocating,
pub const Error = std.mem.Allocator.Error || std.fs.File.WriteError;
pub const Error = Allocator.Error || std.fs.File.WriteError;
pub fn interface(this: *@This()) *std.Io.Writer {
return switch (this.*) {
@ -514,7 +522,7 @@ const IoStream = struct {
}
};
pub fn writer(source: *Source, allocator: std.mem.Allocator, buffer: []u8) Writer {
pub fn writer(source: *Source, allocator: Allocator, buffer: []u8) Writer {
return switch (source.*) {
.file, .stdio => |file| .{ .file = file.writer(buffer) },
.memory => |*list| .{ .allocating = .fromArrayList(allocator, list) },
@ -525,17 +533,20 @@ const IoStream = struct {
};
const LazyIncludePaths = struct {
arena: std.mem.Allocator,
arena: Allocator,
io: Io,
auto_includes_option: cli.Options.AutoIncludes,
zig_lib_dir: []const u8,
target_machine_type: std.coff.IMAGE.FILE.MACHINE,
resolved_include_paths: ?[]const []const u8 = null,
pub fn get(self: *LazyIncludePaths, error_handler: *ErrorHandler) ![]const []const u8 {
const io = self.io;
if (self.resolved_include_paths) |include_paths|
return include_paths;
return getIncludePaths(self.arena, self.auto_includes_option, self.zig_lib_dir, self.target_machine_type) catch |err| switch (err) {
return getIncludePaths(self.arena, io, self.auto_includes_option, self.zig_lib_dir, self.target_machine_type) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => |e| {
switch (e) {
@ -556,7 +567,13 @@ const LazyIncludePaths = struct {
}
};
fn getIncludePaths(arena: std.mem.Allocator, auto_includes_option: cli.Options.AutoIncludes, zig_lib_dir: []const u8, target_machine_type: std.coff.IMAGE.FILE.MACHINE) ![]const []const u8 {
fn getIncludePaths(
arena: Allocator,
io: Io,
auto_includes_option: cli.Options.AutoIncludes,
zig_lib_dir: []const u8,
target_machine_type: std.coff.IMAGE.FILE.MACHINE,
) ![]const []const u8 {
if (auto_includes_option == .none) return &[_][]const u8{};
const includes_arch: std.Target.Cpu.Arch = switch (target_machine_type) {
@ -600,7 +617,7 @@ fn getIncludePaths(arena: std.mem.Allocator, auto_includes_option: cli.Options.A
.cpu_arch = includes_arch,
.abi = .msvc,
};
const target = std.zig.resolveTargetQueryOrFatal(target_query);
const target = std.zig.resolveTargetQueryOrFatal(io, target_query);
const is_native_abi = target_query.isNativeAbi();
const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, &target, is_native_abi, true, null) catch {
if (includes == .any) {
@ -626,7 +643,7 @@ fn getIncludePaths(arena: std.mem.Allocator, auto_includes_option: cli.Options.A
.cpu_arch = includes_arch,
.abi = .gnu,
};
const target = std.zig.resolveTargetQueryOrFatal(target_query);
const target = std.zig.resolveTargetQueryOrFatal(io, target_query);
const is_native_abi = target_query.isNativeAbi();
const detected_libc = std.zig.LibCDirs.detect(arena, zig_lib_dir, &target, is_native_abi, true, null) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
@ -647,7 +664,7 @@ const ErrorHandler = union(enum) {
pub fn emitCliDiagnostics(
self: *ErrorHandler,
allocator: std.mem.Allocator,
allocator: Allocator,
args: []const []const u8,
diagnostics: *cli.Diagnostics,
) !void {
@ -666,7 +683,7 @@ const ErrorHandler = union(enum) {
pub fn emitAroDiagnostics(
self: *ErrorHandler,
allocator: std.mem.Allocator,
allocator: Allocator,
fail_msg: []const u8,
comp: *aro.Compilation,
) !void {
@ -692,7 +709,7 @@ const ErrorHandler = union(enum) {
pub fn emitDiagnostics(
self: *ErrorHandler,
allocator: std.mem.Allocator,
allocator: Allocator,
cwd: std.fs.Dir,
source: []const u8,
diagnostics: *Diagnostics,
@ -713,7 +730,7 @@ const ErrorHandler = union(enum) {
pub fn emitMessage(
self: *ErrorHandler,
allocator: std.mem.Allocator,
allocator: Allocator,
msg_type: @import("utils.zig").ErrorMessageType,
comptime format: []const u8,
args: anytype,
@ -738,7 +755,7 @@ const ErrorHandler = union(enum) {
};
fn cliDiagnosticsToErrorBundle(
gpa: std.mem.Allocator,
gpa: Allocator,
diagnostics: *cli.Diagnostics,
) !ErrorBundle {
@branchHint(.cold);
@ -783,7 +800,7 @@ fn cliDiagnosticsToErrorBundle(
}
fn diagnosticsToErrorBundle(
gpa: std.mem.Allocator,
gpa: Allocator,
source: []const u8,
diagnostics: *Diagnostics,
mappings: SourceMappings,
@ -870,7 +887,7 @@ fn diagnosticsToErrorBundle(
return try bundle.toOwnedBundle("");
}
fn errorStringToErrorBundle(allocator: std.mem.Allocator, comptime format: []const u8, args: anytype) !ErrorBundle {
fn errorStringToErrorBundle(allocator: Allocator, comptime format: []const u8, args: anytype) !ErrorBundle {
@branchHint(.cold);
var bundle: ErrorBundle.Wip = undefined;
try bundle.init(allocator);

View File

@ -26,7 +26,11 @@ pub const UncheckedSliceWriter = struct {
/// Cross-platform 'std.fs.Dir.openFile' wrapper that will always return IsDir if
/// a directory is attempted to be opened.
/// TODO: Remove once https://github.com/ziglang/zig/issues/5732 is addressed.
pub fn openFileNotDir(cwd: std.fs.Dir, path: []const u8, flags: std.fs.File.OpenFlags) std.fs.File.OpenError!std.fs.File {
pub fn openFileNotDir(
cwd: std.fs.Dir,
path: []const u8,
flags: std.fs.File.OpenFlags,
) (std.fs.File.OpenError || std.fs.File.StatError)!std.fs.File {
const file = try cwd.openFile(path, flags);
errdefer file.close();
// https://github.com/ziglang/zig/issues/5732

View File

@ -2,6 +2,7 @@
const builtin = @import("builtin");
const std = @import("std");
const Io = std.Io;
const fatal = std.process.fatal;
const testing = std.testing;
const assert = std.debug.assert;
@ -12,10 +13,11 @@ pub const std_options: std.Options = .{
};
var log_err_count: usize = 0;
var fba = std.heap.FixedBufferAllocator.init(&fba_buffer);
var fba: std.heap.FixedBufferAllocator = .init(&fba_buffer);
var fba_buffer: [8192]u8 = undefined;
var stdin_buffer: [4096]u8 = undefined;
var stdout_buffer: [4096]u8 = undefined;
var runner_threaded_io: Io.Threaded = .init_single_threaded;
/// Keep in sync with logic in `std.Build.addRunArtifact` which decides whether
/// the test runner will communicate with the build runner via `std.zig.Server`.
@ -63,8 +65,6 @@ pub fn main() void {
fuzz_abi.fuzzer_init(.fromSlice(cache_dir));
}
fba.reset();
if (listen) {
return mainServer() catch @panic("internal test runner failure");
} else {
@ -74,7 +74,7 @@ pub fn main() void {
fn mainServer() !void {
@disableInstrumentation();
var stdin_reader = std.fs.File.stdin().readerStreaming(&stdin_buffer);
var stdin_reader = std.fs.File.stdin().readerStreaming(runner_threaded_io.io(), &stdin_buffer);
var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
var server = try std.zig.Server.init(.{
.in = &stdin_reader.interface,
@ -131,6 +131,7 @@ fn mainServer() !void {
.run_test => {
testing.allocator_instance = .{};
testing.io_instance = .init(testing.allocator);
log_err_count = 0;
const index = try server.receiveBody_u32();
const test_fn = builtin.test_functions[index];
@ -152,6 +153,7 @@ fn mainServer() !void {
break :s .fail;
},
};
testing.io_instance.deinit();
const leak_count = testing.allocator_instance.detectLeaks();
testing.allocator_instance.deinitWithoutLeakChecks();
try server.serveTestResults(.{
@ -228,18 +230,13 @@ fn mainTerminal() void {
});
const have_tty = std.fs.File.stderr().isTty();
var async_frame_buffer: []align(builtin.target.stackAlignment()) u8 = undefined;
// TODO this is on the next line (using `undefined` above) because otherwise zig incorrectly
// ignores the alignment of the slice.
async_frame_buffer = &[_]u8{};
var leaks: usize = 0;
for (test_fn_list, 0..) |test_fn, i| {
testing.allocator_instance = .{};
testing.io_instance = .init(testing.allocator);
defer {
if (testing.allocator_instance.deinit() == .leak) {
leaks += 1;
}
testing.io_instance.deinit();
if (testing.allocator_instance.deinit() == .leak) leaks += 1;
}
testing.log_level = .warn;
@ -326,7 +323,7 @@ pub fn mainSimple() anyerror!void {
.stage2_aarch64, .stage2_riscv64 => true,
else => false,
};
// is the backend capable of calling `std.Io.Writer.print`?
// is the backend capable of calling `Io.Writer.print`?
const enable_print = switch (builtin.zig_backend) {
.stage2_aarch64, .stage2_riscv64 => true,
else => false,
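
The test runner changes above give each test a fresh `testing.io_instance` next to the fresh `testing.allocator_instance`, torn down before leak checking. A condensed sketch of that per-test lifecycle, based only on the hunks above:

```zig
const std = @import("std");
const testing = std.testing;

fn runOneTest(test_fn: std.builtin.TestFn) !void {
    testing.allocator_instance = .{};
    testing.io_instance = .init(testing.allocator);
    defer {
        // Io is torn down first, then the allocator is leak-checked,
        // mirroring mainTerminal above.
        testing.io_instance.deinit();
        if (testing.allocator_instance.deinit() == .leak) @panic("memory leak");
    }
    try test_fn.func();
}
```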

View File

@ -18,6 +18,10 @@ pub fn main() u8 {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var threaded: std.Io.Threaded = .init(gpa);
defer threaded.deinit();
const io = threaded.io();
var args = process.argsAlloc(arena) catch {
std.debug.print("ran out of memory allocating arguments\n", .{});
if (fast_exit) process.exit(1);
@ -42,7 +46,7 @@ pub fn main() u8 {
};
defer diagnostics.deinit();
var comp = aro.Compilation.initDefault(gpa, arena, &diagnostics, std.fs.cwd()) catch |err| switch (err) {
var comp = aro.Compilation.initDefault(gpa, arena, io, &diagnostics, std.fs.cwd()) catch |err| switch (err) {
error.OutOfMemory => {
std.debug.print("ran out of memory initializing C compilation\n", .{});
if (fast_exit) process.exit(1);

View File

@ -1,5 +1,7 @@
const std = @import("std.zig");
const builtin = @import("builtin");
const std = @import("std.zig");
const Io = std.Io;
const fs = std.fs;
const mem = std.mem;
const debug = std.debug;
@ -110,6 +112,7 @@ pub const ReleaseMode = enum {
/// Shared state among all Build instances.
/// Settings that are here rather than in Build are not configurable per-package.
pub const Graph = struct {
io: Io,
arena: Allocator,
system_library_options: std.StringArrayHashMapUnmanaged(SystemLibraryMode) = .empty,
system_package_mode: bool = false,
@ -1834,6 +1837,8 @@ pub fn runAllowFail(
if (!process.can_spawn)
return error.ExecNotSupported;
const io = b.graph.io;
const max_output_size = 400 * 1024;
var child = std.process.Child.init(argv, b.allocator);
child.stdin_behavior = .Ignore;
@ -1844,7 +1849,7 @@ pub fn runAllowFail(
try Step.handleVerbose2(b, null, child.env_map, argv);
try child.spawn();
var stdout_reader = child.stdout.?.readerStreaming(&.{});
var stdout_reader = child.stdout.?.readerStreaming(io, &.{});
const stdout = stdout_reader.interface.allocRemaining(b.allocator, .limited(max_output_size)) catch {
return error.ReadFailure;
};
@ -2666,9 +2671,10 @@ pub fn resolveTargetQuery(b: *Build, query: Target.Query) ResolvedTarget {
// Hot path. This is faster than querying the native CPU and OS again.
return b.graph.host;
}
const io = b.graph.io;
return .{
.query = query,
.result = std.zig.system.resolveTargetQuery(query) catch
.result = std.zig.system.resolveTargetQuery(io, query) catch
@panic("unable to resolve target query"),
};
}

View File

@ -3,8 +3,10 @@
//! not to withstand attacks using specially-crafted input.
const Cache = @This();
const std = @import("std");
const builtin = @import("builtin");
const std = @import("std");
const Io = std.Io;
const crypto = std.crypto;
const fs = std.fs;
const assert = std.debug.assert;
@ -15,10 +17,11 @@ const Allocator = std.mem.Allocator;
const log = std.log.scoped(.cache);
gpa: Allocator,
io: Io,
manifest_dir: fs.Dir,
hash: HashHelper = .{},
/// This value is accessed from multiple threads, protected by mutex.
recent_problematic_timestamp: i128 = 0,
recent_problematic_timestamp: Io.Timestamp = .zero,
mutex: std.Thread.Mutex = .{},
/// A set of strings such as the zig library directory or project source root, which
@ -152,7 +155,7 @@ pub const File = struct {
pub const Stat = struct {
inode: fs.File.INode,
size: u64,
mtime: i128,
mtime: Io.Timestamp,
pub fn fromFs(fs_stat: fs.File.Stat) Stat {
return .{
@ -327,7 +330,7 @@ pub const Manifest = struct {
diagnostic: Diagnostic = .none,
/// Keeps track of the last time we performed a file system write to observe
/// what time the file system thinks it is, according to its own granularity.
recent_problematic_timestamp: i128 = 0,
recent_problematic_timestamp: Io.Timestamp = .zero,
pub const Diagnostic = union(enum) {
none,
@ -661,9 +664,10 @@ pub const Manifest = struct {
},
} {
const gpa = self.cache.gpa;
const io = self.cache.io;
const input_file_count = self.files.entries.len;
var tiny_buffer: [1]u8 = undefined; // allows allocRemaining to detect limit exceeded
var manifest_reader = self.manifest_file.?.reader(&tiny_buffer); // Reads positionally from zero.
var manifest_reader = self.manifest_file.?.reader(io, &tiny_buffer); // Reads positionally from zero.
const limit: std.Io.Limit = .limited(manifest_file_size_max);
const file_contents = manifest_reader.interface.allocRemaining(gpa, limit) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
@ -724,7 +728,7 @@ pub const Manifest = struct {
file.stat = .{
.size = stat_size,
.inode = stat_inode,
.mtime = stat_mtime,
.mtime = .{ .nanoseconds = stat_mtime },
};
file.bin_digest = file_bin_digest;
break :f file;
@ -743,7 +747,7 @@ pub const Manifest = struct {
.stat = .{
.size = stat_size,
.inode = stat_inode,
.mtime = stat_mtime,
.mtime = .{ .nanoseconds = stat_mtime },
},
.bin_digest = file_bin_digest,
};
@ -776,7 +780,7 @@ pub const Manifest = struct {
return error.CacheCheckFailed;
};
const size_match = actual_stat.size == cache_hash_file.stat.size;
const mtime_match = actual_stat.mtime == cache_hash_file.stat.mtime;
const mtime_match = actual_stat.mtime.nanoseconds == cache_hash_file.stat.mtime.nanoseconds;
const inode_match = actual_stat.inode == cache_hash_file.stat.inode;
if (!size_match or !mtime_match or !inode_match) {
@ -788,7 +792,7 @@ pub const Manifest = struct {
if (self.isProblematicTimestamp(cache_hash_file.stat.mtime)) {
// The actual file has an unreliable timestamp, force it to be hashed
cache_hash_file.stat.mtime = 0;
cache_hash_file.stat.mtime = .zero;
cache_hash_file.stat.inode = 0;
}
@ -844,10 +848,10 @@ pub const Manifest = struct {
}
}
fn isProblematicTimestamp(man: *Manifest, file_time: i128) bool {
fn isProblematicTimestamp(man: *Manifest, timestamp: Io.Timestamp) bool {
// If the file_time is prior to the most recent problematic timestamp
// then we don't need to access the filesystem.
if (file_time < man.recent_problematic_timestamp)
if (timestamp.nanoseconds < man.recent_problematic_timestamp.nanoseconds)
return false;
// Next we will check the globally shared Cache timestamp, which is accessed
@ -857,7 +861,7 @@ pub const Manifest = struct {
// Save the global one to our local one to avoid locking next time.
man.recent_problematic_timestamp = man.cache.recent_problematic_timestamp;
if (file_time < man.recent_problematic_timestamp)
if (timestamp.nanoseconds < man.recent_problematic_timestamp.nanoseconds)
return false;
// This flag prevents multiple filesystem writes for the same hit() call.
@ -875,7 +879,7 @@ pub const Manifest = struct {
man.cache.recent_problematic_timestamp = man.recent_problematic_timestamp;
}
return file_time >= man.recent_problematic_timestamp;
return timestamp.nanoseconds >= man.recent_problematic_timestamp.nanoseconds;
}
fn populateFileHash(self: *Manifest, ch_file: *File) !void {
@ -900,7 +904,7 @@ pub const Manifest = struct {
if (self.isProblematicTimestamp(ch_file.stat.mtime)) {
// The actual file has an unreliable timestamp, force it to be hashed
ch_file.stat.mtime = 0;
ch_file.stat.mtime = .zero;
ch_file.stat.inode = 0;
}
@ -1036,7 +1040,7 @@ pub const Manifest = struct {
if (self.isProblematicTimestamp(new_file.stat.mtime)) {
// The actual file has an unreliable timestamp, force it to be hashed
new_file.stat.mtime = 0;
new_file.stat.mtime = .zero;
new_file.stat.inode = 0;
}
@ -1301,7 +1305,7 @@ fn hashFile(file: fs.File, bin_digest: *[Hasher.mac_length]u8) fs.File.PReadErro
}
// Create/Write a file, close it, then grab its stat.mtime timestamp.
fn testGetCurrentFileTimestamp(dir: fs.Dir) !i128 {
fn testGetCurrentFileTimestamp(dir: fs.Dir) !Io.Timestamp {
const test_out_file = "test-filetimestamp.tmp";
var file = try dir.createFile(test_out_file, .{
@ -1317,6 +1321,8 @@ fn testGetCurrentFileTimestamp(dir: fs.Dir) !i128 {
}
test "cache file and then recall it" {
const io = std.testing.io;
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
@ -1327,15 +1333,16 @@ test "cache file and then recall it" {
// Wait for file timestamps to tick
const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
std.Thread.sleep(1);
while ((try testGetCurrentFileTimestamp(tmp.dir)).nanoseconds == initial_time.nanoseconds) {
try std.Io.Clock.Duration.sleep(.{ .clock = .boot, .raw = .fromNanoseconds(1) }, io);
}
var digest1: HexDigest = undefined;
var digest2: HexDigest = undefined;
{
var cache = Cache{
var cache: Cache = .{
.io = io,
.gpa = testing.allocator,
.manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
};
@ -1378,6 +1385,8 @@ test "cache file and then recall it" {
}
test "check that changing a file makes cache fail" {
const io = std.testing.io;
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
@ -1390,15 +1399,16 @@ test "check that changing a file makes cache fail" {
// Wait for file timestamps to tick
const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
std.Thread.sleep(1);
while ((try testGetCurrentFileTimestamp(tmp.dir)).nanoseconds == initial_time.nanoseconds) {
try std.Io.Clock.Duration.sleep(.{ .clock = .boot, .raw = .fromNanoseconds(1) }, io);
}
var digest1: HexDigest = undefined;
var digest2: HexDigest = undefined;
{
var cache = Cache{
var cache: Cache = .{
.io = io,
.gpa = testing.allocator,
.manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
};
@ -1447,6 +1457,8 @@ test "check that changing a file makes cache fail" {
}
test "no file inputs" {
const io = testing.io;
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
@ -1455,7 +1467,8 @@ test "no file inputs" {
var digest1: HexDigest = undefined;
var digest2: HexDigest = undefined;
var cache = Cache{
var cache: Cache = .{
.io = io,
.gpa = testing.allocator,
.manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
};
@ -1490,6 +1503,8 @@ test "no file inputs" {
}
test "Manifest with files added after initial hash work" {
const io = std.testing.io;
var tmp = testing.tmpDir(.{});
defer tmp.cleanup();
@ -1502,8 +1517,8 @@ test "Manifest with files added after initial hash work" {
// Wait for file timestamps to tick
const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
std.Thread.sleep(1);
while ((try testGetCurrentFileTimestamp(tmp.dir)).nanoseconds == initial_time.nanoseconds) {
try std.Io.Clock.Duration.sleep(.{ .clock = .boot, .raw = .fromNanoseconds(1) }, io);
}
var digest1: HexDigest = undefined;
@ -1511,7 +1526,8 @@ test "Manifest with files added after initial hash work" {
var digest3: HexDigest = undefined;
{
var cache = Cache{
var cache: Cache = .{
.io = io,
.gpa = testing.allocator,
.manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
};
@ -1552,8 +1568,8 @@ test "Manifest with files added after initial hash work" {
// Wait for file timestamps to tick
const initial_time2 = try testGetCurrentFileTimestamp(tmp.dir);
while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time2) {
std.Thread.sleep(1);
while ((try testGetCurrentFileTimestamp(tmp.dir)).nanoseconds == initial_time2.nanoseconds) {
try std.Io.Clock.Duration.sleep(.{ .clock = .boot, .raw = .fromNanoseconds(1) }, io);
}
{

View File

@ -1,5 +1,7 @@
const Path = @This();
const std = @import("../../std.zig");
const Io = std.Io;
const assert = std.debug.assert;
const fs = std.fs;
const Allocator = std.mem.Allocator;
@ -119,7 +121,7 @@ pub fn atomicFile(
return p.root_dir.handle.atomicFile(joined_path, options);
}
pub fn access(p: Path, sub_path: []const u8, flags: fs.File.OpenFlags) !void {
pub fn access(p: Path, sub_path: []const u8, flags: Io.Dir.AccessOptions) !void {
var buf: [fs.max_path_bytes]u8 = undefined;
const joined_path = if (p.sub_path.len == 0) sub_path else p: {
break :p std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
@ -151,7 +153,7 @@ pub fn fmtEscapeString(path: Path) std.fmt.Alt(Path, formatEscapeString) {
return .{ .data = path };
}
pub fn formatEscapeString(path: Path, writer: *std.Io.Writer) std.Io.Writer.Error!void {
pub fn formatEscapeString(path: Path, writer: *Io.Writer) Io.Writer.Error!void {
if (path.root_dir.path) |p| {
try std.zig.stringEscape(p, writer);
if (path.sub_path.len > 0) try std.zig.stringEscape(fs.path.sep_str, writer);
@ -167,7 +169,7 @@ pub fn fmtEscapeChar(path: Path) std.fmt.Alt(Path, formatEscapeChar) {
}
/// Deprecated, use double quoted escape to print paths.
pub fn formatEscapeChar(path: Path, writer: *std.Io.Writer) std.Io.Writer.Error!void {
pub fn formatEscapeChar(path: Path, writer: *Io.Writer) Io.Writer.Error!void {
if (path.root_dir.path) |p| {
for (p) |byte| try std.zig.charEscape(byte, writer);
if (path.sub_path.len > 0) try writer.writeByte(fs.path.sep);
@ -177,7 +179,7 @@ pub fn formatEscapeChar(path: Path, writer: *std.Io.Writer) std.Io.Writer.Error!
}
}
pub fn format(self: Path, writer: *std.Io.Writer) std.Io.Writer.Error!void {
pub fn format(self: Path, writer: *Io.Writer) Io.Writer.Error!void {
if (std.fs.path.isAbsolute(self.sub_path)) {
try writer.writeAll(self.sub_path);
return;

View File

@ -1,4 +1,5 @@
const std = @import("../std.zig");
const Io = std.Io;
const Build = std.Build;
const Cache = Build.Cache;
const Step = std.Build.Step;
@ -14,6 +15,7 @@ const Fuzz = @This();
const build_runner = @import("root");
gpa: Allocator,
io: Io,
mode: Mode,
/// Allocated into `gpa`.
@ -75,6 +77,7 @@ const CoverageMap = struct {
pub fn init(
gpa: Allocator,
io: Io,
thread_pool: *std.Thread.Pool,
all_steps: []const *Build.Step,
root_prog_node: std.Progress.Node,
@ -111,6 +114,7 @@ pub fn init(
return .{
.gpa = gpa,
.io = io,
.mode = mode,
.run_steps = run_steps,
.wait_group = .{},
@ -484,6 +488,7 @@ fn addEntryPoint(fuzz: *Fuzz, coverage_id: u64, addr: u64) error{ AlreadyReporte
pub fn waitAndPrintReport(fuzz: *Fuzz) void {
assert(fuzz.mode == .limit);
const io = fuzz.io;
fuzz.wait_group.wait();
fuzz.wait_group.reset();
@ -506,7 +511,7 @@ pub fn waitAndPrintReport(fuzz: *Fuzz) void {
const fuzz_abi = std.Build.abi.fuzz;
var rbuf: [0x1000]u8 = undefined;
var r = coverage_file.reader(&rbuf);
var r = coverage_file.reader(io, &rbuf);
var header: fuzz_abi.SeenPcsHeader = undefined;
r.interface.readSliceAll(std.mem.asBytes(&header)) catch |err| {

View File

@ -1,9 +1,11 @@
const Step = @This();
const builtin = @import("builtin");
const std = @import("../std.zig");
const Io = std.Io;
const Build = std.Build;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const builtin = @import("builtin");
const Cache = Build.Cache;
const Path = Cache.Path;
const ArrayList = std.ArrayList;
@ -327,7 +329,7 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
}
/// For debugging purposes, prints identifying information about this Step.
pub fn dump(step: *Step, w: *std.Io.Writer, tty_config: std.Io.tty.Config) void {
pub fn dump(step: *Step, w: *Io.Writer, tty_config: Io.tty.Config) void {
if (step.debug_stack_trace.instruction_addresses.len > 0) {
w.print("name: '{s}'. creation stack trace:\n", .{step.name}) catch {};
std.debug.writeStackTrace(&step.debug_stack_trace, w, tty_config) catch {};
@ -382,7 +384,7 @@ pub fn addError(step: *Step, comptime fmt: []const u8, args: anytype) error{OutO
pub const ZigProcess = struct {
child: std.process.Child,
poller: std.Io.Poller(StreamEnum),
poller: Io.Poller(StreamEnum),
progress_ipc_fd: if (std.Progress.have_ipc) ?std.posix.fd_t else void,
pub const StreamEnum = enum { stdout, stderr };
@ -458,7 +460,7 @@ pub fn evalZigProcess(
const zp = try gpa.create(ZigProcess);
zp.* = .{
.child = child,
.poller = std.Io.poll(gpa, ZigProcess.StreamEnum, .{
.poller = Io.poll(gpa, ZigProcess.StreamEnum, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
}),
@ -505,11 +507,12 @@ pub fn evalZigProcess(
}
/// Wrapper around `std.fs.Dir.updateFile` that handles verbose and error output.
pub fn installFile(s: *Step, src_lazy_path: Build.LazyPath, dest_path: []const u8) !std.fs.Dir.PrevStatus {
pub fn installFile(s: *Step, src_lazy_path: Build.LazyPath, dest_path: []const u8) !Io.Dir.PrevStatus {
const b = s.owner;
const io = b.graph.io;
const src_path = src_lazy_path.getPath3(b, s);
try handleVerbose(b, null, &.{ "install", "-C", b.fmt("{f}", .{src_path}), dest_path });
return src_path.root_dir.handle.updateFile(src_path.sub_path, std.fs.cwd(), dest_path, .{}) catch |err| {
return Io.Dir.updateFile(src_path.root_dir.handle.adaptToNewApi(), io, src_path.sub_path, .cwd(), dest_path, .{}) catch |err| {
return s.fail("unable to update file from '{f}' to '{s}': {s}", .{
src_path, dest_path, @errorName(err),
});
@ -738,7 +741,7 @@ pub fn allocPrintCmd2(
argv: []const []const u8,
) Allocator.Error![]u8 {
const shell = struct {
fn escape(writer: *std.Io.Writer, string: []const u8, is_argv0: bool) !void {
fn escape(writer: *Io.Writer, string: []const u8, is_argv0: bool) !void {
for (string) |c| {
if (switch (c) {
else => true,
@ -772,7 +775,7 @@ pub fn allocPrintCmd2(
}
};
var aw: std.Io.Writer.Allocating = .init(gpa);
var aw: Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const writer = &aw.writer;
if (opt_cwd) |cwd| writer.print("cd {s} && ", .{cwd}) catch return error.OutOfMemory;

View File

@ -1701,7 +1701,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
// This prevents a warning, that should probably be upgraded to an error in Zig's
// CLI parsing code, when the linker sees an -L directory that does not exist.
if (prefix_dir.accessZ("lib", .{})) |_| {
if (prefix_dir.access("lib", .{})) |_| {
try zig_args.appendSlice(&.{
"-L", b.pathJoin(&.{ search_prefix, "lib" }),
});
@ -1712,7 +1712,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
}),
}
if (prefix_dir.accessZ("include", .{})) |_| {
if (prefix_dir.access("include", .{})) |_| {
try zig_args.appendSlice(&.{
"-I", b.pathJoin(&.{ search_prefix, "include" }),
});

View File

@ -532,12 +532,16 @@ const Arg = struct {
test Options {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const io = std.testing.io;
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
var graph: std.Build.Graph = .{
.io = io,
.arena = arena.allocator(),
.cache = .{
.io = io,
.gpa = arena.allocator(),
.manifest_dir = std.fs.cwd(),
},
@ -546,7 +550,7 @@ test Options {
.global_cache_root = .{ .path = "test", .handle = std.fs.cwd() },
.host = .{
.query = .{},
.result = try std.zig.system.resolveTargetQuery(.{}),
.result = try std.zig.system.resolveTargetQuery(io, .{}),
},
.zig_lib_directory = std.Build.Cache.Directory.cwd(),
.time_report = false,

View File

@ -761,6 +761,7 @@ const IndexedOutput = struct {
};
fn make(step: *Step, options: Step.MakeOptions) !void {
const b = step.owner;
const io = b.graph.io;
const arena = b.allocator;
const run: *Run = @fieldParentPtr("step", step);
const has_side_effects = run.hasSideEffects();
@ -834,7 +835,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
defer file.close();
var buf: [1024]u8 = undefined;
var file_reader = file.reader(&buf);
var file_reader = file.reader(io, &buf);
_ = file_reader.interface.streamRemaining(&result.writer) catch |err| switch (err) {
error.ReadFailed => return step.fail(
"failed to read from '{f}': {t}",
@ -1067,6 +1068,7 @@ pub fn rerunInFuzzMode(
) !void {
const step = &run.step;
const b = step.owner;
const io = b.graph.io;
const arena = b.allocator;
var argv_list: std.ArrayList([]const u8) = .empty;
for (run.argv.items) |arg| {
@ -1093,7 +1095,7 @@ pub fn rerunInFuzzMode(
defer file.close();
var buf: [1024]u8 = undefined;
var file_reader = file.reader(&buf);
var file_reader = file.reader(io, &buf);
_ = file_reader.interface.streamRemaining(&result.writer) catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
error.WriteFailed => return error.OutOfMemory,
@ -2090,6 +2092,7 @@ fn sendRunFuzzTestMessage(
fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
const b = run.step.owner;
const io = b.graph.io;
const arena = b.allocator;
try child.spawn();
@ -2113,7 +2116,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
defer file.close();
// TODO https://github.com/ziglang/zig/issues/23955
var read_buffer: [1024]u8 = undefined;
var file_reader = file.reader(&read_buffer);
var file_reader = file.reader(io, &read_buffer);
var write_buffer: [1024]u8 = undefined;
var stdin_writer = child.stdin.?.writer(&write_buffer);
_ = stdin_writer.interface.sendFileAll(&file_reader, .unlimited) catch |err| switch (err) {
@ -2159,7 +2162,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
stdout_bytes = try poller.toOwnedSlice(.stdout);
stderr_bytes = try poller.toOwnedSlice(.stderr);
} else {
var stdout_reader = stdout.readerStreaming(&.{});
var stdout_reader = stdout.readerStreaming(io, &.{});
stdout_bytes = stdout_reader.interface.allocRemaining(arena, run.stdio_limit) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReadFailed => return stdout_reader.err.?,
@ -2167,7 +2170,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
};
}
} else if (child.stderr) |stderr| {
var stderr_reader = stderr.readerStreaming(&.{});
var stderr_reader = stderr.readerStreaming(io, &.{});
stderr_bytes = stderr_reader.interface.allocRemaining(arena, run.stdio_limit) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReadFailed => return stderr_reader.err.?,

View File

@ -3,11 +3,13 @@
//! not be used during the normal build process, but as a utility run by a
//! developer with intention to update source files, which will then be
//! committed to version control.
const UpdateSourceFiles = @This();
const std = @import("std");
const Io = std.Io;
const Step = std.Build.Step;
const fs = std.fs;
const ArrayList = std.ArrayList;
const UpdateSourceFiles = @This();
step: Step,
output_source_files: std.ArrayListUnmanaged(OutputSourceFile),
@ -70,22 +72,21 @@ pub fn addBytesToSource(usf: *UpdateSourceFiles, bytes: []const u8, sub_path: []
fn make(step: *Step, options: Step.MakeOptions) !void {
_ = options;
const b = step.owner;
const io = b.graph.io;
const usf: *UpdateSourceFiles = @fieldParentPtr("step", step);
var any_miss = false;
for (usf.output_source_files.items) |output_source_file| {
if (fs.path.dirname(output_source_file.sub_path)) |dirname| {
b.build_root.handle.makePath(dirname) catch |err| {
return step.fail("unable to make path '{f}{s}': {s}", .{
b.build_root, dirname, @errorName(err),
});
return step.fail("unable to make path '{f}{s}': {t}", .{ b.build_root, dirname, err });
};
}
switch (output_source_file.contents) {
.bytes => |bytes| {
b.build_root.handle.writeFile(.{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| {
return step.fail("unable to write file '{f}{s}': {s}", .{
b.build_root, output_source_file.sub_path, @errorName(err),
return step.fail("unable to write file '{f}{s}': {t}", .{
b.build_root, output_source_file.sub_path, err,
});
};
any_miss = true;
@ -94,15 +95,16 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
if (!step.inputs.populated()) try step.addWatchInput(file_source);
const source_path = file_source.getPath2(b, step);
const prev_status = fs.Dir.updateFile(
fs.cwd(),
const prev_status = Io.Dir.updateFile(
.cwd(),
io,
source_path,
b.build_root.handle,
b.build_root.handle.adaptToNewApi(),
output_source_file.sub_path,
.{},
) catch |err| {
return step.fail("unable to update file from '{s}' to '{f}{s}': {s}", .{
source_path, b.build_root, output_source_file.sub_path, @errorName(err),
return step.fail("unable to update file from '{s}' to '{f}{s}': {t}", .{
source_path, b.build_root, output_source_file.sub_path, err,
});
};
any_miss = any_miss or prev_status == .stale;

View File

@ -2,6 +2,7 @@
//! the local cache which has a set of files that have either been generated
//! during the build, or are copied from the source package.
const std = @import("std");
const Io = std.Io;
const Step = std.Build.Step;
const fs = std.fs;
const ArrayList = std.ArrayList;
@ -174,6 +175,7 @@ fn maybeUpdateName(write_file: *WriteFile) void {
fn make(step: *Step, options: Step.MakeOptions) !void {
_ = options;
const b = step.owner;
const io = b.graph.io;
const arena = b.allocator;
const gpa = arena;
const write_file: *WriteFile = @fieldParentPtr("step", step);
@ -264,40 +266,27 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
};
defer cache_dir.close();
const cwd = fs.cwd();
for (write_file.files.items) |file| {
if (fs.path.dirname(file.sub_path)) |dirname| {
cache_dir.makePath(dirname) catch |err| {
return step.fail("unable to make path '{f}{s}{c}{s}': {s}", .{
b.cache_root, cache_path, fs.path.sep, dirname, @errorName(err),
return step.fail("unable to make path '{f}{s}{c}{s}': {t}", .{
b.cache_root, cache_path, fs.path.sep, dirname, err,
});
};
}
switch (file.contents) {
.bytes => |bytes| {
cache_dir.writeFile(.{ .sub_path = file.sub_path, .data = bytes }) catch |err| {
return step.fail("unable to write file '{f}{s}{c}{s}': {s}", .{
b.cache_root, cache_path, fs.path.sep, file.sub_path, @errorName(err),
return step.fail("unable to write file '{f}{s}{c}{s}': {t}", .{
b.cache_root, cache_path, fs.path.sep, file.sub_path, err,
});
};
},
.copy => |file_source| {
const source_path = file_source.getPath2(b, step);
const prev_status = fs.Dir.updateFile(
cwd,
source_path,
cache_dir,
file.sub_path,
.{},
) catch |err| {
return step.fail("unable to update file from '{s}' to '{f}{s}{c}{s}': {s}", .{
source_path,
b.cache_root,
cache_path,
fs.path.sep,
file.sub_path,
@errorName(err),
const prev_status = Io.Dir.updateFile(.cwd(), io, source_path, cache_dir.adaptToNewApi(), file.sub_path, .{}) catch |err| {
return step.fail("unable to update file from '{s}' to '{f}{s}{c}{s}': {t}", .{
source_path, b.cache_root, cache_path, fs.path.sep, file.sub_path, err,
});
};
// At this point we already will mark the step as a cache miss.
@ -331,10 +320,11 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
switch (entry.kind) {
.directory => try cache_dir.makePath(dest_path),
.file => {
const prev_status = fs.Dir.updateFile(
src_entry_path.root_dir.handle,
const prev_status = Io.Dir.updateFile(
src_entry_path.root_dir.handle.adaptToNewApi(),
io,
src_entry_path.sub_path,
cache_dir,
cache_dir.adaptToNewApi(),
dest_path,
.{},
) catch |err| {

View File

@ -2,15 +2,16 @@ gpa: Allocator,
thread_pool: *std.Thread.Pool,
graph: *const Build.Graph,
all_steps: []const *Build.Step,
listen_address: std.net.Address,
ttyconf: std.Io.tty.Config,
listen_address: net.IpAddress,
ttyconf: Io.tty.Config,
root_prog_node: std.Progress.Node,
watch: bool,
tcp_server: ?std.net.Server,
tcp_server: ?net.Server,
serve_thread: ?std.Thread,
base_timestamp: i128,
/// Uses `Io.Clock.awake`.
base_timestamp: Io.Timestamp,
/// The "step name" data which trails `abi.Hello`, for the steps in `all_steps`.
step_names_trailing: []u8,
@ -42,6 +43,8 @@ runner_request: ?RunnerRequest,
/// on a fixed interval of this many milliseconds.
const default_update_interval_ms = 500;
pub const base_clock: Io.Clock = .awake;
/// Thread-safe. Triggers updates to be sent to connected WebSocket clients; see `update_id`.
pub fn notifyUpdate(ws: *WebServer) void {
_ = ws.update_id.rmw(.Add, 1, .release);
@ -53,15 +56,17 @@ pub const Options = struct {
thread_pool: *std.Thread.Pool,
graph: *const std.Build.Graph,
all_steps: []const *Build.Step,
ttyconf: std.Io.tty.Config,
ttyconf: Io.tty.Config,
root_prog_node: std.Progress.Node,
watch: bool,
listen_address: std.net.Address,
listen_address: net.IpAddress,
base_timestamp: Io.Clock.Timestamp,
};
pub fn init(opts: Options) WebServer {
// The upcoming `std.Io` interface should allow us to use `Io.async` and `Io.concurrent`
// The upcoming `Io` interface should allow us to use `Io.async` and `Io.concurrent`
// instead of threads, so that the web server can function in single-threaded builds.
comptime assert(!builtin.single_threaded);
assert(opts.base_timestamp.clock == base_clock);
const all_steps = opts.all_steps;
@ -106,7 +111,7 @@ pub fn init(opts: Options) WebServer {
.tcp_server = null,
.serve_thread = null,
.base_timestamp = std.time.nanoTimestamp(),
.base_timestamp = opts.base_timestamp.raw,
.step_names_trailing = step_names_trailing,
.step_status_bits = step_status_bits,
@ -147,32 +152,34 @@ pub fn deinit(ws: *WebServer) void {
pub fn start(ws: *WebServer) error{AlreadyReported}!void {
assert(ws.tcp_server == null);
assert(ws.serve_thread == null);
const io = ws.graph.io;
ws.tcp_server = ws.listen_address.listen(.{ .reuse_address = true }) catch |err| {
ws.tcp_server = ws.listen_address.listen(io, .{ .reuse_address = true }) catch |err| {
log.err("failed to listen to port {d}: {s}", .{ ws.listen_address.getPort(), @errorName(err) });
return error.AlreadyReported;
};
ws.serve_thread = std.Thread.spawn(.{}, serve, .{ws}) catch |err| {
log.err("unable to spawn web server thread: {s}", .{@errorName(err)});
ws.tcp_server.?.deinit();
ws.tcp_server.?.deinit(io);
ws.tcp_server = null;
return error.AlreadyReported;
};
log.info("web interface listening at http://{f}/", .{ws.tcp_server.?.listen_address});
log.info("web interface listening at http://{f}/", .{ws.tcp_server.?.socket.address});
if (ws.listen_address.getPort() == 0) {
log.info("hint: pass '--webui={f}' to use the same port next time", .{ws.tcp_server.?.listen_address});
log.info("hint: pass '--webui={f}' to use the same port next time", .{ws.tcp_server.?.socket.address});
}
}
fn serve(ws: *WebServer) void {
const io = ws.graph.io;
while (true) {
const connection = ws.tcp_server.?.accept() catch |err| {
var stream = ws.tcp_server.?.accept(io) catch |err| {
log.err("failed to accept connection: {s}", .{@errorName(err)});
return;
};
_ = std.Thread.spawn(.{}, accept, .{ ws, connection }) catch |err| {
_ = std.Thread.spawn(.{}, accept, .{ ws, stream }) catch |err| {
log.err("unable to spawn connection thread: {s}", .{@errorName(err)});
connection.stream.close();
stream.close(io);
continue;
};
}
@ -227,6 +234,7 @@ pub fn finishBuild(ws: *WebServer, opts: struct {
ws.fuzz = Fuzz.init(
ws.gpa,
ws.graph.io,
ws.thread_pool,
ws.all_steps,
ws.root_prog_node,
@ -241,17 +249,24 @@ pub fn finishBuild(ws: *WebServer, opts: struct {
}
pub fn now(s: *const WebServer) i64 {
return @intCast(std.time.nanoTimestamp() - s.base_timestamp);
const io = s.graph.io;
const ts = base_clock.now(io) catch s.base_timestamp;
return @intCast(s.base_timestamp.durationTo(ts).toNanoseconds());
}
fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
defer connection.stream.close();
fn accept(ws: *WebServer, stream: net.Stream) void {
const io = ws.graph.io;
defer {
// `net.Stream.close` wants to helpfully overwrite `stream` with
// `undefined`, but it cannot do so since it is an immutable parameter.
var copy = stream;
copy.close(io);
}
var send_buffer: [4096]u8 = undefined;
var recv_buffer: [4096]u8 = undefined;
var connection_reader = connection.stream.reader(&recv_buffer);
var connection_writer = connection.stream.writer(&send_buffer);
var server: http.Server = .init(connection_reader.interface(), &connection_writer.interface);
var connection_reader = stream.reader(io, &recv_buffer);
var connection_writer = stream.writer(io, &send_buffer);
var server: http.Server = .init(&connection_reader.interface, &connection_writer.interface);
while (true) {
var request = server.receiveHead() catch |err| switch (err) {
@ -466,12 +481,9 @@ pub fn serveFile(
},
});
}
pub fn serveTarFile(
ws: *WebServer,
request: *http.Server.Request,
paths: []const Cache.Path,
) !void {
pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []const Cache.Path) !void {
const gpa = ws.gpa;
const io = ws.graph.io;
var send_buffer: [0x4000]u8 = undefined;
var response = try request.respondStreaming(&send_buffer, .{
@ -496,7 +508,7 @@ pub fn serveTarFile(
defer file.close();
const stat = try file.stat();
var read_buffer: [1024]u8 = undefined;
var file_reader: std.fs.File.Reader = .initSize(file, &read_buffer, stat.size);
var file_reader: Io.File.Reader = .initSize(file.adaptToNewApi(), io, &read_buffer, stat.size);
// TODO: this logic is completely bogus -- obviously so, because `path.root_dir.path` can
// be cwd-relative. This is also related to why linkification doesn't work in the fuzzer UI:
@ -508,7 +520,7 @@ pub fn serveTarFile(
if (cached_cwd_path == null) cached_cwd_path = try std.process.getCwdAlloc(gpa);
break :cwd cached_cwd_path.?;
};
try archiver.writeFile(path.sub_path, &file_reader, stat.mtime);
try archiver.writeFile(path.sub_path, &file_reader, @intCast(stat.mtime.toSeconds()));
}
// intentionally not calling `archiver.finishPedantically`
@ -516,6 +528,7 @@ pub fn serveTarFile(
}
fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.OptimizeMode) !Cache.Path {
const io = ws.graph.io;
const root_name = "build-web";
const arch_os_abi = "wasm32-freestanding";
const cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext";
@ -565,7 +578,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim
child.stderr_behavior = .Pipe;
try child.spawn();
var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{
var poller = Io.poll(gpa, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
@ -659,7 +672,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim
};
const bin_name = try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
.target = &(std.zig.system.resolveTargetQuery(std.Build.parseTargetQuery(.{
.target = &(std.zig.system.resolveTargetQuery(io, std.Build.parseTargetQuery(.{
.arch_os_abi = arch_os_abi,
.cpu_features = cpu_features,
}) catch unreachable) catch unreachable),
@ -841,7 +854,10 @@ const cache_control_header: http.Header = .{
};
const builtin = @import("builtin");
const std = @import("std");
const Io = std.Io;
const net = std.Io.net;
const assert = std.debug.assert;
const mem = std.mem;
const log = std.log.scoped(.web_server);

File diff suppressed because it is too large

392
lib/std/Io/Dir.zig Normal file
View File

@ -0,0 +1,392 @@
const Dir = @This();
const builtin = @import("builtin");
const native_os = builtin.os.tag;
const std = @import("../std.zig");
const Io = std.Io;
const File = Io.File;
handle: Handle,
pub const Mode = Io.File.Mode;
pub const default_mode: Mode = 0o755;
/// Returns a handle to the current working directory.
///
/// It is not opened with iteration capability. Iterating over the result is
/// illegal behavior.
///
/// Closing the returned `Dir` is checked illegal behavior.
///
/// On POSIX targets, this function is comptime-callable.
pub fn cwd() Dir {
return switch (native_os) {
.windows => .{ .handle = std.os.windows.peb().ProcessParameters.CurrentDirectory.Handle },
.wasi => .{ .handle = std.options.wasiCwd() },
else => .{ .handle = std.posix.AT.FDCWD },
};
}
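
An illustrative sketch (not part of the patch) of pairing `cwd()` with the `std.Io.Threaded` implementation used elsewhere in this change; the file name is hypothetical.

const std = @import("std");

fn readmeSize(gpa: std.mem.Allocator) !u64 {
    var threaded: std.Io.Threaded = .init(gpa);
    defer threaded.deinit();
    const io = threaded.io();

    // `cwd()` needs no cleanup; closing it is checked illegal behavior.
    const file = try std.Io.Dir.cwd().openFile(io, "README.md", .{});
    defer file.close(io);
    return (try file.stat(io)).size;
}
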
pub const Handle = std.posix.fd_t;
pub const PathNameError = error{
NameTooLong,
/// File system cannot encode the requested file name bytes.
/// Could be due to invalid WTF-8 on Windows, invalid UTF-8 on WASI,
/// invalid characters on Windows, etc. Filesystem and operating system specific.
BadPathName,
};
pub const AccessError = error{
AccessDenied,
PermissionDenied,
FileNotFound,
InputOutput,
SystemResources,
FileBusy,
SymLinkLoop,
ReadOnlyFileSystem,
} || PathNameError || Io.Cancelable || Io.UnexpectedError;
pub const AccessOptions = packed struct {
follow_symlinks: bool = true,
read: bool = false,
write: bool = false,
execute: bool = false,
};
/// Test accessing `sub_path`.
///
/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
///
/// Be careful of Time-Of-Check-Time-Of-Use race conditions when using this
/// function. For example, instead of testing if a file exists and then opening
/// it, just open it and handle the error for file not found.
pub fn access(dir: Dir, io: Io, sub_path: []const u8, options: AccessOptions) AccessError!void {
return io.vtable.dirAccess(io.userdata, dir, sub_path, options);
}
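
A hedged sketch of the pattern the doc comment above recommends: rather than calling `access` and then opening (a check-then-use race), open directly and handle `error.FileNotFound`. The path and wrapper function are hypothetical.

const std = @import("std");

fn openIfExists(io: std.Io, dir: std.Io.Dir) !?std.Io.File {
    // Opening directly avoids the TOCTOU race that `access` invites.
    const file = dir.openFile(io, "config.ini", .{}) catch |err| switch (err) {
        error.FileNotFound => return null,
        else => |e| return e,
    };
    return file;
}
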
pub const OpenError = error{
FileNotFound,
NotDir,
AccessDenied,
PermissionDenied,
SymLinkLoop,
ProcessFdQuotaExceeded,
SystemFdQuotaExceeded,
NoDevice,
SystemResources,
DeviceBusy,
/// On Windows, `\\server` or `\\server\share` was not found.
NetworkNotFound,
} || PathNameError || Io.Cancelable || Io.UnexpectedError;
pub const OpenOptions = struct {
/// `true` means the opened directory can be used as the `Dir` parameter
/// for functions which operate based on an open directory handle. When `false`,
/// such operations are Illegal Behavior.
access_sub_paths: bool = true,
/// `true` means the opened directory can be scanned for the files and sub-directories
/// of the result. It means the `iterate` function can be called.
iterate: bool = false,
/// `false` means it won't dereference the symlinks.
follow_symlinks: bool = true,
};
/// Opens a directory at the given path. The directory is a system resource that remains
/// open until `close` is called on the result.
///
/// The directory cannot be iterated unless the `iterate` option is set to `true`.
///
/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
pub fn openDir(dir: Dir, io: Io, sub_path: []const u8, options: OpenOptions) OpenError!Dir {
return io.vtable.dirOpenDir(io.userdata, dir, sub_path, options);
}
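
A brief sketch, assuming a hypothetical `src` subdirectory: open a sub-directory handle and release it with `close`.

const std = @import("std");

fn statSrcDir(io: std.Io, parent: std.Io.Dir) !std.Io.Dir.Stat {
    // `iterate` stays false because the handle is not scanned here.
    const sub = try parent.openDir(io, "src", .{});
    defer sub.close(io);
    return sub.stat(io);
}
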
pub fn close(dir: Dir, io: Io) void {
return io.vtable.dirClose(io.userdata, dir);
}
/// Opens a file for reading or writing, without attempting to create a new file.
///
/// To create a new file, see `createFile`.
///
/// Allocates a resource to be released with `File.close`.
///
/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
pub fn openFile(dir: Dir, io: Io, sub_path: []const u8, flags: File.OpenFlags) File.OpenError!File {
return io.vtable.dirOpenFile(io.userdata, dir, sub_path, flags);
}
/// Creates, opens, or overwrites a file with write access.
///
/// Allocates a resource to be deallocated with `File.close`.
///
/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
pub fn createFile(dir: Dir, io: Io, sub_path: []const u8, flags: File.CreateFlags) File.OpenError!File {
return io.vtable.dirCreateFile(io.userdata, dir, sub_path, flags);
}
pub const WriteFileOptions = struct {
/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
sub_path: []const u8,
data: []const u8,
flags: File.CreateFlags = .{},
};
pub const WriteFileError = File.WriteError || File.OpenError || Io.Cancelable;
/// Writes content to the file system, using the file creation flags provided.
pub fn writeFile(dir: Dir, io: Io, options: WriteFileOptions) WriteFileError!void {
var file = try dir.createFile(io, options.sub_path, options.flags);
defer file.close(io);
try file.writeAll(io, options.data);
}
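
A minimal usage sketch of the one-call write path; the file name and contents are hypothetical.

const std = @import("std");

fn writeNotes(io: std.Io, dir: std.Io.Dir) !void {
    // createFile + writeAll + close in a single helper.
    try dir.writeFile(io, .{
        .sub_path = "notes.txt",
        .data = "hello from std.Io.Dir\n",
    });
}
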
pub const PrevStatus = enum {
stale,
fresh,
};
pub const UpdateFileError = File.OpenError;
/// Check the file size, mtime, and mode of `source_path` and `dest_path`. If
/// they are equal, does nothing. Otherwise, atomically copies `source_path` to
/// `dest_path`, creating the parent directory hierarchy as needed. The
/// destination file gains the mtime, atime, and mode of the source file so
/// that the next call to `updateFile` will not need a copy.
///
/// Returns the previous status of the file before updating.
///
/// * On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// * On WASI, both paths should be encoded as valid UTF-8.
/// * On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
pub fn updateFile(
source_dir: Dir,
io: Io,
source_path: []const u8,
dest_dir: Dir,
/// If directories in this path do not exist, they are created.
dest_path: []const u8,
options: std.fs.Dir.CopyFileOptions,
) !PrevStatus {
var src_file = try source_dir.openFile(io, source_path, .{});
defer src_file.close(io);
const src_stat = try src_file.stat(io);
const actual_mode = options.override_mode orelse src_stat.mode;
check_dest_stat: {
const dest_stat = blk: {
var dest_file = dest_dir.openFile(io, dest_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :check_dest_stat,
else => |e| return e,
};
defer dest_file.close(io);
break :blk try dest_file.stat(io);
};
if (src_stat.size == dest_stat.size and
src_stat.mtime.nanoseconds == dest_stat.mtime.nanoseconds and
actual_mode == dest_stat.mode)
{
return .fresh;
}
}
if (std.fs.path.dirname(dest_path)) |dirname| {
try dest_dir.makePath(io, dirname);
}
var buffer: [1000]u8 = undefined; // Used only when direct fd-to-fd is not available.
var atomic_file = try std.fs.Dir.atomicFile(.adaptFromNewApi(dest_dir), dest_path, .{
.mode = actual_mode,
.write_buffer = &buffer,
});
defer atomic_file.deinit();
var src_reader: File.Reader = .initSize(src_file, io, &.{}, src_stat.size);
const dest_writer = &atomic_file.file_writer.interface;
_ = dest_writer.sendFileAll(&src_reader, .unlimited) catch |err| switch (err) {
error.ReadFailed => return src_reader.err.?,
error.WriteFailed => return atomic_file.file_writer.err.?,
};
try atomic_file.flush();
try atomic_file.file_writer.file.updateTimes(src_stat.atime, src_stat.mtime);
try atomic_file.renameIntoPlace();
return .stale;
}
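
A hedged usage sketch: copy only when the metadata check says the destination is stale. The paths are hypothetical.

const std = @import("std");

fn syncAsset(io: std.Io, src: std.Io.Dir, dest: std.Io.Dir) !bool {
    const status = try std.Io.Dir.updateFile(src, io, "asset.bin", dest, "out/asset.bin", .{});
    // `.fresh` means size, mtime, and mode already matched; nothing was copied.
    return status == .stale;
}
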
pub const ReadFileError = File.OpenError || File.Reader.Error;
/// Read all of file contents using a preallocated buffer.
///
/// The returned slice has the same pointer as `buffer`. If the length matches `buffer.len`
/// the situation is ambiguous. It could either mean that the entire file was read, and
/// it exactly fits the buffer, or it could mean the buffer was not big enough for the
/// entire file.
///
/// * On Windows, `file_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// * On WASI, `file_path` should be encoded as valid UTF-8.
/// * On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
pub fn readFile(dir: Dir, io: Io, file_path: []const u8, buffer: []u8) ReadFileError![]u8 {
var file = try dir.openFile(io, file_path, .{});
defer file.close(io);
var reader = file.reader(io, &.{});
const n = reader.interface.readSliceShort(buffer) catch |err| switch (err) {
error.ReadFailed => return reader.err.?,
};
return buffer[0..n];
}
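
A short sketch of the ambiguity the comment above describes: a result that fills the buffer may be a truncated read. The path is hypothetical.

const std = @import("std");

fn readVersion(io: std.Io, dir: std.Io.Dir, buffer: []u8) ![]u8 {
    const contents = try dir.readFile(io, "version.txt", buffer);
    // A full buffer is ambiguous; treat it as "file too big" here.
    if (contents.len == buffer.len) return error.FileTooBig;
    return contents;
}
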
pub const MakeError = error{
/// In WASI, this error may occur when the file descriptor does
/// not hold the required rights to create a new directory relative to it.
AccessDenied,
PermissionDenied,
DiskQuota,
PathAlreadyExists,
SymLinkLoop,
LinkQuotaExceeded,
FileNotFound,
SystemResources,
NoSpaceLeft,
NotDir,
ReadOnlyFileSystem,
NoDevice,
/// On Windows, `\\server` or `\\server\share` was not found.
NetworkNotFound,
} || PathNameError || Io.Cancelable || Io.UnexpectedError;
/// Creates a single directory with a relative or absolute path.
///
/// * On Windows, `sub_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// * On WASI, `sub_path` should be encoded as valid UTF-8.
/// * On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
///
/// Related:
/// * `makePath`
/// * `makeDirAbsolute`
pub fn makeDir(dir: Dir, io: Io, sub_path: []const u8) MakeError!void {
return io.vtable.dirMake(io.userdata, dir, sub_path, default_mode);
}
pub const MakePathError = MakeError || StatPathError;
/// Calls makeDir iteratively to make an entire path, creating any parent
/// directories that do not exist.
///
/// Returns success if the path already exists and is a directory.
///
/// This function is not atomic, and if it returns an error, the file system
/// may have been modified regardless.
///
/// Fails on an empty path with `error.BadPathName` as that is not a path that
/// can be created.
///
/// On Windows, `sub_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
///
/// Paths containing `..` components are handled differently depending on the platform:
/// - On Windows, `..` are resolved before the path is passed to NtCreateFile, meaning
/// a `sub_path` like "first/../second" will resolve to "second" and only a
/// `./second` directory will be created.
/// - On other platforms, `..` are not resolved before the path is passed to `mkdirat`,
/// meaning a `sub_path` like "first/../second" will create both a `./first`
/// and a `./second` directory.
pub fn makePath(dir: Dir, io: Io, sub_path: []const u8) MakePathError!void {
_ = try makePathStatus(dir, io, sub_path);
}
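
A minimal sketch; the nested path is hypothetical and may partially exist already.

const std = @import("std");

fn ensureOutputTree(io: std.Io, dir: std.Io.Dir) !void {
    // Succeeds whether or not the intermediate directories already exist.
    try dir.makePath(io, "zig-out/include/generated");
}
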
pub const MakePathStatus = enum { existed, created };
/// Same as `makePath` except returns whether the path already existed or was
/// successfully created.
pub fn makePathStatus(dir: Dir, io: Io, sub_path: []const u8) MakePathError!MakePathStatus {
var it = try std.fs.path.componentIterator(sub_path);
var status: MakePathStatus = .existed;
var component = it.last() orelse return error.BadPathName;
while (true) {
if (makeDir(dir, io, component.path)) |_| {
status = .created;
} else |err| switch (err) {
error.PathAlreadyExists => {
// stat the file and return an error if it's not a directory
// this is important because otherwise a dangling symlink
// could cause an infinite loop
check_dir: {
// workaround for windows, see https://github.com/ziglang/zig/issues/16738
const fstat = statPath(dir, io, component.path, .{}) catch |stat_err| switch (stat_err) {
error.IsDir => break :check_dir,
else => |e| return e,
};
if (fstat.kind != .directory) return error.NotDir;
}
},
error.FileNotFound => |e| {
component = it.previous() orelse return e;
continue;
},
else => |e| return e,
}
component = it.next() orelse return status;
}
}
pub const MakeOpenPathError = MakeError || OpenError || StatPathError;
/// Performs the equivalent of `makePath` followed by `openDir`, atomically if possible.
///
/// When this operation is canceled, it may leave the file system in a
/// partially modified state.
///
/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, `sub_path` should be encoded as valid UTF-8.
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
pub fn makeOpenPath(dir: Dir, io: Io, sub_path: []const u8, options: OpenOptions) MakeOpenPathError!Dir {
return io.vtable.dirMakeOpenPath(io.userdata, dir, sub_path, options);
}
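
A sketch combining `makeOpenPath` with `writeFile`; the cache sub-path is hypothetical.

const std = @import("std");

fn writeIntoCache(io: std.Io, cache_root: std.Io.Dir) !void {
    const out_dir = try cache_root.makeOpenPath(io, "o/1234abcd", .{});
    defer out_dir.close(io);
    try out_dir.writeFile(io, .{ .sub_path = "result.txt", .data = "ok\n" });
}
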
pub const Stat = File.Stat;
pub const StatError = File.StatError;
pub fn stat(dir: Dir, io: Io) StatError!Stat {
return io.vtable.dirStat(io.userdata, dir);
}
pub const StatPathError = File.OpenError || File.StatError;
pub const StatPathOptions = struct {
follow_symlinks: bool = true,
};
/// Returns metadata for a file inside the directory.
///
/// On Windows, this requires three syscalls. On other operating systems, it
/// only takes one.
///
/// Symlinks are followed.
///
/// `sub_path` may be absolute, in which case `dir` is ignored.
///
/// * On Windows, `sub_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// * On WASI, `sub_path` should be encoded as valid UTF-8.
/// * On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
pub fn statPath(dir: Dir, io: Io, sub_path: []const u8, options: StatPathOptions) StatPathError!Stat {
return io.vtable.dirStatPath(io.userdata, dir, sub_path, options);
}
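
A sketch of a single metadata query on a path; the file name and error name are hypothetical.

const std = @import("std");

fn fileSize(io: std.Io, dir: std.Io.Dir) !u64 {
    const st = try dir.statPath(io, "README.md", .{});
    if (st.kind != .file) return error.NotAFile;
    return st.size;
}
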

659
lib/std/Io/File.zig Normal file
View File

@ -0,0 +1,659 @@
const File = @This();
const builtin = @import("builtin");
const native_os = builtin.os.tag;
const is_windows = native_os == .windows;
const std = @import("../std.zig");
const Io = std.Io;
const assert = std.debug.assert;
handle: Handle,
pub const Handle = std.posix.fd_t;
pub const Mode = std.posix.mode_t;
pub const INode = std.posix.ino_t;
pub const Kind = enum {
block_device,
character_device,
directory,
named_pipe,
sym_link,
file,
unix_domain_socket,
whiteout,
door,
event_port,
unknown,
};
pub const Stat = struct {
/// A number that the system uses to point to the file metadata. This
/// number is not guaranteed to be unique across time, as some file
/// systems may reuse an inode after its file has been deleted. Some
/// systems may change the inode of a file over time.
///
/// On Linux, the inode is a structure that stores the metadata, and
/// the inode _number_ is what you see here: the index number of the
/// inode.
///
/// The FileIndex on Windows is similar. It is a number for a file that
/// is unique to each filesystem.
inode: INode,
size: u64,
/// This is available on POSIX systems and is always 0 otherwise.
mode: Mode,
kind: Kind,
/// Last access time in nanoseconds, relative to UTC 1970-01-01.
atime: Io.Timestamp,
/// Last modification time in nanoseconds, relative to UTC 1970-01-01.
mtime: Io.Timestamp,
/// Last status/metadata change time in nanoseconds, relative to UTC 1970-01-01.
ctime: Io.Timestamp,
};
pub fn stdout() File {
return .{ .handle = if (is_windows) std.os.windows.peb().ProcessParameters.hStdOutput else std.posix.STDOUT_FILENO };
}
pub fn stderr() File {
return .{ .handle = if (is_windows) std.os.windows.peb().ProcessParameters.hStdError else std.posix.STDERR_FILENO };
}
pub fn stdin() File {
return .{ .handle = if (is_windows) std.os.windows.peb().ProcessParameters.hStdInput else std.posix.STDIN_FILENO };
}
pub const StatError = error{
SystemResources,
/// In WASI, this error may occur when the file descriptor does
/// not hold the required rights to get its filestat information.
AccessDenied,
PermissionDenied,
/// Attempted to stat a non-file stream.
Streaming,
} || Io.Cancelable || Io.UnexpectedError;
/// Returns `Stat` containing basic information about the `File`.
pub fn stat(file: File, io: Io) StatError!Stat {
return io.vtable.fileStat(io.userdata, file);
}
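
A short sketch printing a few `Stat` fields; the formatting is only illustrative.

const std = @import("std");

fn printStat(io: std.Io, file: std.Io.File) !void {
    const st = try file.stat(io);
    std.debug.print("size={d} kind={s} mtime={d}ns\n", .{
        st.size, @tagName(st.kind), st.mtime.nanoseconds,
    });
}
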
pub const OpenMode = enum {
read_only,
write_only,
read_write,
};
pub const Lock = enum {
none,
shared,
exclusive,
};
pub const OpenFlags = struct {
mode: OpenMode = .read_only,
/// Open the file with an advisory lock to coordinate with other processes
/// accessing it at the same time. An exclusive lock will prevent other
/// processes from acquiring a lock. A shared lock will prevent other
/// processes from acquiring an exclusive lock, but does not prevent
/// other processes from getting their own shared locks.
///
/// The lock is advisory, except on Linux in very specific circumstances[1].
/// This means that a process that does not respect the locking API can still get access
/// to the file, despite the lock.
///
/// On these operating systems, the lock is acquired atomically with
/// opening the file:
/// * Darwin
/// * DragonFlyBSD
/// * FreeBSD
/// * Haiku
/// * NetBSD
/// * OpenBSD
/// On these operating systems, the lock is acquired via a separate syscall
/// after opening the file:
/// * Linux
/// * Windows
///
/// [1]: https://www.kernel.org/doc/Documentation/filesystems/mandatory-locking.txt
lock: Lock = .none,
/// Controls whether opening waits for the lock. If set to true,
/// `error.WouldBlock` is returned instead of waiting when the lock cannot be
/// acquired immediately. Otherwise, the call waits until the lock is available to proceed.
lock_nonblocking: bool = false,
/// Set this to allow the opened file to automatically become the
/// controlling TTY for the current process.
allow_ctty: bool = false,
follow_symlinks: bool = true,
pub fn isRead(self: OpenFlags) bool {
return self.mode != .write_only;
}
pub fn isWrite(self: OpenFlags) bool {
return self.mode != .read_only;
}
};
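
A hedged sketch of taking an exclusive advisory lock without blocking, assuming the lock file already exists; the file name is hypothetical.

const std = @import("std");

fn tryLock(io: std.Io, dir: std.Io.Dir) !?std.Io.File {
    const file = dir.openFile(io, "state.lock", .{
        .mode = .read_write,
        .lock = .exclusive,
        .lock_nonblocking = true,
    }) catch |err| switch (err) {
        // Another process already holds the lock.
        error.WouldBlock => return null,
        else => |e| return e,
    };
    return file;
}
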
pub const CreateFlags = std.fs.File.CreateFlags;
pub const OpenError = error{
SharingViolation,
PipeBusy,
NoDevice,
/// On Windows, `\\server` or `\\server\share` was not found.
NetworkNotFound,
ProcessNotFound,
/// On Windows, antivirus software is enabled by default. It can be
/// disabled, but Windows Update sometimes ignores the user's preference
/// and re-enables it. When enabled, antivirus software on Windows
/// intercepts file system operations and makes them significantly slower
/// in addition to possibly failing with this error code.
AntivirusInterference,
/// In WASI, this error may occur when the file descriptor does
/// not hold the required rights to open a new resource relative to it.
AccessDenied,
PermissionDenied,
SymLinkLoop,
ProcessFdQuotaExceeded,
SystemFdQuotaExceeded,
/// Either:
/// * One of the path components does not exist.
/// * Cwd was used, but cwd has been deleted.
/// * The path associated with the open directory handle has been deleted.
/// * On macOS, multiple processes or threads raced to create the same file
/// with `O.EXCL` set to `false`.
FileNotFound,
/// Insufficient kernel memory was available, or
/// the named file is a FIFO and per-user hard limit on
/// memory allocation for pipes has been reached.
SystemResources,
/// The file is too large to be opened. This error is unreachable
/// for 64-bit targets, as well as when opening directories.
FileTooBig,
/// The path refers to directory but the `DIRECTORY` flag was not provided.
IsDir,
/// A new path cannot be created because the device has no room for the new file.
/// This error is only reachable when the `CREAT` flag is provided.
NoSpaceLeft,
/// A component used as a directory in the path was not, in fact, a directory, or
/// `DIRECTORY` was specified and the path was not a directory.
NotDir,
/// The path already exists and the `CREAT` and `EXCL` flags were provided.
PathAlreadyExists,
DeviceBusy,
FileLocksNotSupported,
/// One of these three things:
/// * pathname refers to an executable image which is currently being
/// executed and write access was requested.
/// * pathname refers to a file that is currently in use as a swap
/// file, and the O_TRUNC flag was specified.
/// * pathname refers to a file that is currently being read by the
/// kernel (e.g., for module/firmware loading), and write access was
/// requested.
FileBusy,
/// Non-blocking was requested and the operation cannot return immediately.
WouldBlock,
} || Io.Dir.PathNameError || Io.Cancelable || Io.UnexpectedError;
pub fn close(file: File, io: Io) void {
return io.vtable.fileClose(io.userdata, file);
}
pub const OpenSelfExeError = OpenError || std.fs.SelfExePathError || std.posix.FlockError;
pub fn openSelfExe(io: Io, flags: OpenFlags) OpenSelfExeError!File {
return io.vtable.openSelfExe(io.userdata, flags);
}
pub const ReadPositionalError = Reader.Error || error{Unseekable};
pub fn readPositional(file: File, io: Io, buffer: []u8, offset: u64) ReadPositionalError!usize {
return io.vtable.fileReadPositional(io.userdata, file, buffer, offset);
}
pub const WriteStreamingError = error{} || Io.UnexpectedError || Io.Cancelable;
pub fn writeStreaming(file: File, io: Io, buffer: [][]const u8) WriteStreamingError!usize {
return file.fileWriteStreaming(io, buffer);
}
pub const WritePositionalError = WriteStreamingError || error{Unseekable};
pub fn writePositional(file: File, io: Io, buffer: [][]const u8, offset: u64) WritePositionalError!usize {
return io.vtable.fileWritePositional(io.userdata, file, buffer, offset);
}
pub fn openAbsolute(io: Io, absolute_path: []const u8, flags: OpenFlags) OpenError!File {
assert(std.fs.path.isAbsolute(absolute_path));
return Io.Dir.cwd().openFile(io, absolute_path, flags);
}
/// Defaults to positional reading; falls back to streaming.
///
/// Positional is more threadsafe, since the global seek position is not
/// affected.
pub fn reader(file: File, io: Io, buffer: []u8) Reader {
return .init(file, io, buffer);
}
/// Positional is more threadsafe, since the global seek position is not
/// affected, but when such syscalls are not available, preemptively
/// initializing in streaming mode skips a failed syscall.
pub fn readerStreaming(file: File, io: Io, buffer: []u8) Reader {
return .initStreaming(file, io, buffer);
}
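
A sketch of reading a whole file through the `Reader` interface, following the `allocRemaining` pattern used elsewhere in this diff; the path is hypothetical.

const std = @import("std");

fn readAll(io: std.Io, dir: std.Io.Dir, gpa: std.mem.Allocator) ![]u8 {
    const file = try dir.openFile(io, "data.json", .{});
    defer file.close(io);

    var buffer: [4096]u8 = undefined;
    // Positional by default; falls back to streaming for pipes and sockets.
    var file_reader = file.reader(io, &buffer);
    return file_reader.interface.allocRemaining(gpa, .unlimited) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.ReadFailed => return file_reader.err.?,
    };
}
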
pub const SeekError = error{
Unseekable,
/// The file descriptor does not hold the required rights to seek on it.
AccessDenied,
} || Io.Cancelable || Io.UnexpectedError;
/// Memoizes key information about a file handle such as:
/// * The size from calling stat, or the error that occurred therein.
/// * The current seek position.
/// * The error that occurred when trying to seek.
/// * Whether reading should be done positionally or streaming.
/// * Whether reading should be done via fd-to-fd syscalls (e.g. `sendfile`)
/// versus plain variants (e.g. `read`).
///
/// Fulfills the `Io.Reader` interface.
pub const Reader = struct {
io: Io,
file: File,
err: ?Error = null,
mode: Reader.Mode = .positional,
/// Tracks the true seek position in the file. To obtain the logical
/// position, use `logicalPos`.
pos: u64 = 0,
size: ?u64 = null,
size_err: ?SizeError = null,
seek_err: ?Reader.SeekError = null,
interface: Io.Reader,
pub const Error = error{
InputOutput,
SystemResources,
IsDir,
BrokenPipe,
ConnectionResetByPeer,
Timeout,
/// In WASI, EBADF is mapped to this error because it is returned when
/// trying to read a directory file descriptor as if it were a file.
NotOpenForReading,
SocketUnconnected,
/// This error occurs when no global event loop is configured,
/// and reading from the file descriptor would block.
WouldBlock,
/// In WASI, this error occurs when the file descriptor does
/// not hold the required rights to read from it.
AccessDenied,
/// This error occurs in Linux if the process to be read from
/// no longer exists.
ProcessNotFound,
/// Unable to read file due to lock.
LockViolation,
} || Io.Cancelable || Io.UnexpectedError;
pub const SizeError = std.os.windows.GetFileSizeError || StatError || error{
/// Occurs if, for example, the file handle is a network socket and therefore does not have a size.
Streaming,
};
pub const SeekError = File.SeekError || error{
/// Seeking fell back to reading, and reached the end before the requested seek position.
/// `pos` remains at the end of the file.
EndOfStream,
/// Seeking fell back to reading, which failed.
ReadFailed,
};
pub const Mode = enum {
streaming,
positional,
/// Avoid syscalls other than `read` and `readv`.
streaming_reading,
/// Avoid syscalls other than `pread` and `preadv`.
positional_reading,
/// Indicates reading cannot continue because of a seek failure.
failure,
pub fn toStreaming(m: @This()) @This() {
return switch (m) {
.positional, .streaming => .streaming,
.positional_reading, .streaming_reading => .streaming_reading,
.failure => .failure,
};
}
pub fn toReading(m: @This()) @This() {
return switch (m) {
.positional, .positional_reading => .positional_reading,
.streaming, .streaming_reading => .streaming_reading,
.failure => .failure,
};
}
};
pub fn initInterface(buffer: []u8) Io.Reader {
return .{
.vtable = &.{
.stream = Reader.stream,
.discard = Reader.discard,
.readVec = Reader.readVec,
},
.buffer = buffer,
.seek = 0,
.end = 0,
};
}
pub fn init(file: File, io: Io, buffer: []u8) Reader {
return .{
.io = io,
.file = file,
.interface = initInterface(buffer),
};
}
/// Takes a legacy `std.fs.File` to help with upgrading.
pub fn initAdapted(file: std.fs.File, io: Io, buffer: []u8) Reader {
return .init(.{ .handle = file.handle }, io, buffer);
}
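// Upgrade sketch (illustrative only; assumes `io: Io` in scope, and "data.bin"
// is a placeholder path opened through the legacy `std.fs` API):
//
//     const legacy_file = try std.fs.cwd().openFile("data.bin", .{});
//     defer legacy_file.close();
//     var buffer: [4096]u8 = undefined;
//     var file_reader: Reader = .initAdapted(legacy_file, io, &buffer);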
pub fn initSize(file: File, io: Io, buffer: []u8, size: ?u64) Reader {
return .{
.io = io,
.file = file,
.interface = initInterface(buffer),
.size = size,
};
}
/// Positional is more threadsafe, since the global seek position is not
/// affected, but when such syscalls are not available, preemptively
/// initializing in streaming mode skips a failed syscall.
pub fn initStreaming(file: File, io: Io, buffer: []u8) Reader {
return .{
.io = io,
.file = file,
.interface = Reader.initInterface(buffer),
.mode = .streaming,
.seek_err = error.Unseekable,
.size_err = error.Streaming,
};
}
pub fn getSize(r: *Reader) SizeError!u64 {
return r.size orelse {
if (r.size_err) |err| return err;
if (stat(r.file, r.io)) |st| {
if (st.kind == .file) {
r.size = st.size;
return st.size;
} else {
r.mode = r.mode.toStreaming();
r.size_err = error.Streaming;
return error.Streaming;
}
} else |err| {
r.size_err = err;
return err;
}
};
}
pub fn seekBy(r: *Reader, offset: i64) Reader.SeekError!void {
const io = r.io;
switch (r.mode) {
.positional, .positional_reading => {
setLogicalPos(r, @intCast(@as(i64, @intCast(logicalPos(r))) + offset));
},
.streaming, .streaming_reading => {
const seek_err = r.seek_err orelse e: {
if (io.vtable.fileSeekBy(io.userdata, r.file, offset)) |_| {
setLogicalPos(r, @intCast(@as(i64, @intCast(logicalPos(r))) + offset));
return;
} else |err| {
r.seek_err = err;
break :e err;
}
};
var remaining = std.math.cast(u64, offset) orelse return seek_err;
while (remaining > 0) {
remaining -= discard(&r.interface, .limited64(remaining)) catch |err| {
r.seek_err = err;
return err;
};
}
r.interface.seek = 0;
r.interface.end = 0;
},
.failure => return r.seek_err.?,
}
}
/// Repositions logical read offset relative to the beginning of the file.
pub fn seekTo(r: *Reader, offset: u64) Reader.SeekError!void {
const io = r.io;
switch (r.mode) {
.positional, .positional_reading => {
setLogicalPos(r, offset);
},
.streaming, .streaming_reading => {
const logical_pos = logicalPos(r);
if (offset >= logical_pos) return Reader.seekBy(r, @intCast(offset - logical_pos));
if (r.seek_err) |err| return err;
io.vtable.fileSeekTo(io.userdata, r.file, offset) catch |err| {
r.seek_err = err;
return err;
};
setLogicalPos(r, offset);
},
.failure => return r.seek_err.?,
}
}
pub fn logicalPos(r: *const Reader) u64 {
return r.pos - r.interface.bufferedLen();
}
fn setLogicalPos(r: *Reader, offset: u64) void {
const logical_pos = logicalPos(r);
if (offset < logical_pos or offset >= r.pos) {
r.interface.seek = 0;
r.interface.end = 0;
r.pos = offset;
} else {
const logical_delta: usize = @intCast(offset - logical_pos);
r.interface.seek += logical_delta;
}
}
/// Number of slices to store on the stack, when trying to send as many byte
/// vectors through the underlying read calls as possible.
const max_buffers_len = 16;
fn stream(io_reader: *Io.Reader, w: *Io.Writer, limit: Io.Limit) Io.Reader.StreamError!usize {
const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
return streamMode(r, w, limit, r.mode);
}
pub fn streamMode(r: *Reader, w: *Io.Writer, limit: Io.Limit, mode: Reader.Mode) Io.Reader.StreamError!usize {
switch (mode) {
.positional, .streaming => return w.sendFile(r, limit) catch |write_err| switch (write_err) {
error.Unimplemented => {
r.mode = r.mode.toReading();
return 0;
},
else => |e| return e,
},
.positional_reading => {
const dest = limit.slice(try w.writableSliceGreedy(1));
var data: [1][]u8 = .{dest};
const n = try readVecPositional(r, &data);
w.advance(n);
return n;
},
.streaming_reading => {
const dest = limit.slice(try w.writableSliceGreedy(1));
var data: [1][]u8 = .{dest};
const n = try readVecStreaming(r, &data);
w.advance(n);
return n;
},
.failure => return error.ReadFailed,
}
}
fn readVec(io_reader: *Io.Reader, data: [][]u8) Io.Reader.Error!usize {
const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
switch (r.mode) {
.positional, .positional_reading => return readVecPositional(r, data),
.streaming, .streaming_reading => return readVecStreaming(r, data),
.failure => return error.ReadFailed,
}
}
fn readVecPositional(r: *Reader, data: [][]u8) Io.Reader.Error!usize {
const io = r.io;
var iovecs_buffer: [max_buffers_len][]u8 = undefined;
const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, data);
const dest = iovecs_buffer[0..dest_n];
assert(dest[0].len > 0);
const n = io.vtable.fileReadPositional(io.userdata, r.file, dest, r.pos) catch |err| switch (err) {
error.Unseekable => {
r.mode = r.mode.toStreaming();
const pos = r.pos;
if (pos != 0) {
r.pos = 0;
r.seekBy(@intCast(pos)) catch {
r.mode = .failure;
return error.ReadFailed;
};
}
return 0;
},
else => |e| {
r.err = e;
return error.ReadFailed;
},
};
if (n == 0) {
r.size = r.pos;
return error.EndOfStream;
}
r.pos += n;
if (n > data_size) {
r.interface.end += n - data_size;
return data_size;
}
return n;
}
fn readVecStreaming(r: *Reader, data: [][]u8) Io.Reader.Error!usize {
const io = r.io;
var iovecs_buffer: [max_buffers_len][]u8 = undefined;
const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, data);
const dest = iovecs_buffer[0..dest_n];
assert(dest[0].len > 0);
const n = io.vtable.fileReadStreaming(io.userdata, r.file, dest) catch |err| {
r.err = err;
return error.ReadFailed;
};
if (n == 0) {
r.size = r.pos;
return error.EndOfStream;
}
r.pos += n;
if (n > data_size) {
r.interface.end += n - data_size;
return data_size;
}
return n;
}
fn discard(io_reader: *Io.Reader, limit: Io.Limit) Io.Reader.Error!usize {
const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
const io = r.io;
const file = r.file;
switch (r.mode) {
.positional, .positional_reading => {
const size = r.getSize() catch {
r.mode = r.mode.toStreaming();
return 0;
};
const logical_pos = logicalPos(r);
const delta = @min(@intFromEnum(limit), size - logical_pos);
setLogicalPos(r, logical_pos + delta);
return delta;
},
.streaming, .streaming_reading => {
// Unfortunately we can't seek forward without knowing the
// size because the seek syscalls provided to us will not
// return the true end position if a seek would exceed the
// end.
fallback: {
if (r.size_err == null and r.seek_err == null) break :fallback;
const buffered_len = r.interface.bufferedLen();
var remaining = @intFromEnum(limit);
if (remaining <= buffered_len) {
r.interface.seek += remaining;
return remaining;
}
remaining -= buffered_len;
r.interface.seek = 0;
r.interface.end = 0;
var trash_buffer: [128]u8 = undefined;
var data: [1][]u8 = .{trash_buffer[0..@min(trash_buffer.len, remaining)]};
var iovecs_buffer: [max_buffers_len][]u8 = undefined;
const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, &data);
const dest = iovecs_buffer[0..dest_n];
assert(dest[0].len > 0);
const n = io.vtable.fileReadStreaming(io.userdata, file, dest) catch |err| {
r.err = err;
return error.ReadFailed;
};
if (n == 0) {
r.size = r.pos;
return error.EndOfStream;
}
r.pos += n;
if (n > data_size) {
r.interface.end += n - data_size;
remaining -= data_size;
} else {
remaining -= n;
}
return @intFromEnum(limit) - remaining;
}
const size = r.getSize() catch return 0;
const n = @min(size - r.pos, std.math.maxInt(i64), @intFromEnum(limit));
io.vtable.fileSeekBy(io.userdata, file, n) catch |err| {
r.seek_err = err;
return 0;
};
r.pos += n;
return n;
},
.failure => return error.ReadFailed,
}
}
/// Returns whether the stream is at the logical end.
pub fn atEnd(r: *Reader) bool {
// Even if stat fails, size is set when end is encountered.
const size = r.size orelse return false;
return size - logicalPos(r) == 0;
}
};

lib/std/Io/IoUring.zig (new file, 1497 lines): diff suppressed because it is too large

lib/std/Io/Kqueue.zig (new file, 1743 lines): diff suppressed because it is too large

lib/std/Io/Threaded.zig (new file, 6156 lines): diff suppressed because it is too large

View File

@ -0,0 +1,58 @@
const builtin = @import("builtin");
const std = @import("std");
const Io = std.Io;
const testing = std.testing;
const assert = std.debug.assert;
test "concurrent vs main prevents deadlock via oversubscription" {
var threaded: Io.Threaded = .init(std.testing.allocator);
defer threaded.deinit();
const io = threaded.io();
threaded.cpu_count = 1;
var queue: Io.Queue(u8) = .init(&.{});
var putter = io.concurrent(put, .{ io, &queue }) catch |err| switch (err) {
error.ConcurrencyUnavailable => {
try testing.expect(builtin.single_threaded);
return;
},
};
defer putter.cancel(io);
try testing.expectEqual(42, queue.getOneUncancelable(io));
}
fn put(io: Io, queue: *Io.Queue(u8)) void {
queue.putOneUncancelable(io, 42);
}
fn get(io: Io, queue: *Io.Queue(u8)) void {
assert(queue.getOneUncancelable(io) == 42);
}
test "concurrent vs concurrent prevents deadlock via oversubscription" {
var threaded: Io.Threaded = .init(std.testing.allocator);
defer threaded.deinit();
const io = threaded.io();
threaded.cpu_count = 1;
var queue: Io.Queue(u8) = .init(&.{});
var putter = io.concurrent(put, .{ io, &queue }) catch |err| switch (err) {
error.ConcurrencyUnavailable => {
try testing.expect(builtin.single_threaded);
return;
},
};
defer putter.cancel(io);
var getter = try io.concurrent(get, .{ io, &queue });
defer getter.cancel(io);
getter.await(io);
putter.await(io);
}

View File

@ -5,7 +5,7 @@ const Writer = @This();
const std = @import("../std.zig");
const assert = std.debug.assert;
const Limit = std.Io.Limit;
const File = std.fs.File;
const File = std.Io.File;
const testing = std.testing;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
@ -2827,6 +2827,8 @@ pub const Allocating = struct {
};
test "discarding sendFile" {
const io = testing.io;
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
@ -2837,7 +2839,7 @@ test "discarding sendFile" {
try file_writer.interface.writeByte('h');
try file_writer.interface.flush();
var file_reader = file_writer.moveToReader();
var file_reader = file_writer.moveToReader(io);
try file_reader.seekTo(0);
var w_buffer: [256]u8 = undefined;
@ -2847,6 +2849,8 @@ test "discarding sendFile" {
}
test "allocating sendFile" {
const io = testing.io;
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
@ -2857,7 +2861,7 @@ test "allocating sendFile" {
try file_writer.interface.writeAll("abcd");
try file_writer.interface.flush();
var file_reader = file_writer.moveToReader();
var file_reader = file_writer.moveToReader(io);
try file_reader.seekTo(0);
try file_reader.interface.fill(2);
@ -2869,6 +2873,8 @@ test "allocating sendFile" {
}
test sendFileReading {
const io = testing.io;
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
@ -2879,7 +2885,7 @@ test sendFileReading {
try file_writer.interface.writeAll("abcd");
try file_writer.interface.flush();
var file_reader = file_writer.moveToReader();
var file_reader = file_writer.moveToReader(io);
try file_reader.seekTo(0);
try file_reader.interface.fill(2);

lib/std/Io/net.zig (new file, 1379 lines): diff suppressed because it is too large

lib/std/Io/net/HostName.zig (new file, 433 lines)
View File

@ -0,0 +1,433 @@
//! An already-validated host name. A valid host name:
//! * Has length less than or equal to `max_len`.
//! * Is valid UTF-8.
//! * Lacks ASCII characters other than alphanumeric, '-', and '.'.
const HostName = @This();
const builtin = @import("builtin");
const native_os = builtin.os.tag;
const std = @import("../../std.zig");
const Io = std.Io;
const IpAddress = Io.net.IpAddress;
const Ip6Address = Io.net.Ip6Address;
const assert = std.debug.assert;
const Stream = Io.net.Stream;
/// Externally managed memory. Already checked to be valid.
bytes: []const u8,
pub const max_len = 255;
pub const ValidateError = error{
NameTooLong,
InvalidHostName,
};
pub fn validate(bytes: []const u8) ValidateError!void {
if (bytes.len > max_len) return error.NameTooLong;
if (!std.unicode.utf8ValidateSlice(bytes)) return error.InvalidHostName;
for (bytes) |byte| {
if (!std.ascii.isAscii(byte) or byte == '.' or byte == '-' or std.ascii.isAlphanumeric(byte)) {
continue;
}
return error.InvalidHostName;
}
}
pub fn init(bytes: []const u8) ValidateError!HostName {
try validate(bytes);
return .{ .bytes = bytes };
}
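// Illustrative check of the validation rules documented above (a sketch, not exhaustive).
test init {
_ = try init("ziglang.org");
try std.testing.expectError(error.InvalidHostName, init("no spaces allowed"));
try std.testing.expectError(error.NameTooLong, init("a" ** (max_len + 1)));
}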
pub fn sameParentDomain(parent_host: HostName, child_host: HostName) bool {
const parent_bytes = parent_host.bytes;
const child_bytes = child_host.bytes;
if (!std.ascii.endsWithIgnoreCase(child_bytes, parent_bytes)) return false;
if (child_bytes.len == parent_bytes.len) return true;
if (parent_bytes.len > child_bytes.len) return false;
return child_bytes[child_bytes.len - parent_bytes.len - 1] == '.';
}
test sameParentDomain {
try std.testing.expect(!sameParentDomain(try .init("foo.com"), try .init("bar.com")));
try std.testing.expect(sameParentDomain(try .init("foo.com"), try .init("foo.com")));
try std.testing.expect(sameParentDomain(try .init("foo.com"), try .init("bar.foo.com")));
try std.testing.expect(!sameParentDomain(try .init("bar.foo.com"), try .init("foo.com")));
}
/// Domain names are case-insensitive (RFC 5890, Section 2.3.2.4)
pub fn eql(a: HostName, b: HostName) bool {
return std.ascii.eqlIgnoreCase(a.bytes, b.bytes);
}
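// Illustrative sketch of the case-insensitive comparison noted above.
test eql {
try std.testing.expect(eql(try .init("WWW.Ziglang.ORG"), try .init("www.ziglang.org")));
try std.testing.expect(!eql(try .init("ziglang.org"), try .init("example.com")));
}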
pub const LookupOptions = struct {
port: u16,
canonical_name_buffer: *[max_len]u8,
/// `null` means either.
family: ?IpAddress.Family = null,
};
pub const LookupError = error{
UnknownHostName,
ResolvConfParseFailed,
InvalidDnsARecord,
InvalidDnsAAAARecord,
InvalidDnsCnameRecord,
NameServerFailure,
/// Failed to open or read "/etc/hosts" or "/etc/resolv.conf".
DetectingNetworkConfigurationFailed,
} || Io.Clock.Error || IpAddress.BindError || Io.Cancelable;
pub const LookupResult = union(enum) {
address: IpAddress,
canonical_name: HostName,
end: LookupError!void,
};
/// Adds any number of `IpAddress` entries to `resolved`, plus exactly one
/// `canonical_name`, and always finishes by adding one `LookupResult.end` entry.
///
/// Guaranteed not to block if provided queue has capacity at least 16.
pub fn lookup(
host_name: HostName,
io: Io,
resolved: *Io.Queue(LookupResult),
options: LookupOptions,
) void {
return io.vtable.netLookup(io.userdata, host_name, resolved, options);
}
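// Consumption sketch (illustrative only; assumes `host_name: HostName` and
// `io: Io` in scope; buffer sizes and the per-variant handling are placeholders).
// Results arrive as zero or more `.address` entries plus exactly one
// `.canonical_name`, always terminated by `.end`:
//
//     var canonical_name_buffer: [max_len]u8 = undefined;
//     var results_buffer: [16]LookupResult = undefined;
//     var results: Io.Queue(LookupResult) = .init(&results_buffer);
//     host_name.lookup(io, &results, .{ .port = 80, .canonical_name_buffer = &canonical_name_buffer });
//     while (results.getOne(io)) |result| switch (result) {
//         .address => |address| { _ = address; },
//         .canonical_name => |name| { _ = name; },
//         .end => |end| { try end; break; },
//     } else |err| return err;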
pub const ExpandError = error{InvalidDnsPacket} || ValidateError;
/// Decompresses a DNS name.
///
/// Returns the number of bytes consumed from `packet` starting at `start_i`,
/// along with the expanded `HostName`.
///
/// Asserts `dest_buffer` has length at least `max_len`.
pub fn expand(noalias packet: []const u8, start_i: usize, noalias dest_buffer: []u8) ExpandError!struct { usize, HostName } {
const dest = dest_buffer[0..max_len];
var i = start_i;
var dest_i: usize = 0;
var len: ?usize = null;
// Detect reference loop using an iteration counter.
for (0..packet.len / 2) |_| {
if (i >= packet.len) return error.InvalidDnsPacket;
const c = packet[i];
if ((c & 0xc0) != 0) {
if (i + 1 >= packet.len) return error.InvalidDnsPacket;
const j: usize = (@as(usize, c & 0x3F) << 8) | packet[i + 1];
if (j >= packet.len) return error.InvalidDnsPacket;
if (len == null) len = (i + 2) - start_i;
i = j;
} else if (c != 0) {
if (dest_i != 0) {
dest[dest_i] = '.';
dest_i += 1;
}
const label_len: usize = c;
if (i + 1 + label_len > packet.len) return error.InvalidDnsPacket;
if (dest_i + label_len + 1 > dest.len) return error.InvalidDnsPacket;
@memcpy(dest[dest_i..][0..label_len], packet[i + 1 ..][0..label_len]);
dest_i += label_len;
i += 1 + label_len;
} else {
// Write a NUL sentinel for any C-style consumers, but exclude it from the
// validated host name returned below.
dest[dest_i] = 0;
return .{
len orelse i - start_i + 1,
try .init(dest[0..dest_i]),
};
}
}
return error.InvalidDnsPacket;
}
pub const DnsRecord = enum(u8) {
A = 1,
CNAME = 5,
AAAA = 28,
_,
};
pub const DnsResponse = struct {
bytes: []const u8,
bytes_index: u32,
answers_remaining: u16,
pub const Answer = struct {
rr: DnsRecord,
packet: []const u8,
data_off: u32,
data_len: u16,
};
pub const Error = error{InvalidDnsPacket};
pub fn init(r: []const u8) Error!DnsResponse {
if (r.len < 12) return error.InvalidDnsPacket;
if ((r[3] & 15) != 0) return .{ .bytes = r, .bytes_index = 3, .answers_remaining = 0 };
var i: u32 = 12;
var query_count = std.mem.readInt(u16, r[4..6], .big);
while (query_count != 0) : (query_count -= 1) {
while (i < r.len and r[i] -% 1 < 127) i += 1;
if (r.len - i < 6) return error.InvalidDnsPacket;
i = i + 5 + @intFromBool(r[i] != 0);
}
return .{
.bytes = r,
.bytes_index = i,
.answers_remaining = std.mem.readInt(u16, r[6..8], .big),
};
}
pub fn next(dr: *DnsResponse) Error!?Answer {
if (dr.answers_remaining == 0) return null;
dr.answers_remaining -= 1;
const r = dr.bytes;
var i = dr.bytes_index;
while (i < r.len and r[i] -% 1 < 127) i += 1;
if (r.len - i < 12) return error.InvalidDnsPacket;
i = i + 1 + @intFromBool(r[i] != 0);
const len = std.mem.readInt(u16, r[i + 8 ..][0..2], .big);
if (i + 10 + len > r.len) return error.InvalidDnsPacket;
defer dr.bytes_index = i + 10 + len;
return .{
.rr = @enumFromInt(r[i + 1]),
.packet = r,
.data_off = i + 10,
.data_len = len,
};
}
};
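// Iteration sketch (illustrative only; assumes `packet` holds a raw DNS response):
//
//     var response: DnsResponse = try .init(packet);
//     while (try response.next()) |answer| switch (answer.rr) {
//         .A => {}, // answer.packet[answer.data_off..][0..answer.data_len] holds a 4-byte address
//         .AAAA => {}, // 16-byte address
//         .CNAME => {}, // compressed name; see `expand`
//         _ => {},
//     };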
pub const ConnectError = LookupError || IpAddress.ConnectError;
pub fn connect(
host_name: HostName,
io: Io,
port: u16,
options: IpAddress.ConnectOptions,
) ConnectError!Stream {
var connect_many_buffer: [32]ConnectManyResult = undefined;
var connect_many_queue: Io.Queue(ConnectManyResult) = .init(&connect_many_buffer);
var connect_many = io.async(connectMany, .{ host_name, io, port, &connect_many_queue, options });
var saw_end = false;
defer {
connect_many.cancel(io);
if (!saw_end) while (true) switch (connect_many_queue.getOneUncancelable(io)) {
.connection => |loser| if (loser) |s| s.close(io) else |_| continue,
.end => break,
};
}
var aggregate_error: ConnectError = error.UnknownHostName;
while (connect_many_queue.getOne(io)) |result| switch (result) {
.connection => |connection| if (connection) |stream| return stream else |err| switch (err) {
error.SystemResources,
error.OptionUnsupported,
error.ProcessFdQuotaExceeded,
error.SystemFdQuotaExceeded,
error.Canceled,
=> |e| return e,
error.WouldBlock => return error.Unexpected,
else => |e| aggregate_error = e,
},
.end => |end| {
saw_end = true;
try end;
return aggregate_error;
},
} else |err| switch (err) {
error.Canceled => |e| return e,
}
}
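// Usage sketch (illustrative only; assumes `io: Io` in scope; "example.com",
// port 80, and the stream mode are placeholders):
//
//     const host: HostName = try .init("example.com");
//     var stream = try host.connect(io, 80, .{ .mode = .stream });
//     defer stream.close(io);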
pub const ConnectManyResult = union(enum) {
connection: IpAddress.ConnectError!Stream,
end: ConnectError!void,
};
/// Asynchronously establishes a connection to all IP addresses associated with
/// a host name, adding them to a results queue upon completion.
pub fn connectMany(
host_name: HostName,
io: Io,
port: u16,
results: *Io.Queue(ConnectManyResult),
options: IpAddress.ConnectOptions,
) void {
var canonical_name_buffer: [max_len]u8 = undefined;
var lookup_buffer: [32]HostName.LookupResult = undefined;
var lookup_queue: Io.Queue(LookupResult) = .init(&lookup_buffer);
var group: Io.Group = .init;
defer group.cancel(io);
group.async(io, lookup, .{ host_name, io, &lookup_queue, .{
.port = port,
.canonical_name_buffer = &canonical_name_buffer,
} });
while (lookup_queue.getOne(io)) |dns_result| switch (dns_result) {
.address => |address| group.async(io, enqueueConnection, .{ address, io, results, options }),
.canonical_name => continue,
.end => |lookup_result| {
group.wait(io);
results.putOneUncancelable(io, .{ .end = lookup_result });
return;
},
} else |err| switch (err) {
error.Canceled => |e| {
group.cancel(io);
results.putOneUncancelable(io, .{ .end = e });
},
}
}
fn enqueueConnection(
address: IpAddress,
io: Io,
queue: *Io.Queue(ConnectManyResult),
options: IpAddress.ConnectOptions,
) void {
queue.putOneUncancelable(io, .{ .connection = address.connect(io, options) });
}
pub const ResolvConf = struct {
attempts: u32,
ndots: u32,
timeout_seconds: u32,
nameservers_buffer: [max_nameservers]IpAddress,
nameservers_len: usize,
search_buffer: [max_len]u8,
search_len: usize,
/// According to resolv.conf(5) there is a maximum of 3 nameservers in this
/// file.
pub const max_nameservers = 3;
/// Returns `error.StreamTooLong` if a line is longer than 512 bytes.
pub fn init(io: Io) !ResolvConf {
var rc: ResolvConf = .{
.nameservers_buffer = undefined,
.nameservers_len = 0,
.search_buffer = undefined,
.search_len = 0,
.ndots = 1,
.timeout_seconds = 5,
.attempts = 2,
};
const file = Io.File.openAbsolute(io, "/etc/resolv.conf", .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
error.AccessDenied,
=> {
try addNumeric(&rc, io, "127.0.0.1", 53);
return rc;
},
else => |e| return e,
};
defer file.close(io);
var line_buf: [512]u8 = undefined;
var file_reader = file.reader(io, &line_buf);
parse(&rc, io, &file_reader.interface) catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
else => |e| return e,
};
return rc;
}
const Directive = enum { options, nameserver, domain, search };
const Option = enum { ndots, attempts, timeout };
pub fn parse(rc: *ResolvConf, io: Io, reader: *Io.Reader) !void {
while (reader.takeSentinel('\n')) |line_with_comment| {
const line = line: {
var split = std.mem.splitScalar(u8, line_with_comment, '#');
break :line split.first();
};
var line_it = std.mem.tokenizeAny(u8, line, " \t");
const token = line_it.next() orelse continue;
switch (std.meta.stringToEnum(Directive, token) orelse continue) {
.options => while (line_it.next()) |sub_tok| {
var colon_it = std.mem.splitScalar(u8, sub_tok, ':');
const name = colon_it.first();
const value_txt = colon_it.next() orelse continue;
const value = std.fmt.parseInt(u8, value_txt, 10) catch |err| switch (err) {
error.Overflow => 255,
error.InvalidCharacter => continue,
};
switch (std.meta.stringToEnum(Option, name) orelse continue) {
.ndots => rc.ndots = @min(value, 15),
.attempts => rc.attempts = @min(value, 10),
.timeout => rc.timeout_seconds = @min(value, 60),
}
},
.nameserver => {
const ip_txt = line_it.next() orelse continue;
try addNumeric(rc, io, ip_txt, 53);
},
.domain, .search => {
const rest = line_it.rest();
@memcpy(rc.search_buffer[0..rest.len], rest);
rc.search_len = rest.len;
},
}
} else |err| switch (err) {
error.EndOfStream => if (reader.bufferedLen() != 0) return error.EndOfStream,
else => |e| return e,
}
if (rc.nameservers_len == 0) {
try addNumeric(rc, io, "127.0.0.1", 53);
}
}
fn addNumeric(rc: *ResolvConf, io: Io, name: []const u8, port: u16) !void {
if (rc.nameservers_len < rc.nameservers_buffer.len) {
rc.nameservers_buffer[rc.nameservers_len] = try .resolve(io, name, port);
rc.nameservers_len += 1;
}
}
pub fn nameservers(rc: *const ResolvConf) []const IpAddress {
return rc.nameservers_buffer[0..rc.nameservers_len];
}
};
test ResolvConf {
const input =
\\# Generated by resolvconf
\\nameserver 1.0.0.1
\\nameserver 1.1.1.1
\\nameserver fe80::e0e:76ff:fed4:cf22
\\options edns0
\\
;
var reader: Io.Reader = .fixed(input);
var rc: ResolvConf = .{
.nameservers_buffer = undefined,
.nameservers_len = 0,
.search_buffer = undefined,
.search_len = 0,
.ndots = 1,
.timeout_seconds = 5,
.attempts = 2,
};
try rc.parse(std.testing.io, &reader);
try std.testing.expectEqual(3, rc.nameservers().len);
}

lib/std/Io/net/test.zig (new file, 345 lines)
View File

@ -0,0 +1,345 @@
const builtin = @import("builtin");
const std = @import("std");
const Io = std.Io;
const net = std.Io.net;
const mem = std.mem;
const testing = std.testing;
test "parse and render IP addresses at comptime" {
comptime {
const ipv6addr = net.IpAddress.parse("::1", 0) catch unreachable;
try testing.expectFmt("[::1]:0", "{f}", .{ipv6addr});
const ipv4addr = net.IpAddress.parse("127.0.0.1", 0) catch unreachable;
try testing.expectFmt("127.0.0.1:0", "{f}", .{ipv4addr});
try testing.expectError(error.ParseFailed, net.IpAddress.parse("::123.123.123.123", 0));
try testing.expectError(error.ParseFailed, net.IpAddress.parse("127.01.0.1", 0));
}
}
test "format IPv6 address with no zero runs" {
const addr = try net.IpAddress.parseIp6("2001:db8:1:2:3:4:5:6", 0);
try testing.expectFmt("[2001:db8:1:2:3:4:5:6]:0", "{f}", .{addr});
}
test "parse IPv6 addresses and check compressed form" {
try testing.expectFmt("[2001:db8::1:0:0:2]:0", "{f}", .{
try net.IpAddress.parseIp6("2001:0db8:0000:0000:0001:0000:0000:0002", 0),
});
try testing.expectFmt("[2001:db8::1:2]:0", "{f}", .{
try net.IpAddress.parseIp6("2001:0db8:0000:0000:0000:0000:0001:0002", 0),
});
try testing.expectFmt("[2001:db8:1:0:1::2]:0", "{f}", .{
try net.IpAddress.parseIp6("2001:0db8:0001:0000:0001:0000:0000:0002", 0),
});
}
test "parse IPv6 address, check raw bytes" {
const expected_raw: [16]u8 = .{
0x20, 0x01, 0x0d, 0xb8, // 2001:db8
0x00, 0x00, 0x00, 0x00, // :0000:0000
0x00, 0x01, 0x00, 0x00, // :0001:0000
0x00, 0x00, 0x00, 0x02, // :0000:0002
};
const addr = try net.IpAddress.parseIp6("2001:db8:0000:0000:0001:0000:0000:0002", 0);
try testing.expectEqualSlices(u8, &expected_raw, &addr.ip6.bytes);
}
test "parse and render IPv6 addresses" {
try testParseAndRenderIp6Address("FF01:0:0:0:0:0:0:FB", "ff01::fb");
try testParseAndRenderIp6Address("FF01::Fb", "ff01::fb");
try testParseAndRenderIp6Address("::1", "::1");
try testParseAndRenderIp6Address("::", "::");
try testParseAndRenderIp6Address("1::", "1::");
try testParseAndRenderIp6Address("2001:db8::", "2001:db8::");
try testParseAndRenderIp6Address("::1234:5678", "::1234:5678");
try testParseAndRenderIp6Address("2001:db8::1234:5678", "2001:db8::1234:5678");
try testParseAndRenderIp6Address("FF01::FB%1234", "ff01::fb%1234");
try testParseAndRenderIp6Address("::ffff:123.5.123.5", "::ffff:123.5.123.5");
try testParseAndRenderIp6Address("ff01::fb%12345678901234", "ff01::fb%12345678901234");
}
fn testParseAndRenderIp6Address(input: []const u8, expected_output: []const u8) !void {
var buffer: [100]u8 = undefined;
const parsed = net.Ip6Address.Unresolved.parse(input);
const actual_printed = try std.fmt.bufPrint(&buffer, "{f}", .{parsed.success});
try testing.expectEqualStrings(expected_output, actual_printed);
}
test "IPv6 address parse failures" {
try testing.expectError(error.ParseFailed, net.IpAddress.parseIp6(":::", 0));
const Unresolved = net.Ip6Address.Unresolved;
try testing.expectEqual(Unresolved.Parsed{ .invalid_byte = 2 }, Unresolved.parse(":::"));
try testing.expectEqual(Unresolved.Parsed{ .overflow = 4 }, Unresolved.parse("FF001::FB"));
try testing.expectEqual(Unresolved.Parsed{ .invalid_byte = 9 }, Unresolved.parse("FF01::Fb:zig"));
try testing.expectEqual(Unresolved.Parsed{ .junk_after_end = 19 }, Unresolved.parse("FF01:0:0:0:0:0:0:FB:"));
try testing.expectEqual(Unresolved.Parsed.incomplete, Unresolved.parse("FF01:"));
try testing.expectEqual(Unresolved.Parsed{ .invalid_byte = 5 }, Unresolved.parse("::123.123.123.123"));
try testing.expectEqual(Unresolved.Parsed.incomplete, Unresolved.parse("1"));
try testing.expectEqual(Unresolved.Parsed.incomplete, Unresolved.parse("ff01::fb%"));
}
test "invalid but parseable IPv6 scope ids" {
const io = testing.io;
if (builtin.os.tag != .linux and comptime !builtin.os.tag.isDarwin()) {
return error.SkipZigTest; // TODO
}
try testing.expectError(error.InterfaceNotFound, net.IpAddress.resolveIp6(io, "ff01::fb%123s45678901234", 0));
}
test "parse and render IPv4 addresses" {
var buffer: [18]u8 = undefined;
for ([_][]const u8{
"0.0.0.0",
"255.255.255.255",
"1.2.3.4",
"123.255.0.91",
"127.0.0.1",
}) |ip| {
const addr = net.IpAddress.parseIp4(ip, 0) catch unreachable;
const newIp = std.fmt.bufPrint(buffer[0..], "{f}", .{addr}) catch unreachable;
try testing.expect(std.mem.eql(u8, ip, newIp[0 .. newIp.len - 2]));
}
try testing.expectError(error.Overflow, net.IpAddress.parseIp4("256.0.0.1", 0));
try testing.expectError(error.InvalidCharacter, net.IpAddress.parseIp4("x.0.0.1", 0));
try testing.expectError(error.InvalidEnd, net.IpAddress.parseIp4("127.0.0.1.1", 0));
try testing.expectError(error.Incomplete, net.IpAddress.parseIp4("127.0.0.", 0));
try testing.expectError(error.InvalidCharacter, net.IpAddress.parseIp4("100..0.1", 0));
try testing.expectError(error.NonCanonical, net.IpAddress.parseIp4("127.01.0.1", 0));
}
test "resolve DNS" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const io = testing.io;
// Resolve localhost, this should not fail.
{
const localhost_v4 = try net.IpAddress.parse("127.0.0.1", 80);
const localhost_v6 = try net.IpAddress.parse("::2", 80);
var canonical_name_buffer: [net.HostName.max_len]u8 = undefined;
var results_buffer: [32]net.HostName.LookupResult = undefined;
var results: Io.Queue(net.HostName.LookupResult) = .init(&results_buffer);
net.HostName.lookup(try .init("localhost"), io, &results, .{
.port = 80,
.canonical_name_buffer = &canonical_name_buffer,
});
var addresses_found: usize = 0;
while (results.getOne(io)) |result| switch (result) {
.address => |address| {
if (address.eql(&localhost_v4) or address.eql(&localhost_v6))
addresses_found += 1;
},
.canonical_name => |canonical_name| try testing.expectEqualStrings("localhost", canonical_name.bytes),
.end => |end| {
try end;
break;
},
} else |err| return err;
try testing.expect(addresses_found != 0);
}
{
// The tests are required to work even when there is no Internet connection,
// so some of these errors must be accepted, in which case we skip the test.
var canonical_name_buffer: [net.HostName.max_len]u8 = undefined;
var results_buffer: [16]net.HostName.LookupResult = undefined;
var results: Io.Queue(net.HostName.LookupResult) = .init(&results_buffer);
net.HostName.lookup(try .init("example.com"), io, &results, .{
.port = 80,
.canonical_name_buffer = &canonical_name_buffer,
});
while (results.getOne(io)) |result| switch (result) {
.address => {},
.canonical_name => {},
.end => |end| {
end catch |err| switch (err) {
error.UnknownHostName => return error.SkipZigTest,
error.NameServerFailure => return error.SkipZigTest,
else => return err,
};
break;
},
} else |err| return err;
}
}
test "listen on a port, send bytes, receive bytes" {
if (builtin.single_threaded) return error.SkipZigTest;
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const io = testing.io;
// Try only the IPv4 variant as some CI builders have no IPv6 localhost
// configured.
const localhost: net.IpAddress = .{ .ip4 = .loopback(0) };
var server = try localhost.listen(io, .{});
defer server.deinit(io);
const S = struct {
fn clientFn(server_address: net.IpAddress) !void {
var stream = try server_address.connect(io, .{ .mode = .stream });
defer stream.close(io);
var stream_writer = stream.writer(io, &.{});
try stream_writer.interface.writeAll("Hello world!");
}
};
const t = try std.Thread.spawn(.{}, S.clientFn, .{server.socket.address});
defer t.join();
var stream = try server.accept(io);
defer stream.close(io);
var buf: [16]u8 = undefined;
var stream_reader = stream.reader(io, &.{});
const n = try stream_reader.interface.readSliceShort(&buf);
try testing.expectEqual(@as(usize, 12), n);
try testing.expectEqualSlices(u8, "Hello world!", buf[0..n]);
}
test "listen on an in use port" {
if (builtin.os.tag != .linux and comptime !builtin.os.tag.isDarwin() and builtin.os.tag != .windows) {
// TODO build abstractions for other operating systems
return error.SkipZigTest;
}
const io = testing.io;
const localhost: net.IpAddress = .{ .ip4 = .loopback(0) };
var server1 = try localhost.listen(io, .{ .reuse_address = true });
defer server1.deinit(io);
var server2 = try server1.socket.address.listen(io, .{ .reuse_address = true });
defer server2.deinit(io);
}
fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const connection = try net.tcpConnectToHost(allocator, name, port);
defer connection.close();
var buf: [100]u8 = undefined;
const len = try connection.read(&buf);
const msg = buf[0..len];
try testing.expect(mem.eql(u8, msg, "hello from server\n"));
}
fn testClient(addr: net.IpAddress) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const socket_file = try net.tcpConnectToAddress(addr);
defer socket_file.close();
var buf: [100]u8 = undefined;
const len = try socket_file.read(&buf);
const msg = buf[0..len];
try testing.expect(mem.eql(u8, msg, "hello from server\n"));
}
fn testServer(server: *net.Server) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const io = testing.io;
var stream = try server.accept(io);
var writer = stream.writer(io, &.{});
try writer.interface.print("hello from server\n", .{});
}
test "listen on a unix socket, send bytes, receive bytes" {
if (builtin.single_threaded) return error.SkipZigTest;
if (!net.has_unix_sockets) return error.SkipZigTest;
const io = testing.io;
const socket_path = try generateFileName("socket.unix");
defer testing.allocator.free(socket_path);
const socket_addr = try net.UnixAddress.init(socket_path);
defer std.fs.cwd().deleteFile(socket_path) catch {};
var server = try socket_addr.listen(io, .{});
defer server.socket.close(io);
const S = struct {
fn clientFn(path: []const u8) !void {
const server_path: net.UnixAddress = try .init(path);
var stream = try server_path.connect(io);
defer stream.close(io);
var stream_writer = stream.writer(io, &.{});
try stream_writer.interface.writeAll("Hello world!");
}
};
const t = try std.Thread.spawn(.{}, S.clientFn, .{socket_path});
defer t.join();
var stream = try server.accept(io);
defer stream.close(io);
var buf: [16]u8 = undefined;
var stream_reader = stream.reader(io, &.{});
const n = try stream_reader.interface.readSliceShort(&buf);
try testing.expectEqual(@as(usize, 12), n);
try testing.expectEqualSlices(u8, "Hello world!", buf[0..n]);
}
fn generateFileName(base_name: []const u8) ![]const u8 {
const random_bytes_count = 12;
const sub_path_len = comptime std.fs.base64_encoder.calcSize(random_bytes_count);
var random_bytes: [12]u8 = undefined;
std.crypto.random.bytes(&random_bytes);
var sub_path: [sub_path_len]u8 = undefined;
_ = std.fs.base64_encoder.encode(&sub_path, &random_bytes);
return std.fmt.allocPrint(testing.allocator, "{s}-{s}", .{ sub_path[0..], base_name });
}
test "non-blocking tcp server" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
if (true) {
// https://github.com/ziglang/zig/issues/18315
return error.SkipZigTest;
}
const io = testing.io;
const localhost: net.IpAddress = .{ .ip4 = .loopback(0) };
var server = try localhost.listen(io, .{ .force_nonblocking = true });
defer server.deinit(io);
const accept_err = server.accept(io);
try testing.expectError(error.WouldBlock, accept_err);
const socket_file = try net.tcpConnectToAddress(server.socket.address);
defer socket_file.close();
var stream = try server.accept(io);
defer stream.close(io);
var writer = stream.writer(io, &.{});
try writer.interface.print("hello from server\n", .{});
var buf: [100]u8 = undefined;
const len = try socket_file.read(&buf);
const msg = buf[0..len];
try testing.expect(mem.eql(u8, msg, "hello from server\n"));
}

View File

@ -1,21 +1,28 @@
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
const std = @import("std");
const DefaultPrng = std.Random.DefaultPrng;
const Io = std.Io;
const testing = std.testing;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectError = std.testing.expectError;
const DefaultPrng = std.Random.DefaultPrng;
const mem = std.mem;
const fs = std.fs;
const File = std.fs.File;
const native_endian = @import("builtin").target.cpu.arch.endian();
const assert = std.debug.assert;
const tmpDir = std.testing.tmpDir;
test "write a file, read it, then delete it" {
const io = testing.io;
var tmp = tmpDir(.{});
defer tmp.cleanup();
var data: [1024]u8 = undefined;
var prng = DefaultPrng.init(std.testing.random_seed);
var prng = DefaultPrng.init(testing.random_seed);
const random = prng.random();
random.bytes(data[0..]);
const tmp_file_name = "temp_test_file.txt";
@ -45,9 +52,9 @@ test "write a file, read it, then delete it" {
try expectEqual(expected_file_size, file_size);
var file_buffer: [1024]u8 = undefined;
var file_reader = file.reader(&file_buffer);
const contents = try file_reader.interface.allocRemaining(std.testing.allocator, .limited(2 * 1024));
defer std.testing.allocator.free(contents);
var file_reader = file.reader(io, &file_buffer);
const contents = try file_reader.interface.allocRemaining(testing.allocator, .limited(2 * 1024));
defer testing.allocator.free(contents);
try expect(mem.eql(u8, contents[0.."begin".len], "begin"));
try expect(mem.eql(u8, contents["begin".len .. contents.len - "end".len], &data));
@ -89,18 +96,18 @@ test "setEndPos" {
defer file.close();
// Verify that the file size changes and the file offset is not moved
try std.testing.expect((try file.getEndPos()) == 0);
try std.testing.expect((try file.getPos()) == 0);
try expect((try file.getEndPos()) == 0);
try expect((try file.getPos()) == 0);
try file.setEndPos(8192);
try std.testing.expect((try file.getEndPos()) == 8192);
try std.testing.expect((try file.getPos()) == 0);
try expect((try file.getEndPos()) == 8192);
try expect((try file.getPos()) == 0);
try file.seekTo(100);
try file.setEndPos(4096);
try std.testing.expect((try file.getEndPos()) == 4096);
try std.testing.expect((try file.getPos()) == 100);
try expect((try file.getEndPos()) == 4096);
try expect((try file.getPos()) == 100);
try file.setEndPos(0);
try std.testing.expect((try file.getEndPos()) == 0);
try std.testing.expect((try file.getPos()) == 100);
try expect((try file.getEndPos()) == 0);
try expect((try file.getPos()) == 100);
}
test "updateTimes" {
@ -114,10 +121,90 @@ test "updateTimes" {
const stat_old = try file.stat();
// Set atime and mtime to 5s before
try file.updateTimes(
stat_old.atime - 5 * std.time.ns_per_s,
stat_old.mtime - 5 * std.time.ns_per_s,
stat_old.atime.subDuration(.fromSeconds(5)),
stat_old.mtime.subDuration(.fromSeconds(5)),
);
const stat_new = try file.stat();
try expect(stat_new.atime < stat_old.atime);
try expect(stat_new.mtime < stat_old.mtime);
try expect(stat_new.atime.nanoseconds < stat_old.atime.nanoseconds);
try expect(stat_new.mtime.nanoseconds < stat_old.mtime.nanoseconds);
}
test "Group" {
const io = testing.io;
var group: Io.Group = .init;
var results: [2]usize = undefined;
group.async(io, count, .{ 1, 10, &results[0] });
group.async(io, count, .{ 20, 30, &results[1] });
group.wait(io);
try testing.expectEqualSlices(usize, &.{ 45, 245 }, &results);
}
fn count(a: usize, b: usize, result: *usize) void {
var sum: usize = 0;
for (a..b) |i| {
sum += i;
}
result.* = sum;
}
test "Group cancellation" {
const io = testing.io;
var group: Io.Group = .init;
var results: [2]usize = undefined;
group.async(io, sleep, .{ io, &results[0] });
group.async(io, sleep, .{ io, &results[1] });
group.cancel(io);
try testing.expectEqualSlices(usize, &.{ 1, 1 }, &results);
}
fn sleep(io: Io, result: *usize) void {
// TODO: when the cancellation race bug is fixed, make this timeout much longer
// so that it causes the unit test to fail if not cancelled.
io.sleep(.fromMilliseconds(1), .awake) catch {};
result.* = 1;
}
test "select" {
const io = testing.io;
var queue: Io.Queue(u8) = .init(&.{});
var get_a = io.concurrent(Io.Queue(u8).getOne, .{ &queue, io }) catch |err| switch (err) {
error.ConcurrencyUnavailable => {
try testing.expect(builtin.single_threaded);
return;
},
};
defer if (get_a.cancel(io)) |_| {} else |_| @panic("fail");
var get_b = try io.concurrent(Io.Queue(u8).getOne, .{ &queue, io });
defer if (get_b.cancel(io)) |_| {} else |_| @panic("fail");
var timeout = io.async(Io.sleep, .{ io, .fromMilliseconds(1), .awake });
defer timeout.cancel(io) catch {};
switch (try io.select(.{
.get_a = &get_a,
.get_b = &get_b,
.timeout = &timeout,
})) {
.get_a => return error.TestFailure,
.get_b => return error.TestFailure,
.timeout => {
// Unblock the queues to avoid making this unit test depend on
// cancellation.
queue.putOneUncancelable(io, 1);
queue.putOneUncancelable(io, 1);
try testing.expectEqual(1, try get_a.await(io));
try testing.expectEqual(1, try get_b.await(io));
},
}
}

View File

@ -392,7 +392,7 @@ var global_progress: Progress = .{
.terminal = undefined,
.terminal_mode = .off,
.update_thread = null,
.redraw_event = .{},
.redraw_event = .unset,
.refresh_rate_ns = undefined,
.initial_delay_ns = undefined,
.rows = 0,
@ -493,7 +493,7 @@ pub fn start(options: Options) Node {
.mask = posix.sigemptyset(),
.flags = (posix.SA.SIGINFO | posix.SA.RESTART),
};
posix.sigaction(posix.SIG.WINCH, &act, null);
posix.sigaction(.WINCH, &act, null);
}
if (switch (global_progress.terminal_mode) {
@ -523,9 +523,7 @@ pub fn setStatus(new_status: Status) void {
/// Returns whether a resize is needed to learn the terminal size.
fn wait(timeout_ns: u64) bool {
const resize_flag = if (global_progress.redraw_event.timedWait(timeout_ns)) |_|
true
else |err| switch (err) {
const resize_flag = if (global_progress.redraw_event.timedWait(timeout_ns)) |_| true else |err| switch (err) {
error.Timeout => false,
};
global_progress.redraw_event.reset();
@ -1537,10 +1535,10 @@ fn maybeUpdateSize(resize_flag: bool) void {
}
}
fn handleSigWinch(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.c) void {
fn handleSigWinch(sig: posix.SIG, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.c) void {
_ = info;
_ = ctx_ptr;
assert(sig == posix.SIG.WINCH);
assert(sig == .WINCH);
global_progress.redraw_event.set();
}

View File

@ -58,6 +58,12 @@ pub fn bytes(r: Random, buf: []u8) void {
r.fillFn(r.ptr, buf);
}
pub fn array(r: Random, comptime E: type, comptime N: usize) [N]E {
var result: [N]E = undefined;
bytes(r, &result);
return result;
}
pub fn boolean(r: Random) bool {
return r.int(u1) != 0;
}

View File

@ -612,6 +612,8 @@ fn versionEqualOpt(a: ?SemanticVersion, b: ?SemanticVersion) bool {
}
test parse {
const io = std.testing.io;
if (builtin.target.isGnuLibC()) {
var query = try Query.parse(.{});
query.setGnuLibCVersion(2, 1, 1);
@ -654,7 +656,7 @@ test parse {
.arch_os_abi = "x86_64-linux-gnu",
.cpu_features = "x86_64-sse-sse2-avx-cx8",
});
const target = try std.zig.system.resolveTargetQuery(query);
const target = try std.zig.system.resolveTargetQuery(io, query);
try std.testing.expect(target.os.tag == .linux);
try std.testing.expect(target.abi == .gnu);
@ -679,7 +681,7 @@ test parse {
.arch_os_abi = "arm-linux-musleabihf",
.cpu_features = "generic+v8a",
});
const target = try std.zig.system.resolveTargetQuery(query);
const target = try std.zig.system.resolveTargetQuery(io, query);
try std.testing.expect(target.os.tag == .linux);
try std.testing.expect(target.abi == .musleabihf);
@ -696,7 +698,7 @@ test parse {
.arch_os_abi = "aarch64-linux.3.10...4.4.1-gnu.2.27",
.cpu_features = "generic+v8a",
});
const target = try std.zig.system.resolveTargetQuery(query);
const target = try std.zig.system.resolveTargetQuery(io, query);
try std.testing.expect(target.cpu.arch == .aarch64);
try std.testing.expect(target.os.tag == .linux);
@ -719,7 +721,7 @@ test parse {
const query = try Query.parse(.{
.arch_os_abi = "aarch64-linux.3.10...4.4.1-android.30",
});
const target = try std.zig.system.resolveTargetQuery(query);
const target = try std.zig.system.resolveTargetQuery(io, query);
try std.testing.expect(target.cpu.arch == .aarch64);
try std.testing.expect(target.os.tag == .linux);
@ -740,7 +742,7 @@ test parse {
const query = try Query.parse(.{
.arch_os_abi = "x86-windows.xp...win8-msvc",
});
const target = try std.zig.system.resolveTargetQuery(query);
const target = try std.zig.system.resolveTargetQuery(io, query);
try std.testing.expect(target.cpu.arch == .x86);
try std.testing.expect(target.os.tag == .windows);

View File

@ -10,9 +10,9 @@ const target = builtin.target;
const native_os = builtin.os.tag;
const posix = std.posix;
const windows = std.os.windows;
const testing = std.testing;
pub const Futex = @import("Thread/Futex.zig");
pub const ResetEvent = @import("Thread/ResetEvent.zig");
pub const Mutex = @import("Thread/Mutex.zig");
pub const Semaphore = @import("Thread/Semaphore.zig");
pub const Condition = @import("Thread/Condition.zig");
@ -22,81 +22,122 @@ pub const WaitGroup = @import("Thread/WaitGroup.zig");
pub const use_pthreads = native_os != .windows and native_os != .wasi and builtin.link_libc;
/// Spurious wakeups are possible and no precision of timing is guaranteed.
pub fn sleep(nanoseconds: u64) void {
if (builtin.os.tag == .windows) {
const big_ms_from_ns = nanoseconds / std.time.ns_per_ms;
const ms = math.cast(windows.DWORD, big_ms_from_ns) orelse math.maxInt(windows.DWORD);
windows.kernel32.Sleep(ms);
return;
/// A thread-safe logical boolean value which can be `set` and `unset`.
///
/// It can also block threads until the value is set, with cancelation available
/// via timed waits. Statically initializable; four bytes on all targets.
pub const ResetEvent = enum(u32) {
unset = 0,
waiting = 1,
is_set = 2,
/// Returns whether the logical boolean is `set`.
///
/// Once `reset` is called, this returns false until the next `set`.
///
/// The memory accesses before the `set` can be said to happen before
/// `isSet` returns true.
pub fn isSet(re: *const ResetEvent) bool {
if (builtin.single_threaded) return switch (re.*) {
.unset => false,
.waiting => unreachable,
.is_set => true,
};
// Acquire barrier ensures memory accesses before `set` happen before
// returning true.
return @atomicLoad(ResetEvent, re, .acquire) == .is_set;
}
if (builtin.os.tag == .wasi) {
const w = std.os.wasi;
const userdata: w.userdata_t = 0x0123_45678;
const clock: w.subscription_clock_t = .{
.id = .MONOTONIC,
.timeout = nanoseconds,
.precision = 0,
.flags = 0,
/// Blocks the calling thread until `set` is called.
///
/// This is effectively a more efficient version of `while (!isSet()) {}`.
///
/// The memory accesses before the `set` can be said to happen before `wait` returns.
pub fn wait(re: *ResetEvent) void {
if (builtin.single_threaded) switch (re.*) {
.unset => unreachable, // Deadlock, no other threads to wake us up.
.waiting => unreachable, // Invalid state.
.is_set => return,
};
const in: w.subscription_t = .{
.userdata = userdata,
.u = .{
.tag = .CLOCK,
.u = .{ .clock = clock },
},
if (!re.isSet()) return timedWaitInner(re, null) catch |err| switch (err) {
error.Timeout => unreachable, // No timeout specified.
};
var event: w.event_t = undefined;
var nevents: usize = undefined;
_ = w.poll_oneoff(&in, &event, 1, &nevents);
return;
}
if (builtin.os.tag == .uefi) {
const boot_services = std.os.uefi.system_table.boot_services.?;
const us_from_ns = nanoseconds / std.time.ns_per_us;
const us = math.cast(usize, us_from_ns) orelse math.maxInt(usize);
boot_services.stall(us) catch unreachable;
return;
/// Blocks the calling thread until `set` is called, or until the
/// corresponding timeout expires, returning `error.Timeout`.
///
/// This is effectively a more efficient version of `while (!isSet()) {}`.
///
/// The memory accesses before the set() can be said to happen before
/// timedWait() returns without error.
pub fn timedWait(re: *ResetEvent, timeout_ns: u64) error{Timeout}!void {
if (builtin.single_threaded) switch (re.*) {
.unset => return error.Timeout,
.waiting => unreachable, // Invalid state.
.is_set => return,
};
if (!re.isSet()) return timedWaitInner(re, timeout_ns);
}
const s = nanoseconds / std.time.ns_per_s;
const ns = nanoseconds % std.time.ns_per_s;
fn timedWaitInner(re: *ResetEvent, timeout: ?u64) error{Timeout}!void {
@branchHint(.cold);
// Newer kernel ports don't have old `nanosleep()` and `clock_nanosleep()` has been around
// since Linux 2.6 and glibc 2.1 anyway.
if (builtin.os.tag == .linux) {
const linux = std.os.linux;
var req: linux.timespec = .{
.sec = std.math.cast(linux.time_t, s) orelse std.math.maxInt(linux.time_t),
.nsec = std.math.cast(linux.time_t, ns) orelse std.math.maxInt(linux.time_t),
};
var rem: linux.timespec = undefined;
// Try to set the state from `unset` to `waiting` to indicate to the
// `set` thread that others are blocked on the ResetEvent. Avoid using
// any strict barriers until we know the ResetEvent is set.
var state = @atomicLoad(ResetEvent, re, .acquire);
if (state == .unset) {
state = @cmpxchgStrong(ResetEvent, re, state, .waiting, .acquire, .acquire) orelse .waiting;
}
// Wait until the ResetEvent is set since the state is waiting.
if (state == .waiting) {
var futex_deadline = Futex.Deadline.init(timeout);
while (true) {
switch (linux.E.init(linux.clock_nanosleep(.MONOTONIC, .{ .ABSTIME = false }, &req, &rem))) {
.SUCCESS => return,
.INTR => {
req = rem;
continue;
},
.FAULT => unreachable,
.INVAL => unreachable,
.OPNOTSUPP => unreachable,
else => return,
}
const wait_result = futex_deadline.wait(@ptrCast(re), @intFromEnum(ResetEvent.waiting));
// Check if the ResetEvent was set before possibly reporting error.Timeout below.
state = @atomicLoad(ResetEvent, re, .acquire);
if (state != .waiting) break;
try wait_result;
}
}
posix.nanosleep(s, ns);
}
assert(state == .is_set);
}
test sleep {
sleep(1);
}
/// Marks the logical boolean as `set` and unblocks any threads in `wait`
/// or `timedWait` to observe the new state.
///
/// The logical boolean stays `set` until `reset` is called, making future
/// `set` calls do nothing semantically.
///
/// The memory accesses before `set` can be said to happen before `isSet`
/// returns true or `wait`/`timedWait` return successfully.
pub fn set(re: *ResetEvent) void {
if (builtin.single_threaded) {
re.* = .is_set;
return;
}
if (@atomicRmw(ResetEvent, re, .Xchg, .is_set, .release) == .waiting) {
Futex.wake(@ptrCast(re), std.math.maxInt(u32));
}
}
/// Unmarks the ResetEvent as if `set` was never called.
///
/// Assumes no threads are blocked in `wait` or `timedWait`. Concurrent
/// calls to `set`, `isSet` and `reset` are allowed.
pub fn reset(re: *ResetEvent) void {
if (builtin.single_threaded) {
re.* = .unset;
return;
}
@atomicStore(ResetEvent, re, .unset, .monotonic);
}
};
const Thread = @This();
const Impl = if (native_os == .windows)
@ -130,6 +171,7 @@ pub const SetNameError = error{
NameTooLong,
Unsupported,
Unexpected,
InvalidWtf8,
} || posix.PrctlError || posix.WriteError || std.fs.File.OpenError || std.fmt.BufPrintError;
pub fn setName(self: Thread, name: []const u8) SetNameError!void {
@ -277,10 +319,13 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
var buf: [32]u8 = undefined;
const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()});
var threaded: std.Io.Threaded = .init_single_threaded;
const io = threaded.ioBasic();
const file = try std.fs.cwd().openFile(path, .{});
defer file.close();
var file_reader = file.readerStreaming(&.{});
var file_reader = file.readerStreaming(io, &.{});
const data_len = file_reader.interface.readSliceShort(buffer_ptr[0 .. max_name_len + 1]) catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
};
@ -385,6 +430,8 @@ pub const CpuCountError = error{
};
/// Returns the platform's view of the number of logical CPU cores available.
///
/// The returned value is guaranteed to be >= 1.
pub fn getCpuCount() CpuCountError!usize {
return try Impl.getCpuCount();
}
@ -963,7 +1010,7 @@ const WasiThreadImpl = struct {
@call(.auto, f, w.args) catch |err| {
std.debug.print("error: {s}\n", .{@errorName(err)});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace.*);
std.debug.dumpStackTrace(trace);
}
};
},
@ -1652,9 +1699,9 @@ test "setName, getName" {
if (builtin.single_threaded) return error.SkipZigTest;
const Context = struct {
start_wait_event: ResetEvent = .{},
test_done_event: ResetEvent = .{},
thread_done_event: ResetEvent = .{},
start_wait_event: ResetEvent = .unset,
test_done_event: ResetEvent = .unset,
thread_done_event: ResetEvent = .unset,
done: std.atomic.Value(bool) = std.atomic.Value(bool).init(false),
thread: Thread = undefined,
@ -1721,7 +1768,7 @@ test join {
if (builtin.single_threaded) return error.SkipZigTest;
var value: usize = 0;
var event = ResetEvent{};
var event: ResetEvent = .unset;
const thread = try Thread.spawn(.{}, testIncrementNotify, .{ &value, &event });
thread.join();
@ -1733,7 +1780,7 @@ test detach {
if (builtin.single_threaded) return error.SkipZigTest;
var value: usize = 0;
var event = ResetEvent{};
var event: ResetEvent = .unset;
const thread = try Thread.spawn(.{}, testIncrementNotify, .{ &value, &event });
thread.detach();
@ -1778,3 +1825,124 @@ fn testTls() !void {
x += 1;
if (x != 1235) return error.TlsBadEndValue;
}
test "ResetEvent smoke test" {
var event: ResetEvent = .unset;
try testing.expectEqual(false, event.isSet());
// make sure the event gets set
event.set();
try testing.expectEqual(true, event.isSet());
// make sure the event gets unset again
event.reset();
try testing.expectEqual(false, event.isSet());
// waits should timeout as there's no other thread to set the event
try testing.expectError(error.Timeout, event.timedWait(0));
try testing.expectError(error.Timeout, event.timedWait(std.time.ns_per_ms));
// set the event again and make sure waits complete
event.set();
event.wait();
try event.timedWait(std.time.ns_per_ms);
try testing.expectEqual(true, event.isSet());
}
test "ResetEvent signaling" {
// This test requires spawning threads
if (builtin.single_threaded) {
return error.SkipZigTest;
}
const Context = struct {
in: ResetEvent = .unset,
out: ResetEvent = .unset,
value: usize = 0,
fn input(self: *@This()) !void {
// wait for the value to become 1
self.in.wait();
self.in.reset();
try testing.expectEqual(self.value, 1);
// bump the value and wake up output()
self.value = 2;
self.out.set();
// wait for output to receive 2, bump the value and wake us up with 3
self.in.wait();
self.in.reset();
try testing.expectEqual(self.value, 3);
// bump the value and wake up output() for it to see 4
self.value = 4;
self.out.set();
}
fn output(self: *@This()) !void {
// start with 0 and bump the value for input to see 1
try testing.expectEqual(self.value, 0);
self.value = 1;
self.in.set();
// wait for input to receive 1, bump the value to 2 and wake us up
self.out.wait();
self.out.reset();
try testing.expectEqual(self.value, 2);
// bump the value to 3 for input to see (rhymes)
self.value = 3;
self.in.set();
// wait for input to bump the value to 4 and receive no more (rhymes)
self.out.wait();
self.out.reset();
try testing.expectEqual(self.value, 4);
}
};
var ctx = Context{};
const thread = try std.Thread.spawn(.{}, Context.output, .{&ctx});
defer thread.join();
try ctx.input();
}
test "ResetEvent broadcast" {
// This test requires spawning threads
if (builtin.single_threaded) {
return error.SkipZigTest;
}
const num_threads = 10;
const Barrier = struct {
event: ResetEvent = .unset,
counter: std.atomic.Value(usize) = std.atomic.Value(usize).init(num_threads),
fn wait(self: *@This()) void {
if (self.counter.fetchSub(1, .acq_rel) == 1) {
self.event.set();
}
}
};
const Context = struct {
start_barrier: Barrier = .{},
finish_barrier: Barrier = .{},
fn run(self: *@This()) void {
self.start_barrier.wait();
self.finish_barrier.wait();
}
};
var ctx = Context{};
var threads: [num_threads - 1]std.Thread = undefined;
for (&threads) |*t| t.* = try std.Thread.spawn(.{}, Context.run, .{&ctx});
defer for (threads) |t| t.join();
ctx.run();
}

View File

@ -123,14 +123,9 @@ const SingleThreadedImpl = struct {
fn wait(self: *Impl, mutex: *Mutex, timeout: ?u64) error{Timeout}!void {
_ = self;
_ = mutex;
// There are no other threads to wake us up.
// So if we wait without a timeout we would never wake up.
const timeout_ns = timeout orelse {
unreachable; // deadlock detected
};
std.Thread.sleep(timeout_ns);
assert(timeout != null); // Deadlock detected.
return error.Timeout;
}
@ -323,6 +318,8 @@ test "wait and signal" {
return error.SkipZigTest;
}
const io = testing.io;
const num_threads = 4;
const MultiWait = struct {
@ -348,7 +345,7 @@ test "wait and signal" {
}
while (true) {
std.Thread.sleep(100 * std.time.ns_per_ms);
try std.Io.Clock.Duration.sleep(.{ .clock = .awake, .raw = .fromMilliseconds(100) }, io);
multi_wait.mutex.lock();
defer multi_wait.mutex.unlock();
@ -368,6 +365,8 @@ test signal {
return error.SkipZigTest;
}
const io = testing.io;
const num_threads = 4;
const SignalTest = struct {
@ -405,7 +404,7 @@ test signal {
}
while (true) {
std.Thread.sleep(10 * std.time.ns_per_ms);
try std.Io.Clock.Duration.sleep(.{ .clock = .awake, .raw = .fromMilliseconds(10) }, io);
signal_test.mutex.lock();
defer signal_test.mutex.unlock();
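// A hedged sketch of performing the same clock-based sleep outside the test
// runner, assuming `Io.Threaded` keeps the `init_single_threaded` and
// `ioBasic()` shape used elsewhere in this change:
var example_threaded: std.Io.Threaded = .init_single_threaded;
const example_io = example_threaded.ioBasic();
// Sleep roughly 100ms on the "awake" clock; the call is fallible, hence `try`.
try std.Io.Clock.Duration.sleep(.{ .clock = .awake, .raw = .fromMilliseconds(100) }, example_io);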

View File

@ -116,7 +116,7 @@ const SingleThreadedImpl = struct {
unreachable; // deadlock detected
};
std.Thread.sleep(delay);
_ = delay;
return error.Timeout;
}

View File

@ -1,278 +0,0 @@
//! ResetEvent is a thread-safe bool which can be set to true/false ("set"/"unset").
//! It can also block threads until the "bool" is set with cancellation via timed waits.
//! ResetEvent can be statically initialized and is at most `@sizeOf(u64)` large.
const std = @import("../std.zig");
const builtin = @import("builtin");
const ResetEvent = @This();
const os = std.os;
const assert = std.debug.assert;
const testing = std.testing;
const Futex = std.Thread.Futex;
impl: Impl = .{},
/// Returns whether the ResetEvent was set().
/// Once reset() is called, this returns false until the next set().
/// The memory accesses before the set() can be said to happen before isSet() returns true.
pub fn isSet(self: *const ResetEvent) bool {
return self.impl.isSet();
}
/// Blocks the caller's thread until the ResetEvent is set().
/// This is effectively a more efficient version of `while (!isSet()) {}`.
/// The memory accesses before the set() can be said to happen before wait() returns.
pub fn wait(self: *ResetEvent) void {
self.impl.wait(null) catch |err| switch (err) {
error.Timeout => unreachable, // no timeout provided so we shouldn't have timed-out
};
}
/// Blocks the caller's thread until the ResetEvent is set(), or until the corresponding timeout expires.
/// If the timeout expires before the ResetEvent is set, `error.Timeout` is returned.
/// This is effectively a more efficient version of `while (!isSet()) {}`.
/// The memory accesses before the set() can be said to happen before timedWait() returns without error.
pub fn timedWait(self: *ResetEvent, timeout_ns: u64) error{Timeout}!void {
return self.impl.wait(timeout_ns);
}
/// Marks the ResetEvent as "set" and unblocks any threads in `wait()` or `timedWait()` to observe the new state.
/// The ResetEvent stays "set" until reset() is called, making future set() calls do nothing semantically.
/// The memory accesses before set() can be said to happen before isSet() returns true or wait()/timedWait() return successfully.
pub fn set(self: *ResetEvent) void {
self.impl.set();
}
/// Unmarks the ResetEvent from its "set" state if set() was called previously.
/// It is undefined behavior if reset() is called while threads are blocked in wait() or timedWait().
/// Concurrent calls to set(), isSet() and reset() are allowed.
pub fn reset(self: *ResetEvent) void {
self.impl.reset();
}
const Impl = if (builtin.single_threaded)
SingleThreadedImpl
else
FutexImpl;
const SingleThreadedImpl = struct {
is_set: bool = false,
fn isSet(self: *const Impl) bool {
return self.is_set;
}
fn wait(self: *Impl, timeout: ?u64) error{Timeout}!void {
if (self.isSet()) {
return;
}
// There are no other threads to wake us up.
// So if we wait without a timeout we would never wake up.
const timeout_ns = timeout orelse {
unreachable; // deadlock detected
};
std.Thread.sleep(timeout_ns);
return error.Timeout;
}
fn set(self: *Impl) void {
self.is_set = true;
}
fn reset(self: *Impl) void {
self.is_set = false;
}
};
const FutexImpl = struct {
state: std.atomic.Value(u32) = std.atomic.Value(u32).init(unset),
const unset = 0;
const waiting = 1;
const is_set = 2;
fn isSet(self: *const Impl) bool {
// Acquire barrier ensures memory accesses before set() happen before we return true.
return self.state.load(.acquire) == is_set;
}
fn wait(self: *Impl, timeout: ?u64) error{Timeout}!void {
// Outline the slow path to allow isSet() to be inlined
if (!self.isSet()) {
return self.waitUntilSet(timeout);
}
}
fn waitUntilSet(self: *Impl, timeout: ?u64) error{Timeout}!void {
@branchHint(.cold);
// Try to set the state from `unset` to `waiting` to indicate
// to the set() thread that others are blocked on the ResetEvent.
// We avoid using any strict barriers until the end when we know the ResetEvent is set.
var state = self.state.load(.acquire);
if (state == unset) {
state = self.state.cmpxchgStrong(state, waiting, .acquire, .acquire) orelse waiting;
}
// Wait until the ResetEvent is set since the state is waiting.
if (state == waiting) {
var futex_deadline = Futex.Deadline.init(timeout);
while (true) {
const wait_result = futex_deadline.wait(&self.state, waiting);
// Check if the ResetEvent was set before possibly reporting error.Timeout below.
state = self.state.load(.acquire);
if (state != waiting) {
break;
}
try wait_result;
}
}
assert(state == is_set);
}
fn set(self: *Impl) void {
// Quick check if the ResetEvent is already set before doing the atomic swap below.
// set() could be getting called quite often and multiple threads calling swap() increases contention unnecessarily.
if (self.state.load(.monotonic) == is_set) {
return;
}
// Mark the ResetEvent as set and unblock all waiters waiting on it if any.
// Release barrier ensures memory accesses before set() happen before the ResetEvent is observed to be "set".
if (self.state.swap(is_set, .release) == waiting) {
Futex.wake(&self.state, std.math.maxInt(u32));
}
}
fn reset(self: *Impl) void {
self.state.store(unset, .monotonic);
}
};
test "smoke test" {
// make sure the event is unset
var event = ResetEvent{};
try testing.expectEqual(false, event.isSet());
// make sure the event gets set
event.set();
try testing.expectEqual(true, event.isSet());
// make sure the event gets unset again
event.reset();
try testing.expectEqual(false, event.isSet());
// waits should timeout as there's no other thread to set the event
try testing.expectError(error.Timeout, event.timedWait(0));
try testing.expectError(error.Timeout, event.timedWait(std.time.ns_per_ms));
// set the event again and make sure waits complete
event.set();
event.wait();
try event.timedWait(std.time.ns_per_ms);
try testing.expectEqual(true, event.isSet());
}
test "signaling" {
// This test requires spawning threads
if (builtin.single_threaded) {
return error.SkipZigTest;
}
const Context = struct {
in: ResetEvent = .{},
out: ResetEvent = .{},
value: usize = 0,
fn input(self: *@This()) !void {
// wait for the value to become 1
self.in.wait();
self.in.reset();
try testing.expectEqual(self.value, 1);
// bump the value and wake up output()
self.value = 2;
self.out.set();
// wait for output to receive 2, bump the value and wake us up with 3
self.in.wait();
self.in.reset();
try testing.expectEqual(self.value, 3);
// bump the value and wake up output() for it to see 4
self.value = 4;
self.out.set();
}
fn output(self: *@This()) !void {
// start with 0 and bump the value for input to see 1
try testing.expectEqual(self.value, 0);
self.value = 1;
self.in.set();
// wait for input to receive 1, bump the value to 2 and wake us up
self.out.wait();
self.out.reset();
try testing.expectEqual(self.value, 2);
// bump the value to 3 for input to see (rhymes)
self.value = 3;
self.in.set();
// wait for input to bump the value to 4 and receive no more (rhymes)
self.out.wait();
self.out.reset();
try testing.expectEqual(self.value, 4);
}
};
var ctx = Context{};
const thread = try std.Thread.spawn(.{}, Context.output, .{&ctx});
defer thread.join();
try ctx.input();
}
test "broadcast" {
// This test requires spawning threads
if (builtin.single_threaded) {
return error.SkipZigTest;
}
const num_threads = 10;
const Barrier = struct {
event: ResetEvent = .{},
counter: std.atomic.Value(usize) = std.atomic.Value(usize).init(num_threads),
fn wait(self: *@This()) void {
if (self.counter.fetchSub(1, .acq_rel) == 1) {
self.event.set();
}
}
};
const Context = struct {
start_barrier: Barrier = .{},
finish_barrier: Barrier = .{},
fn run(self: *@This()) void {
self.start_barrier.wait();
self.finish_barrier.wait();
}
};
var ctx = Context{};
var threads: [num_threads - 1]std.Thread = undefined;
for (&threads) |*t| t.* = try std.Thread.spawn(.{}, Context.run, .{&ctx});
defer for (threads) |t| t.join();
ctx.run();
}

View File

@ -7,11 +7,15 @@ const is_waiting: usize = 1 << 0;
const one_pending: usize = 1 << 1;
state: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),
event: std.Thread.ResetEvent = .{},
event: std.Thread.ResetEvent = .unset,
pub fn start(self: *WaitGroup) void {
const state = self.state.fetchAdd(one_pending, .monotonic);
assert((state / one_pending) < (std.math.maxInt(usize) / one_pending));
return startStateless(&self.state);
}
pub fn startStateless(state: *std.atomic.Value(usize)) void {
const prev_state = state.fetchAdd(one_pending, .monotonic);
assert((prev_state / one_pending) < (std.math.maxInt(usize) / one_pending));
}
pub fn startMany(self: *WaitGroup, n: usize) void {
@ -28,13 +32,20 @@ pub fn finish(self: *WaitGroup) void {
}
}
pub fn wait(self: *WaitGroup) void {
const state = self.state.fetchAdd(is_waiting, .acquire);
assert(state & is_waiting == 0);
pub fn finishStateless(state: *std.atomic.Value(usize), event: *std.Thread.ResetEvent) void {
const prev_state = state.fetchSub(one_pending, .acq_rel);
assert((prev_state / one_pending) > 0);
if (prev_state == (one_pending | is_waiting)) event.set();
}
if ((state / one_pending) > 0) {
self.event.wait();
}
pub fn wait(wg: *WaitGroup) void {
return waitStateless(&wg.state, &wg.event);
}
pub fn waitStateless(state: *std.atomic.Value(usize), event: *std.Thread.ResetEvent) void {
const prev_state = state.fetchAdd(is_waiting, .acquire);
assert(prev_state & is_waiting == 0);
if ((prev_state / one_pending) > 0) event.wait();
}
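// A hedged usage sketch for the stateless helpers above, with the counter and
// event embedded in caller-owned state (the variable names are illustrative):
var example_state: std.atomic.Value(usize) = std.atomic.Value(usize).init(0);
var example_event: std.Thread.ResetEvent = .unset;
// Producer: account for one unit of pending work.
std.Thread.WaitGroup.startStateless(&example_state);
// Worker (typically on another thread), once the work is done:
// std.Thread.WaitGroup.finishStateless(&example_state, &example_event);
// Waiter: blocks on the event only while work is still pending.
std.Thread.WaitGroup.waitStateless(&example_state, &example_event);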
pub fn reset(self: *WaitGroup) void {

View File

@ -1,45 +1,48 @@
//! Uniform Resource Identifier (URI) parsing roughly adhering to <https://tools.ietf.org/html/rfc3986>.
//! Does not do perfect grammar and character class checking, but should be robust against URIs in the wild.
//! Uniform Resource Identifier (URI) parsing roughly adhering to
//! <https://tools.ietf.org/html/rfc3986>. Does not do perfect grammar and
//! character class checking, but should be robust against URIs in the wild.
const std = @import("std.zig");
const testing = std.testing;
const Uri = @This();
const Allocator = std.mem.Allocator;
const Writer = std.Io.Writer;
const HostName = std.Io.net.HostName;
scheme: []const u8,
user: ?Component = null,
password: ?Component = null,
/// If non-null, already validated.
host: ?Component = null,
port: ?u16 = null,
path: Component = Component.empty,
query: ?Component = null,
fragment: ?Component = null,
pub const host_name_max = 255;
pub const GetHostError = error{UriMissingHost};
/// Returned value may point into `buffer` or be the original string.
///
/// Suggested buffer length: `host_name_max`.
///
/// See also:
/// * `getHostAlloc`
pub fn getHost(uri: Uri, buffer: []u8) error{ UriMissingHost, UriHostTooLong }![]const u8 {
pub fn getHost(uri: Uri, buffer: *[HostName.max_len]u8) GetHostError!HostName {
const component = uri.host orelse return error.UriMissingHost;
return component.toRaw(buffer) catch |err| switch (err) {
error.NoSpaceLeft => return error.UriHostTooLong,
const bytes = component.toRaw(buffer) catch |err| switch (err) {
error.NoSpaceLeft => unreachable, // `host` already validated.
};
return .{ .bytes = bytes };
}
pub const GetHostAllocError = GetHostError || error{OutOfMemory};
/// Returned value may point into `buffer` or be the original string.
///
/// See also:
/// * `getHost`
pub fn getHostAlloc(uri: Uri, arena: Allocator) error{ UriMissingHost, UriHostTooLong, OutOfMemory }![]const u8 {
pub fn getHostAlloc(uri: Uri, arena: Allocator) GetHostAllocError!HostName {
const component = uri.host orelse return error.UriMissingHost;
const result = try component.toRawMaybeAlloc(arena);
if (result.len > host_name_max) return error.UriHostTooLong;
return result;
const bytes = try component.toRawMaybeAlloc(arena);
return .{ .bytes = bytes };
}
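// A minimal hedged sketch of the new `HostName`-returning accessor, assuming
// an already parsed `uri` value:
var example_host_buf: [HostName.max_len]u8 = undefined;
const example_host = try uri.getHost(&example_host_buf);
// `example_host.bytes` may point into the buffer or into the original string.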
pub const Component = union(enum) {
@ -194,7 +197,12 @@ pub fn percentDecodeInPlace(buffer: []u8) []u8 {
return percentDecodeBackwards(buffer, buffer);
}
pub const ParseError = error{ UnexpectedCharacter, InvalidFormat, InvalidPort };
pub const ParseError = error{
UnexpectedCharacter,
InvalidFormat,
InvalidPort,
InvalidHostName,
};
/// Parses the URI or returns an error. This function is not compliant, but is required to parse
/// some forms of URIs in the wild, such as HTTP Location headers.
@ -397,7 +405,7 @@ pub fn resolveInPlace(base: Uri, new_len: usize, aux_buf: *[]u8) ResolveInPlaceE
.scheme = new_parsed.scheme,
.user = new_parsed.user,
.password = new_parsed.password,
.host = new_parsed.host,
.host = try validateHostComponent(new_parsed.host),
.port = new_parsed.port,
.path = remove_dot_segments(new_path),
.query = new_parsed.query,
@ -408,7 +416,7 @@ pub fn resolveInPlace(base: Uri, new_len: usize, aux_buf: *[]u8) ResolveInPlaceE
.scheme = base.scheme,
.user = new_parsed.user,
.password = new_parsed.password,
.host = host,
.host = try validateHostComponent(host),
.port = new_parsed.port,
.path = remove_dot_segments(new_path),
.query = new_parsed.query,
@ -430,7 +438,7 @@ pub fn resolveInPlace(base: Uri, new_len: usize, aux_buf: *[]u8) ResolveInPlaceE
.scheme = base.scheme,
.user = base.user,
.password = base.password,
.host = base.host,
.host = try validateHostComponent(base.host),
.port = base.port,
.path = path,
.query = query,
@ -438,6 +446,18 @@ pub fn resolveInPlace(base: Uri, new_len: usize, aux_buf: *[]u8) ResolveInPlaceE
};
}
fn validateHostComponent(optional_component: ?Component) error{InvalidHostName}!?Component {
const component = optional_component orelse return null;
switch (component) {
.raw => |raw| HostName.validate(raw) catch return error.InvalidHostName,
.percent_encoded => |encoded| {
// TODO validate decoded name instead
HostName.validate(encoded) catch return error.InvalidHostName;
},
}
return component;
}
/// In-place implementation of RFC 3986, Section 5.2.4.
fn remove_dot_segments(path: []u8) Component {
var in_i: usize = 0;

View File

@ -37,19 +37,6 @@ pub const subsystem: ?std.Target.SubSystem = blk: {
pub const StackTrace = struct {
index: usize,
instruction_addresses: []usize,
pub fn format(st: *const StackTrace, writer: *std.Io.Writer) std.Io.Writer.Error!void {
// TODO: re-evaluate whether to use format() methods at all.
// Until then, avoid an error when using GeneralPurposeAllocator with WebAssembly
// where it tries to call detectTTYConfig here.
if (builtin.os.tag == .freestanding) return;
// TODO: why on earth are we using stderr's ttyconfig?
// If we want colored output, we should just make a formatter out of `writeStackTrace`.
const tty_config = std.Io.tty.detectConfig(.stderr());
try writer.writeAll("\n");
try std.debug.writeStackTrace(st, writer, tty_config);
}
};
/// This data structure is used by the Zig language code generation and

File diff suppressed because it is too large

View File

@ -4,6 +4,20 @@
//! concatenated together in the `bytes` array. The `map` field contains an
//! index from the DER-encoded subject name to the index of the containing
//! certificate within `bytes`.
const Bundle = @This();
const builtin = @import("builtin");
const std = @import("../../std.zig");
const Io = std.Io;
const assert = std.debug.assert;
const fs = std.fs;
const mem = std.mem;
const crypto = std.crypto;
const Allocator = std.mem.Allocator;
const Certificate = std.crypto.Certificate;
const der = Certificate.der;
const base64 = std.base64.standard.decoderWithIgnore(" \t\r\n");
/// The key is the contents slice of the subject.
map: std.HashMapUnmanaged(der.Element.Slice, u32, MapContext, std.hash_map.default_max_load_percentage) = .empty,
@ -56,18 +70,18 @@ pub const RescanError = RescanLinuxError || RescanMacError || RescanWithPathErro
/// file system standard locations for certificates.
/// For operating systems that do not have standard CA installations to be
/// found, this function clears the set of certificates.
pub fn rescan(cb: *Bundle, gpa: Allocator) RescanError!void {
pub fn rescan(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanError!void {
switch (builtin.os.tag) {
.linux => return rescanLinux(cb, gpa),
.macos => return rescanMac(cb, gpa),
.freebsd, .openbsd => return rescanWithPath(cb, gpa, "/etc/ssl/cert.pem"),
.netbsd => return rescanWithPath(cb, gpa, "/etc/openssl/certs/ca-certificates.crt"),
.dragonfly => return rescanWithPath(cb, gpa, "/usr/local/etc/ssl/cert.pem"),
.illumos => return rescanWithPath(cb, gpa, "/etc/ssl/cacert.pem"),
.haiku => return rescanWithPath(cb, gpa, "/boot/system/data/ssl/CARootCertificates.pem"),
.linux => return rescanLinux(cb, gpa, io, now),
.macos => return rescanMac(cb, gpa, io, now),
.freebsd, .openbsd => return rescanWithPath(cb, gpa, io, now, "/etc/ssl/cert.pem"),
.netbsd => return rescanWithPath(cb, gpa, io, now, "/etc/openssl/certs/ca-certificates.crt"),
.dragonfly => return rescanWithPath(cb, gpa, io, now, "/usr/local/etc/ssl/cert.pem"),
.illumos => return rescanWithPath(cb, gpa, io, now, "/etc/ssl/cacert.pem"),
.haiku => return rescanWithPath(cb, gpa, io, now, "/boot/system/data/ssl/CARootCertificates.pem"),
// https://github.com/SerenityOS/serenity/blob/222acc9d389bc6b490d4c39539761b043a4bfcb0/Ports/ca-certificates/package.sh#L19
.serenity => return rescanWithPath(cb, gpa, "/etc/ssl/certs/ca-certificates.crt"),
.windows => return rescanWindows(cb, gpa),
.serenity => return rescanWithPath(cb, gpa, io, now, "/etc/ssl/certs/ca-certificates.crt"),
.windows => return rescanWindows(cb, gpa, io, now),
else => {},
}
}
@ -77,7 +91,7 @@ const RescanMacError = @import("Bundle/macos.zig").RescanMacError;
const RescanLinuxError = AddCertsFromFilePathError || AddCertsFromDirPathError;
fn rescanLinux(cb: *Bundle, gpa: Allocator) RescanLinuxError!void {
fn rescanLinux(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanLinuxError!void {
// Possible certificate files; stop after finding one.
const cert_file_paths = [_][]const u8{
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
@ -100,7 +114,7 @@ fn rescanLinux(cb: *Bundle, gpa: Allocator) RescanLinuxError!void {
scan: {
for (cert_file_paths) |cert_file_path| {
if (addCertsFromFilePathAbsolute(cb, gpa, cert_file_path)) |_| {
if (addCertsFromFilePathAbsolute(cb, gpa, io, now, cert_file_path)) |_| {
break :scan;
} else |err| switch (err) {
error.FileNotFound => continue,
@ -109,7 +123,7 @@ fn rescanLinux(cb: *Bundle, gpa: Allocator) RescanLinuxError!void {
}
for (cert_dir_paths) |cert_dir_path| {
addCertsFromDirPathAbsolute(cb, gpa, cert_dir_path) catch |err| switch (err) {
addCertsFromDirPathAbsolute(cb, gpa, io, now, cert_dir_path) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| return e,
};
@ -121,19 +135,21 @@ fn rescanLinux(cb: *Bundle, gpa: Allocator) RescanLinuxError!void {
const RescanWithPathError = AddCertsFromFilePathError;
fn rescanWithPath(cb: *Bundle, gpa: Allocator, cert_file_path: []const u8) RescanWithPathError!void {
fn rescanWithPath(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp, cert_file_path: []const u8) RescanWithPathError!void {
cb.bytes.clearRetainingCapacity();
cb.map.clearRetainingCapacity();
try addCertsFromFilePathAbsolute(cb, gpa, cert_file_path);
try addCertsFromFilePathAbsolute(cb, gpa, io, now, cert_file_path);
cb.bytes.shrinkAndFree(gpa, cb.bytes.items.len);
}
const RescanWindowsError = Allocator.Error || ParseCertError || std.posix.UnexpectedError || error{FileNotFound};
fn rescanWindows(cb: *Bundle, gpa: Allocator) RescanWindowsError!void {
fn rescanWindows(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanWindowsError!void {
cb.bytes.clearRetainingCapacity();
cb.map.clearRetainingCapacity();
_ = io;
const w = std.os.windows;
const GetLastError = w.GetLastError;
const root = [4:0]u16{ 'R', 'O', 'O', 'T' };
@ -143,7 +159,7 @@ fn rescanWindows(cb: *Bundle, gpa: Allocator) RescanWindowsError!void {
};
defer _ = w.crypt32.CertCloseStore(store, 0);
const now_sec = std.time.timestamp();
const now_sec = now.toSeconds();
var ctx = w.crypt32.CertEnumCertificatesInStore(store, null);
while (ctx) |context| : (ctx = w.crypt32.CertEnumCertificatesInStore(store, ctx)) {
@ -160,28 +176,31 @@ pub const AddCertsFromDirPathError = fs.File.OpenError || AddCertsFromDirError;
pub fn addCertsFromDirPath(
cb: *Bundle,
gpa: Allocator,
io: Io,
dir: fs.Dir,
sub_dir_path: []const u8,
) AddCertsFromDirPathError!void {
var iterable_dir = try dir.openDir(sub_dir_path, .{ .iterate = true });
defer iterable_dir.close();
return addCertsFromDir(cb, gpa, iterable_dir);
return addCertsFromDir(cb, gpa, io, iterable_dir);
}
pub fn addCertsFromDirPathAbsolute(
cb: *Bundle,
gpa: Allocator,
io: Io,
now: Io.Timestamp,
abs_dir_path: []const u8,
) AddCertsFromDirPathError!void {
assert(fs.path.isAbsolute(abs_dir_path));
var iterable_dir = try fs.openDirAbsolute(abs_dir_path, .{ .iterate = true });
defer iterable_dir.close();
return addCertsFromDir(cb, gpa, iterable_dir);
return addCertsFromDir(cb, gpa, io, now, iterable_dir);
}
pub const AddCertsFromDirError = AddCertsFromFilePathError;
pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, iterable_dir: fs.Dir) AddCertsFromDirError!void {
pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp, iterable_dir: fs.Dir) AddCertsFromDirError!void {
var it = iterable_dir.iterate();
while (try it.next()) |entry| {
switch (entry.kind) {
@ -189,32 +208,37 @@ pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, iterable_dir: fs.Dir) AddCer
else => continue,
}
try addCertsFromFilePath(cb, gpa, iterable_dir, entry.name);
try addCertsFromFilePath(cb, gpa, io, now, iterable_dir.adaptToNewApi(), entry.name);
}
}
pub const AddCertsFromFilePathError = fs.File.OpenError || AddCertsFromFileError;
pub const AddCertsFromFilePathError = fs.File.OpenError || AddCertsFromFileError || Io.Clock.Error;
pub fn addCertsFromFilePathAbsolute(
cb: *Bundle,
gpa: Allocator,
io: Io,
now: Io.Timestamp,
abs_file_path: []const u8,
) AddCertsFromFilePathError!void {
assert(fs.path.isAbsolute(abs_file_path));
var file = try fs.openFileAbsolute(abs_file_path, .{});
defer file.close();
return addCertsFromFile(cb, gpa, file);
var file_reader = file.reader(io, &.{});
return addCertsFromFile(cb, gpa, &file_reader, now.toSeconds());
}
pub fn addCertsFromFilePath(
cb: *Bundle,
gpa: Allocator,
dir: fs.Dir,
io: Io,
now: Io.Timestamp,
dir: Io.Dir,
sub_file_path: []const u8,
) AddCertsFromFilePathError!void {
var file = try dir.openFile(sub_file_path, .{});
defer file.close();
return addCertsFromFile(cb, gpa, file);
var file = try dir.openFile(io, sub_file_path, .{});
defer file.close(io);
var file_reader = file.reader(io, &.{});
return addCertsFromFile(cb, gpa, &file_reader, now.toSeconds());
}
pub const AddCertsFromFileError = Allocator.Error ||
@ -222,10 +246,10 @@ pub const AddCertsFromFileError = Allocator.Error ||
fs.File.ReadError ||
ParseCertError ||
std.base64.Error ||
error{ CertificateAuthorityBundleTooBig, MissingEndCertificateMarker };
error{ CertificateAuthorityBundleTooBig, MissingEndCertificateMarker, Streaming };
pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFromFileError!void {
const size = try file.getEndPos();
pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file_reader: *Io.File.Reader, now_sec: i64) AddCertsFromFileError!void {
const size = try file_reader.getSize();
// We borrow `bytes` as a temporary buffer for the base64-encoded data.
// This is possible by computing the decoded length and reserving the space
@ -236,14 +260,14 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFrom
try cb.bytes.ensureUnusedCapacity(gpa, needed_capacity);
const end_reserved: u32 = @intCast(cb.bytes.items.len + decoded_size_upper_bound);
const buffer = cb.bytes.allocatedSlice()[end_reserved..];
const end_index = try file.readAll(buffer);
const end_index = file_reader.interface.readSliceShort(buffer) catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
};
const encoded_bytes = buffer[0..end_index];
const begin_marker = "-----BEGIN CERTIFICATE-----";
const end_marker = "-----END CERTIFICATE-----";
const now_sec = std.time.timestamp();
var start_index: usize = 0;
while (mem.indexOfPos(u8, encoded_bytes, start_index, begin_marker)) |begin_marker_start| {
const cert_start = begin_marker_start + begin_marker.len;
@ -288,19 +312,6 @@ pub fn parseCert(cb: *Bundle, gpa: Allocator, decoded_start: u32, now_sec: i64)
}
}
const builtin = @import("builtin");
const std = @import("../../std.zig");
const assert = std.debug.assert;
const fs = std.fs;
const mem = std.mem;
const crypto = std.crypto;
const Allocator = std.mem.Allocator;
const Certificate = std.crypto.Certificate;
const der = Certificate.der;
const Bundle = @This();
const base64 = std.base64.standard.decoderWithIgnore(" \t\r\n");
const MapContext = struct {
cb: *const Bundle,
@ -321,8 +332,13 @@ const MapContext = struct {
test "scan for OS-provided certificates" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
var bundle: Bundle = .{};
defer bundle.deinit(std.testing.allocator);
const io = std.testing.io;
const gpa = std.testing.allocator;
try bundle.rescan(std.testing.allocator);
var bundle: Bundle = .{};
defer bundle.deinit(gpa);
const now = try Io.Clock.real.now(io);
try bundle.rescan(gpa, io, now);
}
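// A hedged sketch of adding certificates from a single file with the new
// `Io`-based signature, reusing `gpa`, `io`, and `bundle` from the test above
// (the path below is just an example):
const example_now = try Io.Clock.real.now(io);
try bundle.addCertsFromFilePathAbsolute(gpa, io, example_now, "/etc/ssl/cert.pem");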

View File

@ -1,4 +1,5 @@
const std = @import("std");
const Io = std.Io;
const assert = std.debug.assert;
const fs = std.fs;
const mem = std.mem;
@ -7,7 +8,7 @@ const Bundle = @import("../Bundle.zig");
pub const RescanMacError = Allocator.Error || fs.File.OpenError || fs.File.ReadError || fs.File.SeekError || Bundle.ParseCertError || error{EndOfStream};
pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
pub fn rescanMac(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp) RescanMacError!void {
cb.bytes.clearRetainingCapacity();
cb.map.clearRetainingCapacity();
@ -16,6 +17,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
"/Library/Keychains/System.keychain",
};
_ = io; // TODO migrate file system to use std.Io
for (keychain_paths) |keychain_path| {
const bytes = std.fs.cwd().readFileAlloc(keychain_path, gpa, .limited(std.math.maxInt(u32))) catch |err| switch (err) {
error.StreamTooLong => return error.FileTooBig,
@ -23,8 +25,8 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
};
defer gpa.free(bytes);
var reader: std.Io.Reader = .fixed(bytes);
scanReader(cb, gpa, &reader) catch |err| switch (err) {
var reader: Io.Reader = .fixed(bytes);
scanReader(cb, gpa, &reader, now.toSeconds()) catch |err| switch (err) {
error.ReadFailed => unreachable, // prebuffered
else => |e| return e,
};
@ -33,7 +35,7 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
cb.bytes.shrinkAndFree(gpa, cb.bytes.items.len);
}
fn scanReader(cb: *Bundle, gpa: Allocator, reader: *std.Io.Reader) !void {
fn scanReader(cb: *Bundle, gpa: Allocator, reader: *Io.Reader, now_sec: i64) !void {
const db_header = try reader.takeStruct(ApplDbHeader, .big);
assert(mem.eql(u8, &db_header.signature, "kych"));
@ -49,8 +51,6 @@ fn scanReader(cb: *Bundle, gpa: Allocator, reader: *std.Io.Reader) !void {
table_list[table_idx] = try reader.takeInt(u32, .big);
}
const now_sec = std.time.timestamp();
for (table_list) |table_offset| {
reader.seek = db_header.schema_offset + table_offset;

View File

@ -105,6 +105,14 @@ pub const Options = struct {
/// Verify that the server certificate is authorized by a given ca bundle.
bundle: Certificate.Bundle,
},
write_buffer: []u8,
read_buffer: []u8,
/// Cryptographically secure random bytes. The pointer is not captured; data is only
/// read during `init`.
entropy: *const [176]u8,
/// Current time according to the wall clock / calendar, in seconds.
realtime_now_seconds: i64,
/// If non-null, ssl secrets are logged to this stream. Creating such a log file allows
/// other programs with access to that file to decrypt all traffic over this connection.
///
@ -120,8 +128,6 @@ pub const Options = struct {
/// application layer itself verifies that the amount of data received equals
/// the amount of data expected, such as HTTP with the Content-Length header.
allow_truncation_attacks: bool = false,
write_buffer: []u8,
read_buffer: []u8,
/// Populated when `error.TlsAlert` is returned from `init`.
alert: ?*tls.Alert = null,
};
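// A hedged sketch of how a caller might populate the two new fields, assuming
// it still draws entropy from `std.crypto.random` and has an `io` for the clock:
var example_entropy: [176]u8 = undefined;
std.crypto.random.bytes(&example_entropy);
const example_now = try std.Io.Clock.real.now(io);
// Then pass `.entropy = &example_entropy` and
// `.realtime_now_seconds = example_now.toSeconds()` when building `Options`.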
@ -189,14 +195,12 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
};
const host_len: u16 = @intCast(host.len);
var random_buffer: [176]u8 = undefined;
crypto.random.bytes(&random_buffer);
const client_hello_rand = random_buffer[0..32].*;
const client_hello_rand = options.entropy[0..32].*;
var key_seq: u64 = 0;
var server_hello_rand: [32]u8 = undefined;
const legacy_session_id = random_buffer[32..64].*;
const legacy_session_id = options.entropy[32..64].*;
var key_share = KeyShare.init(random_buffer[64..176].*) catch |err| switch (err) {
var key_share = KeyShare.init(options.entropy[64..176].*) catch |err| switch (err) {
// Only possible to happen if the seed is all zeroes.
error.IdentityElement => return error.InsufficientEntropy,
};
@ -321,7 +325,7 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
var handshake_cipher: tls.HandshakeCipher = undefined;
var main_cert_pub_key: CertificatePublicKey = undefined;
var tls12_negotiated_group: ?tls.NamedGroup = null;
const now_sec = std.time.timestamp();
const now_sec = options.realtime_now_seconds;
var cleartext_fragment_start: usize = 0;
var cleartext_fragment_end: usize = 0;

View File

@ -1,4 +1,7 @@
const std = @import("std.zig");
const Io = std.Io;
const Writer = std.Io.Writer;
const tty = std.Io.tty;
const math = std.math;
const mem = std.mem;
const posix = std.posix;
@ -7,12 +10,11 @@ const testing = std.testing;
const Allocator = mem.Allocator;
const File = std.fs.File;
const windows = std.os.windows;
const Writer = std.Io.Writer;
const tty = std.Io.tty;
const builtin = @import("builtin");
const native_arch = builtin.cpu.arch;
const native_os = builtin.os.tag;
const StackTrace = std.builtin.StackTrace;
const root = @import("root");
@ -82,6 +84,7 @@ pub const SelfInfoError = error{
/// The required debug info could not be read from disk due to some IO error.
ReadFailed,
OutOfMemory,
Canceled,
Unexpected,
};
@ -544,7 +547,7 @@ pub fn defaultPanic(
stderr.print("panic: ", .{}) catch break :trace;
} else {
const current_thread_id = std.Thread.getCurrentId();
stderr.print("thread {} panic: ", .{current_thread_id}) catch break :trace;
stderr.print("thread {d} panic: ", .{current_thread_id}) catch break :trace;
}
stderr.print("{s}\n", .{msg}) catch break :trace;
@ -606,8 +609,8 @@ pub const StackUnwindOptions = struct {
/// the given buffer, so `addr_buf` must have a lifetime at least equal to the `StackTrace`.
///
/// See `writeCurrentStackTrace` to immediately print the trace instead of capturing it.
pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf: []usize) std.builtin.StackTrace {
const empty_trace: std.builtin.StackTrace = .{ .index = 0, .instruction_addresses = &.{} };
pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf: []usize) StackTrace {
const empty_trace: StackTrace = .{ .index = 0, .instruction_addresses = &.{} };
if (!std.options.allow_stack_tracing) return empty_trace;
var it = StackIterator.init(options.context) catch return empty_trace;
defer it.deinit();
@ -645,6 +648,9 @@ pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf:
///
/// See `captureCurrentStackTrace` to capture the trace addresses into a buffer instead of printing.
pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Writer, tty_config: tty.Config) Writer.Error!void {
var threaded: Io.Threaded = .init_single_threaded;
const io = threaded.ioBasic();
if (!std.options.allow_stack_tracing) {
tty_config.setColor(writer, .dim) catch {};
try writer.print("Cannot print stack trace: stack tracing is disabled\n", .{});
@ -691,6 +697,7 @@ pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Wri
error.UnsupportedDebugInfo => "unwind info unsupported",
error.ReadFailed => "filesystem error",
error.OutOfMemory => "out of memory",
error.Canceled => "operation canceled",
error.Unexpected => "unexpected error",
};
if (it.stratOk(options.allow_unsafe_unwind)) {
@ -728,7 +735,7 @@ pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Wri
}
// `ret_addr` is the return address, which is *after* the function call.
// Subtract 1 to get an address *in* the function call for a better source location.
try printSourceAtAddress(di_gpa, di, writer, ret_addr -| StackIterator.ra_call_offset, tty_config);
try printSourceAtAddress(di_gpa, io, di, writer, ret_addr -| StackIterator.ra_call_offset, tty_config);
printed_any_frame = true;
},
};
@ -752,14 +759,29 @@ pub fn dumpCurrentStackTrace(options: StackUnwindOptions) void {
};
}
pub const FormatStackTrace = struct {
stack_trace: StackTrace,
tty_config: tty.Config,
pub fn format(context: @This(), writer: *Io.Writer) Io.Writer.Error!void {
try writer.writeAll("\n");
try writeStackTrace(&context.stack_trace, writer, context.tty_config);
}
};
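// A hedged usage sketch, assuming `{f}` dispatches to the `format` method
// above and that `example_trace` is a previously captured `StackTrace`:
const example_tty_config = tty.detectConfig(.stderr());
std.debug.print("{f}", .{std.debug.FormatStackTrace{
    .stack_trace = example_trace,
    .tty_config = example_tty_config,
}});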
/// Write a previously captured stack trace to `writer`, annotated with source locations.
pub fn writeStackTrace(st: *const std.builtin.StackTrace, writer: *Writer, tty_config: tty.Config) Writer.Error!void {
pub fn writeStackTrace(st: *const StackTrace, writer: *Writer, tty_config: tty.Config) Writer.Error!void {
if (!std.options.allow_stack_tracing) {
tty_config.setColor(writer, .dim) catch {};
try writer.print("Cannot print stack trace: stack tracing is disabled\n", .{});
tty_config.setColor(writer, .reset) catch {};
return;
}
// We use an independent Io implementation here in case there was a problem
// with the application's Io implementation itself.
var threaded: Io.Threaded = .init_single_threaded;
const io = threaded.ioBasic();
// Fetch `st.index` straight away. Aside from avoiding redundant loads, this prevents issues if
// `st` is `@errorReturnTrace()` and errors are encountered while writing the stack trace.
const n_frames = st.index;
@ -777,7 +799,7 @@ pub fn writeStackTrace(st: *const std.builtin.StackTrace, writer: *Writer, tty_c
for (st.instruction_addresses[0..captured_frames]) |ret_addr| {
// `ret_addr` is the return address, which is *after* the function call.
// Subtract 1 to get an address *in* the function call for a better source location.
try printSourceAtAddress(di_gpa, di, writer, ret_addr -| StackIterator.ra_call_offset, tty_config);
try printSourceAtAddress(di_gpa, io, di, writer, ret_addr -| StackIterator.ra_call_offset, tty_config);
}
if (n_frames > captured_frames) {
tty_config.setColor(writer, .bold) catch {};
@ -786,7 +808,7 @@ pub fn writeStackTrace(st: *const std.builtin.StackTrace, writer: *Writer, tty_c
}
}
/// A thin wrapper around `writeStackTrace` which writes to stderr and ignores write errors.
pub fn dumpStackTrace(st: *const std.builtin.StackTrace) void {
pub fn dumpStackTrace(st: *const StackTrace) void {
const tty_config = tty.detectConfig(.stderr());
const stderr = lockStderrWriter(&.{});
defer unlockStderrWriter();
@ -1073,13 +1095,13 @@ pub inline fn stripInstructionPtrAuthCode(ptr: usize) usize {
return ptr;
}
fn printSourceAtAddress(gpa: Allocator, debug_info: *SelfInfo, writer: *Writer, address: usize, tty_config: tty.Config) Writer.Error!void {
const symbol: Symbol = debug_info.getSymbol(gpa, address) catch |err| switch (err) {
fn printSourceAtAddress(gpa: Allocator, io: Io, debug_info: *SelfInfo, writer: *Writer, address: usize, tty_config: tty.Config) Writer.Error!void {
const symbol: Symbol = debug_info.getSymbol(gpa, io, address) catch |err| switch (err) {
error.MissingDebugInfo,
error.UnsupportedDebugInfo,
error.InvalidDebugInfo,
=> .unknown,
error.ReadFailed, error.Unexpected => s: {
error.ReadFailed, error.Unexpected, error.Canceled => s: {
tty_config.setColor(writer, .dim) catch {};
try writer.print("Failed to read debug info from filesystem, trace may be incomplete\n\n", .{});
tty_config.setColor(writer, .reset) catch {};
@ -1387,10 +1409,10 @@ pub fn maybeEnableSegfaultHandler() void {
var windows_segfault_handle: ?windows.HANDLE = null;
pub fn updateSegfaultHandler(act: ?*const posix.Sigaction) void {
posix.sigaction(posix.SIG.SEGV, act, null);
posix.sigaction(posix.SIG.ILL, act, null);
posix.sigaction(posix.SIG.BUS, act, null);
posix.sigaction(posix.SIG.FPE, act, null);
posix.sigaction(.SEGV, act, null);
posix.sigaction(.ILL, act, null);
posix.sigaction(.BUS, act, null);
posix.sigaction(.FPE, act, null);
}
/// Attaches a global handler for several signals which, when triggered, prints output to stderr
@ -1435,7 +1457,7 @@ fn resetSegfaultHandler() void {
updateSegfaultHandler(&act);
}
fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.c) noreturn {
fn handleSegfaultPosix(sig: posix.SIG, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.c) noreturn {
if (use_trap_panic) @trap();
const addr: ?usize, const name: []const u8 = info: {
if (native_os == .linux and native_arch == .x86_64) {
@ -1447,7 +1469,7 @@ fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopa
// for example when reading/writing model-specific registers
// by executing `rdmsr` or `wrmsr` in user-space (unprivileged mode).
const SI_KERNEL = 0x80;
if (sig == posix.SIG.SEGV and info.code == SI_KERNEL) {
if (sig == .SEGV and info.code == SI_KERNEL) {
break :info .{ null, "General protection exception" };
}
}
@ -1474,10 +1496,10 @@ fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopa
else => comptime unreachable,
};
const name = switch (sig) {
posix.SIG.SEGV => "Segmentation fault",
posix.SIG.ILL => "Illegal instruction",
posix.SIG.BUS => "Bus error",
posix.SIG.FPE => "Arithmetic exception",
.SEGV => "Segmentation fault",
.ILL => "Illegal instruction",
.BUS => "Bus error",
.FPE => "Arithmetic exception",
else => unreachable,
};
break :info .{ addr, name };
@ -1579,11 +1601,14 @@ test "manage resources correctly" {
}
};
const gpa = std.testing.allocator;
var discarding: std.Io.Writer.Discarding = .init(&.{});
var threaded: Io.Threaded = .init_single_threaded;
const io = threaded.ioBasic();
var discarding: Io.Writer.Discarding = .init(&.{});
var di: SelfInfo = .init;
defer di.deinit(gpa);
try printSourceAtAddress(
gpa,
io,
&di,
&discarding.writer,
S.showMyTrace(),
@ -1657,7 +1682,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
stderr.print("{s}:\n", .{t.notes[i]}) catch return;
var frames_array_mutable = frames_array;
const frames = mem.sliceTo(frames_array_mutable[0..], 0);
const stack_trace: std.builtin.StackTrace = .{
const stack_trace: StackTrace = .{
.index = frames.len,
.instruction_addresses = frames,
};

View File

@ -108,6 +108,8 @@ pub const LoadError = error{
LockedMemoryLimitExceeded,
ProcessFdQuotaExceeded,
SystemFdQuotaExceeded,
Streaming,
Canceled,
Unexpected,
};
@ -408,7 +410,7 @@ fn loadInner(
arena: Allocator,
elf_file: std.fs.File,
opt_crc: ?u32,
) (LoadError || error{CrcMismatch})!LoadInnerResult {
) (LoadError || error{ CrcMismatch, Streaming, Canceled })!LoadInnerResult {
const mapped_mem: []align(std.heap.page_size_min) const u8 = mapped: {
const file_len = std.math.cast(
usize,

View File

@ -28,7 +28,8 @@ pub fn deinit(si: *SelfInfo, gpa: Allocator) void {
if (si.unwind_cache) |cache| gpa.free(cache);
}
pub fn getSymbol(si: *SelfInfo, gpa: Allocator, address: usize) Error!std.debug.Symbol {
pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!std.debug.Symbol {
_ = io;
const module = try si.findModule(gpa, address, .exclusive);
defer si.rwlock.unlock();
@ -336,6 +337,7 @@ const Module = struct {
var elf_file = load_result catch |err| switch (err) {
error.OutOfMemory,
error.Unexpected,
error.Canceled,
=> |e| return e,
error.Overflow,
@ -353,6 +355,7 @@ const Module = struct {
error.LockedMemoryLimitExceeded,
error.ProcessFdQuotaExceeded,
error.SystemFdQuotaExceeded,
error.Streaming,
=> return error.ReadFailed,
};
errdefer elf_file.deinit(gpa);
@ -487,6 +490,7 @@ const DlIterContext = struct {
};
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;
const Dwarf = std.debug.Dwarf;
const Error = std.debug.SelfInfoError;

View File

@ -30,7 +30,8 @@ pub fn deinit(si: *SelfInfo, gpa: Allocator) void {
si.ofiles.deinit(gpa);
}
pub fn getSymbol(si: *SelfInfo, gpa: Allocator, address: usize) Error!std.debug.Symbol {
pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!std.debug.Symbol {
_ = io;
const module = try si.findModule(gpa, address);
defer si.mutex.unlock();
@ -117,11 +118,14 @@ pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error
error.ReadFailed,
error.OutOfMemory,
error.Unexpected,
error.Canceled,
=> |e| return e,
error.UnsupportedRegister,
error.UnsupportedAddrSize,
error.UnimplementedUserOpcode,
=> return error.UnsupportedDebugInfo,
error.Overflow,
error.EndOfStream,
error.StreamTooLong,
@ -967,6 +971,7 @@ fn loadOFile(gpa: Allocator, o_file_path: []const u8) !OFile {
}
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;
const Dwarf = std.debug.Dwarf;
const Error = std.debug.SelfInfoError;

View File

@ -20,11 +20,11 @@ pub fn deinit(si: *SelfInfo, gpa: Allocator) void {
module_name_arena.deinit();
}
pub fn getSymbol(si: *SelfInfo, gpa: Allocator, address: usize) Error!std.debug.Symbol {
pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!std.debug.Symbol {
si.mutex.lock();
defer si.mutex.unlock();
const module = try si.findModule(gpa, address);
const di = try module.getDebugInfo(gpa);
const di = try module.getDebugInfo(gpa, io);
return di.getSymbol(gpa, address - module.base_address);
}
pub fn getModuleName(si: *SelfInfo, gpa: Allocator, address: usize) Error![]const u8 {
@ -190,6 +190,7 @@ const Module = struct {
const DebugInfo = struct {
arena: std.heap.ArenaAllocator.State,
io: Io,
coff_image_base: u64,
mapped_file: ?MappedFile,
dwarf: ?Dwarf,
@ -209,9 +210,10 @@ const Module = struct {
};
fn deinit(di: *DebugInfo, gpa: Allocator) void {
const io = di.io;
if (di.dwarf) |*dwarf| dwarf.deinit(gpa);
if (di.pdb) |*pdb| {
pdb.file_reader.file.close();
pdb.file_reader.file.close(io);
pdb.deinit();
}
if (di.mapped_file) |*mf| mf.deinit();
@ -277,11 +279,11 @@ const Module = struct {
}
};
fn getDebugInfo(module: *Module, gpa: Allocator) Error!*DebugInfo {
if (module.di == null) module.di = loadDebugInfo(module, gpa);
fn getDebugInfo(module: *Module, gpa: Allocator, io: Io) Error!*DebugInfo {
if (module.di == null) module.di = loadDebugInfo(module, gpa, io);
return if (module.di.?) |*di| di else |err| err;
}
fn loadDebugInfo(module: *const Module, gpa: Allocator) Error!DebugInfo {
fn loadDebugInfo(module: *const Module, gpa: Allocator, io: Io) Error!DebugInfo {
const mapped_ptr: [*]const u8 = @ptrFromInt(module.base_address);
const mapped = mapped_ptr[0..module.size];
var coff_obj = coff.Coff.init(mapped, true) catch return error.InvalidDebugInfo;
@ -305,7 +307,10 @@ const Module = struct {
windows.PATH_MAX_WIDE,
);
if (len == 0) return error.MissingDebugInfo;
const coff_file = fs.openFileAbsoluteW(name_buffer[0 .. len + 4 :0], .{}) catch |err| switch (err) {
const name_w = name_buffer[0 .. len + 4 :0];
var threaded: Io.Threaded = .init_single_threaded;
const coff_file = threaded.dirOpenFileWtf16(null, name_w, .{}) catch |err| switch (err) {
error.Canceled => |e| return e,
error.Unexpected => |e| return e,
error.FileNotFound => return error.MissingDebugInfo,
@ -314,8 +319,6 @@ const Module = struct {
error.NotDir,
error.SymLinkLoop,
error.NameTooLong,
error.InvalidUtf8,
error.InvalidWtf8,
error.BadPathName,
=> return error.InvalidDebugInfo,
@ -338,7 +341,7 @@ const Module = struct {
error.FileBusy,
=> return error.ReadFailed,
};
errdefer coff_file.close();
errdefer coff_file.close(io);
var section_handle: windows.HANDLE = undefined;
const create_section_rc = windows.ntdll.NtCreateSection(
&section_handle,
@ -372,7 +375,7 @@ const Module = struct {
const section_view = section_view_ptr.?[0..coff_len];
coff_obj = coff.Coff.init(section_view, false) catch return error.InvalidDebugInfo;
break :mapped .{
.file = coff_file,
.file = .adaptFromNewApi(coff_file),
.section_handle = section_handle,
.section_view = section_view,
};
@ -434,8 +437,8 @@ const Module = struct {
};
errdefer pdb_file.close();
const pdb_reader = try arena.create(std.fs.File.Reader);
pdb_reader.* = pdb_file.reader(try arena.alloc(u8, 4096));
const pdb_reader = try arena.create(Io.File.Reader);
pdb_reader.* = pdb_file.reader(io, try arena.alloc(u8, 4096));
var pdb = Pdb.init(gpa, pdb_reader) catch |err| switch (err) {
error.OutOfMemory, error.ReadFailed, error.Unexpected => |e| return e,
@ -473,7 +476,7 @@ const Module = struct {
break :pdb pdb;
};
errdefer if (opt_pdb) |*pdb| {
pdb.file_reader.file.close();
pdb.file_reader.file.close(io);
pdb.deinit();
};
@ -483,6 +486,7 @@ const Module = struct {
return .{
.arena = arena_instance.state,
.io = io,
.coff_image_base = coff_image_base,
.mapped_file = mapped_file,
.dwarf = opt_dwarf,
@ -544,6 +548,7 @@ fn findModule(si: *SelfInfo, gpa: Allocator, address: usize) error{ MissingDebug
}
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;
const Dwarf = std.debug.Dwarf;
const Pdb = std.debug.Pdb;

View File

@ -137,6 +137,8 @@ const ElfDynLibError = error{
ElfStringSectionNotFound,
ElfSymSectionNotFound,
ElfHashTableNotFound,
Canceled,
Streaming,
} || posix.OpenError || posix.MMapError;
pub const ElfDynLib = struct {

View File

@ -1,9 +1,11 @@
//! Executable and Linkable Format.
const std = @import("std.zig");
const Io = std.Io;
const math = std.math;
const mem = std.mem;
const assert = std.debug.assert;
const Endian = std.builtin.Endian;
const native_endian = @import("builtin").target.cpu.arch.endian();
pub const AT_NULL = 0;
@ -568,7 +570,7 @@ pub const ET = enum(u16) {
/// All integers are native endian.
pub const Header = struct {
is_64: bool,
endian: std.builtin.Endian,
endian: Endian,
os_abi: OSABI,
/// The meaning of this value depends on `os_abi`.
abi_version: u8,
@ -583,48 +585,76 @@ pub const Header = struct {
shnum: u16,
shstrndx: u16,
pub fn iterateProgramHeaders(h: Header, file_reader: *std.fs.File.Reader) ProgramHeaderIterator {
pub fn iterateProgramHeaders(h: *const Header, file_reader: *Io.File.Reader) ProgramHeaderIterator {
return .{
.elf_header = h,
.is_64 = h.is_64,
.endian = h.endian,
.phnum = h.phnum,
.phoff = h.phoff,
.file_reader = file_reader,
};
}
pub fn iterateProgramHeadersBuffer(h: Header, buf: []const u8) ProgramHeaderBufferIterator {
pub fn iterateProgramHeadersBuffer(h: *const Header, buf: []const u8) ProgramHeaderBufferIterator {
return .{
.elf_header = h,
.is_64 = h.is_64,
.endian = h.endian,
.phnum = h.phnum,
.phoff = h.phoff,
.buf = buf,
};
}
pub fn iterateSectionHeaders(h: Header, file_reader: *std.fs.File.Reader) SectionHeaderIterator {
pub fn iterateSectionHeaders(h: *const Header, file_reader: *Io.File.Reader) SectionHeaderIterator {
return .{
.elf_header = h,
.is_64 = h.is_64,
.endian = h.endian,
.shnum = h.shnum,
.shoff = h.shoff,
.file_reader = file_reader,
};
}
pub fn iterateSectionHeadersBuffer(h: Header, buf: []const u8) SectionHeaderBufferIterator {
pub fn iterateSectionHeadersBuffer(h: *const Header, buf: []const u8) SectionHeaderBufferIterator {
return .{
.elf_header = h,
.is_64 = h.is_64,
.endian = h.endian,
.shnum = h.shnum,
.shoff = h.shoff,
.buf = buf,
};
}
pub const ReadError = std.Io.Reader.Error || error{
pub fn iterateDynamicSection(
h: *const Header,
file_reader: *Io.File.Reader,
offset: u64,
size: u64,
) DynamicSectionIterator {
return .{
.is_64 = h.is_64,
.endian = h.endian,
.offset = offset,
.end_offset = offset + size,
.file_reader = file_reader,
};
}
pub const ReadError = Io.Reader.Error || error{
InvalidElfMagic,
InvalidElfVersion,
InvalidElfClass,
InvalidElfEndian,
};
pub fn read(r: *std.Io.Reader) ReadError!Header {
/// If this function fails, seek position of `r` is unchanged.
pub fn read(r: *Io.Reader) ReadError!Header {
const buf = try r.peek(@sizeOf(Elf64_Ehdr));
if (!mem.eql(u8, buf[0..4], MAGIC)) return error.InvalidElfMagic;
if (buf[EI.VERSION] != 1) return error.InvalidElfVersion;
const endian: std.builtin.Endian = switch (buf[EI.DATA]) {
const endian: Endian = switch (buf[EI.DATA]) {
ELFDATA2LSB => .little,
ELFDATA2MSB => .big,
else => return error.InvalidElfEndian,
@ -637,7 +667,7 @@ pub const Header = struct {
};
}
pub fn init(hdr: anytype, endian: std.builtin.Endian) Header {
pub fn init(hdr: anytype, endian: Endian) Header {
// Converting integers to exhaustive enums using `@enumFromInt` could cause a panic.
comptime assert(!@typeInfo(OSABI).@"enum".is_exhaustive);
return .{
@ -664,46 +694,54 @@ pub const Header = struct {
};
pub const ProgramHeaderIterator = struct {
elf_header: Header,
file_reader: *std.fs.File.Reader,
is_64: bool,
endian: Endian,
phnum: u16,
phoff: u64,
file_reader: *Io.File.Reader,
index: usize = 0,
pub fn next(it: *ProgramHeaderIterator) !?Elf64_Phdr {
if (it.index >= it.elf_header.phnum) return null;
if (it.index >= it.phnum) return null;
defer it.index += 1;
const size: u64 = if (it.elf_header.is_64) @sizeOf(Elf64_Phdr) else @sizeOf(Elf32_Phdr);
const offset = it.elf_header.phoff + size * it.index;
const size: u64 = if (it.is_64) @sizeOf(Elf64_Phdr) else @sizeOf(Elf32_Phdr);
const offset = it.phoff + size * it.index;
try it.file_reader.seekTo(offset);
return takePhdr(&it.file_reader.interface, it.elf_header);
return try takeProgramHeader(&it.file_reader.interface, it.is_64, it.endian);
}
};
pub const ProgramHeaderBufferIterator = struct {
elf_header: Header,
is_64: bool,
endian: Endian,
phnum: u16,
phoff: u64,
buf: []const u8,
index: usize = 0,
pub fn next(it: *ProgramHeaderBufferIterator) !?Elf64_Phdr {
if (it.index >= it.elf_header.phnum) return null;
if (it.index >= it.phnum) return null;
defer it.index += 1;
const size: u64 = if (it.elf_header.is_64) @sizeOf(Elf64_Phdr) else @sizeOf(Elf32_Phdr);
const offset = it.elf_header.phoff + size * it.index;
var reader = std.Io.Reader.fixed(it.buf[offset..]);
const size: u64 = if (it.is_64) @sizeOf(Elf64_Phdr) else @sizeOf(Elf32_Phdr);
const offset = it.phoff + size * it.index;
var reader = Io.Reader.fixed(it.buf[offset..]);
return takePhdr(&reader, it.elf_header);
return try takeProgramHeader(&reader, it.is_64, it.endian);
}
};
fn takePhdr(reader: *std.Io.Reader, elf_header: Header) !?Elf64_Phdr {
if (elf_header.is_64) {
const phdr = try reader.takeStruct(Elf64_Phdr, elf_header.endian);
pub fn takeProgramHeader(reader: *Io.Reader, is_64: bool, endian: Endian) !Elf64_Phdr {
if (is_64) {
const phdr = try reader.takeStruct(Elf64_Phdr, endian);
return phdr;
}
const phdr = try reader.takeStruct(Elf32_Phdr, elf_header.endian);
const phdr = try reader.takeStruct(Elf32_Phdr, endian);
return .{
.p_type = phdr.p_type,
.p_offset = phdr.p_offset,
@ -717,47 +755,55 @@ fn takePhdr(reader: *std.Io.Reader, elf_header: Header) !?Elf64_Phdr {
}
pub const SectionHeaderIterator = struct {
elf_header: Header,
file_reader: *std.fs.File.Reader,
is_64: bool,
endian: Endian,
shnum: u16,
shoff: u64,
file_reader: *Io.File.Reader,
index: usize = 0,
pub fn next(it: *SectionHeaderIterator) !?Elf64_Shdr {
if (it.index >= it.elf_header.shnum) return null;
if (it.index >= it.shnum) return null;
defer it.index += 1;
const size: u64 = if (it.elf_header.is_64) @sizeOf(Elf64_Shdr) else @sizeOf(Elf32_Shdr);
const offset = it.elf_header.shoff + size * it.index;
const size: u64 = if (it.is_64) @sizeOf(Elf64_Shdr) else @sizeOf(Elf32_Shdr);
const offset = it.shoff + size * it.index;
try it.file_reader.seekTo(offset);
return takeShdr(&it.file_reader.interface, it.elf_header);
return try takeSectionHeader(&it.file_reader.interface, it.is_64, it.endian);
}
};
pub const SectionHeaderBufferIterator = struct {
elf_header: Header,
is_64: bool,
endian: Endian,
shnum: u16,
shoff: u64,
buf: []const u8,
index: usize = 0,
pub fn next(it: *SectionHeaderBufferIterator) !?Elf64_Shdr {
if (it.index >= it.elf_header.shnum) return null;
if (it.index >= it.shnum) return null;
defer it.index += 1;
const size: u64 = if (it.elf_header.is_64) @sizeOf(Elf64_Shdr) else @sizeOf(Elf32_Shdr);
const offset = it.elf_header.shoff + size * it.index;
const size: u64 = if (it.is_64) @sizeOf(Elf64_Shdr) else @sizeOf(Elf32_Shdr);
const offset = it.shoff + size * it.index;
if (offset > it.buf.len) return error.EndOfStream;
var reader = std.Io.Reader.fixed(it.buf[@intCast(offset)..]);
var reader = Io.Reader.fixed(it.buf[@intCast(offset)..]);
return takeShdr(&reader, it.elf_header);
return try takeSectionHeader(&reader, it.is_64, it.endian);
}
};
fn takeShdr(reader: *std.Io.Reader, elf_header: Header) !?Elf64_Shdr {
if (elf_header.is_64) {
const shdr = try reader.takeStruct(Elf64_Shdr, elf_header.endian);
pub fn takeSectionHeader(reader: *Io.Reader, is_64: bool, endian: Endian) !Elf64_Shdr {
if (is_64) {
const shdr = try reader.takeStruct(Elf64_Shdr, endian);
return shdr;
}
const shdr = try reader.takeStruct(Elf32_Shdr, elf_header.endian);
const shdr = try reader.takeStruct(Elf32_Shdr, endian);
return .{
.sh_name = shdr.sh_name,
.sh_type = shdr.sh_type,
@ -772,6 +818,36 @@ fn takeShdr(reader: *std.Io.Reader, elf_header: Header) !?Elf64_Shdr {
};
}
pub const DynamicSectionIterator = struct {
is_64: bool,
endian: Endian,
offset: u64,
end_offset: u64,
file_reader: *Io.File.Reader,
pub fn next(it: *DynamicSectionIterator) !?Elf64_Dyn {
if (it.offset >= it.end_offset) return null;
const size: u64 = if (it.is_64) @sizeOf(Elf64_Dyn) else @sizeOf(Elf32_Dyn);
defer it.offset += size;
try it.file_reader.seekTo(it.offset);
return try takeDynamicSection(&it.file_reader.interface, it.is_64, it.endian);
}
};
pub fn takeDynamicSection(reader: *Io.Reader, is_64: bool, endian: Endian) !Elf64_Dyn {
if (is_64) {
const dyn = try reader.takeStruct(Elf64_Dyn, endian);
return dyn;
}
const dyn = try reader.takeStruct(Elf32_Dyn, endian);
return .{
.d_tag = dyn.d_tag,
.d_val = dyn.d_val,
};
}
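// A hedged sketch of driving the reader-based iterators, assuming an already
// opened `std.Io.File` named `example_file` and an `example_io` instance:
var example_buf: [512]u8 = undefined;
var example_reader = example_file.reader(example_io, &example_buf);
const example_header = try Header.read(&example_reader.interface);
var example_phdrs = example_header.iterateProgramHeaders(&example_reader);
while (try example_phdrs.next()) |phdr| {
    // Each entry is an `Elf64_Phdr`; 32-bit headers are widened on the fly.
    _ = phdr;
}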
pub const EI = struct {
pub const CLASS = 4;
pub const DATA = 5;

View File

@ -1,14 +1,15 @@
//! File System.
const builtin = @import("builtin");
const native_os = builtin.os.tag;
const std = @import("std.zig");
const builtin = @import("builtin");
const Io = std.Io;
const root = @import("root");
const mem = std.mem;
const base64 = std.base64;
const crypto = std.crypto;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const native_os = builtin.os.tag;
const posix = std.posix;
const windows = std.os.windows;
@ -97,23 +98,6 @@ pub const base64_encoder = base64.Base64Encoder.init(base64_alphabet, null);
/// Base64 decoder, replacing the standard `+/` with `-_` so that it can be used in a file name on any filesystem.
pub const base64_decoder = base64.Base64Decoder.init(base64_alphabet, null);
/// Same as `Dir.updateFile`, except asserts that both `source_path` and `dest_path`
/// are absolute. See `Dir.updateFile` for a function that operates on both
/// absolute and relative paths.
/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, both paths should be encoded as valid UTF-8.
/// On other platforms, both paths are an opaque sequence of bytes with no particular encoding.
pub fn updateFileAbsolute(
source_path: []const u8,
dest_path: []const u8,
args: Dir.CopyFileOptions,
) !Dir.PrevStatus {
assert(path.isAbsolute(source_path));
assert(path.isAbsolute(dest_path));
const my_cwd = cwd();
return Dir.updateFile(my_cwd, source_path, my_cwd, dest_path, args);
}
/// Same as `Dir.copyFile`, except asserts that both `source_path` and `dest_path`
/// are absolute. See `Dir.copyFile` for a function that operates on both
/// absolute and relative paths.
@ -131,6 +115,8 @@ pub fn copyFileAbsolute(
return Dir.copyFile(my_cwd, source_path, my_cwd, dest_path, args);
}
test copyFileAbsolute {}
/// Create a new directory, based on an absolute path.
/// Asserts that the path is absolute. See `Dir.makeDir` for a function that operates
/// on both absolute and relative paths.
@ -142,17 +128,15 @@ pub fn makeDirAbsolute(absolute_path: []const u8) !void {
return posix.mkdir(absolute_path, Dir.default_mode);
}
test makeDirAbsolute {}
/// Same as `makeDirAbsolute` except the parameter is null-terminated.
pub fn makeDirAbsoluteZ(absolute_path_z: [*:0]const u8) !void {
assert(path.isAbsoluteZ(absolute_path_z));
return posix.mkdirZ(absolute_path_z, Dir.default_mode);
}
/// Same as `makeDirAbsolute` except the parameter is a null-terminated WTF-16 LE-encoded string.
pub fn makeDirAbsoluteW(absolute_path_w: [*:0]const u16) !void {
assert(path.isAbsoluteWindowsW(absolute_path_w));
return posix.mkdirW(mem.span(absolute_path_w), Dir.default_mode);
}
test makeDirAbsoluteZ {}
/// Same as `Dir.deleteDir` except the path is absolute.
/// On Windows, `dir_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
@ -169,12 +153,6 @@ pub fn deleteDirAbsoluteZ(dir_path: [*:0]const u8) !void {
return posix.rmdirZ(dir_path);
}
/// Same as `deleteDirAbsolute` except the path parameter is WTF-16 and target OS is assumed Windows.
pub fn deleteDirAbsoluteW(dir_path: [*:0]const u16) !void {
assert(path.isAbsoluteWindowsW(dir_path));
return posix.rmdirW(mem.span(dir_path));
}
/// Same as `Dir.rename` except the paths are absolute.
/// On Windows, both paths should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, both paths should be encoded as valid UTF-8.
@ -192,13 +170,6 @@ pub fn renameAbsoluteZ(old_path: [*:0]const u8, new_path: [*:0]const u8) !void {
return posix.renameZ(old_path, new_path);
}
/// Same as `renameAbsolute` except the path parameters are WTF-16 and target OS is assumed Windows.
pub fn renameAbsoluteW(old_path: [*:0]const u16, new_path: [*:0]const u16) !void {
assert(path.isAbsoluteWindowsW(old_path));
assert(path.isAbsoluteWindowsW(new_path));
return posix.renameW(old_path, new_path);
}
/// Same as `Dir.rename`, except `new_sub_path` is relative to `new_dir`
pub fn rename(old_dir: Dir, old_sub_path: []const u8, new_dir: Dir, new_sub_path: []const u8) !void {
return posix.renameat(old_dir.fd, old_sub_path, new_dir.fd, new_sub_path);
@ -209,15 +180,7 @@ pub fn renameZ(old_dir: Dir, old_sub_path_z: [*:0]const u8, new_dir: Dir, new_su
return posix.renameatZ(old_dir.fd, old_sub_path_z, new_dir.fd, new_sub_path_z);
}
/// Same as `rename` except the parameters are WTF16LE, NT prefixed.
/// This function is Windows-only.
pub fn renameW(old_dir: Dir, old_sub_path_w: []const u16, new_dir: Dir, new_sub_path_w: []const u16) !void {
return posix.renameatW(old_dir.fd, old_sub_path_w, new_dir.fd, new_sub_path_w, windows.TRUE);
}
/// Returns a handle to the current working directory. It is not opened with iteration capability.
/// Closing the returned `Dir` is checked illegal behavior. Iterating over the result is illegal behavior.
/// On POSIX targets, this function is comptime-callable.
/// Deprecated in favor of `Io.Dir.cwd`.
pub fn cwd() Dir {
if (native_os == .windows) {
return .{ .fd = windows.peb().ProcessParameters.CurrentDirectory.Handle };
@ -251,12 +214,6 @@ pub fn openDirAbsoluteZ(absolute_path_c: [*:0]const u8, flags: Dir.OpenOptions)
assert(path.isAbsoluteZ(absolute_path_c));
return cwd().openDirZ(absolute_path_c, flags);
}
/// Same as `openDirAbsolute` but the path parameter is null-terminated.
pub fn openDirAbsoluteW(absolute_path_c: [*:0]const u16, flags: Dir.OpenOptions) File.OpenError!Dir {
assert(path.isAbsoluteWindowsW(absolute_path_c));
return cwd().openDirW(absolute_path_c, flags);
}
/// Opens a file for reading or writing, without attempting to create a new file, based on an absolute path.
/// Call `File.close` to release the resource.
/// Asserts that the path is absolute. See `Dir.openFile` for a function that
@ -271,18 +228,6 @@ pub fn openFileAbsolute(absolute_path: []const u8, flags: File.OpenFlags) File.O
return cwd().openFile(absolute_path, flags);
}
/// Same as `openFileAbsolute` but the path parameter is null-terminated.
pub fn openFileAbsoluteZ(absolute_path_c: [*:0]const u8, flags: File.OpenFlags) File.OpenError!File {
assert(path.isAbsoluteZ(absolute_path_c));
return cwd().openFileZ(absolute_path_c, flags);
}
/// Same as `openFileAbsolute` but the path parameter is WTF-16-encoded.
pub fn openFileAbsoluteW(absolute_path_w: []const u16, flags: File.OpenFlags) File.OpenError!File {
assert(path.isAbsoluteWindowsWTF16(absolute_path_w));
return cwd().openFileW(absolute_path_w, flags);
}
/// Test accessing `path`.
/// Be careful of Time-Of-Check-Time-Of-Use race conditions when using this function.
/// For example, instead of testing if a file exists and then opening it, just
@ -291,21 +236,10 @@ pub fn openFileAbsoluteW(absolute_path_w: []const u16, flags: File.OpenFlags) Fi
/// On Windows, `absolute_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On WASI, `absolute_path` should be encoded as valid UTF-8.
/// On other platforms, `absolute_path` is an opaque sequence of bytes with no particular encoding.
pub fn accessAbsolute(absolute_path: []const u8, flags: File.OpenFlags) Dir.AccessError!void {
pub fn accessAbsolute(absolute_path: []const u8, flags: Io.Dir.AccessOptions) Dir.AccessError!void {
assert(path.isAbsolute(absolute_path));
try cwd().access(absolute_path, flags);
}
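The TOCTOU advice in the doc comment above is worth seeing concretely. A minimal sketch (not part of the diff; the path is hypothetical) of the recommended open-and-handle-the-error pattern instead of access-then-open:

const std = @import("std");

fn useConfigIfPresent() !void {
    // "config.txt" is a made-up path for illustration only.
    const file = std.fs.cwd().openFile("config.txt", .{}) catch |err| switch (err) {
        error.FileNotFound => return, // absence handled here; no access()/open() race window
        else => |e| return e,
    };
    defer file.close();
    // ... read from `file` ...
}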
/// Same as `accessAbsolute` but the path parameter is null-terminated.
pub fn accessAbsoluteZ(absolute_path: [*:0]const u8, flags: File.OpenFlags) Dir.AccessError!void {
assert(path.isAbsoluteZ(absolute_path));
try cwd().accessZ(absolute_path, flags);
}
/// Same as `accessAbsolute` but the path parameter is WTF-16 encoded.
pub fn accessAbsoluteW(absolute_path: [*:0]const u16, flags: File.OpenFlags) Dir.AccessError!void {
assert(path.isAbsoluteWindowsW(absolute_path));
try cwd().accessW(absolute_path, flags);
}
/// Creates, opens, or overwrites a file with write access, based on an absolute path.
/// Call `File.close` to release the resource.
/// Asserts that the path is absolute. See `Dir.createFile` for a function that
@ -320,18 +254,6 @@ pub fn createFileAbsolute(absolute_path: []const u8, flags: File.CreateFlags) Fi
return cwd().createFile(absolute_path, flags);
}
/// Same as `createFileAbsolute` but the path parameter is null-terminated.
pub fn createFileAbsoluteZ(absolute_path_c: [*:0]const u8, flags: File.CreateFlags) File.OpenError!File {
assert(path.isAbsoluteZ(absolute_path_c));
return cwd().createFileZ(absolute_path_c, flags);
}
/// Same as `createFileAbsolute` but the path parameter is WTF-16 encoded.
pub fn createFileAbsoluteW(absolute_path_w: [*:0]const u16, flags: File.CreateFlags) File.OpenError!File {
assert(path.isAbsoluteWindowsW(absolute_path_w));
return cwd().createFileW(mem.span(absolute_path_w), flags);
}
/// Delete a file name and possibly the file it refers to, based on an absolute path.
/// Asserts that the path is absolute. See `Dir.deleteFile` for a function that
/// operates on both absolute and relative paths.
@ -344,18 +266,6 @@ pub fn deleteFileAbsolute(absolute_path: []const u8) Dir.DeleteFileError!void {
return cwd().deleteFile(absolute_path);
}
/// Same as `deleteFileAbsolute` except the parameter is null-terminated.
pub fn deleteFileAbsoluteZ(absolute_path_c: [*:0]const u8) Dir.DeleteFileError!void {
assert(path.isAbsoluteZ(absolute_path_c));
return cwd().deleteFileZ(absolute_path_c);
}
/// Same as `deleteFileAbsolute` except the parameter is WTF-16 encoded.
pub fn deleteFileAbsoluteW(absolute_path_w: [*:0]const u16) Dir.DeleteFileError!void {
assert(path.isAbsoluteWindowsW(absolute_path_w));
return cwd().deleteFileW(mem.span(absolute_path_w));
}
/// Removes a symlink, file, or directory.
/// This is equivalent to `Dir.deleteTree` with the base directory.
/// Asserts that the path is absolute. See `Dir.deleteTree` for a function that
@ -387,19 +297,6 @@ pub fn readLinkAbsolute(pathname: []const u8, buffer: *[max_path_bytes]u8) ![]u8
return posix.readlink(pathname, buffer);
}
/// Windows-only. Same as `readlinkW`, except the path parameter is null-terminated, WTF16
/// encoded.
pub fn readlinkAbsoluteW(pathname_w: [*:0]const u16, buffer: *[max_path_bytes]u8) ![]u8 {
assert(path.isAbsoluteWindowsW(pathname_w));
return posix.readlinkW(mem.span(pathname_w), buffer);
}
/// Same as `readLink`, except the path parameter is null-terminated.
pub fn readLinkAbsoluteZ(pathname_c: [*:0]const u8, buffer: *[max_path_bytes]u8) ![]u8 {
assert(path.isAbsoluteZ(pathname_c));
return posix.readlinkZ(pathname_c, buffer);
}
/// Creates a symbolic link named `sym_link_path` which contains the string `target_path`.
/// A symbolic link (also known as a soft link) may point to an existing file or to a nonexistent
/// one; the latter case is known as a dangling link.
@ -437,44 +334,21 @@ pub fn symLinkAbsoluteW(
return windows.CreateSymbolicLink(null, mem.span(sym_link_path_w), mem.span(target_path_w), flags.is_directory);
}
/// Same as `symLinkAbsolute` except the parameters are null-terminated pointers.
/// See also `symLinkAbsolute`.
pub fn symLinkAbsoluteZ(
target_path_c: [*:0]const u8,
sym_link_path_c: [*:0]const u8,
flags: Dir.SymLinkFlags,
) !void {
assert(path.isAbsoluteZ(target_path_c));
assert(path.isAbsoluteZ(sym_link_path_c));
if (native_os == .windows) {
const target_path_w = try windows.cStrToPrefixedFileW(null, target_path_c);
const sym_link_path_w = try windows.cStrToPrefixedFileW(null, sym_link_path_c);
return windows.CreateSymbolicLink(null, sym_link_path_w.span(), target_path_w.span(), flags.is_directory);
}
return posix.symlinkZ(target_path_c, sym_link_path_c);
}
pub const OpenSelfExeError = posix.OpenError || SelfExePathError || posix.FlockError;
pub const OpenSelfExeError = Io.File.OpenSelfExeError;
/// Deprecated in favor of `Io.File.openSelfExe`.
pub fn openSelfExe(flags: File.OpenFlags) OpenSelfExeError!File {
if (native_os == .linux or native_os == .serenity) {
return openFileAbsoluteZ("/proc/self/exe", flags);
}
if (native_os == .windows) {
// If ImagePathName is a symlink, then it will contain the path of the symlink,
// not the path that the symlink points to. However, because we are opening
// the file, we can let the openFileW call follow the symlink for us.
const image_path_unicode_string = &windows.peb().ProcessParameters.ImagePathName;
const image_path_name = image_path_unicode_string.Buffer.?[0 .. image_path_unicode_string.Length / 2 :0];
const prefixed_path_w = try windows.wToPrefixedFileW(null, image_path_name);
return cwd().openFileW(prefixed_path_w.span(), flags);
if (native_os == .linux or native_os == .serenity or native_os == .windows) {
var threaded: Io.Threaded = .init_single_threaded;
const io = threaded.ioBasic();
return .adaptFromNewApi(try Io.File.openSelfExe(io, flags));
}
// Use of max_path_bytes here is valid as the resulting path is immediately
// opened with no modification.
var buf: [max_path_bytes]u8 = undefined;
const self_exe_path = try selfExePath(&buf);
buf[self_exe_path.len] = 0;
return openFileAbsoluteZ(buf[0..self_exe_path.len :0].ptr, flags);
return openFileAbsolute(buf[0..self_exe_path.len :0], flags);
}
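Callers can use the same bridging pattern directly. A hedged sketch (not part of the diff) built only from the calls visible above (`init_single_threaded`, `ioBasic`, `Io.File.openSelfExe`, `adaptFromNewApi`):

const std = @import("std");

pub fn main() !void {
    var threaded: std.Io.Threaded = .init_single_threaded;
    const io = threaded.ioBasic();
    // Open via the new API, then adapt back to a legacy `std.fs.File`.
    const exe: std.fs.File = .adaptFromNewApi(try std.Io.File.openSelfExe(io, .{}));
    defer exe.close();
    // Any cleanup `Io.Threaded` may require is omitted from this sketch.
}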
// This is `posix.ReadLinkError || posix.RealPathError` with impossible errors excluded
@ -515,6 +389,8 @@ pub const SelfExePathError = error{
/// On Windows, the volume does not contain a recognized file system. File
/// system drivers might not be loaded, or the volume may be corrupt.
UnrecognizedVolume,
Canceled,
} || posix.SysCtlError;
/// `selfExePath` except allocates the result on the heap.
@ -554,7 +430,6 @@ pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 {
var real_path_buf: [max_path_bytes]u8 = undefined;
const real_path = std.posix.realpathZ(&symlink_path_buf, &real_path_buf) catch |err| switch (err) {
error.InvalidWtf8 => unreachable, // Windows-only
error.NetworkNotFound => unreachable, // Windows-only
else => |e| return e,
};
@ -565,15 +440,11 @@ pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 {
}
switch (native_os) {
.linux, .serenity => return posix.readlinkZ("/proc/self/exe", out_buffer) catch |err| switch (err) {
error.InvalidUtf8 => unreachable, // WASI-only
error.InvalidWtf8 => unreachable, // Windows-only
error.UnsupportedReparsePointType => unreachable, // Windows-only
error.NetworkNotFound => unreachable, // Windows-only
else => |e| return e,
},
.illumos => return posix.readlinkZ("/proc/self/path/a.out", out_buffer) catch |err| switch (err) {
error.InvalidUtf8 => unreachable, // WASI-only
error.InvalidWtf8 => unreachable, // Windows-only
error.UnsupportedReparsePointType => unreachable, // Windows-only
error.NetworkNotFound => unreachable, // Windows-only
else => |e| return e,
@ -602,7 +473,6 @@ pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 {
// argv[0] is a path (relative or absolute): use realpath(3) directly
var real_path_buf: [max_path_bytes]u8 = undefined;
const real_path = posix.realpathZ(std.os.argv[0], &real_path_buf) catch |err| switch (err) {
error.InvalidWtf8 => unreachable, // Windows-only
error.NetworkNotFound => unreachable, // Windows-only
else => |e| return e,
};
@ -645,10 +515,7 @@ pub fn selfExePath(out_buffer: []u8) SelfExePathError![]u8 {
// that the symlink points to, though, so we need to get the realpath.
var pathname_w = try windows.wToPrefixedFileW(null, image_path_name);
const wide_slice = std.fs.cwd().realpathW2(pathname_w.span(), &pathname_w.data) catch |err| switch (err) {
error.InvalidWtf8 => unreachable,
else => |e| return e,
};
const wide_slice = try std.fs.cwd().realpathW2(pathname_w.span(), &pathname_w.data);
const len = std.unicode.calcWtf8Len(wide_slice);
if (len > out_buffer.len)
@ -702,16 +569,10 @@ pub fn realpathAlloc(allocator: Allocator, pathname: []const u8) ![]u8 {
}
test {
if (native_os != .wasi) {
_ = &makeDirAbsolute;
_ = &makeDirAbsoluteZ;
_ = &copyFileAbsolute;
_ = &updateFileAbsolute;
}
_ = &AtomicFile;
_ = &Dir;
_ = &File;
_ = &path;
_ = AtomicFile;
_ = Dir;
_ = File;
_ = path;
_ = @import("fs/test.zig");
_ = @import("fs/get_app_data_dir.zig");
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -313,7 +313,7 @@ pub fn isAbsoluteWindowsW(path_w: [*:0]const u16) bool {
return isAbsoluteWindowsImpl(u16, mem.sliceTo(path_w, 0));
}
pub fn isAbsoluteWindowsWTF16(path: []const u16) bool {
pub fn isAbsoluteWindowsWtf16(path: []const u16) bool {
return isAbsoluteWindowsImpl(u16, path);
}


@ -1,10 +1,12 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const native_os = builtin.os.tag;
const std = @import("../std.zig");
const Io = std.Io;
const testing = std.testing;
const fs = std.fs;
const mem = std.mem;
const wasi = std.os.wasi;
const native_os = builtin.os.tag;
const windows = std.os.windows;
const posix = std.posix;
@ -73,6 +75,7 @@ const PathType = enum {
};
const TestContext = struct {
io: Io,
path_type: PathType,
path_sep: u8,
arena: ArenaAllocator,
@ -83,6 +86,7 @@ const TestContext = struct {
pub fn init(path_type: PathType, path_sep: u8, allocator: mem.Allocator, transform_fn: *const PathType.TransformFn) TestContext {
const tmp = tmpDir(.{ .iterate = true });
return .{
.io = testing.io,
.path_type = path_type,
.path_sep = path_sep,
.arena = ArenaAllocator.init(allocator),
@ -1319,6 +1323,8 @@ test "max file name component lengths" {
}
test "writev, readv" {
const io = testing.io;
var tmp = tmpDir(.{});
defer tmp.cleanup();
@ -1327,78 +1333,55 @@ test "writev, readv" {
var buf1: [line1.len]u8 = undefined;
var buf2: [line2.len]u8 = undefined;
var write_vecs = [_]posix.iovec_const{
.{
.base = line1,
.len = line1.len,
},
.{
.base = line2,
.len = line2.len,
},
};
var read_vecs = [_]posix.iovec{
.{
.base = &buf2,
.len = buf2.len,
},
.{
.base = &buf1,
.len = buf1.len,
},
};
var write_vecs: [2][]const u8 = .{ line1, line2 };
var read_vecs: [2][]u8 = .{ &buf2, &buf1 };
var src_file = try tmp.dir.createFile("test.txt", .{ .read = true });
defer src_file.close();
try src_file.writevAll(&write_vecs);
var writer = src_file.writerStreaming(&.{});
try writer.interface.writeVecAll(&write_vecs);
try writer.interface.flush();
try testing.expectEqual(@as(u64, line1.len + line2.len), try src_file.getEndPos());
try src_file.seekTo(0);
const read = try src_file.readvAll(&read_vecs);
try testing.expectEqual(@as(usize, line1.len + line2.len), read);
var reader = writer.moveToReader(io);
try reader.seekTo(0);
try reader.interface.readVecAll(&read_vecs);
try testing.expectEqualStrings(&buf1, "line2\n");
try testing.expectEqualStrings(&buf2, "line1\n");
try testing.expectError(error.EndOfStream, reader.interface.readSliceAll(&buf1));
}
test "pwritev, preadv" {
const io = testing.io;
var tmp = tmpDir(.{});
defer tmp.cleanup();
const line1 = "line1\n";
const line2 = "line2\n";
var lines: [2][]const u8 = .{ line1, line2 };
var buf1: [line1.len]u8 = undefined;
var buf2: [line2.len]u8 = undefined;
var write_vecs = [_]posix.iovec_const{
.{
.base = line1,
.len = line1.len,
},
.{
.base = line2,
.len = line2.len,
},
};
var read_vecs = [_]posix.iovec{
.{
.base = &buf2,
.len = buf2.len,
},
.{
.base = &buf1,
.len = buf1.len,
},
};
var read_vecs: [2][]u8 = .{ &buf2, &buf1 };
var src_file = try tmp.dir.createFile("test.txt", .{ .read = true });
defer src_file.close();
try src_file.pwritevAll(&write_vecs, 16);
var writer = src_file.writer(&.{});
try writer.seekTo(16);
try writer.interface.writeVecAll(&lines);
try writer.interface.flush();
try testing.expectEqual(@as(u64, 16 + line1.len + line2.len), try src_file.getEndPos());
const read = try src_file.preadvAll(&read_vecs, 16);
try testing.expectEqual(@as(usize, line1.len + line2.len), read);
var reader = writer.moveToReader(io);
try reader.seekTo(16);
try reader.interface.readVecAll(&read_vecs);
try testing.expectEqualStrings(&buf1, "line2\n");
try testing.expectEqualStrings(&buf2, "line1\n");
try testing.expectError(error.EndOfStream, reader.interface.readSliceAll(&buf1));
}
test "setEndPos" {
@ -1406,6 +1389,8 @@ test "setEndPos" {
if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23806
const io = testing.io;
var tmp = tmpDir(.{});
defer tmp.cleanup();
@ -1416,11 +1401,13 @@ test "setEndPos" {
const initial_size = try f.getEndPos();
var buffer: [32]u8 = undefined;
var reader = f.reader(io, &.{});
{
try f.setEndPos(initial_size);
try testing.expectEqual(initial_size, try f.getEndPos());
try testing.expectEqual(initial_size, try f.preadAll(&buffer, 0));
try reader.seekTo(0);
try testing.expectEqual(initial_size, try reader.interface.readSliceShort(&buffer));
try testing.expectEqualStrings("ninebytes", buffer[0..@intCast(initial_size)]);
}
@ -1428,7 +1415,8 @@ test "setEndPos" {
const larger = initial_size + 4;
try f.setEndPos(larger);
try testing.expectEqual(larger, try f.getEndPos());
try testing.expectEqual(larger, try f.preadAll(&buffer, 0));
try reader.seekTo(0);
try testing.expectEqual(larger, try reader.interface.readSliceShort(&buffer));
try testing.expectEqualStrings("ninebytes\x00\x00\x00\x00", buffer[0..@intCast(larger)]);
}
@ -1436,27 +1424,15 @@ test "setEndPos" {
const smaller = initial_size - 5;
try f.setEndPos(smaller);
try testing.expectEqual(smaller, try f.getEndPos());
try testing.expectEqual(smaller, try f.preadAll(&buffer, 0));
try reader.seekTo(0);
try testing.expectEqual(smaller, try reader.interface.readSliceShort(&buffer));
try testing.expectEqualStrings("nine", buffer[0..@intCast(smaller)]);
}
try f.setEndPos(0);
try testing.expectEqual(0, try f.getEndPos());
try testing.expectEqual(0, try f.preadAll(&buffer, 0));
// Invalid file length should error gracefully. Actual limit is host
// and file-system dependent, but 1PB should fail on filesystems like
// EXT4 and NTFS. But XFS or Btrfs support up to 8EiB files.
f.setEndPos(0x4_0000_0000_0000) catch |err| if (err != error.FileTooBig) {
return err;
};
f.setEndPos(std.math.maxInt(u63)) catch |err| if (err != error.FileTooBig) {
return err;
};
try testing.expectError(error.FileTooBig, f.setEndPos(std.math.maxInt(u63) + 1));
try testing.expectError(error.FileTooBig, f.setEndPos(std.math.maxInt(u64)));
try reader.seekTo(0);
try testing.expectEqual(0, try reader.interface.readSliceShort(&buffer));
}
test "access file" {
@ -1476,6 +1452,8 @@ test "access file" {
}
test "sendfile" {
const io = testing.io;
var tmp = tmpDir(.{});
defer tmp.cleanup();
@ -1486,21 +1464,14 @@ test "sendfile" {
const line1 = "line1\n";
const line2 = "second line\n";
var vecs = [_]posix.iovec_const{
.{
.base = line1,
.len = line1.len,
},
.{
.base = line2,
.len = line2.len,
},
};
var vecs = [_][]const u8{ line1, line2 };
var src_file = try dir.createFile("sendfile1.txt", .{ .read = true });
defer src_file.close();
try src_file.writevAll(&vecs);
{
var fw = src_file.writer(&.{});
try fw.interface.writeVecAll(&vecs);
}
var dest_file = try dir.createFile("sendfile2.txt", .{ .read = true });
defer dest_file.close();
@ -1513,7 +1484,7 @@ test "sendfile" {
var trailers: [2][]const u8 = .{ trailer1, trailer2 };
var written_buf: [100]u8 = undefined;
var file_reader = src_file.reader(&.{});
var file_reader = src_file.reader(io, &.{});
var fallback_buffer: [50]u8 = undefined;
var file_writer = dest_file.writer(&fallback_buffer);
try file_writer.interface.writeVecAll(&headers);
@ -1521,11 +1492,15 @@ test "sendfile" {
try testing.expectEqual(10, try file_writer.interface.sendFileAll(&file_reader, .limited(10)));
try file_writer.interface.writeVecAll(&trailers);
try file_writer.interface.flush();
const amt = try dest_file.preadAll(&written_buf, 0);
var fr = file_writer.moveToReader(io);
try fr.seekTo(0);
const amt = try fr.interface.readSliceShort(&written_buf);
try testing.expectEqualStrings("header1\nsecond header\nine1\nsecontrailer1\nsecond trailer\n", written_buf[0..amt]);
}
test "sendfile with buffered data" {
const io = testing.io;
var tmp = tmpDir(.{});
defer tmp.cleanup();
@ -1543,7 +1518,7 @@ test "sendfile with buffered data" {
defer dest_file.close();
var src_buffer: [32]u8 = undefined;
var file_reader = src_file.reader(&src_buffer);
var file_reader = src_file.reader(io, &src_buffer);
try file_reader.seekTo(0);
try file_reader.interface.fill(8);
@ -1554,37 +1529,14 @@ test "sendfile with buffered data" {
try std.testing.expectEqual(4, try file_writer.interface.sendFileAll(&file_reader, .limited(4)));
var written_buf: [8]u8 = undefined;
const amt = try dest_file.preadAll(&written_buf, 0);
var fr = file_writer.moveToReader(io);
try fr.seekTo(0);
const amt = try fr.interface.readSliceShort(&written_buf);
try std.testing.expectEqual(4, amt);
try std.testing.expectEqualSlices(u8, "AAAA", written_buf[0..amt]);
}
test "copyRangeAll" {
var tmp = tmpDir(.{});
defer tmp.cleanup();
try tmp.dir.makePath("os_test_tmp");
var dir = try tmp.dir.openDir("os_test_tmp", .{});
defer dir.close();
var src_file = try dir.createFile("file1.txt", .{ .read = true });
defer src_file.close();
const data = "u6wj+JmdF3qHsFPE BUlH2g4gJCmEz0PP";
try src_file.writeAll(data);
var dest_file = try dir.createFile("file2.txt", .{ .read = true });
defer dest_file.close();
var written_buf: [100]u8 = undefined;
_ = try src_file.copyRangeAll(0, dest_file, 0, data.len);
const amt = try dest_file.preadAll(&written_buf, 0);
try testing.expectEqualStrings(data, written_buf[0..amt]);
}
test "copyFile" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
@ -1708,8 +1660,8 @@ test "open file with exclusive lock twice, make sure second lock waits" {
}
};
var started = std.Thread.ResetEvent{};
var locked = std.Thread.ResetEvent{};
var started: std.Thread.ResetEvent = .unset;
var locked: std.Thread.ResetEvent = .unset;
const t = try std.Thread.spawn(.{}, S.checkFn, .{
&ctx.dir,
@ -1773,7 +1725,7 @@ test "read from locked file" {
const f = try ctx.dir.createFile(filename, .{ .read = true });
defer f.close();
var buffer: [1]u8 = undefined;
_ = try f.readAll(&buffer);
_ = try f.read(&buffer);
}
{
const f = try ctx.dir.createFile(filename, .{
@ -1785,9 +1737,9 @@ test "read from locked file" {
defer f2.close();
var buffer: [1]u8 = undefined;
if (builtin.os.tag == .windows) {
try std.testing.expectError(error.LockViolation, f2.readAll(&buffer));
try std.testing.expectError(error.LockViolation, f2.read(&buffer));
} else {
try std.testing.expectEqual(0, f2.readAll(&buffer));
try std.testing.expectEqual(0, f2.read(&buffer));
}
}
}
@ -1944,6 +1896,7 @@ test "'.' and '..' in fs.Dir functions" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const io = ctx.io;
const subdir_path = try ctx.transformPath("./subdir");
const file_path = try ctx.transformPath("./subdir/../file");
const copy_path = try ctx.transformPath("./subdir/../copy");
@ -1966,8 +1919,9 @@ test "'.' and '..' in fs.Dir functions" {
try ctx.dir.deleteFile(rename_path);
try ctx.dir.writeFile(.{ .sub_path = update_path, .data = "something" });
const prev_status = try ctx.dir.updateFile(file_path, ctx.dir, update_path, .{});
try testing.expectEqual(fs.Dir.PrevStatus.stale, prev_status);
var dir = ctx.dir.adaptToNewApi();
const prev_status = try dir.updateFile(io, file_path, dir, update_path, .{});
try testing.expectEqual(Io.Dir.PrevStatus.stale, prev_status);
try ctx.dir.deleteDir(subdir_path);
}
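The `adaptToNewApi`/`updateFile` combination above also works outside tests. A sketch (not part of the diff; the helper name and paths are hypothetical):

const std = @import("std");

fn refreshBackup(io: std.Io, legacy_dir: std.fs.Dir) !bool {
    var dir = legacy_dir.adaptToNewApi();
    const status = try dir.updateFile(io, "input.txt", dir, "backup/input.txt", .{});
    return status == .stale; // `.stale` means the destination was (re)written
}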
@ -2005,13 +1959,6 @@ test "'.' and '..' in absolute functions" {
renamed_file.close();
try fs.deleteFileAbsolute(renamed_file_path);
const update_file_path = try fs.path.join(allocator, &.{ subdir_path, "../update" });
const update_file = try fs.createFileAbsolute(update_file_path, .{});
try update_file.writeAll("something");
update_file.close();
const prev_status = try fs.updateFileAbsolute(created_file_path, update_file_path, .{});
try testing.expectEqual(fs.Dir.PrevStatus.stale, prev_status);
try fs.deleteDirAbsolute(subdir_path);
}
@ -2072,48 +2019,40 @@ test "delete a setAsCwd directory on Windows" {
test "invalid UTF-8/WTF-8 paths" {
const expected_err = switch (native_os) {
.wasi => error.InvalidUtf8,
.windows => error.InvalidWtf8,
.wasi => error.BadPathName,
.windows => error.BadPathName,
else => return error.SkipZigTest,
};
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
const io = ctx.io;
// This is both invalid UTF-8 and WTF-8, since \xFF is an invalid start byte
const invalid_path = try ctx.transformPath("\xFF");
try testing.expectError(expected_err, ctx.dir.openFile(invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.openFileZ(invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.createFile(invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.createFileZ(invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.makeDir(invalid_path));
try testing.expectError(expected_err, ctx.dir.makeDirZ(invalid_path));
try testing.expectError(expected_err, ctx.dir.makePath(invalid_path));
try testing.expectError(expected_err, ctx.dir.makeOpenPath(invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.openDir(invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.openDirZ(invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.deleteFile(invalid_path));
try testing.expectError(expected_err, ctx.dir.deleteFileZ(invalid_path));
try testing.expectError(expected_err, ctx.dir.deleteDir(invalid_path));
try testing.expectError(expected_err, ctx.dir.deleteDirZ(invalid_path));
try testing.expectError(expected_err, ctx.dir.rename(invalid_path, invalid_path));
try testing.expectError(expected_err, ctx.dir.renameZ(invalid_path, invalid_path));
try testing.expectError(expected_err, ctx.dir.symLink(invalid_path, invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.symLinkZ(invalid_path, invalid_path, .{}));
if (native_os == .wasi) {
try testing.expectError(expected_err, ctx.dir.symLinkWasi(invalid_path, invalid_path, .{}));
}
try testing.expectError(expected_err, ctx.dir.readLink(invalid_path, &[_]u8{}));
try testing.expectError(expected_err, ctx.dir.readLinkZ(invalid_path, &[_]u8{}));
if (native_os == .wasi) {
try testing.expectError(expected_err, ctx.dir.readLinkWasi(invalid_path, &[_]u8{}));
}
@ -2127,47 +2066,34 @@ test "invalid UTF-8/WTF-8 paths" {
try testing.expectError(expected_err, ctx.dir.writeFile(.{ .sub_path = invalid_path, .data = "" }));
try testing.expectError(expected_err, ctx.dir.access(invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.accessZ(invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.updateFile(invalid_path, ctx.dir, invalid_path, .{}));
var dir = ctx.dir.adaptToNewApi();
try testing.expectError(expected_err, dir.updateFile(io, invalid_path, dir, invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.copyFile(invalid_path, ctx.dir, invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.statFile(invalid_path));
if (native_os != .wasi) {
try testing.expectError(expected_err, ctx.dir.realpath(invalid_path, &[_]u8{}));
try testing.expectError(expected_err, ctx.dir.realpathZ(invalid_path, &[_]u8{}));
try testing.expectError(expected_err, ctx.dir.realpathAlloc(testing.allocator, invalid_path));
}
try testing.expectError(expected_err, fs.rename(ctx.dir, invalid_path, ctx.dir, invalid_path));
try testing.expectError(expected_err, fs.renameZ(ctx.dir, invalid_path, ctx.dir, invalid_path));
if (native_os != .wasi and ctx.path_type != .relative) {
try testing.expectError(expected_err, fs.updateFileAbsolute(invalid_path, invalid_path, .{}));
try testing.expectError(expected_err, fs.copyFileAbsolute(invalid_path, invalid_path, .{}));
try testing.expectError(expected_err, fs.makeDirAbsolute(invalid_path));
try testing.expectError(expected_err, fs.makeDirAbsoluteZ(invalid_path));
try testing.expectError(expected_err, fs.deleteDirAbsolute(invalid_path));
try testing.expectError(expected_err, fs.deleteDirAbsoluteZ(invalid_path));
try testing.expectError(expected_err, fs.renameAbsolute(invalid_path, invalid_path));
try testing.expectError(expected_err, fs.renameAbsoluteZ(invalid_path, invalid_path));
try testing.expectError(expected_err, fs.openDirAbsolute(invalid_path, .{}));
try testing.expectError(expected_err, fs.openDirAbsoluteZ(invalid_path, .{}));
try testing.expectError(expected_err, fs.openFileAbsolute(invalid_path, .{}));
try testing.expectError(expected_err, fs.openFileAbsoluteZ(invalid_path, .{}));
try testing.expectError(expected_err, fs.accessAbsolute(invalid_path, .{}));
try testing.expectError(expected_err, fs.accessAbsoluteZ(invalid_path, .{}));
try testing.expectError(expected_err, fs.createFileAbsolute(invalid_path, .{}));
try testing.expectError(expected_err, fs.createFileAbsoluteZ(invalid_path, .{}));
try testing.expectError(expected_err, fs.deleteFileAbsolute(invalid_path));
try testing.expectError(expected_err, fs.deleteFileAbsoluteZ(invalid_path));
try testing.expectError(expected_err, fs.deleteTreeAbsolute(invalid_path));
var readlink_buf: [fs.max_path_bytes]u8 = undefined;
try testing.expectError(expected_err, fs.readLinkAbsolute(invalid_path, &readlink_buf));
try testing.expectError(expected_err, fs.readLinkAbsoluteZ(invalid_path, &readlink_buf));
try testing.expectError(expected_err, fs.symLinkAbsolute(invalid_path, invalid_path, .{}));
try testing.expectError(expected_err, fs.symLinkAbsoluteZ(invalid_path, invalid_path, .{}));
try testing.expectError(expected_err, fs.realpathAlloc(testing.allocator, invalid_path));
}
}
@ -2175,6 +2101,8 @@ test "invalid UTF-8/WTF-8 paths" {
}
test "read file non vectored" {
const io = std.testing.io;
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
@ -2188,7 +2116,7 @@ test "read file non vectored" {
try file_writer.interface.flush();
}
var file_reader: std.fs.File.Reader = .init(file, &.{});
var file_reader: std.Io.File.Reader = .initAdapted(file, io, &.{});
var write_buffer: [100]u8 = undefined;
var w: std.Io.Writer = .fixed(&write_buffer);
@ -2205,6 +2133,8 @@ test "read file non vectored" {
}
test "seek keeping partial buffer" {
const io = std.testing.io;
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
@ -2219,7 +2149,7 @@ test "seek keeping partial buffer" {
}
var read_buffer: [3]u8 = undefined;
var file_reader: std.fs.File.Reader = .init(file, &read_buffer);
var file_reader: Io.File.Reader = .initAdapted(file, io, &read_buffer);
try testing.expectEqual(0, file_reader.logicalPos());
@ -2246,13 +2176,15 @@ test "seek keeping partial buffer" {
}
test "seekBy" {
const io = testing.io;
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
try tmp_dir.dir.writeFile(.{ .sub_path = "blah.txt", .data = "let's test seekBy" });
const f = try tmp_dir.dir.openFile("blah.txt", .{ .mode = .read_only });
defer f.close();
var reader = f.readerStreaming(&.{});
var reader = f.readerStreaming(io, &.{});
try reader.seekBy(2);
var buffer: [20]u8 = undefined;
@ -2265,6 +2197,8 @@ test "seekTo flushes buffered data" {
var tmp = std.testing.tmpDir(.{});
defer tmp.cleanup();
const io = std.testing.io;
const contents = "data";
const file = try tmp.dir.createFile("seek.bin", .{ .read = true });
@ -2279,7 +2213,7 @@ test "seekTo flushes buffered data" {
}
var read_buffer: [16]u8 = undefined;
var file_reader: std.fs.File.Reader = .init(file, &read_buffer);
var file_reader: std.Io.File.Reader = .initAdapted(file, io, &read_buffer);
var buf: [4]u8 = undefined;
try file_reader.interface.readSliceAll(&buf);
@ -2287,6 +2221,8 @@ test "seekTo flushes buffered data" {
}
test "File.Writer sendfile with buffered contents" {
const io = testing.io;
var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();
@ -2298,7 +2234,7 @@ test "File.Writer sendfile with buffered contents" {
defer out.close();
var in_buf: [2]u8 = undefined;
var in_r = in.reader(&in_buf);
var in_r = in.reader(io, &in_buf);
_ = try in_r.getSize(); // Catch seeks past end by populating size
try in_r.interface.fill(2);
@ -2312,7 +2248,7 @@ test "File.Writer sendfile with buffered contents" {
var check = try tmp_dir.dir.openFile("b", .{});
defer check.close();
var check_buf: [4]u8 = undefined;
var check_r = check.reader(&check_buf);
var check_r = check.reader(io, &check_buf);
try testing.expectEqualStrings("abcd", try check_r.interface.take(4));
try testing.expectError(error.EndOfStream, check_r.interface.takeByte());
}


@ -1827,9 +1827,9 @@ test "put and remove loop in random order" {
}
}
test "remove one million elements in random order" {
test "remove many elements in random order" {
const Map = AutoHashMap(u32, u32);
const n = 1000 * 1000;
const n = 1000 * 100;
var map = Map.init(std.heap.page_allocator);
defer map.deinit();
@ -2147,14 +2147,14 @@ test "getOrPut allocation failure" {
try testing.expectError(error.OutOfMemory, map.getOrPut(std.testing.failing_allocator, "hello"));
}
test "std.hash_map rehash" {
test "rehash" {
var map = AutoHashMap(usize, usize).init(std.testing.allocator);
defer map.deinit();
var prng = std.Random.DefaultPrng.init(0);
const random = prng.random();
const count = 6 * random.intRangeLessThan(u32, 100_000, 500_000);
const count = 4 * random.intRangeLessThan(u32, 100_000, 500_000);
for (0..count) |i| {
try map.put(i, i);


@ -80,15 +80,15 @@
//!
//! Resizing and remapping are forwarded directly to the backing allocator,
//! except where such operations would change the category from large to small.
const builtin = @import("builtin");
const StackTrace = std.builtin.StackTrace;
const std = @import("std");
const builtin = @import("builtin");
const log = std.log.scoped(.gpa);
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const StackTrace = std.builtin.StackTrace;
const default_page_size: usize = switch (builtin.os.tag) {
// Makes `std.heap.PageAllocator` take the happy path.
@ -421,7 +421,12 @@ pub fn DebugAllocator(comptime config: Config) type {
return usedBitsCount(slot_count) * @sizeOf(usize);
}
fn detectLeaksInBucket(bucket: *BucketHeader, size_class_index: usize, used_bits_count: usize) usize {
fn detectLeaksInBucket(
bucket: *BucketHeader,
size_class_index: usize,
used_bits_count: usize,
tty_config: std.Io.tty.Config,
) usize {
const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
const slot_count = slot_counts[size_class_index];
var leaks: usize = 0;
@ -436,7 +441,13 @@ pub fn DebugAllocator(comptime config: Config) type {
const stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc);
const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
const addr = page_addr + slot_index * size_class;
log.err("memory address 0x{x} leaked: {f}", .{ addr, stack_trace });
log.err("memory address 0x{x} leaked: {f}", .{
addr,
std.debug.FormatStackTrace{
.stack_trace = stack_trace,
.tty_config = tty_config,
},
});
leaks += 1;
}
}
@ -449,12 +460,14 @@ pub fn DebugAllocator(comptime config: Config) type {
pub fn detectLeaks(self: *Self) usize {
var leaks: usize = 0;
const tty_config = std.Io.tty.detectConfig(.stderr());
for (self.buckets, 0..) |init_optional_bucket, size_class_index| {
var optional_bucket = init_optional_bucket;
const slot_count = slot_counts[size_class_index];
const used_bits_count = usedBitsCount(slot_count);
while (optional_bucket) |bucket| {
leaks += detectLeaksInBucket(bucket, size_class_index, used_bits_count);
leaks += detectLeaksInBucket(bucket, size_class_index, used_bits_count, tty_config);
optional_bucket = bucket.prev;
}
}
@ -464,7 +477,11 @@ pub fn DebugAllocator(comptime config: Config) type {
if (config.retain_metadata and large_alloc.freed) continue;
const stack_trace = large_alloc.getStackTrace(.alloc);
log.err("memory address 0x{x} leaked: {f}", .{
@intFromPtr(large_alloc.bytes.ptr), stack_trace,
@intFromPtr(large_alloc.bytes.ptr),
std.debug.FormatStackTrace{
.stack_trace = stack_trace,
.tty_config = tty_config,
},
});
leaks += 1;
}
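The pattern is the same throughout this file: detect the tty config once, wrap each trace in `std.debug.FormatStackTrace`, and print it with the `{f}` specifier. A standalone sketch (not part of the diff; the function name and buffer depth are arbitrary):

const std = @import("std");

fn reportHere() void {
    var addr_buf: [32]usize = undefined;
    const trace = std.debug.captureCurrentStackTrace(.{ .first_address = @returnAddress() }, &addr_buf);
    const tty_config = std.Io.tty.detectConfig(.stderr());
    std.log.err("captured here: {f}", .{std.debug.FormatStackTrace{
        .stack_trace = trace,
        .tty_config = tty_config,
    }});
}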
@ -519,8 +536,20 @@ pub fn DebugAllocator(comptime config: Config) type {
fn reportDoubleFree(ret_addr: usize, alloc_stack_trace: StackTrace, free_stack_trace: StackTrace) void {
var addr_buf: [stack_n]usize = undefined;
const second_free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
const tty_config = std.Io.tty.detectConfig(.stderr());
log.err("Double free detected. Allocation: {f} First free: {f} Second free: {f}", .{
alloc_stack_trace, free_stack_trace, second_free_stack_trace,
std.debug.FormatStackTrace{
.stack_trace = alloc_stack_trace,
.tty_config = tty_config,
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
.tty_config = tty_config,
},
std.debug.FormatStackTrace{
.stack_trace = second_free_stack_trace,
.tty_config = tty_config,
},
});
}
@ -561,11 +590,18 @@ pub fn DebugAllocator(comptime config: Config) type {
if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
var addr_buf: [stack_n]usize = undefined;
const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
const tty_config = std.Io.tty.detectConfig(.stderr());
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
entry.value_ptr.bytes.len,
old_mem.len,
entry.value_ptr.getStackTrace(.alloc),
free_stack_trace,
std.debug.FormatStackTrace{
.stack_trace = entry.value_ptr.getStackTrace(.alloc),
.tty_config = tty_config,
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
.tty_config = tty_config,
},
});
}
@ -667,11 +703,18 @@ pub fn DebugAllocator(comptime config: Config) type {
if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
var addr_buf: [stack_n]usize = undefined;
const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = ret_addr }, &addr_buf);
const tty_config = std.Io.tty.detectConfig(.stderr());
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
entry.value_ptr.bytes.len,
old_mem.len,
entry.value_ptr.getStackTrace(.alloc),
free_stack_trace,
std.debug.FormatStackTrace{
.stack_trace = entry.value_ptr.getStackTrace(.alloc),
.tty_config = tty_config,
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
.tty_config = tty_config,
},
});
}
@ -892,19 +935,33 @@ pub fn DebugAllocator(comptime config: Config) type {
var addr_buf: [stack_n]usize = undefined;
const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &addr_buf);
if (old_memory.len != requested_size) {
const tty_config = std.Io.tty.detectConfig(.stderr());
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
requested_size,
old_memory.len,
bucketStackTrace(bucket, slot_count, slot_index, .alloc),
free_stack_trace,
std.debug.FormatStackTrace{
.stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
.tty_config = tty_config,
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
.tty_config = tty_config,
},
});
}
if (alignment != slot_alignment) {
const tty_config = std.Io.tty.detectConfig(.stderr());
log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{
slot_alignment.toByteUnits(),
alignment.toByteUnits(),
bucketStackTrace(bucket, slot_count, slot_index, .alloc),
free_stack_trace,
std.debug.FormatStackTrace{
.stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
.tty_config = tty_config,
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
.tty_config = tty_config,
},
});
}
}
@ -987,19 +1044,33 @@ pub fn DebugAllocator(comptime config: Config) type {
var addr_buf: [stack_n]usize = undefined;
const free_stack_trace = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &addr_buf);
if (memory.len != requested_size) {
const tty_config = std.Io.tty.detectConfig(.stderr());
log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
requested_size,
memory.len,
bucketStackTrace(bucket, slot_count, slot_index, .alloc),
free_stack_trace,
std.debug.FormatStackTrace{
.stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
.tty_config = tty_config,
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
.tty_config = tty_config,
},
});
}
if (alignment != slot_alignment) {
const tty_config = std.Io.tty.detectConfig(.stderr());
log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{
slot_alignment.toByteUnits(),
alignment.toByteUnits(),
bucketStackTrace(bucket, slot_count, slot_index, .alloc),
free_stack_trace,
std.debug.FormatStackTrace{
.stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc),
.tty_config = tty_config,
},
std.debug.FormatStackTrace{
.stack_trace = free_stack_trace,
.tty_config = tty_config,
},
});
}
}


@ -9,12 +9,13 @@ const builtin = @import("builtin");
const testing = std.testing;
const http = std.http;
const mem = std.mem;
const net = std.net;
const Uri = std.Uri;
const Allocator = mem.Allocator;
const assert = std.debug.assert;
const Io = std.Io;
const Writer = std.Io.Writer;
const Reader = std.Io.Reader;
const HostName = std.Io.net.HostName;
const Client = @This();
@ -22,6 +23,8 @@ pub const disable_tls = std.options.http_disable_tls;
/// Used for all client allocations. Must be thread-safe.
allocator: Allocator,
/// Used for opening TCP connections.
io: Io,
ca_bundle: if (disable_tls) void else std.crypto.Certificate.Bundle = if (disable_tls) {} else .{},
ca_bundle_mutex: std.Thread.Mutex = .{},
@ -32,9 +35,11 @@ tls_buffer_size: if (disable_tls) u0 else usize = if (disable_tls) 0 else std.cr
/// traffic over connections created with this `Client`.
ssl_key_log: ?*std.crypto.tls.Client.SslKeyLog = null,
/// When this is `true`, the next time this client performs an HTTPS request,
/// it will first rescan the system for root certificates.
next_https_rescan_certs: bool = true,
/// The time used to decide whether certificates are expired.
///
/// When this is `null`, the next time this client performs an HTTPS request,
/// it will first check the time and rescan the system for root certificates.
now: ?Io.Timestamp = null,
/// The pool of connections that can be reused (and currently in use).
connection_pool: ConnectionPool = .{},
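With the new mandatory `io` field, constructing a client now looks roughly like the sketch below (not part of the diff; the single-threaded `Io` bridge mirrors the pattern used elsewhere in this change):

const std = @import("std");

pub fn main() !void {
    var gpa_state: std.heap.DebugAllocator(.{}) = .init;
    defer _ = gpa_state.deinit();

    var threaded: std.Io.Threaded = .init_single_threaded;
    const io = threaded.ioBasic();

    var client: std.http.Client = .{ .allocator = gpa_state.allocator(), .io = io };
    defer client.deinit();
    // Setting `client.now = null` later forces a time check and certificate
    // rescan on the next HTTPS request, per the doc comment above.
}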
@ -67,7 +72,7 @@ pub const ConnectionPool = struct {
/// The criteria for a connection to be considered a match.
pub const Criteria = struct {
host: []const u8,
host: HostName,
port: u16,
protocol: Protocol,
};
@ -87,7 +92,7 @@ pub const ConnectionPool = struct {
if (connection.port != criteria.port) continue;
// Domain names are case-insensitive (RFC 5890, Section 2.3.2.4)
if (!std.ascii.eqlIgnoreCase(connection.host(), criteria.host)) continue;
if (!connection.host().eql(criteria.host)) continue;
pool.acquireUnsafe(connection);
return connection;
@ -116,19 +121,19 @@ pub const ConnectionPool = struct {
/// If the connection is marked as closing, it will be closed instead.
///
/// Threadsafe.
pub fn release(pool: *ConnectionPool, connection: *Connection) void {
pub fn release(pool: *ConnectionPool, connection: *Connection, io: Io) void {
pool.mutex.lock();
defer pool.mutex.unlock();
pool.used.remove(&connection.pool_node);
if (connection.closing or pool.free_size == 0) return connection.destroy();
if (connection.closing or pool.free_size == 0) return connection.destroy(io);
if (pool.free_len >= pool.free_size) {
const popped: *Connection = @alignCast(@fieldParentPtr("pool_node", pool.free.popFirst().?));
pool.free_len -= 1;
popped.destroy();
popped.destroy(io);
}
if (connection.proxied) {
@ -176,21 +181,21 @@ pub const ConnectionPool = struct {
/// All future operations on the connection pool will deadlock.
///
/// Threadsafe.
pub fn deinit(pool: *ConnectionPool) void {
pub fn deinit(pool: *ConnectionPool, io: Io) void {
pool.mutex.lock();
var next = pool.free.first;
while (next) |node| {
const connection: *Connection = @alignCast(@fieldParentPtr("pool_node", node));
next = node.next;
connection.destroy();
connection.destroy(io);
}
next = pool.used.first;
while (next) |node| {
const connection: *Connection = @alignCast(@fieldParentPtr("pool_node", node));
next = node.next;
connection.destroy();
connection.destroy(io);
}
pool.* = undefined;
@ -225,8 +230,8 @@ pub const Protocol = enum {
pub const Connection = struct {
client: *Client,
stream_writer: net.Stream.Writer,
stream_reader: net.Stream.Reader,
stream_writer: Io.net.Stream.Writer,
stream_reader: Io.net.Stream.Reader,
/// Entry in `ConnectionPool.used` or `ConnectionPool.free`.
pool_node: std.DoublyLinkedList.Node,
port: u16,
@ -240,28 +245,29 @@ pub const Connection = struct {
fn create(
client: *Client,
remote_host: []const u8,
remote_host: HostName,
port: u16,
stream: net.Stream,
stream: Io.net.Stream,
) error{OutOfMemory}!*Plain {
const io = client.io;
const gpa = client.allocator;
const alloc_len = allocLen(client, remote_host.len);
const alloc_len = allocLen(client, remote_host.bytes.len);
const base = try gpa.alignedAlloc(u8, .of(Plain), alloc_len);
errdefer gpa.free(base);
const host_buffer = base[@sizeOf(Plain)..][0..remote_host.len];
const host_buffer = base[@sizeOf(Plain)..][0..remote_host.bytes.len];
const socket_read_buffer = host_buffer.ptr[host_buffer.len..][0..client.read_buffer_size];
const socket_write_buffer = socket_read_buffer.ptr[socket_read_buffer.len..][0..client.write_buffer_size];
assert(base.ptr + alloc_len == socket_write_buffer.ptr + socket_write_buffer.len);
@memcpy(host_buffer, remote_host);
@memcpy(host_buffer, remote_host.bytes);
const plain: *Plain = @ptrCast(base);
plain.* = .{
.connection = .{
.client = client,
.stream_writer = stream.writer(socket_write_buffer),
.stream_reader = stream.reader(socket_read_buffer),
.stream_writer = stream.writer(io, socket_write_buffer),
.stream_reader = stream.reader(io, socket_read_buffer),
.pool_node = .{},
.port = port,
.host_len = @intCast(remote_host.len),
.host_len = @intCast(remote_host.bytes.len),
.proxied = false,
.closing = false,
.protocol = .plain,
@ -281,9 +287,9 @@ pub const Connection = struct {
return @sizeOf(Plain) + host_len + client.read_buffer_size + client.write_buffer_size;
}
fn host(plain: *Plain) []u8 {
fn host(plain: *Plain) HostName {
const base: [*]u8 = @ptrCast(plain);
return base[@sizeOf(Plain)..][0..plain.connection.host_len];
return .{ .bytes = base[@sizeOf(Plain)..][0..plain.connection.host_len] };
}
};
@ -291,17 +297,19 @@ pub const Connection = struct {
client: std.crypto.tls.Client,
connection: Connection,
/// Asserts that `client.now` is non-null.
fn create(
client: *Client,
remote_host: []const u8,
remote_host: HostName,
port: u16,
stream: net.Stream,
) error{ OutOfMemory, TlsInitializationFailed }!*Tls {
stream: Io.net.Stream,
) !*Tls {
const io = client.io;
const gpa = client.allocator;
const alloc_len = allocLen(client, remote_host.len);
const alloc_len = allocLen(client, remote_host.bytes.len);
const base = try gpa.alignedAlloc(u8, .of(Tls), alloc_len);
errdefer gpa.free(base);
const host_buffer = base[@sizeOf(Tls)..][0..remote_host.len];
const host_buffer = base[@sizeOf(Tls)..][0..remote_host.bytes.len];
// The TLS client wants enough buffer for the max encrypted frame
// size, and the HTTP body reader wants enough buffer for the
// entire HTTP header. This means we need a combined upper bound.
@ -311,35 +319,43 @@ pub const Connection = struct {
const socket_write_buffer = tls_write_buffer.ptr[tls_write_buffer.len..][0..client.write_buffer_size];
const socket_read_buffer = socket_write_buffer.ptr[socket_write_buffer.len..][0..client.tls_buffer_size];
assert(base.ptr + alloc_len == socket_read_buffer.ptr + socket_read_buffer.len);
@memcpy(host_buffer, remote_host);
@memcpy(host_buffer, remote_host.bytes);
const tls: *Tls = @ptrCast(base);
var random_buffer: [176]u8 = undefined;
std.crypto.random.bytes(&random_buffer);
tls.* = .{
.connection = .{
.client = client,
.stream_writer = stream.writer(tls_write_buffer),
.stream_reader = stream.reader(socket_read_buffer),
.stream_writer = stream.writer(io, tls_write_buffer),
.stream_reader = stream.reader(io, socket_read_buffer),
.pool_node = .{},
.port = port,
.host_len = @intCast(remote_host.len),
.host_len = @intCast(remote_host.bytes.len),
.proxied = false,
.closing = false,
.protocol = .tls,
},
// TODO data race here on ca_bundle if the user sets next_https_rescan_certs to true
// TODO data race here on ca_bundle if the user sets `now` to null
.client = std.crypto.tls.Client.init(
tls.connection.stream_reader.interface(),
&tls.connection.stream_reader.interface,
&tls.connection.stream_writer.interface,
.{
.host = .{ .explicit = remote_host },
.host = .{ .explicit = remote_host.bytes },
.ca = .{ .bundle = client.ca_bundle },
.ssl_key_log = client.ssl_key_log,
.read_buffer = tls_read_buffer,
.write_buffer = socket_write_buffer,
.entropy = &random_buffer,
.realtime_now_seconds = client.now.?.toSeconds(),
// This is appropriate for HTTPS because the HTTP headers contain
// the content length which is used to detect truncation attacks.
.allow_truncation_attacks = true,
},
) catch return error.TlsInitializationFailed,
) catch |err| switch (err) {
error.WriteFailed => return tls.connection.stream_writer.err.?,
error.ReadFailed => return tls.connection.stream_reader.err.?,
else => |e| return e,
},
};
return tls;
}
@ -357,32 +373,32 @@ pub const Connection = struct {
client.write_buffer_size + client.tls_buffer_size;
}
fn host(tls: *Tls) []u8 {
fn host(tls: *Tls) HostName {
const base: [*]u8 = @ptrCast(tls);
return base[@sizeOf(Tls)..][0..tls.connection.host_len];
return .{ .bytes = base[@sizeOf(Tls)..][0..tls.connection.host_len] };
}
};
pub const ReadError = std.crypto.tls.Client.ReadError || std.net.Stream.ReadError;
pub const ReadError = std.crypto.tls.Client.ReadError || Io.net.Stream.Reader.Error;
pub fn getReadError(c: *const Connection) ?ReadError {
return switch (c.protocol) {
.tls => {
if (disable_tls) unreachable;
const tls: *const Tls = @alignCast(@fieldParentPtr("connection", c));
return tls.client.read_err orelse c.stream_reader.getError();
return tls.client.read_err orelse c.stream_reader.err.?;
},
.plain => {
return c.stream_reader.getError();
return c.stream_reader.err.?;
},
};
}
fn getStream(c: *Connection) net.Stream {
return c.stream_reader.getStream();
fn getStream(c: *Connection) Io.net.Stream {
return c.stream_reader.stream;
}
pub fn host(c: *Connection) []u8 {
pub fn host(c: *Connection) HostName {
return switch (c.protocol) {
.tls => {
if (disable_tls) unreachable;
@ -398,8 +414,8 @@ pub const Connection = struct {
/// If this is called without calling `flush` or `end`, data will be
/// dropped unsent.
pub fn destroy(c: *Connection) void {
c.getStream().close();
pub fn destroy(c: *Connection, io: Io) void {
c.stream_reader.stream.close(io);
switch (c.protocol) {
.tls => {
if (disable_tls) unreachable;
@ -435,7 +451,7 @@ pub const Connection = struct {
const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
return &tls.client.reader;
},
.plain => c.stream_reader.interface(),
.plain => &c.stream_reader.interface,
};
}
@ -864,6 +880,7 @@ pub const Request = struct {
/// Returns the request's `Connection` back to the pool of the `Client`.
pub fn deinit(r: *Request) void {
const io = r.client.io;
if (r.connection) |connection| {
connection.closing = connection.closing or switch (r.reader.state) {
.ready => false,
@ -878,7 +895,7 @@ pub const Request = struct {
},
else => true,
};
r.client.connection_pool.release(connection);
r.client.connection_pool.release(connection, io);
}
r.* = undefined;
}
@ -1180,6 +1197,7 @@ pub const Request = struct {
///
/// `aux_buf` must outlive accesses to `Request.uri`.
fn redirect(r: *Request, head: *const Response.Head, aux_buf: *[]u8) !void {
const io = r.client.io;
const new_location = head.location orelse return error.HttpRedirectLocationMissing;
if (new_location.len > aux_buf.*.len) return error.HttpRedirectLocationOversize;
const location = aux_buf.*[0..new_location.len];
@ -1196,19 +1214,20 @@ pub const Request = struct {
error.UnexpectedCharacter => return error.HttpRedirectLocationInvalid,
error.InvalidFormat => return error.HttpRedirectLocationInvalid,
error.InvalidPort => return error.HttpRedirectLocationInvalid,
error.InvalidHostName => return error.HttpRedirectLocationInvalid,
error.NoSpaceLeft => return error.HttpRedirectLocationOversize,
};
const protocol = Protocol.fromUri(new_uri) orelse return error.UnsupportedUriScheme;
const old_connection = r.connection.?;
const old_host = old_connection.host();
var new_host_name_buffer: [Uri.host_name_max]u8 = undefined;
var new_host_name_buffer: [HostName.max_len]u8 = undefined;
const new_host = try new_uri.getHost(&new_host_name_buffer);
const keep_privileged_headers =
std.ascii.eqlIgnoreCase(r.uri.scheme, new_uri.scheme) and
sameParentDomain(old_host, new_host);
old_host.sameParentDomain(new_host);
r.client.connection_pool.release(old_connection);
r.client.connection_pool.release(old_connection, io);
r.connection = null;
if (!keep_privileged_headers) {
@ -1264,7 +1283,7 @@ pub const Request = struct {
pub const Proxy = struct {
protocol: Protocol,
host: []const u8,
host: HostName,
authorization: ?[]const u8,
port: u16,
supports_connect: bool,
@ -1275,9 +1294,10 @@ pub const Proxy = struct {
/// All pending requests must be de-initialized and all active connections released
/// before calling this function.
pub fn deinit(client: *Client) void {
const io = client.io;
assert(client.connection_pool.used.first == null); // There are still active requests.
client.connection_pool.deinit();
client.connection_pool.deinit(io);
if (!disable_tls) client.ca_bundle.deinit(client.allocator);
client.* = undefined;
@ -1383,25 +1403,16 @@ pub const basic_authorization = struct {
}
};
pub const ConnectTcpError = Allocator.Error || error{
ConnectionRefused,
NetworkUnreachable,
ConnectionTimedOut,
ConnectionResetByPeer,
TemporaryNameServerFailure,
NameServerFailure,
UnknownHostName,
HostLacksNetworkAddresses,
UnexpectedConnectFailure,
pub const ConnectTcpError = error{
TlsInitializationFailed,
};
} || Allocator.Error || HostName.ConnectError;
/// Reuses a `Connection` if one matching `host` and `port` is already open.
///
/// Threadsafe.
pub fn connectTcp(
client: *Client,
host: []const u8,
host: HostName,
port: u16,
protocol: Protocol,
) ConnectTcpError!*Connection {
@ -1409,15 +1420,17 @@ pub fn connectTcp(
}
pub const ConnectTcpOptions = struct {
host: []const u8,
host: HostName,
port: u16,
protocol: Protocol,
proxied_host: ?[]const u8 = null,
proxied_host: ?HostName = null,
proxied_port: ?u16 = null,
timeout: Io.Timeout = .none,
};
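Connecting through the `HostName`-based API, sketched under the assumption that a struct literal over `.bytes` is an acceptable way to build a `HostName` (only that field is visible in this diff; a validating constructor, if one exists, would be preferable):

const std = @import("std");

fn openTlsConnection(client: *std.http.Client) !*std.http.Client.Connection {
    const host: std.Io.net.HostName = .{ .bytes = "ziglang.org" };
    const conn = try client.connectTcp(host, 443, .tls);
    // Return it to the pool when finished: client.connection_pool.release(conn, client.io);
    return conn;
}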
pub fn connectTcpOptions(client: *Client, options: ConnectTcpOptions) ConnectTcpError!*Connection {
const io = client.io;
const host = options.host;
const port = options.port;
const protocol = options.protocol;
@ -1431,23 +1444,18 @@ pub fn connectTcpOptions(client: *Client, options: ConnectTcpOptions) ConnectTcp
.protocol = protocol,
})) |conn| return conn;
const stream = net.tcpConnectToHost(client.allocator, host, port) catch |err| switch (err) {
error.ConnectionRefused => return error.ConnectionRefused,
error.NetworkUnreachable => return error.NetworkUnreachable,
error.ConnectionTimedOut => return error.ConnectionTimedOut,
error.ConnectionResetByPeer => return error.ConnectionResetByPeer,
error.TemporaryNameServerFailure => return error.TemporaryNameServerFailure,
error.NameServerFailure => return error.NameServerFailure,
error.UnknownHostName => return error.UnknownHostName,
error.HostLacksNetworkAddresses => return error.HostLacksNetworkAddresses,
else => return error.UnexpectedConnectFailure,
};
errdefer stream.close();
var stream = try host.connect(io, port, .{ .mode = .stream });
errdefer stream.close(io);
switch (protocol) {
.tls => {
if (disable_tls) return error.TlsInitializationFailed;
const tc = try Connection.Tls.create(client, proxied_host, proxied_port, stream);
const tc = Connection.Tls.create(client, proxied_host, proxied_port, stream) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.Unexpected => |e| return e,
error.Canceled => |e| return e,
else => return error.TlsInitializationFailed,
};
client.connection_pool.addUsed(&tc.connection);
return &tc.connection;
},
@ -1476,7 +1484,7 @@ pub fn connectUnix(client: *Client, path: []const u8) ConnectUnixError!*Connecti
errdefer client.allocator.destroy(conn);
conn.* = .{ .data = undefined };
const stream = try std.net.connectUnixSocket(path);
const stream = try Io.net.connectUnixSocket(path);
errdefer stream.close();
conn.data = .{
@ -1501,9 +1509,10 @@ pub fn connectUnix(client: *Client, path: []const u8) ConnectUnixError!*Connecti
pub fn connectProxied(
client: *Client,
proxy: *Proxy,
proxied_host: []const u8,
proxied_host: HostName,
proxied_port: u16,
) !*Connection {
const io = client.io;
if (!proxy.supports_connect) return error.TunnelNotSupported;
if (client.connection_pool.findConnection(.{
@ -1523,12 +1532,12 @@ pub fn connectProxied(
});
errdefer {
connection.closing = true;
client.connection_pool.release(connection);
client.connection_pool.release(connection, io);
}
var req = client.request(.CONNECT, .{
.scheme = "http",
.host = .{ .raw = proxied_host },
.host = .{ .raw = proxied_host.bytes },
.port = proxied_port,
}, .{
.redirect_behavior = .unhandled,
@ -1573,7 +1582,7 @@ pub const ConnectError = ConnectTcpError || RequestError;
/// This function is threadsafe.
pub fn connect(
client: *Client,
host: []const u8,
host: HostName,
port: u16,
protocol: Protocol,
) ConnectError!*Connection {
@ -1583,9 +1592,7 @@ pub fn connect(
} orelse return client.connectTcp(host, port, protocol);
// Prevent proxying through itself.
if (std.ascii.eqlIgnoreCase(proxy.host, host) and
proxy.port == port and proxy.protocol == protocol)
{
if (proxy.host.eql(host) and proxy.port == port and proxy.protocol == protocol) {
return client.connectTcp(host, port, protocol);
}
@ -1605,7 +1612,6 @@ pub fn connect(
pub const RequestError = ConnectTcpError || error{
UnsupportedUriScheme,
UriMissingHost,
UriHostTooLong,
CertificateBundleLoadFailure,
};
@ -1663,6 +1669,8 @@ pub fn request(
uri: Uri,
options: RequestOptions,
) RequestError!Request {
const io = client.io;
if (std.debug.runtime_safety) {
for (options.extra_headers) |header| {
assert(header.name.len != 0);
@ -1681,20 +1689,21 @@ pub fn request(
if (protocol == .tls) {
if (disable_tls) unreachable;
if (@atomicLoad(bool, &client.next_https_rescan_certs, .acquire)) {
{
client.ca_bundle_mutex.lock();
defer client.ca_bundle_mutex.unlock();
if (client.next_https_rescan_certs) {
client.ca_bundle.rescan(client.allocator) catch
if (client.now == null) {
const now = try Io.Clock.real.now(io);
client.now = now;
client.ca_bundle.rescan(client.allocator, io, now) catch
return error.CertificateBundleLoadFailure;
@atomicStore(bool, &client.next_https_rescan_certs, false, .release);
}
}
}
const connection = options.connection orelse c: {
var host_name_buffer: [Uri.host_name_max]u8 = undefined;
var host_name_buffer: [HostName.max_len]u8 = undefined;
const host_name = try uri.getHost(&host_name_buffer);
break :c try client.connect(host_name, uriPort(uri, protocol), protocol);
};
@ -1832,20 +1841,6 @@ pub fn fetch(client: *Client, options: FetchOptions) FetchError!FetchResult {
return .{ .status = response.head.status };
}
pub fn sameParentDomain(parent_host: []const u8, child_host: []const u8) bool {
if (!std.ascii.endsWithIgnoreCase(child_host, parent_host)) return false;
if (child_host.len == parent_host.len) return true;
if (parent_host.len > child_host.len) return false;
return child_host[child_host.len - parent_host.len - 1] == '.';
}
test sameParentDomain {
try testing.expect(!sameParentDomain("foo.com", "bar.com"));
try testing.expect(sameParentDomain("foo.com", "foo.com"));
try testing.expect(sameParentDomain("foo.com", "bar.foo.com"));
try testing.expect(!sameParentDomain("bar.foo.com", "foo.com"));
}
test {
_ = Response;
}

View File

@ -688,7 +688,7 @@ pub const WebSocket = struct {
pub const ReadSmallTextMessageError = error{
ConnectionClose,
UnexpectedOpCode,
MessageTooBig,
MessageOversize,
MissingMaskBit,
ReadFailed,
EndOfStream,
@ -717,15 +717,15 @@ pub const WebSocket = struct {
_ => return error.UnexpectedOpCode,
}
if (!h0.fin) return error.MessageTooBig;
if (!h0.fin) return error.MessageOversize;
if (!h1.mask) return error.MissingMaskBit;
const len: usize = switch (h1.payload_len) {
.len16 => try in.takeInt(u16, .big),
.len64 => std.math.cast(usize, try in.takeInt(u64, .big)) orelse return error.MessageTooBig,
.len64 => std.math.cast(usize, try in.takeInt(u64, .big)) orelse return error.MessageOversize,
else => @intFromEnum(h1.payload_len),
};
if (len > in.buffer.len) return error.MessageTooBig;
if (len > in.buffer.len) return error.MessageOversize;
const mask: u32 = @bitCast((try in.takeArray(4)).*);
const payload = try in.take(len);

View File

@ -1,27 +1,36 @@
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
const std = @import("std");
const http = std.http;
const mem = std.mem;
const native_endian = builtin.cpu.arch.endian();
const net = std.Io.net;
const Io = std.Io;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectEqualStrings = std.testing.expectEqualStrings;
const expectError = std.testing.expectError;
test "trailers" {
const test_server = try createTestServer(struct {
if (builtin.cpu.arch == .arm) {
// https://github.com/ziglang/zig/issues/25762
return error.SkipZigTest;
}
const io = std.testing.io;
const test_server = try createTestServer(io, struct {
fn run(test_server: *TestServer) anyerror!void {
const net_server = &test_server.net_server;
var recv_buffer: [1024]u8 = undefined;
var send_buffer: [1024]u8 = undefined;
var remaining: usize = 1;
while (remaining != 0) : (remaining -= 1) {
const connection = try net_server.accept();
defer connection.stream.close();
var stream = try net_server.accept(io);
defer stream.close(io);
var connection_br = connection.stream.reader(&recv_buffer);
var connection_bw = connection.stream.writer(&send_buffer);
var server = http.Server.init(connection_br.interface(), &connection_bw.interface);
var connection_br = stream.reader(io, &recv_buffer);
var connection_bw = stream.writer(io, &send_buffer);
var server = http.Server.init(&connection_br.interface, &connection_bw.interface);
try expectEqual(.ready, server.reader.state);
var request = try server.receiveHead();
@ -49,7 +58,7 @@ test "trailers" {
const gpa = std.testing.allocator;
var client: http.Client = .{ .allocator = gpa };
var client: http.Client = .{ .allocator = gpa, .io = io };
defer client.deinit();
const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/trailer", .{
@ -92,17 +101,18 @@ test "trailers" {
}
test "HTTP server handles a chunked transfer coding request" {
const test_server = try createTestServer(struct {
const io = std.testing.io;
const test_server = try createTestServer(io, struct {
fn run(test_server: *TestServer) anyerror!void {
const net_server = &test_server.net_server;
var recv_buffer: [8192]u8 = undefined;
var send_buffer: [500]u8 = undefined;
const connection = try net_server.accept();
defer connection.stream.close();
var stream = try net_server.accept(io);
defer stream.close(io);
var connection_br = connection.stream.reader(&recv_buffer);
var connection_bw = connection.stream.writer(&send_buffer);
var server = http.Server.init(connection_br.interface(), &connection_bw.interface);
var connection_br = stream.reader(io, &recv_buffer);
var connection_bw = stream.writer(io, &send_buffer);
var server = http.Server.init(&connection_br.interface, &connection_bw.interface);
var request = try server.receiveHead();
try expect(request.head.transfer_encoding == .chunked);
@ -136,12 +146,13 @@ test "HTTP server handles a chunked transfer coding request" {
"0\r\n" ++
"\r\n";
const gpa = std.testing.allocator;
const stream = try std.net.tcpConnectToHost(gpa, "127.0.0.1", test_server.port());
defer stream.close();
var stream_writer = stream.writer(&.{});
const host_name: net.HostName = try .init("127.0.0.1");
var stream = try host_name.connect(io, test_server.port(), .{ .mode = .stream });
defer stream.close(io);
var stream_writer = stream.writer(io, &.{});
try stream_writer.interface.writeAll(request_bytes);
const gpa = std.testing.allocator;
const expected_response =
"HTTP/1.1 200 OK\r\n" ++
"connection: close\r\n" ++
@ -149,26 +160,27 @@ test "HTTP server handles a chunked transfer coding request" {
"content-type: text/plain\r\n" ++
"\r\n" ++
"message from server!\n";
var stream_reader = stream.reader(&.{});
const response = try stream_reader.interface().allocRemaining(gpa, .limited(expected_response.len + 1));
var stream_reader = stream.reader(io, &.{});
const response = try stream_reader.interface.allocRemaining(gpa, .limited(expected_response.len + 1));
defer gpa.free(response);
try expectEqualStrings(expected_response, response);
}
test "echo content server" {
const test_server = try createTestServer(struct {
const io = std.testing.io;
const test_server = try createTestServer(io, struct {
fn run(test_server: *TestServer) anyerror!void {
const net_server = &test_server.net_server;
var recv_buffer: [1024]u8 = undefined;
var send_buffer: [100]u8 = undefined;
accept: while (!test_server.shutting_down) {
const connection = try net_server.accept();
defer connection.stream.close();
var stream = try net_server.accept(io);
defer stream.close(io);
var connection_br = connection.stream.reader(&recv_buffer);
var connection_bw = connection.stream.writer(&send_buffer);
var http_server = http.Server.init(connection_br.interface(), &connection_bw.interface);
var connection_br = stream.reader(io, &recv_buffer);
var connection_bw = stream.writer(io, &send_buffer);
var http_server = http.Server.init(&connection_br.interface, &connection_bw.interface);
while (http_server.reader.state == .ready) {
var request = http_server.receiveHead() catch |err| switch (err) {
@ -235,7 +247,7 @@ test "echo content server" {
defer test_server.destroy();
{
var client: http.Client = .{ .allocator = std.testing.allocator };
var client: http.Client = .{ .allocator = std.testing.allocator, .io = io };
defer client.deinit();
try echoTests(&client, test_server.port());
@ -243,6 +255,8 @@ test "echo content server" {
}
test "Server.Request.respondStreaming non-chunked, unknown content-length" {
const io = std.testing.io;
if (builtin.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/21457
return error.SkipZigTest;
@ -250,19 +264,19 @@ test "Server.Request.respondStreaming non-chunked, unknown content-length" {
// In this case, the response is expected to stream until the connection is
// closed, indicating the end of the body.
const test_server = try createTestServer(struct {
const test_server = try createTestServer(io, struct {
fn run(test_server: *TestServer) anyerror!void {
const net_server = &test_server.net_server;
var recv_buffer: [1000]u8 = undefined;
var send_buffer: [500]u8 = undefined;
var remaining: usize = 1;
while (remaining != 0) : (remaining -= 1) {
const connection = try net_server.accept();
defer connection.stream.close();
var stream = try net_server.accept(io);
defer stream.close(io);
var connection_br = connection.stream.reader(&recv_buffer);
var connection_bw = connection.stream.writer(&send_buffer);
var server = http.Server.init(connection_br.interface(), &connection_bw.interface);
var connection_br = stream.reader(io, &recv_buffer);
var connection_bw = stream.writer(io, &send_buffer);
var server = http.Server.init(&connection_br.interface, &connection_bw.interface);
try expectEqual(.ready, server.reader.state);
var request = try server.receiveHead();
@ -286,14 +300,15 @@ test "Server.Request.respondStreaming non-chunked, unknown content-length" {
defer test_server.destroy();
const request_bytes = "GET /foo HTTP/1.1\r\n\r\n";
const gpa = std.testing.allocator;
const stream = try std.net.tcpConnectToHost(gpa, "127.0.0.1", test_server.port());
defer stream.close();
var stream_writer = stream.writer(&.{});
const host_name: net.HostName = try .init("127.0.0.1");
var stream = try host_name.connect(io, test_server.port(), .{ .mode = .stream });
defer stream.close(io);
var stream_writer = stream.writer(io, &.{});
try stream_writer.interface.writeAll(request_bytes);
var stream_reader = stream.reader(&.{});
const response = try stream_reader.interface().allocRemaining(gpa, .unlimited);
var stream_reader = stream.reader(io, &.{});
const gpa = std.testing.allocator;
const response = try stream_reader.interface.allocRemaining(gpa, .unlimited);
defer gpa.free(response);
var expected_response = std.array_list.Managed(u8).init(gpa);
@ -316,19 +331,21 @@ test "Server.Request.respondStreaming non-chunked, unknown content-length" {
}
test "receiving arbitrary http headers from the client" {
const test_server = try createTestServer(struct {
const io = std.testing.io;
const test_server = try createTestServer(io, struct {
fn run(test_server: *TestServer) anyerror!void {
const net_server = &test_server.net_server;
var recv_buffer: [666]u8 = undefined;
var send_buffer: [777]u8 = undefined;
var remaining: usize = 1;
while (remaining != 0) : (remaining -= 1) {
const connection = try net_server.accept();
defer connection.stream.close();
var stream = try net_server.accept(io);
defer stream.close(io);
var connection_br = connection.stream.reader(&recv_buffer);
var connection_bw = connection.stream.writer(&send_buffer);
var server = http.Server.init(connection_br.interface(), &connection_bw.interface);
var connection_br = stream.reader(io, &recv_buffer);
var connection_bw = stream.writer(io, &send_buffer);
var server = http.Server.init(&connection_br.interface, &connection_bw.interface);
try expectEqual(.ready, server.reader.state);
var request = try server.receiveHead();
@ -356,14 +373,15 @@ test "receiving arbitrary http headers from the client" {
"CoNneCtIoN:close\r\n" ++
"aoeu: asdf \r\n" ++
"\r\n";
const gpa = std.testing.allocator;
const stream = try std.net.tcpConnectToHost(gpa, "127.0.0.1", test_server.port());
defer stream.close();
var stream_writer = stream.writer(&.{});
const host_name: net.HostName = try .init("127.0.0.1");
var stream = try host_name.connect(io, test_server.port(), .{ .mode = .stream });
defer stream.close(io);
var stream_writer = stream.writer(io, &.{});
try stream_writer.interface.writeAll(request_bytes);
var stream_reader = stream.reader(&.{});
const response = try stream_reader.interface().allocRemaining(gpa, .unlimited);
var stream_reader = stream.reader(io, &.{});
const gpa = std.testing.allocator;
const response = try stream_reader.interface.allocRemaining(gpa, .unlimited);
defer gpa.free(response);
var expected_response = std.array_list.Managed(u8).init(gpa);
@ -376,24 +394,26 @@ test "receiving arbitrary http headers from the client" {
}
test "general client/server API coverage" {
const io = std.testing.io;
if (builtin.os.tag == .windows) {
// This test was never passing on Windows.
return error.SkipZigTest;
}
const test_server = try createTestServer(struct {
const test_server = try createTestServer(io, struct {
fn run(test_server: *TestServer) anyerror!void {
const net_server = &test_server.net_server;
var recv_buffer: [1024]u8 = undefined;
var send_buffer: [100]u8 = undefined;
outer: while (!test_server.shutting_down) {
var connection = try net_server.accept();
defer connection.stream.close();
var stream = try net_server.accept(io);
defer stream.close(io);
var connection_br = connection.stream.reader(&recv_buffer);
var connection_bw = connection.stream.writer(&send_buffer);
var http_server = http.Server.init(connection_br.interface(), &connection_bw.interface);
var connection_br = stream.reader(io, &recv_buffer);
var connection_bw = stream.writer(io, &send_buffer);
var http_server = http.Server.init(&connection_br.interface, &connection_bw.interface);
while (http_server.reader.state == .ready) {
var request = http_server.receiveHead() catch |err| switch (err) {
@ -401,7 +421,7 @@ test "general client/server API coverage" {
else => |e| return e,
};
try handleRequest(&request, net_server.listen_address.getPort());
try handleRequest(&request, net_server.socket.address.getPort());
}
}
}
@ -530,10 +550,10 @@ test "general client/server API coverage" {
}
fn getUnusedTcpPort() !u16 {
const addr = try std.net.Address.parseIp("127.0.0.1", 0);
var s = try addr.listen(.{});
defer s.deinit();
return s.listen_address.in.getPort();
const addr = try net.IpAddress.parse("127.0.0.1", 0);
var s = try addr.listen(io, .{});
defer s.deinit(io);
return s.socket.address.getPort();
}
});
defer test_server.destroy();
@ -541,7 +561,7 @@ test "general client/server API coverage" {
const log = std.log.scoped(.client);
const gpa = std.testing.allocator;
var client: http.Client = .{ .allocator = gpa };
var client: http.Client = .{ .allocator = gpa, .io = io };
defer client.deinit();
const port = test_server.port();
@ -867,18 +887,20 @@ test "general client/server API coverage" {
}
test "Server streams both reading and writing" {
const test_server = try createTestServer(struct {
const io = std.testing.io;
const test_server = try createTestServer(io, struct {
fn run(test_server: *TestServer) anyerror!void {
const net_server = &test_server.net_server;
var recv_buffer: [1024]u8 = undefined;
var send_buffer: [777]u8 = undefined;
const connection = try net_server.accept();
defer connection.stream.close();
var stream = try net_server.accept(io);
defer stream.close(io);
var connection_br = connection.stream.reader(&recv_buffer);
var connection_bw = connection.stream.writer(&send_buffer);
var server = http.Server.init(connection_br.interface(), &connection_bw.interface);
var connection_br = stream.reader(io, &recv_buffer);
var connection_bw = stream.writer(io, &send_buffer);
var server = http.Server.init(&connection_br.interface, &connection_bw.interface);
var request = try server.receiveHead();
var read_buffer: [100]u8 = undefined;
var br = try request.readerExpectContinue(&read_buffer);
@ -904,7 +926,10 @@ test "Server streams both reading and writing" {
});
defer test_server.destroy();
var client: http.Client = .{ .allocator = std.testing.allocator };
var client: http.Client = .{
.allocator = std.testing.allocator,
.io = io,
};
defer client.deinit();
var redirect_buffer: [555]u8 = undefined;
@ -1075,36 +1100,40 @@ fn echoTests(client: *http.Client, port: u16) !void {
}
const TestServer = struct {
io: Io,
shutting_down: bool,
server_thread: std.Thread,
net_server: std.net.Server,
net_server: net.Server,
fn destroy(self: *@This()) void {
const io = self.io;
self.shutting_down = true;
const conn = std.net.tcpConnectToAddress(self.net_server.listen_address) catch @panic("shutdown failure");
conn.close();
var stream = self.net_server.socket.address.connect(io, .{ .mode = .stream }) catch
@panic("shutdown failure");
stream.close(io);
self.server_thread.join();
self.net_server.deinit();
self.net_server.deinit(io);
std.testing.allocator.destroy(self);
}
fn port(self: @This()) u16 {
return self.net_server.listen_address.in.getPort();
return self.net_server.socket.address.getPort();
}
};
fn createTestServer(S: type) !*TestServer {
fn createTestServer(io: Io, S: type) !*TestServer {
if (builtin.single_threaded) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and native_endian == .big) {
// https://github.com/ziglang/zig/issues/13782
return error.SkipZigTest;
}
const address = try std.net.Address.parseIp("127.0.0.1", 0);
const address = try net.IpAddress.parse("127.0.0.1", 0);
const test_server = try std.testing.allocator.create(TestServer);
test_server.* = .{
.net_server = try address.listen(.{ .reuse_address = true }),
.io = io,
.net_server = try address.listen(io, .{ .reuse_address = true }),
.shutting_down = false,
.server_thread = try std.Thread.spawn(.{}, S.run, .{test_server}),
};
@ -1112,18 +1141,19 @@ fn createTestServer(S: type) !*TestServer {
}
test "redirect to different connection" {
const test_server_new = try createTestServer(struct {
const io = std.testing.io;
const test_server_new = try createTestServer(io, struct {
fn run(test_server: *TestServer) anyerror!void {
const net_server = &test_server.net_server;
var recv_buffer: [888]u8 = undefined;
var send_buffer: [777]u8 = undefined;
const connection = try net_server.accept();
defer connection.stream.close();
var stream = try net_server.accept(io);
defer stream.close(io);
var connection_br = connection.stream.reader(&recv_buffer);
var connection_bw = connection.stream.writer(&send_buffer);
var server = http.Server.init(connection_br.interface(), &connection_bw.interface);
var connection_br = stream.reader(io, &recv_buffer);
var connection_bw = stream.writer(io, &send_buffer);
var server = http.Server.init(&connection_br.interface, &connection_bw.interface);
var request = try server.receiveHead();
try expectEqualStrings(request.head.target, "/ok");
try request.respond("good job, you pass", .{});
@ -1136,23 +1166,23 @@ test "redirect to different connection" {
};
global.other_port = test_server_new.port();
const test_server_orig = try createTestServer(struct {
const test_server_orig = try createTestServer(io, struct {
fn run(test_server: *TestServer) anyerror!void {
const net_server = &test_server.net_server;
var recv_buffer: [999]u8 = undefined;
var send_buffer: [100]u8 = undefined;
const connection = try net_server.accept();
defer connection.stream.close();
var stream = try net_server.accept(io);
defer stream.close(io);
var loc_buf: [50]u8 = undefined;
const new_loc = try std.fmt.bufPrint(&loc_buf, "http://127.0.0.1:{d}/ok", .{
global.other_port.?,
});
var connection_br = connection.stream.reader(&recv_buffer);
var connection_bw = connection.stream.writer(&send_buffer);
var server = http.Server.init(connection_br.interface(), &connection_bw.interface);
var connection_br = stream.reader(io, &recv_buffer);
var connection_bw = stream.writer(io, &send_buffer);
var server = http.Server.init(&connection_br.interface, &connection_bw.interface);
var request = try server.receiveHead();
try expectEqualStrings(request.head.target, "/help");
try request.respond("", .{
@ -1167,7 +1197,10 @@ test "redirect to different connection" {
const gpa = std.testing.allocator;
var client: http.Client = .{ .allocator = gpa };
var client: http.Client = .{
.allocator = gpa,
.io = io,
};
defer client.deinit();
var loc_buf: [100]u8 = undefined;

View File

@ -1678,6 +1678,7 @@ test "indexOfPos empty needle" {
/// needle.len must be > 0
/// does not count overlapping needles
pub fn count(comptime T: type, haystack: []const T, needle: []const T) usize {
if (needle.len == 1) return countScalar(T, haystack, needle[0]);
assert(needle.len > 0);
var i: usize = 0;
var found: usize = 0;
@ -1704,9 +1705,9 @@ test count {
try testing.expect(count(u8, "owowowu", "owowu") == 1);
}
/// Returns the number of needles inside the haystack
pub fn countScalar(comptime T: type, haystack: []const T, needle: T) usize {
const n = haystack.len;
/// Returns the number of times `element` appears in a slice of memory.
pub fn countScalar(comptime T: type, list: []const T, element: T) usize {
const n = list.len;
var i: usize = 0;
var found: usize = 0;
@ -1716,16 +1717,16 @@ pub fn countScalar(comptime T: type, haystack: []const T, needle: T) usize {
if (std.simd.suggestVectorLength(T)) |block_size| {
const Block = @Vector(block_size, T);
const letter_mask: Block = @splat(needle);
const letter_mask: Block = @splat(element);
while (n - i >= block_size) : (i += block_size) {
const haystack_block: Block = haystack[i..][0..block_size].*;
const haystack_block: Block = list[i..][0..block_size].*;
found += std.simd.countTrues(letter_mask == haystack_block);
}
}
}
for (haystack[i..n]) |item| {
found += @intFromBool(item == needle);
for (list[i..n]) |item| {
found += @intFromBool(item == element);
}
return found;
@ -1735,6 +1736,7 @@ test countScalar {
try testing.expectEqual(0, countScalar(u8, "", 'h'));
try testing.expectEqual(1, countScalar(u8, "h", 'h'));
try testing.expectEqual(2, countScalar(u8, "hh", 'h'));
try testing.expectEqual(2, countScalar(u8, "ahhb", 'h'));
try testing.expectEqual(3, countScalar(u8, " abcabc abc", 'b'));
}
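The new fast path routes single-element needles through `countScalar`; results are unchanged, as in this small sketch built from the test values above:

const std = @import("std");

test "count with a single-element needle (illustrative)" {
    // Both forms count the same occurrences; the first now dispatches to countScalar.
    try std.testing.expectEqual(3, std.mem.count(u8, " abcabc abc", "b"));
    try std.testing.expectEqual(3, std.mem.countScalar(u8, " abcabc abc", 'b'));
}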
@ -1744,6 +1746,7 @@ test countScalar {
//
/// See also: `containsAtLeastScalar`
pub fn containsAtLeast(comptime T: type, haystack: []const T, expected_count: usize, needle: []const T) bool {
if (needle.len == 1) return containsAtLeastScalar(T, haystack, expected_count, needle[0]);
assert(needle.len > 0);
if (expected_count == 0) return true;
@ -1774,32 +1777,52 @@ test containsAtLeast {
try testing.expect(!containsAtLeast(u8, " radar radar ", 3, "radar"));
}
/// Returns true if the haystack contains expected_count or more needles
//
/// See also: `containsAtLeast`
pub fn containsAtLeastScalar(comptime T: type, haystack: []const T, expected_count: usize, needle: T) bool {
if (expected_count == 0) return true;
/// Deprecated in favor of `containsAtLeastScalar2`.
pub fn containsAtLeastScalar(comptime T: type, list: []const T, minimum: usize, element: T) bool {
return containsAtLeastScalar2(T, list, element, minimum);
}
/// Returns true if `element` appears at least `minimum` number of times in `list`.
//
/// Related:
/// * `containsAtLeast`
/// * `countScalar`
pub fn containsAtLeastScalar2(comptime T: type, list: []const T, element: T, minimum: usize) bool {
const n = list.len;
var i: usize = 0;
var found: usize = 0;
for (haystack) |item| {
if (item == needle) {
found += 1;
if (found == expected_count) return true;
if (use_vectors_for_comparison and
(@typeInfo(T) == .int or @typeInfo(T) == .float) and std.math.isPowerOfTwo(@bitSizeOf(T)))
{
if (std.simd.suggestVectorLength(T)) |block_size| {
const Block = @Vector(block_size, T);
const letter_mask: Block = @splat(element);
while (n - i >= block_size) : (i += block_size) {
const haystack_block: Block = list[i..][0..block_size].*;
found += std.simd.countTrues(letter_mask == haystack_block);
if (found >= minimum) return true;
}
}
}
for (list[i..n]) |item| {
found += @intFromBool(item == element);
if (found >= minimum) return true;
}
return false;
}
test containsAtLeastScalar {
try testing.expect(containsAtLeastScalar(u8, "aa", 0, 'a'));
try testing.expect(containsAtLeastScalar(u8, "aa", 1, 'a'));
try testing.expect(containsAtLeastScalar(u8, "aa", 2, 'a'));
try testing.expect(!containsAtLeastScalar(u8, "aa", 3, 'a'));
test containsAtLeastScalar2 {
try testing.expect(containsAtLeastScalar2(u8, "aa", 'a', 0));
try testing.expect(containsAtLeastScalar2(u8, "aa", 'a', 1));
try testing.expect(containsAtLeastScalar2(u8, "aa", 'a', 2));
try testing.expect(!containsAtLeastScalar2(u8, "aa", 'a', 3));
try testing.expect(containsAtLeastScalar(u8, "adadda", 3, 'd'));
try testing.expect(!containsAtLeastScalar(u8, "adadda", 4, 'd'));
try testing.expect(containsAtLeastScalar2(u8, "adadda", 'd', 3));
try testing.expect(!containsAtLeastScalar2(u8, "adadda", 'd', 4));
}
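Because the deprecation only reorders the trailing parameters, migrating call sites is mechanical; a small before/after sketch using the values from the test above:

const std = @import("std");
const mem = std.mem;

test "migrating to containsAtLeastScalar2 (illustrative)" {
    // Deprecated parameter order: list, minimum, element.
    try std.testing.expect(mem.containsAtLeastScalar(u8, "adadda", 3, 'd'));
    // New parameter order: list, element, minimum.
    try std.testing.expect(mem.containsAtLeastScalar2(u8, "adadda", 'd', 3));
}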
/// Reads an integer from memory with size equal to bytes.len.

File diff suppressed because it is too large


View File

@ -1,373 +0,0 @@
const std = @import("std");
const builtin = @import("builtin");
const net = std.net;
const mem = std.mem;
const testing = std.testing;
test "parse and render IP addresses at comptime" {
comptime {
const ipv6addr = net.Address.parseIp("::1", 0) catch unreachable;
try std.testing.expectFmt("[::1]:0", "{f}", .{ipv6addr});
const ipv4addr = net.Address.parseIp("127.0.0.1", 0) catch unreachable;
try std.testing.expectFmt("127.0.0.1:0", "{f}", .{ipv4addr});
try testing.expectError(error.InvalidIPAddressFormat, net.Address.parseIp("::123.123.123.123", 0));
try testing.expectError(error.InvalidIPAddressFormat, net.Address.parseIp("127.01.0.1", 0));
try testing.expectError(error.InvalidIPAddressFormat, net.Address.resolveIp("::123.123.123.123", 0));
try testing.expectError(error.InvalidIPAddressFormat, net.Address.resolveIp("127.01.0.1", 0));
}
}
test "format IPv6 address with no zero runs" {
const addr = try std.net.Address.parseIp6("2001:db8:1:2:3:4:5:6", 0);
try std.testing.expectFmt("[2001:db8:1:2:3:4:5:6]:0", "{f}", .{addr});
}
test "parse IPv6 addresses and check compressed form" {
try std.testing.expectFmt("[2001:db8::1:0:0:2]:0", "{f}", .{
try std.net.Address.parseIp6("2001:0db8:0000:0000:0001:0000:0000:0002", 0),
});
try std.testing.expectFmt("[2001:db8::1:2]:0", "{f}", .{
try std.net.Address.parseIp6("2001:0db8:0000:0000:0000:0000:0001:0002", 0),
});
try std.testing.expectFmt("[2001:db8:1:0:1::2]:0", "{f}", .{
try std.net.Address.parseIp6("2001:0db8:0001:0000:0001:0000:0000:0002", 0),
});
}
test "parse IPv6 address, check raw bytes" {
const expected_raw: [16]u8 = .{
0x20, 0x01, 0x0d, 0xb8, // 2001:db8
0x00, 0x00, 0x00, 0x00, // :0000:0000
0x00, 0x01, 0x00, 0x00, // :0001:0000
0x00, 0x00, 0x00, 0x02, // :0000:0002
};
const addr = try std.net.Address.parseIp6("2001:db8:0000:0000:0001:0000:0000:0002", 0);
const actual_raw = addr.in6.sa.addr[0..];
try std.testing.expectEqualSlices(u8, expected_raw[0..], actual_raw);
}
test "parse and render IPv6 addresses" {
var buffer: [100]u8 = undefined;
const ips = [_][]const u8{
"FF01:0:0:0:0:0:0:FB",
"FF01::Fb",
"::1",
"::",
"1::",
"2001:db8::",
"::1234:5678",
"2001:db8::1234:5678",
"FF01::FB%1234",
"::ffff:123.5.123.5",
};
const printed = [_][]const u8{
"ff01::fb",
"ff01::fb",
"::1",
"::",
"1::",
"2001:db8::",
"::1234:5678",
"2001:db8::1234:5678",
"ff01::fb%1234",
"::ffff:123.5.123.5",
};
for (ips, 0..) |ip, i| {
const addr = net.Address.parseIp6(ip, 0) catch unreachable;
var newIp = std.fmt.bufPrint(buffer[0..], "{f}", .{addr}) catch unreachable;
try std.testing.expect(std.mem.eql(u8, printed[i], newIp[1 .. newIp.len - 3]));
if (builtin.os.tag == .linux) {
const addr_via_resolve = net.Address.resolveIp6(ip, 0) catch unreachable;
var newResolvedIp = std.fmt.bufPrint(buffer[0..], "{f}", .{addr_via_resolve}) catch unreachable;
try std.testing.expect(std.mem.eql(u8, printed[i], newResolvedIp[1 .. newResolvedIp.len - 3]));
}
}
try testing.expectError(error.InvalidCharacter, net.Address.parseIp6(":::", 0));
try testing.expectError(error.Overflow, net.Address.parseIp6("FF001::FB", 0));
try testing.expectError(error.InvalidCharacter, net.Address.parseIp6("FF01::Fb:zig", 0));
try testing.expectError(error.InvalidEnd, net.Address.parseIp6("FF01:0:0:0:0:0:0:FB:", 0));
try testing.expectError(error.Incomplete, net.Address.parseIp6("FF01:", 0));
try testing.expectError(error.InvalidIpv4Mapping, net.Address.parseIp6("::123.123.123.123", 0));
try testing.expectError(error.Incomplete, net.Address.parseIp6("1", 0));
// TODO Make this test pass on other operating systems.
if (builtin.os.tag == .linux or comptime builtin.os.tag.isDarwin() or builtin.os.tag == .windows) {
try testing.expectError(error.Incomplete, net.Address.resolveIp6("ff01::fb%", 0));
// Assumes IFNAMESIZE will always be a multiple of 2
try testing.expectError(error.Overflow, net.Address.resolveIp6("ff01::fb%wlp3" ++ "s0" ** @divExact(std.posix.IFNAMESIZE - 4, 2), 0));
try testing.expectError(error.Overflow, net.Address.resolveIp6("ff01::fb%12345678901234", 0));
}
}
test "invalid but parseable IPv6 scope ids" {
if (builtin.os.tag != .linux and comptime !builtin.os.tag.isDarwin() and builtin.os.tag != .windows) {
// Currently, resolveIp6 with alphanumerical scope IDs only works on Linux.
// TODO Make this test pass on other operating systems.
return error.SkipZigTest;
}
try testing.expectError(error.InterfaceNotFound, net.Address.resolveIp6("ff01::fb%123s45678901234", 0));
}
test "parse and render IPv4 addresses" {
var buffer: [18]u8 = undefined;
for ([_][]const u8{
"0.0.0.0",
"255.255.255.255",
"1.2.3.4",
"123.255.0.91",
"127.0.0.1",
}) |ip| {
const addr = net.Address.parseIp4(ip, 0) catch unreachable;
var newIp = std.fmt.bufPrint(buffer[0..], "{f}", .{addr}) catch unreachable;
try std.testing.expect(std.mem.eql(u8, ip, newIp[0 .. newIp.len - 2]));
}
try testing.expectError(error.Overflow, net.Address.parseIp4("256.0.0.1", 0));
try testing.expectError(error.InvalidCharacter, net.Address.parseIp4("x.0.0.1", 0));
try testing.expectError(error.InvalidEnd, net.Address.parseIp4("127.0.0.1.1", 0));
try testing.expectError(error.Incomplete, net.Address.parseIp4("127.0.0.", 0));
try testing.expectError(error.InvalidCharacter, net.Address.parseIp4("100..0.1", 0));
try testing.expectError(error.NonCanonical, net.Address.parseIp4("127.01.0.1", 0));
}
test "parse and render UNIX addresses" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
if (!net.has_unix_sockets) return error.SkipZigTest;
const addr = net.Address.initUnix("/tmp/testpath") catch unreachable;
try std.testing.expectFmt("/tmp/testpath", "{f}", .{addr});
const too_long = [_]u8{'a'} ** 200;
try testing.expectError(error.NameTooLong, net.Address.initUnix(too_long[0..]));
}
test "resolve DNS" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
if (builtin.os.tag == .windows) {
_ = try std.os.windows.WSAStartup(2, 2);
}
defer {
if (builtin.os.tag == .windows) {
std.os.windows.WSACleanup() catch unreachable;
}
}
// Resolve localhost, this should not fail.
{
const localhost_v4 = try net.Address.parseIp("127.0.0.1", 80);
const localhost_v6 = try net.Address.parseIp("::2", 80);
const result = try net.getAddressList(testing.allocator, "localhost", 80);
defer result.deinit();
for (result.addrs) |addr| {
if (addr.eql(localhost_v4) or addr.eql(localhost_v6)) break;
} else @panic("unexpected address for localhost");
}
{
// The tests are required to work even when there is no Internet connection,
// so some of these errors we must accept and skip the test.
const result = net.getAddressList(testing.allocator, "example.com", 80) catch |err| switch (err) {
error.UnknownHostName => return error.SkipZigTest,
error.TemporaryNameServerFailure => return error.SkipZigTest,
else => return err,
};
result.deinit();
}
}
test "listen on a port, send bytes, receive bytes" {
if (builtin.single_threaded) return error.SkipZigTest;
if (builtin.os.tag == .wasi) return error.SkipZigTest;
if (builtin.os.tag == .windows) {
_ = try std.os.windows.WSAStartup(2, 2);
}
defer {
if (builtin.os.tag == .windows) {
std.os.windows.WSACleanup() catch unreachable;
}
}
// Try only the IPv4 variant as some CI builders have no IPv6 localhost
// configured.
const localhost = try net.Address.parseIp("127.0.0.1", 0);
var server = try localhost.listen(.{});
defer server.deinit();
const S = struct {
fn clientFn(server_address: net.Address) !void {
const socket = try net.tcpConnectToAddress(server_address);
defer socket.close();
var stream_writer = socket.writer(&.{});
try stream_writer.interface.writeAll("Hello world!");
}
};
const t = try std.Thread.spawn(.{}, S.clientFn, .{server.listen_address});
defer t.join();
var client = try server.accept();
defer client.stream.close();
var buf: [16]u8 = undefined;
var stream_reader = client.stream.reader(&.{});
const n = try stream_reader.interface().readSliceShort(&buf);
try testing.expectEqual(@as(usize, 12), n);
try testing.expectEqualSlices(u8, "Hello world!", buf[0..n]);
}
test "listen on an in use port" {
if (builtin.os.tag != .linux and comptime !builtin.os.tag.isDarwin() and builtin.os.tag != .windows) {
// TODO build abstractions for other operating systems
return error.SkipZigTest;
}
const localhost = try net.Address.parseIp("127.0.0.1", 0);
var server1 = try localhost.listen(.{ .reuse_address = true });
defer server1.deinit();
var server2 = try server1.listen_address.listen(.{ .reuse_address = true });
defer server2.deinit();
}
fn testClientToHost(allocator: mem.Allocator, name: []const u8, port: u16) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const connection = try net.tcpConnectToHost(allocator, name, port);
defer connection.close();
var buf: [100]u8 = undefined;
const len = try connection.read(&buf);
const msg = buf[0..len];
try testing.expect(mem.eql(u8, msg, "hello from server\n"));
}
fn testClient(addr: net.Address) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
const socket_file = try net.tcpConnectToAddress(addr);
defer socket_file.close();
var buf: [100]u8 = undefined;
const len = try socket_file.read(&buf);
const msg = buf[0..len];
try testing.expect(mem.eql(u8, msg, "hello from server\n"));
}
fn testServer(server: *net.Server) anyerror!void {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
var client = try server.accept();
const stream = client.stream.writer();
try stream.print("hello from server\n", .{});
}
test "listen on a unix socket, send bytes, receive bytes" {
if (builtin.single_threaded) return error.SkipZigTest;
if (!net.has_unix_sockets) return error.SkipZigTest;
if (builtin.os.tag == .windows) {
_ = try std.os.windows.WSAStartup(2, 2);
}
defer {
if (builtin.os.tag == .windows) {
std.os.windows.WSACleanup() catch unreachable;
}
}
const socket_path = try generateFileName("socket.unix");
defer testing.allocator.free(socket_path);
const socket_addr = try net.Address.initUnix(socket_path);
defer std.fs.cwd().deleteFile(socket_path) catch {};
var server = try socket_addr.listen(.{});
defer server.deinit();
const S = struct {
fn clientFn(path: []const u8) !void {
const socket = try net.connectUnixSocket(path);
defer socket.close();
var stream_writer = socket.writer(&.{});
try stream_writer.interface.writeAll("Hello world!");
}
};
const t = try std.Thread.spawn(.{}, S.clientFn, .{socket_path});
defer t.join();
var client = try server.accept();
defer client.stream.close();
var buf: [16]u8 = undefined;
var stream_reader = client.stream.reader(&.{});
const n = try stream_reader.interface().readSliceShort(&buf);
try testing.expectEqual(@as(usize, 12), n);
try testing.expectEqualSlices(u8, "Hello world!", buf[0..n]);
}
test "listen on a unix socket with reuse_address option" {
if (!net.has_unix_sockets) return error.SkipZigTest;
// Windows doesn't implement reuse port option.
if (builtin.os.tag == .windows) return error.SkipZigTest;
const socket_path = try generateFileName("socket.unix");
defer testing.allocator.free(socket_path);
const socket_addr = try net.Address.initUnix(socket_path);
defer std.fs.cwd().deleteFile(socket_path) catch {};
var server = try socket_addr.listen(.{ .reuse_address = true });
server.deinit();
}
fn generateFileName(base_name: []const u8) ![]const u8 {
const random_bytes_count = 12;
const sub_path_len = comptime std.fs.base64_encoder.calcSize(random_bytes_count);
var random_bytes: [12]u8 = undefined;
std.crypto.random.bytes(&random_bytes);
var sub_path: [sub_path_len]u8 = undefined;
_ = std.fs.base64_encoder.encode(&sub_path, &random_bytes);
return std.fmt.allocPrint(testing.allocator, "{s}-{s}", .{ sub_path[0..], base_name });
}
test "non-blocking tcp server" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
if (true) {
// https://github.com/ziglang/zig/issues/18315
return error.SkipZigTest;
}
const localhost = try net.Address.parseIp("127.0.0.1", 0);
var server = localhost.listen(.{ .force_nonblocking = true });
defer server.deinit();
const accept_err = server.accept();
try testing.expectError(error.WouldBlock, accept_err);
const socket_file = try net.tcpConnectToAddress(server.listen_address);
defer socket_file.close();
var client = try server.accept();
defer client.stream.close();
const stream = client.stream.writer();
try stream.print("hello from server\n", .{});
var buf: [100]u8 = undefined;
const len = try socket_file.read(&buf);
const msg = buf[0..len];
try testing.expect(mem.eql(u8, msg, "hello from server\n"));
}

View File

@ -57,7 +57,7 @@ pub var argv: [][*:0]u8 = if (builtin.link_libc) undefined else switch (native_o
};
/// Call from Windows-specific code if you already have a WTF-16LE encoded, null terminated string.
/// Otherwise use `access` or `accessZ`.
/// Otherwise use `access`.
pub fn accessW(path: [*:0]const u16) windows.GetFileAttributesError!void {
const ret = try windows.GetFileAttributesW(path);
if (ret != windows.INVALID_FILE_ATTRIBUTES) {
@ -137,8 +137,6 @@ pub fn getFdPath(fd: std.posix.fd_t, out_buffer: *[max_path_bytes]u8) std.posix.
switch (err) {
error.NotLink => unreachable,
error.BadPathName => unreachable,
error.InvalidUtf8 => unreachable, // WASI-only
error.InvalidWtf8 => unreachable, // Windows-only
error.UnsupportedReparsePointType => unreachable, // Windows-only
error.NetworkNotFound => unreachable, // Windows-only
else => |e| return e,
@ -153,7 +151,6 @@ pub fn getFdPath(fd: std.posix.fd_t, out_buffer: *[max_path_bytes]u8) std.posix.
const target = posix.readlinkZ(proc_path, out_buffer) catch |err| switch (err) {
error.UnsupportedReparsePointType => unreachable,
error.NotLink => unreachable,
error.InvalidUtf8 => unreachable, // WASI-only
else => |e| return e,
};
return target;
@ -201,28 +198,13 @@ pub fn getFdPath(fd: std.posix.fd_t, out_buffer: *[max_path_bytes]u8) std.posix.
}
}
/// WASI-only. Same as `fstatat` but targeting WASI.
/// `pathname` should be encoded as valid UTF-8.
/// See also `fstatat`.
pub fn fstatat_wasi(dirfd: posix.fd_t, pathname: []const u8, flags: wasi.lookupflags_t) posix.FStatAtError!wasi.filestat_t {
var stat: wasi.filestat_t = undefined;
switch (wasi.path_filestat_get(dirfd, flags, pathname.ptr, pathname.len, &stat)) {
.SUCCESS => return stat,
.INVAL => unreachable,
.BADF => unreachable, // Always a race condition.
.NOMEM => return error.SystemResources,
.ACCES => return error.AccessDenied,
.FAULT => unreachable,
.NAMETOOLONG => return error.NameTooLong,
.NOENT => return error.FileNotFound,
.NOTDIR => return error.FileNotFound,
.NOTCAPABLE => return error.AccessDenied,
.ILSEQ => return error.InvalidUtf8,
else => |err| return posix.unexpectedErrno(err),
}
}
pub const FstatError = error{
SystemResources,
AccessDenied,
Unexpected,
};
pub fn fstat_wasi(fd: posix.fd_t) posix.FStatError!wasi.filestat_t {
pub fn fstat_wasi(fd: posix.fd_t) FstatError!wasi.filestat_t {
var stat: wasi.filestat_t = undefined;
switch (wasi.fd_filestat_get(fd, &stat)) {
.SUCCESS => return stat,

View File

@ -479,50 +479,7 @@ pub const SHUT = struct {
pub const RDWR = 2;
};
pub const SIG = struct {
pub const BLOCK = 0;
pub const UNBLOCK = 1;
pub const SETMASK = 2;
pub const HUP = 1;
pub const INT = 2;
pub const QUIT = 3;
pub const ILL = 4;
pub const TRAP = 5;
pub const ABRT = 6;
pub const IOT = ABRT;
pub const BUS = 7;
pub const FPE = 8;
pub const KILL = 9;
pub const USR1 = 10;
pub const SEGV = 11;
pub const USR2 = 12;
pub const PIPE = 13;
pub const ALRM = 14;
pub const TERM = 15;
pub const STKFLT = 16;
pub const CHLD = 17;
pub const CONT = 18;
pub const STOP = 19;
pub const TSTP = 20;
pub const TTIN = 21;
pub const TTOU = 22;
pub const URG = 23;
pub const XCPU = 24;
pub const XFSZ = 25;
pub const VTALRM = 26;
pub const PROF = 27;
pub const WINCH = 28;
pub const IO = 29;
pub const POLL = 29;
pub const PWR = 30;
pub const SYS = 31;
pub const UNUSED = SIG.SYS;
pub const ERR: ?Sigaction.handler_fn = @ptrFromInt(std.math.maxInt(usize));
pub const DFL: ?Sigaction.handler_fn = @ptrFromInt(0);
pub const IGN: ?Sigaction.handler_fn = @ptrFromInt(1);
};
pub const SIG = linux.SIG;
pub const Sigaction = extern struct {
pub const handler_fn = *align(1) const fn (i32) callconv(.c) void;

View File

@ -1,10 +1,8 @@
//! This file provides the system interface functions for Linux matching those
//! that are provided by libc, whether or not libc is linked. The following
//! abstractions are made:
//! * Work around kernel bugs and limitations. For example, see sendmmsg.
//! * Implement all the syscalls in the same way that libc functions will
//! provide `rename` when only the `renameat` syscall exists.
//! * Does not support POSIX thread cancellation.
const std = @import("../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
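As context for the libc-matching convention described in the header comment, the raw wrappers in this file return a `usize` that encodes errno; a hedged sketch of the decode pattern, using the enum-based `kill` from a later hunk:

const std = @import("std");
const linux = std.os.linux;

// Illustrative only (Linux): decode the errno-encoded return value.
fn requestTerminate(pid: linux.pid_t) !void {
    const rc = linux.kill(pid, .TERM); // `sig` is a SIG enum value after this change
    switch (linux.E.init(rc)) {
        .SUCCESS => {},
        else => |err| return std.posix.unexpectedErrno(err),
    }
}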
@ -624,7 +622,7 @@ pub fn fork() usize {
} else if (@hasField(SYS, "fork")) {
return syscall0(.fork);
} else {
return syscall2(.clone, SIG.CHLD, 0);
return syscall2(.clone, @intFromEnum(SIG.CHLD), 0);
}
}
@ -1534,16 +1532,16 @@ pub fn getrandom(buf: [*]u8, count: usize, flags: u32) usize {
return syscall3(.getrandom, @intFromPtr(buf), count, flags);
}
pub fn kill(pid: pid_t, sig: i32) usize {
return syscall2(.kill, @as(usize, @bitCast(@as(isize, pid))), @as(usize, @bitCast(@as(isize, sig))));
pub fn kill(pid: pid_t, sig: SIG) usize {
return syscall2(.kill, @as(usize, @bitCast(@as(isize, pid))), @intFromEnum(sig));
}
pub fn tkill(tid: pid_t, sig: i32) usize {
return syscall2(.tkill, @as(usize, @bitCast(@as(isize, tid))), @as(usize, @bitCast(@as(isize, sig))));
pub fn tkill(tid: pid_t, sig: SIG) usize {
return syscall2(.tkill, @as(usize, @bitCast(@as(isize, tid))), @intFromEnum(sig));
}
pub fn tgkill(tgid: pid_t, tid: pid_t, sig: i32) usize {
return syscall3(.tgkill, @as(usize, @bitCast(@as(isize, tgid))), @as(usize, @bitCast(@as(isize, tid))), @as(usize, @bitCast(@as(isize, sig))));
pub fn tgkill(tgid: pid_t, tid: pid_t, sig: SIG) usize {
return syscall3(.tgkill, @as(usize, @bitCast(@as(isize, tgid))), @as(usize, @bitCast(@as(isize, tid))), @intFromEnum(sig));
}
pub fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8) usize {
@ -1836,7 +1834,7 @@ pub fn seteuid(euid: uid_t) usize {
// id will not be changed. Since uid_t is unsigned, this wraps around to the
// max value in C.
comptime assert(@typeInfo(uid_t) == .int and @typeInfo(uid_t).int.signedness == .unsigned);
return setresuid(std.math.maxInt(uid_t), euid, std.math.maxInt(uid_t));
return setresuid(maxInt(uid_t), euid, maxInt(uid_t));
}
pub fn setegid(egid: gid_t) usize {
@ -1847,7 +1845,7 @@ pub fn setegid(egid: gid_t) usize {
// id will not be changed. Since gid_t is unsigned, this wraps around to the
// max value in C.
comptime assert(@typeInfo(uid_t) == .int and @typeInfo(uid_t).int.signedness == .unsigned);
return setresgid(std.math.maxInt(gid_t), egid, std.math.maxInt(gid_t));
return setresgid(maxInt(gid_t), egid, maxInt(gid_t));
}
pub fn getresuid(ruid: *uid_t, euid: *uid_t, suid: *uid_t) usize {
@ -1925,11 +1923,11 @@ pub fn sigprocmask(flags: u32, noalias set: ?*const sigset_t, noalias oldset: ?*
return syscall4(.rt_sigprocmask, flags, @intFromPtr(set), @intFromPtr(oldset), NSIG / 8);
}
pub fn sigaction(sig: u8, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) usize {
assert(sig > 0);
assert(sig < NSIG);
assert(sig != SIG.KILL);
assert(sig != SIG.STOP);
pub fn sigaction(sig: SIG, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) usize {
assert(@intFromEnum(sig) > 0);
assert(@intFromEnum(sig) < NSIG);
assert(sig != .KILL);
assert(sig != .STOP);
var ksa: k_sigaction = undefined;
var oldksa: k_sigaction = undefined;
@ -1960,8 +1958,8 @@ pub fn sigaction(sig: u8, noalias act: ?*const Sigaction, noalias oact: ?*Sigact
const result = switch (native_arch) {
// The sparc version of rt_sigaction needs the restorer function to be passed as an argument too.
.sparc, .sparc64 => syscall5(.rt_sigaction, sig, ksa_arg, oldksa_arg, @intFromPtr(ksa.restorer), mask_size),
else => syscall4(.rt_sigaction, sig, ksa_arg, oldksa_arg, mask_size),
.sparc, .sparc64 => syscall5(.rt_sigaction, @intFromEnum(sig), ksa_arg, oldksa_arg, @intFromPtr(ksa.restorer), mask_size),
else => syscall4(.rt_sigaction, @intFromEnum(sig), ksa_arg, oldksa_arg, mask_size),
};
if (E.init(result) != .SUCCESS) return result;
@ -2011,27 +2009,27 @@ pub fn sigfillset() sigset_t {
return [_]SigsetElement{~@as(SigsetElement, 0)} ** sigset_len;
}
fn sigset_bit_index(sig: usize) struct { word: usize, mask: SigsetElement } {
assert(sig > 0);
assert(sig < NSIG);
const bit = sig - 1;
fn sigset_bit_index(sig: SIG) struct { word: usize, mask: SigsetElement } {
assert(@intFromEnum(sig) > 0);
assert(@intFromEnum(sig) < NSIG);
const bit = @intFromEnum(sig) - 1;
return .{
.word = bit / @bitSizeOf(SigsetElement),
.mask = @as(SigsetElement, 1) << @truncate(bit % @bitSizeOf(SigsetElement)),
};
}
pub fn sigaddset(set: *sigset_t, sig: usize) void {
pub fn sigaddset(set: *sigset_t, sig: SIG) void {
const index = sigset_bit_index(sig);
(set.*)[index.word] |= index.mask;
}
pub fn sigdelset(set: *sigset_t, sig: usize) void {
pub fn sigdelset(set: *sigset_t, sig: SIG) void {
const index = sigset_bit_index(sig);
(set.*)[index.word] ^= index.mask;
}
pub fn sigismember(set: *const sigset_t, sig: usize) bool {
pub fn sigismember(set: *const sigset_t, sig: SIG) bool {
const index = sigset_bit_index(sig);
return ((set.*)[index.word] & index.mask) != 0;
}
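A short sketch of the enum-based sigset helpers defined above, using only functions visible in this hunk (Linux-only):

const std = @import("std");
const linux = std.os.linux;

test "enum-based sigset helpers (illustrative, Linux-only)" {
    var set = linux.sigfillset();
    linux.sigdelset(&set, .INT); // signals are `SIG` enum values now
    try std.testing.expect(!linux.sigismember(&set, .INT));
    try std.testing.expect(linux.sigismember(&set, .TERM));
}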
@ -2081,44 +2079,7 @@ pub fn sendmsg(fd: i32, msg: *const msghdr_const, flags: u32) usize {
}
}
pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize {
if (@typeInfo(usize).int.bits > @typeInfo(@typeInfo(mmsghdr).@"struct".fields[1].type).int.bits) {
// workaround kernel brokenness:
// if adding up all iov_len overflows a i32 then split into multiple calls
// see https://www.openwall.com/lists/musl/2014/06/07/5
const kvlen = if (vlen > IOV_MAX) IOV_MAX else vlen; // matches kernel
var next_unsent: usize = 0;
for (msgvec[0..kvlen], 0..) |*msg, i| {
var size: i32 = 0;
const msg_iovlen = @as(usize, @intCast(msg.hdr.iovlen)); // kernel side this is treated as unsigned
for (msg.hdr.iov[0..msg_iovlen]) |iov| {
if (iov.len > std.math.maxInt(i32) or @addWithOverflow(size, @as(i32, @intCast(iov.len)))[1] != 0) {
// batch-send all messages up to the current message
if (next_unsent < i) {
const batch_size = i - next_unsent;
const r = syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(&msgvec[next_unsent]), batch_size, flags);
if (E.init(r) != .SUCCESS) return next_unsent;
if (r < batch_size) return next_unsent + r;
}
// send current message as own packet
const r = sendmsg(fd, &msg.hdr, flags);
if (E.init(r) != .SUCCESS) return r;
// Linux limits the total bytes sent by sendmsg to INT_MAX, so this cast is safe.
msg.len = @as(u32, @intCast(r));
next_unsent = i + 1;
break;
}
size += @intCast(iov.len);
}
}
if (next_unsent < kvlen or next_unsent == 0) { // want to make sure at least one syscall occurs (e.g. to trigger MSG.EOR)
const batch_size = kvlen - next_unsent;
const r = syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(&msgvec[next_unsent]), batch_size, flags);
if (E.init(r) != .SUCCESS) return r;
return next_unsent + r;
}
return kvlen;
}
pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr, vlen: u32, flags: u32) usize {
return syscall4(.sendmmsg, @as(usize, @bitCast(@as(isize, fd))), @intFromPtr(msgvec), vlen, flags);
}
@ -2674,11 +2635,11 @@ pub fn pidfd_getfd(pidfd: fd_t, targetfd: fd_t, flags: u32) usize {
);
}
pub fn pidfd_send_signal(pidfd: fd_t, sig: i32, info: ?*siginfo_t, flags: u32) usize {
pub fn pidfd_send_signal(pidfd: fd_t, sig: SIG, info: ?*siginfo_t, flags: u32) usize {
return syscall4(
.pidfd_send_signal,
@as(usize, @bitCast(@as(isize, pidfd))),
@as(usize, @bitCast(@as(isize, sig))),
@intFromEnum(sig),
@intFromPtr(info),
flags,
);
@ -3775,136 +3736,138 @@ pub const SA = if (is_mips) struct {
pub const RESTORER = 0x04000000;
};
pub const SIG = if (is_mips) struct {
pub const SIG = if (is_mips) enum(u32) {
pub const BLOCK = 1;
pub const UNBLOCK = 2;
pub const SETMASK = 3;
// https://github.com/torvalds/linux/blob/ca91b9500108d4cf083a635c2e11c884d5dd20ea/arch/mips/include/uapi/asm/signal.h#L25
pub const HUP = 1;
pub const INT = 2;
pub const QUIT = 3;
pub const ILL = 4;
pub const TRAP = 5;
pub const ABRT = 6;
pub const IOT = ABRT;
pub const EMT = 7;
pub const FPE = 8;
pub const KILL = 9;
pub const BUS = 10;
pub const SEGV = 11;
pub const SYS = 12;
pub const PIPE = 13;
pub const ALRM = 14;
pub const TERM = 15;
pub const USR1 = 16;
pub const USR2 = 17;
pub const CHLD = 18;
pub const PWR = 19;
pub const WINCH = 20;
pub const URG = 21;
pub const IO = 22;
pub const POLL = IO;
pub const STOP = 23;
pub const TSTP = 24;
pub const CONT = 25;
pub const TTIN = 26;
pub const TTOU = 27;
pub const VTALRM = 28;
pub const PROF = 29;
pub const XCPU = 30;
pub const XFZ = 31;
pub const ERR: ?Sigaction.handler_fn = @ptrFromInt(maxInt(usize));
pub const DFL: ?Sigaction.handler_fn = @ptrFromInt(0);
pub const IGN: ?Sigaction.handler_fn = @ptrFromInt(1);
} else if (is_sparc) struct {
pub const IOT: SIG = .ABRT;
pub const POLL: SIG = .IO;
// /arch/mips/include/uapi/asm/signal.h#L25
HUP = 1,
INT = 2,
QUIT = 3,
ILL = 4,
TRAP = 5,
ABRT = 6,
EMT = 7,
FPE = 8,
KILL = 9,
BUS = 10,
SEGV = 11,
SYS = 12,
PIPE = 13,
ALRM = 14,
TERM = 15,
USR1 = 16,
USR2 = 17,
CHLD = 18,
PWR = 19,
WINCH = 20,
URG = 21,
IO = 22,
STOP = 23,
TSTP = 24,
CONT = 25,
TTIN = 26,
TTOU = 27,
VTALRM = 28,
PROF = 29,
XCPU = 30,
XFZ = 31,
} else if (is_sparc) enum(u32) {
pub const BLOCK = 1;
pub const UNBLOCK = 2;
pub const SETMASK = 4;
pub const HUP = 1;
pub const INT = 2;
pub const QUIT = 3;
pub const ILL = 4;
pub const TRAP = 5;
pub const ABRT = 6;
pub const EMT = 7;
pub const FPE = 8;
pub const KILL = 9;
pub const BUS = 10;
pub const SEGV = 11;
pub const SYS = 12;
pub const PIPE = 13;
pub const ALRM = 14;
pub const TERM = 15;
pub const URG = 16;
pub const STOP = 17;
pub const TSTP = 18;
pub const CONT = 19;
pub const CHLD = 20;
pub const TTIN = 21;
pub const TTOU = 22;
pub const POLL = 23;
pub const XCPU = 24;
pub const XFSZ = 25;
pub const VTALRM = 26;
pub const PROF = 27;
pub const WINCH = 28;
pub const LOST = 29;
pub const USR1 = 30;
pub const USR2 = 31;
pub const IOT = ABRT;
pub const CLD = CHLD;
pub const PWR = LOST;
pub const IO = SIG.POLL;
pub const ERR: ?Sigaction.handler_fn = @ptrFromInt(maxInt(usize));
pub const DFL: ?Sigaction.handler_fn = @ptrFromInt(0);
pub const IGN: ?Sigaction.handler_fn = @ptrFromInt(1);
} else struct {
pub const IOT: SIG = .ABRT;
pub const CLD: SIG = .CHLD;
pub const PWR: SIG = .LOST;
pub const POLL: SIG = .IO;
HUP = 1,
INT = 2,
QUIT = 3,
ILL = 4,
TRAP = 5,
ABRT = 6,
EMT = 7,
FPE = 8,
KILL = 9,
BUS = 10,
SEGV = 11,
SYS = 12,
PIPE = 13,
ALRM = 14,
TERM = 15,
URG = 16,
STOP = 17,
TSTP = 18,
CONT = 19,
CHLD = 20,
TTIN = 21,
TTOU = 22,
IO = 23,
XCPU = 24,
XFSZ = 25,
VTALRM = 26,
PROF = 27,
WINCH = 28,
LOST = 29,
USR1 = 30,
USR2 = 31,
} else enum(u32) {
pub const BLOCK = 0;
pub const UNBLOCK = 1;
pub const SETMASK = 2;
pub const HUP = 1;
pub const INT = 2;
pub const QUIT = 3;
pub const ILL = 4;
pub const TRAP = 5;
pub const ABRT = 6;
pub const IOT = ABRT;
pub const BUS = 7;
pub const FPE = 8;
pub const KILL = 9;
pub const USR1 = 10;
pub const SEGV = 11;
pub const USR2 = 12;
pub const PIPE = 13;
pub const ALRM = 14;
pub const TERM = 15;
pub const STKFLT = 16;
pub const CHLD = 17;
pub const CONT = 18;
pub const STOP = 19;
pub const TSTP = 20;
pub const TTIN = 21;
pub const TTOU = 22;
pub const URG = 23;
pub const XCPU = 24;
pub const XFSZ = 25;
pub const VTALRM = 26;
pub const PROF = 27;
pub const WINCH = 28;
pub const IO = 29;
pub const POLL = 29;
pub const PWR = 30;
pub const SYS = 31;
pub const UNUSED = SIG.SYS;
pub const ERR: ?Sigaction.handler_fn = @ptrFromInt(maxInt(usize));
pub const DFL: ?Sigaction.handler_fn = @ptrFromInt(0);
pub const IGN: ?Sigaction.handler_fn = @ptrFromInt(1);
pub const POLL: SIG = .IO;
pub const IOT: SIG = .ABRT;
HUP = 1,
INT = 2,
QUIT = 3,
ILL = 4,
TRAP = 5,
ABRT = 6,
BUS = 7,
FPE = 8,
KILL = 9,
USR1 = 10,
SEGV = 11,
USR2 = 12,
PIPE = 13,
ALRM = 14,
TERM = 15,
STKFLT = 16,
CHLD = 17,
CONT = 18,
STOP = 19,
TSTP = 20,
TTIN = 21,
TTOU = 22,
URG = 23,
XCPU = 24,
XFSZ = 25,
VTALRM = 26,
PROF = 27,
WINCH = 28,
IO = 29,
PWR = 30,
SYS = 31,
};
pub const kernel_rwf = u32;
@ -5825,7 +5788,7 @@ pub const TFD = switch (native_arch) {
};
const k_sigaction_funcs = struct {
const handler = ?*align(1) const fn (i32) callconv(.c) void;
const handler = ?*align(1) const fn (SIG) callconv(.c) void;
const restorer = *const fn () callconv(.c) void;
};
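Handlers now receive the `SIG` enum directly instead of a raw `i32`; a minimal sketch of a function matching the new handler type (the body is illustrative):

const std = @import("std");

// Illustrative only: matches the new `fn (SIG) callconv(.c) void` handler shape.
fn onSignal(sig: std.os.linux.SIG) callconv(.c) void {
    switch (sig) {
        .INT, .TERM => {}, // e.g. flag a shutdown request
        else => {},
    }
}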
@ -5856,8 +5819,8 @@ pub const k_sigaction = switch (native_arch) {
///
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = struct {
pub const handler_fn = *align(1) const fn (i32) callconv(.c) void;
pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.c) void;
pub const handler_fn = *align(1) const fn (SIG) callconv(.c) void;
pub const sigaction_fn = *const fn (SIG, *const siginfo_t, ?*anyopaque) callconv(.c) void;
handler: extern union {
handler: ?handler_fn,
@ -5994,11 +5957,6 @@ pub const mmsghdr = extern struct {
len: u32,
};
pub const mmsghdr_const = extern struct {
hdr: msghdr_const,
len: u32,
};
pub const epoll_data = extern union {
ptr: usize,
fd: i32,
@ -6304,14 +6262,14 @@ const siginfo_fields_union = extern union {
pub const siginfo_t = if (is_mips)
extern struct {
signo: i32,
signo: SIG,
code: i32,
errno: i32,
fields: siginfo_fields_union,
}
else
extern struct {
signo: i32,
signo: SIG,
errno: i32,
code: i32,
fields: siginfo_fields_union,
@ -7140,12 +7098,6 @@ pub const IPPROTO = struct {
pub const MAX = 256;
};
pub const RR = struct {
pub const A = 1;
pub const CNAME = 5;
pub const AAAA = 28;
};
pub const tcp_repair_opt = extern struct {
opt_code: u32,
opt_val: u32,
@ -8700,7 +8652,7 @@ pub const PR = enum(i32) {
pub const SET_MM_MAP = 14;
pub const SET_MM_MAP_SIZE = 15;
pub const SET_PTRACER_ANY = std.math.maxInt(c_ulong);
pub const SET_PTRACER_ANY = maxInt(c_ulong);
pub const FP_MODE_FR = 1 << 0;
pub const FP_MODE_FRE = 1 << 1;
@ -9884,8 +9836,10 @@ pub const msghdr = extern struct {
name: ?*sockaddr,
namelen: socklen_t,
iov: [*]iovec,
/// The kernel and glibc use `usize` for this field; POSIX and musl use `c_int`.
iovlen: usize,
control: ?*anyopaque,
/// The kernel and glibc use `usize` for this field; POSIX and musl use `socklen_t`.
controllen: usize,
flags: u32,
};
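Because `iovlen` and `controllen` use the kernel/glibc `usize` layout, slice lengths assign directly with no narrowing cast. A hedged sketch of filling the const variant for sendmsg(2), assuming `msghdr_const` mirrors the field set above:

// Sketch only: build a sendmsg(2) header from a slice of iovecs.
fn exampleMsghdr(
    addr: *const sockaddr,
    addr_len: socklen_t,
    iovecs: []const std.posix.iovec_const,
) msghdr_const {
    return .{
        .name = addr,
        .namelen = addr_len,
        .iov = iovecs.ptr,
        .iovlen = iovecs.len, // usize here; POSIX's c_int would need a cast
        .control = null,
        .controllen = 0,
        .flags = 0,
    };
}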
@ -9902,6 +9856,7 @@ pub const msghdr_const = extern struct {
// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/linux/socket.h?id=b320789d6883cc00ac78ce83bccbfe7ed58afcf0#n105
pub const cmsghdr = extern struct {
/// The kernel and glibc use `usize` for this field; musl uses `socklen_t`.
len: usize,
level: i32,
type: i32,

View File

@ -3,14 +3,14 @@ const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
const net = std.net;
const net = std.Io.net;
const posix = std.posix;
const linux = std.os.linux;
const testing = std.testing;
const is_linux = builtin.os.tag == .linux;
const page_size_min = std.heap.page_size_min;
fd: posix.fd_t = -1,
fd: linux.fd_t = -1,
sq: SubmissionQueue,
cq: CompletionQueue,
flags: u32,
@ -62,7 +62,7 @@ pub fn init_params(entries: u16, p: *linux.io_uring_params) !IoUring {
.NOSYS => return error.SystemOutdated,
else => |errno| return posix.unexpectedErrno(errno),
}
const fd = @as(posix.fd_t, @intCast(res));
const fd = @as(linux.fd_t, @intCast(res));
assert(fd >= 0);
errdefer posix.close(fd);
@ -341,7 +341,7 @@ pub fn cq_advance(self: *IoUring, count: u32) void {
/// apply to the write, since the fsync may complete before the write is issued to the disk.
/// You should preferably use `link_with_next_sqe()` on a write's SQE to link it with an fsync,
/// or else insert a full write barrier using `drain_previous_sqes()` when queueing an fsync.
pub fn fsync(self: *IoUring, user_data: u64, fd: posix.fd_t, flags: u32) !*linux.io_uring_sqe {
pub fn fsync(self: *IoUring, user_data: u64, fd: linux.fd_t, flags: u32) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
sqe.prep_fsync(fd, flags);
sqe.user_data = user_data;
@ -386,7 +386,7 @@ pub const ReadBuffer = union(enum) {
pub fn read(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
buffer: ReadBuffer,
offset: u64,
) !*linux.io_uring_sqe {
@ -409,7 +409,7 @@ pub fn read(
pub fn write(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
buffer: []const u8,
offset: u64,
) !*linux.io_uring_sqe {
@ -433,7 +433,7 @@ pub fn write(
/// See https://github.com/axboe/liburing/issues/291
///
/// Returns a pointer to the SQE so that you can further modify the SQE for advanced use cases.
pub fn splice(self: *IoUring, user_data: u64, fd_in: posix.fd_t, off_in: u64, fd_out: posix.fd_t, off_out: u64, len: usize) !*linux.io_uring_sqe {
pub fn splice(self: *IoUring, user_data: u64, fd_in: linux.fd_t, off_in: u64, fd_out: linux.fd_t, off_out: u64, len: usize) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
sqe.prep_splice(fd_in, off_in, fd_out, off_out, len);
sqe.user_data = user_data;
@ -448,7 +448,7 @@ pub fn splice(self: *IoUring, user_data: u64, fd_in: posix.fd_t, off_in: u64, fd
pub fn read_fixed(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
buffer: *posix.iovec,
offset: u64,
buffer_index: u16,
@ -466,7 +466,7 @@ pub fn read_fixed(
pub fn writev(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
iovecs: []const posix.iovec_const,
offset: u64,
) !*linux.io_uring_sqe {
@ -484,7 +484,7 @@ pub fn writev(
pub fn write_fixed(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
buffer: *posix.iovec,
offset: u64,
buffer_index: u16,
@ -501,7 +501,7 @@ pub fn write_fixed(
pub fn accept(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
addr: ?*posix.sockaddr,
addrlen: ?*posix.socklen_t,
flags: u32,
@ -523,7 +523,7 @@ pub fn accept(
pub fn accept_multishot(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
addr: ?*posix.sockaddr,
addrlen: ?*posix.socklen_t,
flags: u32,
@ -548,7 +548,7 @@ pub fn accept_multishot(
pub fn accept_direct(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
addr: ?*posix.sockaddr,
addrlen: ?*posix.socklen_t,
flags: u32,
@ -564,7 +564,7 @@ pub fn accept_direct(
pub fn accept_multishot_direct(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
addr: ?*posix.sockaddr,
addrlen: ?*posix.socklen_t,
flags: u32,
@ -580,7 +580,7 @@ pub fn accept_multishot_direct(
pub fn connect(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
addr: *const posix.sockaddr,
addrlen: posix.socklen_t,
) !*linux.io_uring_sqe {
@ -595,8 +595,8 @@ pub fn connect(
pub fn epoll_ctl(
self: *IoUring,
user_data: u64,
epfd: posix.fd_t,
fd: posix.fd_t,
epfd: linux.fd_t,
fd: linux.fd_t,
op: u32,
ev: ?*linux.epoll_event,
) !*linux.io_uring_sqe {
@ -626,7 +626,7 @@ pub const RecvBuffer = union(enum) {
pub fn recv(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
buffer: RecvBuffer,
flags: u32,
) !*linux.io_uring_sqe {
@ -650,7 +650,7 @@ pub fn recv(
pub fn send(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
buffer: []const u8,
flags: u32,
) !*linux.io_uring_sqe {
@ -678,7 +678,7 @@ pub fn send(
pub fn send_zc(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
buffer: []const u8,
send_flags: u32,
zc_flags: u16,
@ -695,7 +695,7 @@ pub fn send_zc(
pub fn send_zc_fixed(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
buffer: []const u8,
send_flags: u32,
zc_flags: u16,
@ -713,8 +713,8 @@ pub fn send_zc_fixed(
pub fn recvmsg(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
msg: *posix.msghdr,
fd: linux.fd_t,
msg: *linux.msghdr,
flags: u32,
) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
@ -729,8 +729,8 @@ pub fn recvmsg(
pub fn sendmsg(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
msg: *const posix.msghdr_const,
fd: linux.fd_t,
msg: *const linux.msghdr_const,
flags: u32,
) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
@ -745,8 +745,8 @@ pub fn sendmsg(
pub fn sendmsg_zc(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
msg: *const posix.msghdr_const,
fd: linux.fd_t,
msg: *const linux.msghdr_const,
flags: u32,
) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
@ -761,7 +761,7 @@ pub fn sendmsg_zc(
pub fn openat(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
path: [*:0]const u8,
flags: linux.O,
mode: posix.mode_t,
@ -786,7 +786,7 @@ pub fn openat(
pub fn openat_direct(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
path: [*:0]const u8,
flags: linux.O,
mode: posix.mode_t,
@ -801,7 +801,7 @@ pub fn openat_direct(
/// Queues (but does not submit) an SQE to perform a `close(2)`.
/// Returns a pointer to the SQE.
/// Available since 5.6.
pub fn close(self: *IoUring, user_data: u64, fd: posix.fd_t) !*linux.io_uring_sqe {
pub fn close(self: *IoUring, user_data: u64, fd: linux.fd_t) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
sqe.prep_close(fd);
sqe.user_data = user_data;
@ -896,7 +896,7 @@ pub fn link_timeout(
pub fn poll_add(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
poll_mask: u32,
) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
@ -939,7 +939,7 @@ pub fn poll_update(
pub fn fallocate(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
mode: i32,
offset: u64,
len: u64,
@ -955,7 +955,7 @@ pub fn fallocate(
pub fn statx(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
path: [:0]const u8,
flags: u32,
mask: u32,
@ -1008,9 +1008,9 @@ pub fn shutdown(
pub fn renameat(
self: *IoUring,
user_data: u64,
old_dir_fd: posix.fd_t,
old_dir_fd: linux.fd_t,
old_path: [*:0]const u8,
new_dir_fd: posix.fd_t,
new_dir_fd: linux.fd_t,
new_path: [*:0]const u8,
flags: u32,
) !*linux.io_uring_sqe {
@ -1025,7 +1025,7 @@ pub fn renameat(
pub fn unlinkat(
self: *IoUring,
user_data: u64,
dir_fd: posix.fd_t,
dir_fd: linux.fd_t,
path: [*:0]const u8,
flags: u32,
) !*linux.io_uring_sqe {
@ -1040,7 +1040,7 @@ pub fn unlinkat(
pub fn mkdirat(
self: *IoUring,
user_data: u64,
dir_fd: posix.fd_t,
dir_fd: linux.fd_t,
path: [*:0]const u8,
mode: posix.mode_t,
) !*linux.io_uring_sqe {
@ -1056,7 +1056,7 @@ pub fn symlinkat(
self: *IoUring,
user_data: u64,
target: [*:0]const u8,
new_dir_fd: posix.fd_t,
new_dir_fd: linux.fd_t,
link_path: [*:0]const u8,
) !*linux.io_uring_sqe {
const sqe = try self.get_sqe();
@ -1070,9 +1070,9 @@ pub fn symlinkat(
pub fn linkat(
self: *IoUring,
user_data: u64,
old_dir_fd: posix.fd_t,
old_dir_fd: linux.fd_t,
old_path: [*:0]const u8,
new_dir_fd: posix.fd_t,
new_dir_fd: linux.fd_t,
new_path: [*:0]const u8,
flags: u32,
) !*linux.io_uring_sqe {
@ -1144,7 +1144,7 @@ pub fn waitid(
/// Registering file descriptors will wait for the ring to idle.
/// Files are automatically unregistered by the kernel when the ring is torn down.
/// An application need unregister only if it wants to register a new array of file descriptors.
pub fn register_files(self: *IoUring, fds: []const posix.fd_t) !void {
pub fn register_files(self: *IoUring, fds: []const linux.fd_t) !void {
assert(self.fd >= 0);
const res = linux.io_uring_register(
self.fd,
@ -1163,7 +1163,7 @@ pub fn register_files(self: *IoUring, fds: []const posix.fd_t) !void {
/// * removing an existing entry (set the fd to -1)
/// * replacing an existing entry with a new fd
/// Adding new file descriptors must be done with `register_files`.
pub fn register_files_update(self: *IoUring, offset: u32, fds: []const posix.fd_t) !void {
pub fn register_files_update(self: *IoUring, offset: u32, fds: []const linux.fd_t) !void {
assert(self.fd >= 0);
const FilesUpdate = extern struct {
@ -1232,7 +1232,7 @@ pub fn register_file_alloc_range(self: *IoUring, offset: u32, len: u32) !void {
/// Registers the file descriptor for an eventfd that will be notified of completion events on
/// an io_uring instance.
/// Only a single eventfd can be registered at any given point in time.
pub fn register_eventfd(self: *IoUring, fd: posix.fd_t) !void {
pub fn register_eventfd(self: *IoUring, fd: linux.fd_t) !void {
assert(self.fd >= 0);
const res = linux.io_uring_register(
self.fd,
@ -1247,7 +1247,7 @@ pub fn register_eventfd(self: *IoUring, fd: posix.fd_t) !void {
/// an io_uring instance. Notifications are only posted for events that complete in an async manner.
/// This means that events that complete inline while being submitted do not trigger a notification event.
/// Only a single eventfd can be registered at any given point in time.
pub fn register_eventfd_async(self: *IoUring, fd: posix.fd_t) !void {
pub fn register_eventfd_async(self: *IoUring, fd: linux.fd_t) !void {
assert(self.fd >= 0);
const res = linux.io_uring_register(
self.fd,
@ -1405,7 +1405,7 @@ pub fn socket_direct_alloc(
pub fn bind(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
addr: *const posix.sockaddr,
addrlen: posix.socklen_t,
flags: u32,
@ -1422,7 +1422,7 @@ pub fn bind(
pub fn listen(
self: *IoUring,
user_data: u64,
fd: posix.fd_t,
fd: linux.fd_t,
backlog: usize,
flags: u32,
) !*linux.io_uring_sqe {
@ -1513,7 +1513,7 @@ pub const SubmissionQueue = struct {
sqe_head: u32 = 0,
sqe_tail: u32 = 0,
pub fn init(fd: posix.fd_t, p: linux.io_uring_params) !SubmissionQueue {
pub fn init(fd: linux.fd_t, p: linux.io_uring_params) !SubmissionQueue {
assert(fd >= 0);
assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
const size = @max(
@ -1576,7 +1576,7 @@ pub const CompletionQueue = struct {
overflow: *u32,
cqes: []linux.io_uring_cqe,
pub fn init(fd: posix.fd_t, p: linux.io_uring_params, sq: SubmissionQueue) !CompletionQueue {
pub fn init(fd: linux.fd_t, p: linux.io_uring_params, sq: SubmissionQueue) !CompletionQueue {
assert(fd >= 0);
assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
const mmap = sq.mmap;
@ -1677,7 +1677,7 @@ pub const BufferGroup = struct {
}
// Prepare a recv operation which will select a buffer from this group.
pub fn recv(self: *BufferGroup, user_data: u64, fd: posix.fd_t, flags: u32) !*linux.io_uring_sqe {
pub fn recv(self: *BufferGroup, user_data: u64, fd: linux.fd_t, flags: u32) !*linux.io_uring_sqe {
var sqe = try self.ring.get_sqe();
sqe.prep_rw(.RECV, fd, 0, 0, 0);
sqe.rw_flags = flags;
@ -1688,7 +1688,7 @@ pub const BufferGroup = struct {
}
// Prepare a multishot recv operation which will select a buffer from this group.
pub fn recv_multishot(self: *BufferGroup, user_data: u64, fd: posix.fd_t, flags: u32) !*linux.io_uring_sqe {
pub fn recv_multishot(self: *BufferGroup, user_data: u64, fd: linux.fd_t, flags: u32) !*linux.io_uring_sqe {
var sqe = try self.recv(user_data, fd, flags);
sqe.ioprio |= linux.IORING_RECV_MULTISHOT;
return sqe;
@ -1732,7 +1732,7 @@ pub const BufferGroup = struct {
/// `entries` is the number of entries requested in the buffer ring; must be a power of 2.
/// `group_id` is the chosen buffer group ID, unique in IO_Uring.
pub fn setup_buf_ring(
fd: posix.fd_t,
fd: linux.fd_t,
entries: u16,
group_id: u16,
flags: linux.io_uring_buf_reg.Flags,
@ -1758,7 +1758,7 @@ pub fn setup_buf_ring(
}
fn register_buf_ring(
fd: posix.fd_t,
fd: linux.fd_t,
addr: u64,
entries: u32,
group_id: u16,
@ -1780,7 +1780,7 @@ fn register_buf_ring(
try handle_register_buf_ring_result(res);
}
fn unregister_buf_ring(fd: posix.fd_t, group_id: u16) !void {
fn unregister_buf_ring(fd: linux.fd_t, group_id: u16) !void {
var reg = mem.zeroInit(linux.io_uring_buf_reg, .{
.bgid = group_id,
});
@ -1802,7 +1802,7 @@ fn handle_register_buf_ring_result(res: usize) !void {
}
// Unregisters a previously registered shared buffer ring, returned from io_uring_setup_buf_ring.
pub fn free_buf_ring(fd: posix.fd_t, br: *align(page_size_min) linux.io_uring_buf_ring, entries: u32, group_id: u16) void {
pub fn free_buf_ring(fd: linux.fd_t, br: *align(page_size_min) linux.io_uring_buf_ring, entries: u32, group_id: u16) void {
unregister_buf_ring(fd, group_id) catch {};
var mmap: []align(page_size_min) u8 = undefined;
mmap.ptr = @ptrCast(br);
@ -1873,7 +1873,7 @@ test "nop" {
};
defer {
ring.deinit();
testing.expectEqual(@as(posix.fd_t, -1), ring.fd) catch @panic("test failed");
testing.expectEqual(@as(linux.fd_t, -1), ring.fd) catch @panic("test failed");
}
const sqe = try ring.nop(0xaaaaaaaa);
@ -1949,7 +1949,7 @@ test "readv" {
// https://github.com/torvalds/linux/blob/v5.4/fs/io_uring.c#L3119-L3124 vs
// https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L6687-L6691
// We therefore avoid stressing sparse fd sets here:
var registered_fds = [_]posix.fd_t{0} ** 1;
var registered_fds = [_]linux.fd_t{0} ** 1;
const fd_index = 0;
registered_fds[fd_index] = fd;
try ring.register_files(registered_fds[0..]);
@ -2361,28 +2361,31 @@ test "sendmsg/recvmsg" {
};
defer ring.deinit();
var address_server = try net.Address.parseIp4("127.0.0.1", 0);
var address_server: linux.sockaddr.in = .{
.port = 0,
.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
};
const server = try posix.socket(address_server.any.family, posix.SOCK.DGRAM, 0);
const server = try posix.socket(address_server.family, posix.SOCK.DGRAM, 0);
defer posix.close(server);
try posix.setsockopt(server, posix.SOL.SOCKET, posix.SO.REUSEPORT, &mem.toBytes(@as(c_int, 1)));
try posix.setsockopt(server, posix.SOL.SOCKET, posix.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
try posix.bind(server, &address_server.any, address_server.getOsSockLen());
try posix.bind(server, addrAny(&address_server), @sizeOf(linux.sockaddr.in));
// set address_server to the OS-chosen IP/port.
var slen: posix.socklen_t = address_server.getOsSockLen();
try posix.getsockname(server, &address_server.any, &slen);
var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
try posix.getsockname(server, addrAny(&address_server), &slen);
const client = try posix.socket(address_server.any.family, posix.SOCK.DGRAM, 0);
const client = try posix.socket(address_server.family, posix.SOCK.DGRAM, 0);
defer posix.close(client);
const buffer_send = [_]u8{42} ** 128;
const iovecs_send = [_]posix.iovec_const{
posix.iovec_const{ .base = &buffer_send, .len = buffer_send.len },
};
const msg_send: posix.msghdr_const = .{
.name = &address_server.any,
.namelen = address_server.getOsSockLen(),
const msg_send: linux.msghdr_const = .{
.name = addrAny(&address_server),
.namelen = @sizeOf(linux.sockaddr.in),
.iov = &iovecs_send,
.iovlen = 1,
.control = null,
@ -2398,11 +2401,13 @@ test "sendmsg/recvmsg" {
var iovecs_recv = [_]posix.iovec{
posix.iovec{ .base = &buffer_recv, .len = buffer_recv.len },
};
const addr = [_]u8{0} ** 4;
var address_recv = net.Address.initIp4(addr, 0);
var msg_recv: posix.msghdr = .{
.name = &address_recv.any,
.namelen = address_recv.getOsSockLen(),
var address_recv: linux.sockaddr.in = .{
.port = 0,
.addr = 0,
};
var msg_recv: linux.msghdr = .{
.name = addrAny(&address_recv),
.namelen = @sizeOf(linux.sockaddr.in),
.iov = &iovecs_recv,
.iovlen = 1,
.control = null,
@ -2441,6 +2446,8 @@ test "sendmsg/recvmsg" {
test "timeout (after a relative time)" {
if (!is_linux) return error.SkipZigTest;
const io = testing.io;
var ring = IoUring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@ -2452,12 +2459,12 @@ test "timeout (after a relative time)" {
const margin = 5;
const ts: linux.kernel_timespec = .{ .sec = 0, .nsec = ms * 1000000 };
const started = std.time.milliTimestamp();
const started = try std.Io.Clock.awake.now(io);
const sqe = try ring.timeout(0x55555555, &ts, 0, 0);
try testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe.opcode);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
const stopped = std.time.milliTimestamp();
const stopped = try std.Io.Clock.awake.now(io);
try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x55555555,
@ -2466,7 +2473,8 @@ test "timeout (after a relative time)" {
}, cqe);
// Tests should not depend on timings: skip test if outside margin.
if (!std.math.approxEqAbs(f64, ms, @as(f64, @floatFromInt(stopped - started)), margin)) return error.SkipZigTest;
const ms_elapsed = started.durationTo(stopped).toMilliseconds();
if (ms_elapsed > margin) return error.SkipZigTest;
}
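The rewritten test above doubles as a reference for the new timing API. A minimal sketch of the same pattern outside a test, assuming an `io: std.Io` parameter and using `std.log` purely for illustration:

// Sketch only: time a callback with the awake clock.
fn exampleMeasure(io: std.Io, work: *const fn () void) !void {
    const started = try std.Io.Clock.awake.now(io);
    work();
    const stopped = try std.Io.Clock.awake.now(io);
    std.log.debug("work took {d} ms", .{started.durationTo(stopped).toMilliseconds()});
}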
test "timeout (after a number of completions)" {
@ -2777,7 +2785,7 @@ test "register_files_update" {
const fd = try posix.openZ("/dev/zero", .{ .ACCMODE = .RDONLY, .CLOEXEC = true }, 0);
defer posix.close(fd);
var registered_fds = [_]posix.fd_t{0} ** 2;
var registered_fds = [_]linux.fd_t{0} ** 2;
const fd_index = 0;
const fd_index2 = 1;
registered_fds[fd_index] = fd;
@ -2861,19 +2869,22 @@ test "shutdown" {
};
defer ring.deinit();
var address = try net.Address.parseIp4("127.0.0.1", 0);
var address: linux.sockaddr.in = .{
.port = 0,
.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
};
// Socket bound, expect shutdown to work
{
const server = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
const server = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
defer posix.close(server);
try posix.setsockopt(server, posix.SOL.SOCKET, posix.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
try posix.bind(server, &address.any, address.getOsSockLen());
try posix.bind(server, addrAny(&address), @sizeOf(linux.sockaddr.in));
try posix.listen(server, 1);
// set address to the OS-chosen IP/port.
var slen: posix.socklen_t = address.getOsSockLen();
try posix.getsockname(server, &address.any, &slen);
var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
try posix.getsockname(server, addrAny(&address), &slen);
const shutdown_sqe = try ring.shutdown(0x445445445, server, linux.SHUT.RD);
try testing.expectEqual(linux.IORING_OP.SHUTDOWN, shutdown_sqe.opcode);
@ -2898,7 +2909,7 @@ test "shutdown" {
// Socket not bound, expect to fail with ENOTCONN
{
const server = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
const server = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
defer posix.close(server);
const shutdown_sqe = ring.shutdown(0x445445445, server, linux.SHUT.RD) catch |err| switch (err) {
@ -2966,22 +2977,11 @@ test "renameat" {
}, cqe);
// Validate that the old file doesn't exist anymore
{
_ = tmp.dir.openFile(old_path, .{}) catch |err| switch (err) {
error.FileNotFound => {},
else => std.debug.panic("unexpected error: {}", .{err}),
};
}
try testing.expectError(error.FileNotFound, tmp.dir.openFile(old_path, .{}));
// Validate that the new file exists with the proper content
{
const new_file = try tmp.dir.openFile(new_path, .{});
defer new_file.close();
var new_file_data: [16]u8 = undefined;
const bytes_read = try new_file.readAll(&new_file_data);
try testing.expectEqualStrings("hello", new_file_data[0..bytes_read]);
}
try testing.expectEqualStrings("hello", try tmp.dir.readFile(new_path, &new_file_data));
}
test "unlinkat" {
@ -3179,12 +3179,8 @@ test "linkat" {
}, cqe);
// Validate the second file
const second_file = try tmp.dir.openFile(second_path, .{});
defer second_file.close();
var second_file_data: [16]u8 = undefined;
const bytes_read = try second_file.readAll(&second_file_data);
try testing.expectEqualStrings("hello", second_file_data[0..bytes_read]);
try testing.expectEqualStrings("hello", try tmp.dir.readFile(second_path, &second_file_data));
}
test "provide_buffers: read" {
@ -3588,7 +3584,10 @@ const SocketTestHarness = struct {
fn createSocketTestHarness(ring: *IoUring) !SocketTestHarness {
// Create a TCP server socket
var address = try net.Address.parseIp4("127.0.0.1", 0);
var address: linux.sockaddr.in = .{
.port = 0,
.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
};
const listener_socket = try createListenerSocket(&address);
errdefer posix.close(listener_socket);
@ -3598,9 +3597,9 @@ fn createSocketTestHarness(ring: *IoUring) !SocketTestHarness {
_ = try ring.accept(0xaaaaaaaa, listener_socket, &accept_addr, &accept_addr_len, 0);
// Create a TCP client socket
const client = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
errdefer posix.close(client);
_ = try ring.connect(0xcccccccc, client, &address.any, address.getOsSockLen());
_ = try ring.connect(0xcccccccc, client, addrAny(&address), @sizeOf(linux.sockaddr.in));
try testing.expectEqual(@as(u32, 2), try ring.submit());
@ -3636,18 +3635,18 @@ fn createSocketTestHarness(ring: *IoUring) !SocketTestHarness {
};
}
fn createListenerSocket(address: *net.Address) !posix.socket_t {
fn createListenerSocket(address: *linux.sockaddr.in) !posix.socket_t {
const kernel_backlog = 1;
const listener_socket = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
const listener_socket = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
errdefer posix.close(listener_socket);
try posix.setsockopt(listener_socket, posix.SOL.SOCKET, posix.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
try posix.bind(listener_socket, &address.any, address.getOsSockLen());
try posix.bind(listener_socket, addrAny(address), @sizeOf(linux.sockaddr.in));
try posix.listen(listener_socket, kernel_backlog);
// set address to the OS-chosen IP/port.
var slen: posix.socklen_t = address.getOsSockLen();
try posix.getsockname(listener_socket, &address.any, &slen);
var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
try posix.getsockname(listener_socket, addrAny(address), &slen);
return listener_socket;
}
@ -3662,7 +3661,10 @@ test "accept multishot" {
};
defer ring.deinit();
var address = try net.Address.parseIp4("127.0.0.1", 0);
var address: linux.sockaddr.in = .{
.port = 0,
.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
};
const listener_socket = try createListenerSocket(&address);
defer posix.close(listener_socket);
@ -3676,9 +3678,9 @@ test "accept multishot" {
var nr: usize = 4; // number of clients to connect
while (nr > 0) : (nr -= 1) {
// connect client
const client = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
errdefer posix.close(client);
try posix.connect(client, &address.any, address.getOsSockLen());
try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
// test accept completion
var cqe = try ring.copy_cqe();
@ -3756,10 +3758,13 @@ test "accept_direct" {
else => return err,
};
defer ring.deinit();
var address = try net.Address.parseIp4("127.0.0.1", 0);
var address: linux.sockaddr.in = .{
.port = 0,
.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
};
// register direct file descriptors
var registered_fds = [_]posix.fd_t{-1} ** 2;
var registered_fds = [_]linux.fd_t{-1} ** 2;
try ring.register_files(registered_fds[0..]);
const listener_socket = try createListenerSocket(&address);
@ -3779,8 +3784,8 @@ test "accept_direct" {
try testing.expectEqual(@as(u32, 1), try ring.submit());
// connect
const client = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
try posix.connect(client, &address.any, address.getOsSockLen());
const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
defer posix.close(client);
// accept completion
@ -3813,8 +3818,8 @@ test "accept_direct" {
_ = try ring.accept_direct(accept_userdata, listener_socket, null, null, 0);
try testing.expectEqual(@as(u32, 1), try ring.submit());
// connect
const client = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
try posix.connect(client, &address.any, address.getOsSockLen());
const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
defer posix.close(client);
// completion with error
const cqe_accept = try ring.copy_cqe();
@ -3830,6 +3835,11 @@ test "accept_direct" {
test "accept_multishot_direct" {
try skipKernelLessThan(.{ .major = 5, .minor = 19, .patch = 0 });
if (builtin.cpu.arch == .riscv64) {
// https://github.com/ziglang/zig/issues/25734
return error.SkipZigTest;
}
var ring = IoUring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@ -3837,9 +3847,12 @@ test "accept_multishot_direct" {
};
defer ring.deinit();
var address = try net.Address.parseIp4("127.0.0.1", 0);
var address: linux.sockaddr.in = .{
.port = 0,
.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
};
var registered_fds = [_]posix.fd_t{-1} ** 2;
var registered_fds = [_]linux.fd_t{-1} ** 2;
try ring.register_files(registered_fds[0..]);
const listener_socket = try createListenerSocket(&address);
@ -3855,8 +3868,8 @@ test "accept_multishot_direct" {
for (registered_fds) |_| {
// connect
const client = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
try posix.connect(client, &address.any, address.getOsSockLen());
const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
defer posix.close(client);
// accept completion
@ -3870,8 +3883,8 @@ test "accept_multishot_direct" {
// Multishot is terminated (more flag is not set).
{
// connect
const client = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
try posix.connect(client, &address.any, address.getOsSockLen());
const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
defer posix.close(client);
// completion with error
const cqe_accept = try ring.copy_cqe();
@ -3902,7 +3915,7 @@ test "socket" {
// test completion
var cqe = try ring.copy_cqe();
try testing.expectEqual(posix.E.SUCCESS, cqe.err());
const fd: posix.fd_t = @intCast(cqe.res);
const fd: linux.fd_t = @intCast(cqe.res);
try testing.expect(fd > 2);
posix.close(fd);
@ -3918,7 +3931,7 @@ test "socket_direct/socket_direct_alloc/close_direct" {
};
defer ring.deinit();
var registered_fds = [_]posix.fd_t{-1} ** 3;
var registered_fds = [_]linux.fd_t{-1} ** 3;
try ring.register_files(registered_fds[0..]);
// create socket in registered file descriptor at index 0 (last param)
@ -3944,7 +3957,10 @@ test "socket_direct/socket_direct_alloc/close_direct" {
try testing.expect(cqe_socket.res == 2); // returns registered file index
// use sockets from registered_fds in connect operation
var address = try net.Address.parseIp4("127.0.0.1", 0);
var address: linux.sockaddr.in = .{
.port = 0,
.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
};
const listener_socket = try createListenerSocket(&address);
defer posix.close(listener_socket);
const accept_userdata: u64 = 0xaaaaaaaa;
@ -3954,7 +3970,7 @@ test "socket_direct/socket_direct_alloc/close_direct" {
// prepare accept
_ = try ring.accept(accept_userdata, listener_socket, null, null, 0);
// prepare connect with fixed socket
const connect_sqe = try ring.connect(connect_userdata, @intCast(fd_index), &address.any, address.getOsSockLen());
const connect_sqe = try ring.connect(connect_userdata, @intCast(fd_index), addrAny(&address), @sizeOf(linux.sockaddr.in));
connect_sqe.flags |= linux.IOSQE_FIXED_FILE; // fd is fixed file index
// submit both
try testing.expectEqual(@as(u32, 2), try ring.submit());
@ -3996,7 +4012,7 @@ test "openat_direct/close_direct" {
};
defer ring.deinit();
var registered_fds = [_]posix.fd_t{-1} ** 3;
var registered_fds = [_]linux.fd_t{-1} ** 3;
try ring.register_files(registered_fds[0..]);
var tmp = std.testing.tmpDir(.{});
@ -4383,7 +4399,7 @@ test "ring mapped buffers multishot recv" {
fn buf_grp_recv_submit_get_cqe(
ring: *IoUring,
buf_grp: *BufferGroup,
fd: posix.fd_t,
fd: linux.fd_t,
user_data: u64,
) !linux.io_uring_cqe {
// prepare and submit recv
@ -4483,24 +4499,27 @@ test "bind/listen/connect" {
// LISTEN is the operation with the highest kernel version requirement used here
if (!probe.is_supported(.LISTEN)) return error.SkipZigTest;
var addr = net.Address.initIp4([4]u8{ 127, 0, 0, 1 }, 0);
const proto: u32 = if (addr.any.family == linux.AF.UNIX) 0 else linux.IPPROTO.TCP;
var addr: linux.sockaddr.in = .{
.port = 0,
.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
};
const proto: u32 = if (addr.family == linux.AF.UNIX) 0 else linux.IPPROTO.TCP;
const listen_fd = brk: {
// Create socket
_ = try ring.socket(1, addr.any.family, linux.SOCK.STREAM | linux.SOCK.CLOEXEC, proto, 0);
_ = try ring.socket(1, addr.family, linux.SOCK.STREAM | linux.SOCK.CLOEXEC, proto, 0);
try testing.expectEqual(1, try ring.submit());
var cqe = try ring.copy_cqe();
try testing.expectEqual(1, cqe.user_data);
try testing.expectEqual(posix.E.SUCCESS, cqe.err());
const listen_fd: posix.fd_t = @intCast(cqe.res);
const listen_fd: linux.fd_t = @intCast(cqe.res);
try testing.expect(listen_fd > 2);
// Prepare: set socket option * 2, bind, listen
var optval: u32 = 1;
(try ring.setsockopt(2, listen_fd, linux.SOL.SOCKET, linux.SO.REUSEADDR, mem.asBytes(&optval))).link_next();
(try ring.setsockopt(3, listen_fd, linux.SOL.SOCKET, linux.SO.REUSEPORT, mem.asBytes(&optval))).link_next();
(try ring.bind(4, listen_fd, &addr.any, addr.getOsSockLen(), 0)).link_next();
(try ring.bind(4, listen_fd, addrAny(&addr), @sizeOf(linux.sockaddr.in), 0)).link_next();
_ = try ring.listen(5, listen_fd, 1, 0);
// Submit 4 operations
try testing.expectEqual(4, try ring.submit());
@ -4521,28 +4540,28 @@ test "bind/listen/connect" {
try testing.expectEqual(1, optval);
// Read system assigned port into addr
var addr_len: posix.socklen_t = addr.getOsSockLen();
try posix.getsockname(listen_fd, &addr.any, &addr_len);
var addr_len: posix.socklen_t = @sizeOf(linux.sockaddr.in);
try posix.getsockname(listen_fd, addrAny(&addr), &addr_len);
break :brk listen_fd;
};
const connect_fd = brk: {
// Create connect socket
_ = try ring.socket(6, addr.any.family, linux.SOCK.STREAM | linux.SOCK.CLOEXEC, proto, 0);
_ = try ring.socket(6, addr.family, linux.SOCK.STREAM | linux.SOCK.CLOEXEC, proto, 0);
try testing.expectEqual(1, try ring.submit());
const cqe = try ring.copy_cqe();
try testing.expectEqual(6, cqe.user_data);
try testing.expectEqual(posix.E.SUCCESS, cqe.err());
// Get connect socket fd
const connect_fd: posix.fd_t = @intCast(cqe.res);
const connect_fd: linux.fd_t = @intCast(cqe.res);
try testing.expect(connect_fd > 2 and connect_fd != listen_fd);
break :brk connect_fd;
};
// Prepare accept/connect operations
_ = try ring.accept(7, listen_fd, null, null, 0);
_ = try ring.connect(8, connect_fd, &addr.any, addr.getOsSockLen());
_ = try ring.connect(8, connect_fd, addrAny(&addr), @sizeOf(linux.sockaddr.in));
try testing.expectEqual(2, try ring.submit());
// Get listener accepted socket
var accept_fd: posix.socket_t = 0;
@ -4604,3 +4623,7 @@ fn testSendRecv(ring: *IoUring, send_fd: posix.socket_t, recv_fd: posix.socket_t
try testing.expectEqualSlices(u8, buffer_send, buffer_recv[0..buffer_send.len]);
try testing.expectEqualSlices(u8, buffer_send, buffer_recv[buffer_send.len..]);
}
fn addrAny(addr: *linux.sockaddr.in) *linux.sockaddr {
return @ptrCast(addr);
}

View File

@ -136,7 +136,13 @@ pub fn clone() callconv(.naked) u64 {
);
}
pub const restore = restore_rt;
pub fn restore() callconv(.naked) noreturn {
asm volatile (
\\svc 0
:
: [number] "{r1}" (@intFromEnum(SYS.sigreturn)),
);
}
pub fn restore_rt() callconv(.naked) noreturn {
asm volatile (

View File

@ -1,5 +1,7 @@
const std = @import("../../std.zig");
const builtin = @import("builtin");
const std = @import("../../std.zig");
const assert = std.debug.assert;
const linux = std.os.linux;
const mem = std.mem;
const elf = std.elf;
@ -128,58 +130,32 @@ test "fadvise" {
}
test "sigset_t" {
std.debug.assert(@sizeOf(linux.sigset_t) == (linux.NSIG / 8));
const SIG = linux.SIG;
assert(@sizeOf(linux.sigset_t) == (linux.NSIG / 8));
var sigset = linux.sigemptyset();
// See that none are set, then set each one, see that they're all set, then
// remove them all, and then see that none are set.
for (1..linux.NSIG) |i| {
try expectEqual(linux.sigismember(&sigset, @truncate(i)), false);
const sig = std.meta.intToEnum(SIG, i) catch continue;
try expectEqual(false, linux.sigismember(&sigset, sig));
}
for (1..linux.NSIG) |i| {
linux.sigaddset(&sigset, @truncate(i));
const sig = std.meta.intToEnum(SIG, i) catch continue;
linux.sigaddset(&sigset, sig);
}
for (1..linux.NSIG) |i| {
try expectEqual(linux.sigismember(&sigset, @truncate(i)), true);
const sig = std.meta.intToEnum(SIG, i) catch continue;
try expectEqual(true, linux.sigismember(&sigset, sig));
}
for (1..linux.NSIG) |i| {
linux.sigdelset(&sigset, @truncate(i));
const sig = std.meta.intToEnum(SIG, i) catch continue;
linux.sigdelset(&sigset, sig);
}
for (1..linux.NSIG) |i| {
try expectEqual(linux.sigismember(&sigset, @truncate(i)), false);
}
// Kernel sigset_t is either 2+ 32-bit values or 1+ 64-bit value(s).
const sigset_len = @typeInfo(linux.sigset_t).array.len;
const sigset_elemis64 = 64 == @bitSizeOf(@typeInfo(linux.sigset_t).array.child);
linux.sigaddset(&sigset, 1);
try expectEqual(sigset[0], 1);
if (sigset_len > 1) {
try expectEqual(sigset[1], 0);
}
linux.sigaddset(&sigset, 31);
try expectEqual(sigset[0], 0x4000_0001);
if (sigset_len > 1) {
try expectEqual(sigset[1], 0);
}
linux.sigaddset(&sigset, 36);
if (sigset_elemis64) {
try expectEqual(sigset[0], 0x8_4000_0001);
} else {
try expectEqual(sigset[0], 0x4000_0001);
try expectEqual(sigset[1], 0x8);
}
linux.sigaddset(&sigset, 64);
if (sigset_elemis64) {
try expectEqual(sigset[0], 0x8000_0008_4000_0001);
} else {
try expectEqual(sigset[0], 0x4000_0001);
try expectEqual(sigset[1], 0x8000_0008);
const sig = std.meta.intToEnum(SIG, i) catch continue;
try expectEqual(false, linux.sigismember(&sigset, sig));
}
}
@ -187,14 +163,16 @@ test "sigfillset" {
// unlike the C library, all the signals are set in the kernel-level fillset
const sigset = linux.sigfillset();
for (1..linux.NSIG) |i| {
try expectEqual(linux.sigismember(&sigset, @truncate(i)), true);
const sig = std.meta.intToEnum(linux.SIG, i) catch continue;
try expectEqual(true, linux.sigismember(&sigset, sig));
}
}
test "sigemptyset" {
const sigset = linux.sigemptyset();
for (1..linux.NSIG) |i| {
try expectEqual(linux.sigismember(&sigset, @truncate(i)), false);
const sig = std.meta.intToEnum(linux.SIG, i) catch continue;
try expectEqual(false, linux.sigismember(&sigset, sig));
}
}
@ -208,14 +186,14 @@ test "sysinfo" {
}
comptime {
std.debug.assert(128 == @as(u32, @bitCast(linux.FUTEX_OP{ .cmd = @enumFromInt(0), .private = true, .realtime = false })));
std.debug.assert(256 == @as(u32, @bitCast(linux.FUTEX_OP{ .cmd = @enumFromInt(0), .private = false, .realtime = true })));
assert(128 == @as(u32, @bitCast(linux.FUTEX_OP{ .cmd = @enumFromInt(0), .private = true, .realtime = false })));
assert(256 == @as(u32, @bitCast(linux.FUTEX_OP{ .cmd = @enumFromInt(0), .private = false, .realtime = true })));
// Check futex_param4 union is packed correctly
const param_union = linux.futex_param4{
.val2 = 0xaabbcc,
};
std.debug.assert(@intFromPtr(param_union.timeout) == 0xaabbcc);
assert(@intFromPtr(param_union.timeout) == 0xaabbcc);
}
test "futex v1" {
@ -298,8 +276,8 @@ test "futex v1" {
}
comptime {
std.debug.assert(2 == @as(u32, @bitCast(linux.FUTEX2_FLAGS{ .size = .U32, .private = false })));
std.debug.assert(128 == @as(u32, @bitCast(linux.FUTEX2_FLAGS{ .size = @enumFromInt(0), .private = true })));
assert(2 == @as(u32, @bitCast(linux.FUTEX2_FLAGS{ .size = .U32, .private = false })));
assert(128 == @as(u32, @bitCast(linux.FUTEX2_FLAGS{ .size = @enumFromInt(0), .private = true })));
}
test "futex2_waitv" {

View File

@ -159,12 +159,14 @@ pub fn clone() callconv(.naked) u32 {
pub fn restore() callconv(.naked) noreturn {
switch (builtin.zig_backend) {
.stage2_c => asm volatile (
\\ addl $4, %%esp
\\ movl %[number], %%eax
\\ int $0x80
:
: [number] "i" (@intFromEnum(SYS.sigreturn)),
),
else => asm volatile (
\\ addl $4, %%esp
\\ int $0x80
:
: [number] "{eax}" (@intFromEnum(SYS.sigreturn)),

View File

@ -5,12 +5,14 @@
//! slices as well as APIs which accept null-terminated WTF16LE byte buffers.
const builtin = @import("builtin");
const native_arch = builtin.cpu.arch;
const std = @import("../std.zig");
const Io = std.Io;
const mem = std.mem;
const assert = std.debug.assert;
const math = std.math;
const maxInt = std.math.maxInt;
const native_arch = builtin.cpu.arch;
const UnexpectedError = std.posix.UnexpectedError;
test {
@ -87,7 +89,7 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
};
var attr = OBJECT_ATTRIBUTES{
.Length = @sizeOf(OBJECT_ATTRIBUTES),
.RootDirectory = if (std.fs.path.isAbsoluteWindowsWTF16(sub_path_w)) null else options.dir,
.RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(sub_path_w)) null else options.dir,
.Attributes = if (options.sa) |ptr| blk: { // Note we do not use OBJ_CASE_INSENSITIVE here.
const inherit: ULONG = if (ptr.bInheritHandle == TRUE) OBJ_INHERIT else 0;
break :blk inherit;
@ -146,7 +148,7 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
// call has failed. There is not really a sane way to handle
// this other than retrying the creation after the OS finishes
// the deletion.
std.Thread.sleep(std.time.ns_per_ms);
_ = kernel32.SleepEx(1, TRUE);
continue;
},
.VIRUS_INFECTED, .VIRUS_DELETED => return error.AntivirusInterference,
@ -604,7 +606,7 @@ pub const ReadFileError = error{
BrokenPipe,
/// The specified network name is no longer available.
ConnectionResetByPeer,
OperationAborted,
Canceled,
/// Unable to read file due to lock.
LockViolation,
/// Known to be possible when:
@ -654,7 +656,7 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64) ReadFileError!usiz
pub const WriteFileError = error{
SystemResources,
OperationAborted,
Canceled,
BrokenPipe,
NotOpenForWriting,
/// The process cannot access the file because another process has locked
@ -694,7 +696,7 @@ pub fn WriteFile(
switch (GetLastError()) {
.INVALID_USER_BUFFER => return error.SystemResources,
.NOT_ENOUGH_MEMORY => return error.SystemResources,
.OPERATION_ABORTED => return error.OperationAborted,
.OPERATION_ABORTED => return error.Canceled,
.NOT_ENOUGH_QUOTA => return error.SystemResources,
.IO_PENDING => unreachable,
.NO_DATA => return error.BrokenPipe,
@ -845,7 +847,7 @@ pub fn CreateSymbolicLink(
// the C:\ drive.
.rooted => break :target_path target_path,
// Keep relative paths relative, but anything else needs to get NT-prefixed.
else => if (!std.fs.path.isAbsoluteWindowsWTF16(target_path))
else => if (!std.fs.path.isAbsoluteWindowsWtf16(target_path))
break :target_path target_path,
},
// Already an NT path, no need to do anything to it
@ -854,7 +856,7 @@ pub fn CreateSymbolicLink(
}
var prefixed_target_path = try wToPrefixedFileW(dir, target_path);
// We do this after prefixing to ensure that drive-relative paths are treated as absolute
is_target_absolute = std.fs.path.isAbsoluteWindowsWTF16(prefixed_target_path.span());
is_target_absolute = std.fs.path.isAbsoluteWindowsWtf16(prefixed_target_path.span());
break :target_path prefixed_target_path.span();
};
@ -862,7 +864,7 @@ pub fn CreateSymbolicLink(
var buffer: [MAXIMUM_REPARSE_DATA_BUFFER_SIZE]u8 = undefined;
const buf_len = @sizeOf(SYMLINK_DATA) + final_target_path.len * 4;
const header_len = @sizeOf(ULONG) + @sizeOf(USHORT) * 2;
const target_is_absolute = std.fs.path.isAbsoluteWindowsWTF16(final_target_path);
const target_is_absolute = std.fs.path.isAbsoluteWindowsWtf16(final_target_path);
const symlink_data = SYMLINK_DATA{
.ReparseTag = IO_REPARSE_TAG_SYMLINK,
.ReparseDataLength = @intCast(buf_len - header_len),
@ -903,7 +905,7 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin
};
var attr = OBJECT_ATTRIBUTES{
.Length = @sizeOf(OBJECT_ATTRIBUTES),
.RootDirectory = if (std.fs.path.isAbsoluteWindowsWTF16(sub_path_w)) null else dir,
.RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(sub_path_w)) null else dir,
.Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here.
.ObjectName = &nt_name,
.SecurityDescriptor = null,
@ -1033,7 +1035,7 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil
var attr = OBJECT_ATTRIBUTES{
.Length = @sizeOf(OBJECT_ATTRIBUTES),
.RootDirectory = if (std.fs.path.isAbsoluteWindowsWTF16(sub_path_w)) null else options.dir,
.RootDirectory = if (std.fs.path.isAbsoluteWindowsWtf16(sub_path_w)) null else options.dir,
.Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here.
.ObjectName = &nt_name,
.SecurityDescriptor = null,
@ -1572,131 +1574,6 @@ pub fn GetFileAttributesW(lpFileName: [*:0]const u16) GetFileAttributesError!DWO
return rc;
}
pub fn WSAStartup(majorVersion: u8, minorVersion: u8) !ws2_32.WSADATA {
var wsadata: ws2_32.WSADATA = undefined;
return switch (ws2_32.WSAStartup((@as(WORD, minorVersion) << 8) | majorVersion, &wsadata)) {
0 => wsadata,
else => |err_int| switch (@as(ws2_32.WinsockError, @enumFromInt(@as(u16, @intCast(err_int))))) {
.WSASYSNOTREADY => return error.SystemNotAvailable,
.WSAVERNOTSUPPORTED => return error.VersionNotSupported,
.WSAEINPROGRESS => return error.BlockingOperationInProgress,
.WSAEPROCLIM => return error.ProcessFdQuotaExceeded,
else => |err| return unexpectedWSAError(err),
},
};
}
pub fn WSACleanup() !void {
return switch (ws2_32.WSACleanup()) {
0 => {},
ws2_32.SOCKET_ERROR => switch (ws2_32.WSAGetLastError()) {
.WSANOTINITIALISED => return error.NotInitialized,
.WSAENETDOWN => return error.NetworkNotAvailable,
.WSAEINPROGRESS => return error.BlockingOperationInProgress,
else => |err| return unexpectedWSAError(err),
},
else => unreachable,
};
}
var wsa_startup_mutex: std.Thread.Mutex = .{};
pub fn callWSAStartup() !void {
wsa_startup_mutex.lock();
defer wsa_startup_mutex.unlock();
// Here we could use a flag to prevent multiple threads from making
// redundant calls to WSAStartup, but it doesn't matter. We're globally
// leaking the resource intentionally, and the mutex already prevents
// data races within the WSAStartup function.
_ = WSAStartup(2, 2) catch |err| switch (err) {
error.SystemNotAvailable => return error.SystemResources,
error.VersionNotSupported => return error.Unexpected,
error.BlockingOperationInProgress => return error.Unexpected,
error.ProcessFdQuotaExceeded => return error.ProcessFdQuotaExceeded,
error.Unexpected => return error.Unexpected,
};
}
/// Microsoft requires WSAStartup to be called to initialize, or else
/// WSASocketW will return WSANOTINITIALISED.
/// Since this is a standard library, we do not have the luxury of
/// putting initialization code anywhere, because we would not want
/// to pay the cost of calling WSAStartup if there ended up being no
/// networking. Also, if Zig code is used as a library, Zig is not in
/// charge of the start code, and we couldn't put in any initialization
/// code even if we wanted to.
/// The documentation for WSAStartup mentions that there must be a
/// matching WSACleanup call. It is not possible for the Zig Standard
/// Library to honor this for the same reason - there is nowhere to put
/// deinitialization code.
/// So, API users of the zig std lib have two options:
/// * (recommended) The simple, cross-platform way: just call `WSASocketW`
/// and don't worry about it. Zig will call WSAStartup() in a thread-safe
/// manner and never deinitialize networking. This is ideal for an
/// application which has the capability to do networking.
/// * The getting-your-hands-dirty way: call `WSAStartup()` before doing
/// networking, so that the error handling code for WSANOTINITIALISED never
/// gets run, which then allows the application or library to call `WSACleanup()`.
/// This could make sense for a library, which has init and deinit
/// functions for the whole library's lifetime.
pub fn WSASocketW(
af: i32,
socket_type: i32,
protocol: i32,
protocolInfo: ?*ws2_32.WSAPROTOCOL_INFOW,
g: ws2_32.GROUP,
dwFlags: DWORD,
) !ws2_32.SOCKET {
var first = true;
while (true) {
const rc = ws2_32.WSASocketW(af, socket_type, protocol, protocolInfo, g, dwFlags);
if (rc == ws2_32.INVALID_SOCKET) {
switch (ws2_32.WSAGetLastError()) {
.WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported,
.WSAEMFILE => return error.ProcessFdQuotaExceeded,
.WSAENOBUFS => return error.SystemResources,
.WSAEPROTONOSUPPORT => return error.ProtocolNotSupported,
.WSANOTINITIALISED => {
if (!first) return error.Unexpected;
first = false;
try callWSAStartup();
continue;
},
else => |err| return unexpectedWSAError(err),
}
}
return rc;
}
}
pub fn bind(s: ws2_32.SOCKET, name: *const ws2_32.sockaddr, namelen: ws2_32.socklen_t) i32 {
return ws2_32.bind(s, name, @as(i32, @intCast(namelen)));
}
pub fn listen(s: ws2_32.SOCKET, backlog: u31) i32 {
return ws2_32.listen(s, backlog);
}
pub fn closesocket(s: ws2_32.SOCKET) !void {
switch (ws2_32.closesocket(s)) {
0 => {},
ws2_32.SOCKET_ERROR => switch (ws2_32.WSAGetLastError()) {
else => |err| return unexpectedWSAError(err),
},
else => unreachable,
}
}
pub fn accept(s: ws2_32.SOCKET, name: ?*ws2_32.sockaddr, namelen: ?*ws2_32.socklen_t) ws2_32.SOCKET {
assert((name == null) == (namelen == null));
return ws2_32.accept(s, name, @as(?*i32, @ptrCast(namelen)));
}
pub fn getsockname(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.socklen_t) i32 {
return ws2_32.getsockname(s, name, @as(*i32, @ptrCast(namelen)));
}
pub fn getpeername(s: ws2_32.SOCKET, name: *ws2_32.sockaddr, namelen: *ws2_32.socklen_t) i32 {
return ws2_32.getpeername(s, name, @as(*i32, @ptrCast(namelen)));
}
@ -2219,25 +2096,25 @@ pub fn peb() *PEB {
/// Universal Time (UTC).
/// This function returns the number of nanoseconds since the canonical epoch,
/// which is the POSIX one (Jan 01, 1970 AD).
pub fn fromSysTime(hns: i64) i128 {
pub fn fromSysTime(hns: i64) Io.Timestamp {
const adjusted_epoch: i128 = hns + std.time.epoch.windows * (std.time.ns_per_s / 100);
return adjusted_epoch * 100;
return .fromNanoseconds(@intCast(adjusted_epoch * 100));
}
pub fn toSysTime(ns: i128) i64 {
const hns = @divFloor(ns, 100);
pub fn toSysTime(ns: Io.Timestamp) i64 {
const hns = @divFloor(ns.nanoseconds, 100);
return @as(i64, @intCast(hns)) - std.time.epoch.windows * (std.time.ns_per_s / 100);
}
pub fn fileTimeToNanoSeconds(ft: FILETIME) i128 {
pub fn fileTimeToNanoSeconds(ft: FILETIME) Io.Timestamp {
const hns = (@as(i64, ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
return fromSysTime(hns);
}
/// Converts a number of nanoseconds since the POSIX epoch to a Windows FILETIME.
pub fn nanoSecondsToFileTime(ns: i128) FILETIME {
pub fn nanoSecondsToFileTime(ns: Io.Timestamp) FILETIME {
const adjusted: u64 = @bitCast(toSysTime(ns));
return FILETIME{
return .{
.dwHighDateTime = @as(u32, @truncate(adjusted >> 32)),
.dwLowDateTime = @as(u32, @truncate(adjusted)),
};
@ -2425,7 +2302,7 @@ pub fn normalizePath(comptime T: type, path: []T) RemoveDotDirsError!usize {
return prefix_len + try removeDotDirsSanitized(T, path[prefix_len..new_len]);
}
pub const Wtf8ToPrefixedFileWError = error{InvalidWtf8} || Wtf16ToPrefixedFileWError;
pub const Wtf8ToPrefixedFileWError = Wtf16ToPrefixedFileWError;
/// Same as `sliceToPrefixedFileW` but accepts a pointer
/// to a null-terminated WTF-8 encoded path.
@ -2438,7 +2315,9 @@ pub fn cStrToPrefixedFileW(dir: ?HANDLE, s: [*:0]const u8) Wtf8ToPrefixedFileWEr
/// https://wtf-8.codeberg.page/
pub fn sliceToPrefixedFileW(dir: ?HANDLE, path: []const u8) Wtf8ToPrefixedFileWError!PathSpace {
var temp_path: PathSpace = undefined;
temp_path.len = try std.unicode.wtf8ToWtf16Le(&temp_path.data, path);
temp_path.len = std.unicode.wtf8ToWtf16Le(&temp_path.data, path) catch |err| switch (err) {
error.InvalidWtf8 => return error.BadPathName,
};
temp_path.data[temp_path.len] = 0;
return wToPrefixedFileW(dir, temp_path.span());
}
@ -2812,38 +2691,6 @@ inline fn MAKELANGID(p: c_ushort, s: c_ushort) LANGID {
return (s << 10) | p;
}
/// Loads a Winsock extension function in runtime specified by a GUID.
pub fn loadWinsockExtensionFunction(comptime T: type, sock: ws2_32.SOCKET, guid: GUID) !T {
var function: T = undefined;
var num_bytes: DWORD = undefined;
const rc = ws2_32.WSAIoctl(
sock,
ws2_32.SIO_GET_EXTENSION_FUNCTION_POINTER,
&guid,
@sizeOf(GUID),
@as(?*anyopaque, @ptrFromInt(@intFromPtr(&function))),
@sizeOf(T),
&num_bytes,
null,
null,
);
if (rc == ws2_32.SOCKET_ERROR) {
return switch (ws2_32.WSAGetLastError()) {
.WSAEOPNOTSUPP => error.OperationNotSupported,
.WSAENOTSOCK => error.FileDescriptorNotASocket,
else => |err| unexpectedWSAError(err),
};
}
if (num_bytes != @sizeOf(T)) {
return error.ShortRead;
}
return function;
}
/// Call this when you made a windows DLL call or something that does SetLastError
/// and you get an unexpected error.
pub fn unexpectedError(err: Win32Error) UnexpectedError {
@ -2881,6 +2728,20 @@ pub fn unexpectedStatus(status: NTSTATUS) UnexpectedError {
return error.Unexpected;
}
pub fn statusBug(status: NTSTATUS) UnexpectedError {
switch (builtin.mode) {
.Debug => std.debug.panic("programmer bug caused syscall status: {t}", .{status}),
else => return error.Unexpected,
}
}
pub fn errorBug(err: Win32Error) UnexpectedError {
switch (builtin.mode) {
.Debug => std.debug.panic("programmer bug caused syscall status: {t}", .{err}),
else => return error.Unexpected,
}
}
pub const Win32Error = @import("windows/win32error.zig").Win32Error;
pub const NTSTATUS = @import("windows/ntstatus.zig").NTSTATUS;
pub const LANG = @import("windows/lang.zig");
@ -5737,3 +5598,16 @@ pub fn ProcessBaseAddress(handle: HANDLE) ProcessBaseAddressError!HMODULE {
const ppeb: *const PEB = @ptrCast(@alignCast(peb_out.ptr));
return ppeb.ImageBaseAddress;
}
pub fn wtf8ToWtf16Le(wtf16le: []u16, wtf8: []const u8) error{ BadPathName, NameTooLong }!usize {
// Each u8 in UTF-8/WTF-8 correlates to at most one u16 in UTF-16LE/WTF-16LE.
if (wtf16le.len < wtf8.len) {
const utf16_len = std.unicode.calcUtf16LeLenImpl(wtf8, .can_encode_surrogate_half) catch
return error.BadPathName;
if (utf16_len > wtf16le.len)
return error.NameTooLong;
}
return std.unicode.wtf8ToWtf16Le(wtf16le, wtf8) catch |err| switch (err) {
error.InvalidWtf8 => return error.BadPathName,
};
}
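A usage sketch for the helper above (Windows-only; the wrapper name and slicing are illustrative, not part of this change):

// Sketch only: convert a WTF-8 path into a caller-provided WTF-16LE buffer.
fn exampleWiden(buf: []u16, wtf8_path: []const u8) error{ BadPathName, NameTooLong }![]const u16 {
    const len = try wtf8ToWtf16Le(buf, wtf8_path);
    return buf[0..len];
}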

View File

@ -326,10 +326,11 @@ pub extern "kernel32" fn ExitProcess(
exit_code: UINT,
) callconv(.winapi) noreturn;
// TODO: SleepEx with bAlertable=false.
pub extern "kernel32" fn Sleep(
// TODO: implement via ntdll instead
pub extern "kernel32" fn SleepEx(
dwMilliseconds: DWORD,
) callconv(.winapi) void;
bAlertable: BOOL,
) callconv(.winapi) DWORD;
// TODO: Wrapper around NtQueryInformationProcess with `PROCESS_BASIC_INFORMATION`.
pub extern "kernel32" fn GetExitCodeProcess(

View File

@ -237,28 +237,3 @@ test "removeDotDirs" {
try testRemoveDotDirs("a\\b\\..\\", "a\\");
try testRemoveDotDirs("a\\b\\..\\c", "a\\c");
}
test "loadWinsockExtensionFunction" {
_ = try windows.WSAStartup(2, 2);
defer windows.WSACleanup() catch unreachable;
const LPFN_CONNECTEX = *const fn (
Socket: windows.ws2_32.SOCKET,
SockAddr: *const windows.ws2_32.sockaddr,
SockLen: std.posix.socklen_t,
SendBuf: ?*const anyopaque,
SendBufLen: windows.DWORD,
BytesSent: *windows.DWORD,
Overlapped: *windows.OVERLAPPED,
) callconv(.winapi) windows.BOOL;
_ = windows.loadWinsockExtensionFunction(
LPFN_CONNECTEX,
try std.posix.socket(std.posix.AF.INET, std.posix.SOCK.DGRAM, 0),
windows.ws2_32.WSAID_CONNECTEX,
) catch |err| switch (err) {
error.OperationNotSupported => unreachable,
error.ShortRead => unreachable,
else => |e| return e,
};
}

View File

@ -702,28 +702,32 @@ pub const FIONBIO = -2147195266;
pub const ADDRINFOEX_VERSION_2 = 2;
pub const ADDRINFOEX_VERSION_3 = 3;
pub const ADDRINFOEX_VERSION_4 = 4;
pub const NS_ALL = 0;
pub const NS_SAP = 1;
pub const NS_NDS = 2;
pub const NS_PEER_BROWSE = 3;
pub const NS_SLP = 5;
pub const NS_DHCP = 6;
pub const NS_TCPIP_LOCAL = 10;
pub const NS_TCPIP_HOSTS = 11;
pub const NS_DNS = 12;
pub const NS_NETBT = 13;
pub const NS_WINS = 14;
pub const NS_NLA = 15;
pub const NS_NBP = 20;
pub const NS_MS = 30;
pub const NS_STDA = 31;
pub const NS_NTDS = 32;
pub const NS_EMAIL = 37;
pub const NS_X500 = 40;
pub const NS_NIS = 41;
pub const NS_NISPLUS = 42;
pub const NS_WRQ = 50;
pub const NS_NETDES = 60;
pub const NS = enum(u32) {
ALL = 0,
SAP = 1,
NDS = 2,
PEER_BROWSE = 3,
SLP = 5,
DHCP = 6,
TCPIP_LOCAL = 10,
TCPIP_HOSTS = 11,
DNS = 12,
NETBT = 13,
WINS = 14,
NLA = 15,
NBP = 20,
MS = 30,
STDA = 31,
NTDS = 32,
EMAIL = 37,
X500 = 40,
NIS = 41,
NISPLUS = 42,
WRQ = 50,
NETDES = 60,
};
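Turning the loose NS_* integers into a typed enum lets call sites pick a namespace provider by tag and convert only at the ABI boundary. A brief sketch (helper name is illustrative):

// Sketch only: the integer value crosses the ABI edge via @intFromEnum.
fn exampleProviderId(ns: NS) u32 {
    return @intFromEnum(ns); // .DNS -> 12, matching the old NS_DNS constant
}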
pub const NI_NOFQDN = 1;
pub const NI_NUMERICHOST = 2;
pub const NI_NAMEREQD = 4;
@ -1080,31 +1084,18 @@ pub const WSANETWORKEVENTS = extern struct {
iErrorCode: [10]i32,
};
pub const addrinfo = addrinfoa;
pub const addrinfoa = extern struct {
pub const ADDRINFOEXW = extern struct {
flags: AI,
family: i32,
socktype: i32,
protocol: i32,
addrlen: usize,
canonname: ?[*:0]u8,
canonname: ?[*:0]u16,
addr: ?*sockaddr,
next: ?*addrinfo,
};
pub const addrinfoexA = extern struct {
flags: AI,
family: i32,
socktype: i32,
protocol: i32,
addrlen: usize,
canonname: [*:0]u8,
addr: *sockaddr,
blob: *anyopaque,
blob: ?*anyopaque,
bloblen: usize,
provider: *GUID,
next: *addrinfoexA,
provider: ?*GUID,
next: ?*ADDRINFOEXW,
};
pub const sockaddr = extern struct {
@ -1271,130 +1262,105 @@ pub const timeval = extern struct {
usec: LONG,
};
// https://docs.microsoft.com/en-au/windows/win32/winsock/windows-sockets-error-codes-2
/// https://docs.microsoft.com/en-au/windows/win32/winsock/windows-sockets-error-codes-2
pub const WinsockError = enum(u16) {
/// Specified event object handle is invalid.
/// An application attempts to use an event object, but the specified handle is not valid.
WSA_INVALID_HANDLE = 6,
INVALID_HANDLE = 6,
/// Insufficient memory available.
/// An application used a Windows Sockets function that directly maps to a Windows function.
/// The Windows function is indicating a lack of required memory resources.
WSA_NOT_ENOUGH_MEMORY = 8,
NOT_ENOUGH_MEMORY = 8,
/// One or more parameters are invalid.
/// An application used a Windows Sockets function which directly maps to a Windows function.
/// The Windows function is indicating a problem with one or more parameters.
WSA_INVALID_PARAMETER = 87,
INVALID_PARAMETER = 87,
/// Overlapped operation aborted.
/// An overlapped operation was canceled due to the closure of the socket, or the execution of the SIO_FLUSH command in WSAIoctl.
WSA_OPERATION_ABORTED = 995,
OPERATION_ABORTED = 995,
/// Overlapped I/O event object not in signaled state.
/// The application has tried to determine the status of an overlapped operation which is not yet completed.
/// Applications that use WSAGetOverlappedResult (with the fWait flag set to FALSE) in a polling mode to determine when an overlapped operation has completed, get this error code until the operation is complete.
WSA_IO_INCOMPLETE = 996,
IO_INCOMPLETE = 996,
/// The application has initiated an overlapped operation that cannot be completed immediately.
/// A completion indication will be given later when the operation has been completed.
WSA_IO_PENDING = 997,
IO_PENDING = 997,
/// Interrupted function call.
/// A blocking operation was interrupted by a call to WSACancelBlockingCall.
WSAEINTR = 10004,
EINTR = 10004,
/// File handle is not valid.
/// The file handle supplied is not valid.
WSAEBADF = 10009,
EBADF = 10009,
/// Permission denied.
/// An attempt was made to access a socket in a way forbidden by its access permissions.
/// An example is using a broadcast address for sendto without broadcast permission being set using setsockopt(SO.BROADCAST).
/// Another possible reason for the WSAEACCES error is that when the bind function is called (on Windows NT 4.0 with SP4 and later), another application, service, or kernel mode driver is bound to the same address with exclusive access.
/// Such exclusive access is a new feature of Windows NT 4.0 with SP4 and later, and is implemented by using the SO.EXCLUSIVEADDRUSE option.
WSAEACCES = 10013,
EACCES = 10013,
/// Bad address.
/// The system detected an invalid pointer address in attempting to use a pointer argument of a call.
/// This error occurs if an application passes an invalid pointer value, or if the length of the buffer is too small.
/// For instance, if the length of an argument, which is a sockaddr structure, is smaller than the sizeof(sockaddr).
WSAEFAULT = 10014,
EFAULT = 10014,
/// Invalid argument.
/// Some invalid argument was supplied (for example, specifying an invalid level to the setsockopt function).
/// In some instances, it also refers to the current state of the socket; for instance, calling accept on a socket that is not listening.
WSAEINVAL = 10022,
EINVAL = 10022,
/// Too many open files.
/// Too many open sockets. Each implementation may have a maximum number of socket handles available, either globally, per process, or per thread.
WSAEMFILE = 10024,
EMFILE = 10024,
/// Resource temporarily unavailable.
/// This error is returned from operations on nonblocking sockets that cannot be completed immediately, for example recv when no data is queued to be read from the socket.
/// It is a nonfatal error, and the operation should be retried later.
/// It is normal for WSAEWOULDBLOCK to be reported as the result from calling connect on a nonblocking SOCK.STREAM socket, since some time must elapse for the connection to be established.
WSAEWOULDBLOCK = 10035,
EWOULDBLOCK = 10035,
/// Operation now in progress.
/// A blocking operation is currently executing.
/// Windows Sockets only allows a single blocking operation (per task or thread) to be outstanding, and if any other function call is made (whether or not it references that or any other socket) the function fails with the WSAEINPROGRESS error.
WSAEINPROGRESS = 10036,
EINPROGRESS = 10036,
/// Operation already in progress.
/// An operation was attempted on a nonblocking socket with an operation already in progress; that is, calling connect a second time on a nonblocking socket that is already connecting, or canceling an asynchronous request (WSAAsyncGetXbyY) that has already been canceled or completed.
WSAEALREADY = 10037,
EALREADY = 10037,
/// Socket operation on nonsocket.
/// An operation was attempted on something that is not a socket.
/// Either the socket handle parameter did not reference a valid socket, or for select, a member of an fd_set was not valid.
WSAENOTSOCK = 10038,
ENOTSOCK = 10038,
/// Destination address required.
/// A required address was omitted from an operation on a socket.
/// For example, this error is returned if sendto is called with the remote address of ADDR_ANY.
WSAEDESTADDRREQ = 10039,
EDESTADDRREQ = 10039,
/// Message too long.
/// A message sent on a datagram socket was larger than the internal message buffer or some other network limit, or the buffer used to receive a datagram was smaller than the datagram itself.
WSAEMSGSIZE = 10040,
EMSGSIZE = 10040,
/// Protocol wrong type for socket.
/// A protocol was specified in the socket function call that does not support the semantics of the socket type requested.
/// For example, the ARPA Internet UDP protocol cannot be specified with a socket type of SOCK.STREAM.
WSAEPROTOTYPE = 10041,
EPROTOTYPE = 10041,
/// Bad protocol option.
/// An unknown, invalid or unsupported option or level was specified in a getsockopt or setsockopt call.
WSAENOPROTOOPT = 10042,
ENOPROTOOPT = 10042,
/// Protocol not supported.
/// The requested protocol has not been configured into the system, or no implementation for it exists.
/// For example, a socket call requests a SOCK.DGRAM socket, but specifies a stream protocol.
WSAEPROTONOSUPPORT = 10043,
EPROTONOSUPPORT = 10043,
/// Socket type not supported.
/// The support for the specified socket type does not exist in this address family.
/// For example, the optional type SOCK.RAW might be selected in a socket call, and the implementation does not support SOCK.RAW sockets at all.
WSAESOCKTNOSUPPORT = 10044,
ESOCKTNOSUPPORT = 10044,
/// Operation not supported.
/// The attempted operation is not supported for the type of object referenced.
/// Usually this occurs when a socket descriptor to a socket that cannot support this operation is trying to accept a connection on a datagram socket.
WSAEOPNOTSUPP = 10045,
EOPNOTSUPP = 10045,
/// Protocol family not supported.
/// The protocol family has not been configured into the system or no implementation for it exists.
/// This message has a slightly different meaning from WSAEAFNOSUPPORT.
/// However, it is interchangeable in most cases, and all Windows Sockets functions that return one of these messages also specify WSAEAFNOSUPPORT.
WSAEPFNOSUPPORT = 10046,
EPFNOSUPPORT = 10046,
/// Address family not supported by protocol family.
/// An address incompatible with the requested protocol was used.
/// All sockets are created with an associated address family (that is, AF.INET for Internet Protocols) and a generic protocol type (that is, SOCK.STREAM).
/// This error is returned if an incorrect protocol is explicitly requested in the socket call, or if an address of the wrong family is used for a socket, for example, in sendto.
WSAEAFNOSUPPORT = 10047,
EAFNOSUPPORT = 10047,
/// Address already in use.
/// Typically, only one usage of each socket address (protocol/IP address/port) is permitted.
/// This error occurs if an application attempts to bind a socket to an IP address/port that has already been used for an existing socket, or a socket that was not closed properly, or one that is still in the process of closing.
@ -1402,115 +1368,91 @@ pub const WinsockError = enum(u16) {
/// Client applications usually need not call bind at all; connect chooses an unused port automatically.
/// When bind is called with a wildcard address (involving ADDR_ANY), a WSAEADDRINUSE error could be delayed until the specific address is committed.
/// This could happen with a call to another function later, including connect, listen, WSAConnect, or WSAJoinLeaf.
WSAEADDRINUSE = 10048,
EADDRINUSE = 10048,
/// Cannot assign requested address.
/// The requested address is not valid in its context.
/// This normally results from an attempt to bind to an address that is not valid for the local computer.
/// This can also result from connect, sendto, WSAConnect, WSAJoinLeaf, or WSASendTo when the remote address or port is not valid for a remote computer (for example, address or port 0).
WSAEADDRNOTAVAIL = 10049,
EADDRNOTAVAIL = 10049,
/// Network is down.
/// A socket operation encountered a dead network.
/// This could indicate a serious failure of the network system (that is, the protocol stack that the Windows Sockets DLL runs over), the network interface, or the local network itself.
WSAENETDOWN = 10050,
ENETDOWN = 10050,
/// Network is unreachable.
/// A socket operation was attempted to an unreachable network.
/// This usually means the local software knows no route to reach the remote host.
WSAENETUNREACH = 10051,
ENETUNREACH = 10051,
/// Network dropped connection on reset.
/// The connection has been broken due to keep-alive activity detecting a failure while the operation was in progress.
/// It can also be returned by setsockopt if an attempt is made to set SO.KEEPALIVE on a connection that has already failed.
WSAENETRESET = 10052,
ENETRESET = 10052,
/// Software caused connection abort.
/// An established connection was aborted by the software in your host computer, possibly due to a data transmission time-out or protocol error.
WSAECONNABORTED = 10053,
ECONNABORTED = 10053,
/// Connection reset by peer.
/// An existing connection was forcibly closed by the remote host.
/// This normally results if the peer application on the remote host is suddenly stopped, the host is rebooted, the host or remote network interface is disabled, or the remote host uses a hard close (see setsockopt for more information on the SO.LINGER option on the remote socket).
/// This error may also result if a connection was broken due to keep-alive activity detecting a failure while one or more operations are in progress.
/// Operations that were in progress fail with WSAENETRESET. Subsequent operations fail with WSAECONNRESET.
WSAECONNRESET = 10054,
ECONNRESET = 10054,
/// No buffer space available.
/// An operation on a socket could not be performed because the system lacked sufficient buffer space or because a queue was full.
WSAENOBUFS = 10055,
ENOBUFS = 10055,
/// Socket is already connected.
/// A connect request was made on an already-connected socket.
/// Some implementations also return this error if sendto is called on a connected SOCK.DGRAM socket (for SOCK.STREAM sockets, the to parameter in sendto is ignored) although other implementations treat this as a legal occurrence.
WSAEISCONN = 10056,
EISCONN = 10056,
/// Socket is not connected.
/// A request to send or receive data was disallowed because the socket is not connected and (when sending on a datagram socket using sendto) no address was supplied.
/// Any other type of operation might also return this error; for example, setsockopt setting SO.KEEPALIVE if the connection has been reset.
WSAENOTCONN = 10057,
ENOTCONN = 10057,
/// Cannot send after socket shutdown.
/// A request to send or receive data was disallowed because the socket had already been shut down in that direction with a previous shutdown call.
/// By calling shutdown a partial close of a socket is requested, which is a signal that sending or receiving, or both have been discontinued.
WSAESHUTDOWN = 10058,
ESHUTDOWN = 10058,
/// Too many references.
/// Too many references to some kernel object.
WSAETOOMANYREFS = 10059,
ETOOMANYREFS = 10059,
/// Connection timed out.
/// A connection attempt failed because the connected party did not properly respond after a period of time, or the established connection failed because the connected host has failed to respond.
WSAETIMEDOUT = 10060,
ETIMEDOUT = 10060,
/// Connection refused.
/// No connection could be made because the target computer actively refused it.
/// This usually results from trying to connect to a service that is inactive on the foreign host; that is, one with no server application running.
WSAECONNREFUSED = 10061,
ECONNREFUSED = 10061,
/// Cannot translate name.
/// Cannot translate a name.
WSAELOOP = 10062,
ELOOP = 10062,
/// Name too long.
/// A name component or a name was too long.
WSAENAMETOOLONG = 10063,
ENAMETOOLONG = 10063,
/// Host is down.
/// A socket operation failed because the destination host is down. A socket operation encountered a dead host.
/// Networking activity on the local host has not been initiated.
/// These conditions are more likely to be indicated by the error WSAETIMEDOUT.
WSAEHOSTDOWN = 10064,
EHOSTDOWN = 10064,
/// No route to host.
/// A socket operation was attempted to an unreachable host. See WSAENETUNREACH.
WSAEHOSTUNREACH = 10065,
EHOSTUNREACH = 10065,
/// Directory not empty.
/// Cannot remove a directory that is not empty.
WSAENOTEMPTY = 10066,
ENOTEMPTY = 10066,
/// Too many processes.
/// A Windows Sockets implementation may have a limit on the number of applications that can use it simultaneously.
/// WSAStartup may fail with this error if the limit has been reached.
WSAEPROCLIM = 10067,
EPROCLIM = 10067,
/// User quota exceeded.
/// Ran out of user quota.
WSAEUSERS = 10068,
EUSERS = 10068,
/// Disk quota exceeded.
/// Ran out of disk quota.
WSAEDQUOT = 10069,
EDQUOT = 10069,
/// Stale file handle reference.
/// The file handle reference is no longer available.
WSAESTALE = 10070,
ESTALE = 10070,
/// Item is remote.
/// The item is not available locally.
WSAEREMOTE = 10071,
EREMOTE = 10071,
/// Network subsystem is unavailable.
/// This error is returned by WSAStartup if the Windows Sockets implementation cannot function at this time because the underlying system it uses to provide network services is currently unavailable.
/// Users should check:
@ -1518,47 +1460,38 @@ pub const WinsockError = enum(u16) {
/// - That they are not trying to use more than one Windows Sockets implementation simultaneously.
/// - If there is more than one Winsock DLL on your system, be sure the first one in the path is appropriate for the network subsystem currently loaded.
/// - The Windows Sockets implementation documentation to be sure all necessary components are currently installed and configured correctly.
WSASYSNOTREADY = 10091,
SYSNOTREADY = 10091,
/// Winsock.dll version out of range.
/// The current Windows Sockets implementation does not support the Windows Sockets specification version requested by the application.
/// Check that no old Windows Sockets DLL files are being accessed.
WSAVERNOTSUPPORTED = 10092,
VERNOTSUPPORTED = 10092,
/// Successful WSAStartup not yet performed.
/// Either the application has not called WSAStartup or WSAStartup failed.
/// The application may be accessing a socket that the current active task does not own (that is, trying to share a socket between tasks), or WSACleanup has been called too many times.
WSANOTINITIALISED = 10093,
NOTINITIALISED = 10093,
/// Graceful shutdown in progress.
/// Returned by WSARecv and WSARecvFrom to indicate that the remote party has initiated a graceful shutdown sequence.
WSAEDISCON = 10101,
EDISCON = 10101,
/// No more results.
/// No more results can be returned by the WSALookupServiceNext function.
WSAENOMORE = 10102,
ENOMORE = 10102,
/// Call has been canceled.
/// A call to the WSALookupServiceEnd function was made while this call was still processing. The call has been canceled.
WSAECANCELLED = 10103,
ECANCELLED = 10103,
/// Procedure call table is invalid.
/// The service provider procedure call table is invalid.
/// A service provider returned a bogus procedure table to Ws2_32.dll.
/// This is usually caused by one or more of the function pointers being NULL.
WSAEINVALIDPROCTABLE = 10104,
EINVALIDPROCTABLE = 10104,
/// Service provider is invalid.
/// The requested service provider is invalid.
/// This error is returned by the WSCGetProviderInfo and WSCGetProviderInfo32 functions if the protocol entry specified could not be found.
/// This error is also returned if the service provider returned a version number other than 2.0.
WSAEINVALIDPROVIDER = 10105,
EINVALIDPROVIDER = 10105,
/// Service provider failed to initialize.
/// The requested service provider could not be loaded or initialized.
/// This error is returned if either a service provider's DLL could not be loaded (LoadLibrary failed) or the provider's WSPStartup or NSPStartup function failed.
WSAEPROVIDERFAILEDINIT = 10106,
EPROVIDERFAILEDINIT = 10106,
/// System call failure.
/// A system call that should never fail has failed.
/// This is a generic error code, returned under various conditions.
@ -1566,157 +1499,120 @@ pub const WinsockError = enum(u16) {
/// For example, if a call to WaitForMultipleEvents fails or one of the registry functions fails trying to manipulate the protocol/namespace catalogs.
/// Returned when a provider does not return SUCCESS and does not provide an extended error code.
/// Can indicate a service provider implementation error.
WSASYSCALLFAILURE = 10107,
SYSCALLFAILURE = 10107,
/// Service not found.
/// No such service is known. The service cannot be found in the specified name space.
WSASERVICE_NOT_FOUND = 10108,
SERVICE_NOT_FOUND = 10108,
/// Class type not found.
/// The specified class was not found.
WSATYPE_NOT_FOUND = 10109,
TYPE_NOT_FOUND = 10109,
/// No more results.
/// No more results can be returned by the WSALookupServiceNext function.
WSA_E_NO_MORE = 10110,
E_NO_MORE = 10110,
/// Call was canceled.
/// A call to the WSALookupServiceEnd function was made while this call was still processing. The call has been canceled.
WSA_E_CANCELLED = 10111,
E_CANCELLED = 10111,
/// Database query was refused.
/// A database query failed because it was actively refused.
WSAEREFUSED = 10112,
EREFUSED = 10112,
/// Host not found.
/// No such host is known. The name is not an official host name or alias, or it cannot be found in the database(s) being queried.
/// This error may also be returned for protocol and service queries, and means that the specified name could not be found in the relevant database.
WSAHOST_NOT_FOUND = 11001,
HOST_NOT_FOUND = 11001,
/// Nonauthoritative host not found.
/// This is usually a temporary error during host name resolution and means that the local server did not receive a response from an authoritative server. A retry at some time later may be successful.
WSATRY_AGAIN = 11002,
TRY_AGAIN = 11002,
/// This is a nonrecoverable error.
/// This indicates that some sort of nonrecoverable error occurred during a database lookup.
/// This may be because the database files (for example, BSD-compatible HOSTS, SERVICES, or PROTOCOLS files) could not be found, or a DNS request was returned by the server with a severe error.
WSANO_RECOVERY = 11003,
NO_RECOVERY = 11003,
/// Valid name, no data record of requested type.
/// The requested name is valid and was found in the database, but it does not have the correct associated data being resolved for.
/// The usual example for this is a host name-to-address translation attempt (using gethostbyname or WSAAsyncGetHostByName) which uses the DNS (Domain Name Server).
/// An MX record is returned but no A record, indicating the host itself exists, but is not directly reachable.
WSANO_DATA = 11004,
NO_DATA = 11004,
/// QoS receivers.
/// At least one QoS reserve has arrived.
WSA_QOS_RECEIVERS = 11005,
QOS_RECEIVERS = 11005,
/// QoS senders.
/// At least one QoS send path has arrived.
WSA_QOS_SENDERS = 11006,
QOS_SENDERS = 11006,
/// No QoS senders.
/// There are no QoS senders.
WSA_QOS_NO_SENDERS = 11007,
QOS_NO_SENDERS = 11007,
/// QoS no receivers.
/// There are no QoS receivers.
WSA_QOS_NO_RECEIVERS = 11008,
QOS_NO_RECEIVERS = 11008,
/// QoS request confirmed.
/// The QoS reserve request has been confirmed.
WSA_QOS_REQUEST_CONFIRMED = 11009,
QOS_REQUEST_CONFIRMED = 11009,
/// QoS admission error.
/// A QoS error occurred due to lack of resources.
WSA_QOS_ADMISSION_FAILURE = 11010,
QOS_ADMISSION_FAILURE = 11010,
/// QoS policy failure.
/// The QoS request was rejected because the policy system couldn't allocate the requested resource within the existing policy.
WSA_QOS_POLICY_FAILURE = 11011,
QOS_POLICY_FAILURE = 11011,
/// QoS bad style.
/// An unknown or conflicting QoS style was encountered.
WSA_QOS_BAD_STYLE = 11012,
QOS_BAD_STYLE = 11012,
/// QoS bad object.
/// A problem was encountered with some part of the filterspec or the provider-specific buffer in general.
WSA_QOS_BAD_OBJECT = 11013,
QOS_BAD_OBJECT = 11013,
/// QoS traffic control error.
/// An error with the underlying traffic control (TC) API as the generic QoS request was converted for local enforcement by the TC API.
/// This could be due to an out of memory error or to an internal QoS provider error.
WSA_QOS_TRAFFIC_CTRL_ERROR = 11014,
QOS_TRAFFIC_CTRL_ERROR = 11014,
/// QoS generic error.
/// A general QoS error.
WSA_QOS_GENERIC_ERROR = 11015,
QOS_GENERIC_ERROR = 11015,
/// QoS service type error.
/// An invalid or unrecognized service type was found in the QoS flowspec.
WSA_QOS_ESERVICETYPE = 11016,
QOS_ESERVICETYPE = 11016,
/// QoS flowspec error.
/// An invalid or inconsistent flowspec was found in the QOS structure.
WSA_QOS_EFLOWSPEC = 11017,
QOS_EFLOWSPEC = 11017,
/// Invalid QoS provider buffer.
/// An invalid QoS provider-specific buffer.
WSA_QOS_EPROVSPECBUF = 11018,
QOS_EPROVSPECBUF = 11018,
/// Invalid QoS filter style.
/// An invalid QoS filter style was used.
WSA_QOS_EFILTERSTYLE = 11019,
QOS_EFILTERSTYLE = 11019,
/// Invalid QoS filter type.
/// An invalid QoS filter type was used.
WSA_QOS_EFILTERTYPE = 11020,
QOS_EFILTERTYPE = 11020,
/// Incorrect QoS filter count.
/// An incorrect number of QoS FILTERSPECs were specified in the FLOWDESCRIPTOR.
WSA_QOS_EFILTERCOUNT = 11021,
QOS_EFILTERCOUNT = 11021,
/// Invalid QoS object length.
/// An object with an invalid ObjectLength field was specified in the QoS provider-specific buffer.
WSA_QOS_EOBJLENGTH = 11022,
QOS_EOBJLENGTH = 11022,
/// Incorrect QoS flow count.
/// An incorrect number of flow descriptors was specified in the QoS structure.
WSA_QOS_EFLOWCOUNT = 11023,
QOS_EFLOWCOUNT = 11023,
/// Unrecognized QoS object.
/// An unrecognized object was found in the QoS provider-specific buffer.
WSA_QOS_EUNKOWNPSOBJ = 11024,
QOS_EUNKOWNPSOBJ = 11024,
/// Invalid QoS policy object.
/// An invalid policy object was found in the QoS provider-specific buffer.
WSA_QOS_EPOLICYOBJ = 11025,
QOS_EPOLICYOBJ = 11025,
/// Invalid QoS flow descriptor.
/// An invalid QoS flow descriptor was found in the flow descriptor list.
WSA_QOS_EFLOWDESC = 11026,
QOS_EFLOWDESC = 11026,
/// Invalid QoS provider-specific flowspec.
/// An invalid or inconsistent flowspec was found in the QoS provider-specific buffer.
WSA_QOS_EPSFLOWSPEC = 11027,
QOS_EPSFLOWSPEC = 11027,
/// Invalid QoS provider-specific filterspec.
/// An invalid FILTERSPEC was found in the QoS provider-specific buffer.
WSA_QOS_EPSFILTERSPEC = 11028,
QOS_EPSFILTERSPEC = 11028,
/// Invalid QoS shape discard mode object.
/// An invalid shape discard mode object was found in the QoS provider-specific buffer.
WSA_QOS_ESDMODEOBJ = 11029,
QOS_ESDMODEOBJ = 11029,
/// Invalid QoS shaping rate object.
/// An invalid shaping rate object was found in the QoS provider-specific buffer.
WSA_QOS_ESHAPERATEOBJ = 11030,
QOS_ESHAPERATEOBJ = 11030,
/// Reserved policy QoS element type.
/// A reserved policy element was found in the QoS provider-specific buffer.
WSA_QOS_RESERVED_PETYPE = 11031,
QOS_RESERVED_PETYPE = 11031,
_,
};
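For illustration only, a hedged sketch of the call-site impact of dropping the WSA/WSAE prefixes, assuming WSAGetLastError and unexpectedWSAError keep their existing declarations in std.os.windows:
switch (windows.ws2_32.WSAGetLastError()) {
    .EWOULDBLOCK => {}, // previously .WSAEWOULDBLOCK
    .ECONNRESET => return error.ConnectionResetByPeer,
    else => |err| return windows.unexpectedWSAError(err),
}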
@ -1946,18 +1842,6 @@ pub extern "ws2_32" fn WSAConnectByNameW(
Reserved: *OVERLAPPED,
) callconv(.winapi) BOOL;
pub extern "ws2_32" fn WSAConnectByNameA(
s: SOCKET,
nodename: [*:0]const u8,
servicename: [*:0]const u8,
LocalAddressLength: ?*u32,
LocalAddress: ?*sockaddr,
RemoteAddressLength: ?*u32,
RemoteAddress: ?*sockaddr,
timeout: ?*const timeval,
Reserved: *OVERLAPPED,
) callconv(.winapi) BOOL;
pub extern "ws2_32" fn WSAConnectByList(
s: SOCKET,
SocketAddress: *SOCKET_ADDRESS_LIST,
@ -1971,12 +1855,6 @@ pub extern "ws2_32" fn WSAConnectByList(
pub extern "ws2_32" fn WSACreateEvent() callconv(.winapi) HANDLE;
pub extern "ws2_32" fn WSADuplicateSocketA(
s: SOCKET,
dwProcessId: u32,
lpProtocolInfo: *WSAPROTOCOL_INFOA,
) callconv(.winapi) i32;
pub extern "ws2_32" fn WSADuplicateSocketW(
s: SOCKET,
dwProcessId: u32,
@ -1989,12 +1867,6 @@ pub extern "ws2_32" fn WSAEnumNetworkEvents(
lpNetworkEvents: *WSANETWORKEVENTS,
) callconv(.winapi) i32;
pub extern "ws2_32" fn WSAEnumProtocolsA(
lpiProtocols: ?*i32,
lpProtocolBuffer: ?*WSAPROTOCOL_INFOA,
lpdwBufferLength: *u32,
) callconv(.winapi) i32;
pub extern "ws2_32" fn WSAEnumProtocolsW(
lpiProtocols: ?*i32,
lpProtocolBuffer: ?*WSAPROTOCOL_INFOW,
@ -2137,15 +2009,6 @@ pub extern "ws2_32" fn WSASetEvent(
hEvent: HANDLE,
) callconv(.winapi) BOOL;
pub extern "ws2_32" fn WSASocketA(
af: i32,
@"type": i32,
protocol: i32,
lpProtocolInfo: ?*WSAPROTOCOL_INFOA,
g: u32,
dwFlags: u32,
) callconv(.winapi) SOCKET;
pub extern "ws2_32" fn WSASocketW(
af: i32,
@"type": i32,
@ -2163,14 +2026,6 @@ pub extern "ws2_32" fn WSAWaitForMultipleEvents(
fAlertable: BOOL,
) callconv(.winapi) u32;
pub extern "ws2_32" fn WSAAddressToStringA(
lpsaAddress: *sockaddr,
dwAddressLength: u32,
lpProtocolInfo: ?*WSAPROTOCOL_INFOA,
lpszAddressString: [*]u8,
lpdwAddressStringLength: *u32,
) callconv(.winapi) i32;
pub extern "ws2_32" fn WSAAddressToStringW(
lpsaAddress: *sockaddr,
dwAddressLength: u32,
@ -2179,14 +2034,6 @@ pub extern "ws2_32" fn WSAAddressToStringW(
lpdwAddressStringLength: *u32,
) callconv(.winapi) i32;
pub extern "ws2_32" fn WSAStringToAddressA(
AddressString: [*:0]const u8,
AddressFamily: i32,
lpProtocolInfo: ?*WSAPROTOCOL_INFOA,
lpAddress: *sockaddr,
lpAddressLength: *i32,
) callconv(.winapi) i32;
pub extern "ws2_32" fn WSAStringToAddressW(
AddressString: [*:0]const u16,
AddressFamily: i32,
@ -2251,32 +2098,14 @@ pub extern "ws2_32" fn WSAProviderCompleteAsyncCall(
iRetCode: i32,
) callconv(.winapi) i32;
pub extern "mswsock" fn EnumProtocolsA(
lpiProtocols: ?*i32,
lpProtocolBuffer: *anyopaque,
lpdwBufferLength: *u32,
) callconv(.winapi) i32;
pub extern "mswsock" fn EnumProtocolsW(
lpiProtocols: ?*i32,
lpProtocolBuffer: *anyopaque,
lpdwBufferLength: *u32,
) callconv(.winapi) i32;
pub extern "mswsock" fn GetAddressByNameA(
dwNameSpace: u32,
lpServiceType: *GUID,
lpServiceName: ?[*:0]u8,
lpiProtocols: ?*i32,
dwResolution: u32,
lpServiceAsyncInfo: ?*SERVICE_ASYNC_INFO,
lpCsaddrBuffer: *anyopaque,
lpAliasBuffer: ?[*:0]const u8,
lpdwAliasBufferLength: *u32,
) callconv(.winapi) i32;
pub extern "mswsock" fn GetAddressByNameW(
dwNameSpace: u32,
dwNameSpace: NS,
lpServiceType: *GUID,
lpServiceName: ?[*:0]u16,
lpiProtocols: ?*i32,
@ -2288,45 +2117,28 @@ pub extern "mswsock" fn GetAddressByNameW(
lpdwAliasBufferLength: *u32,
) callconv(.winapi) i32;
pub extern "mswsock" fn GetTypeByNameA(
lpServiceName: [*:0]u8,
lpServiceType: *GUID,
) callconv(.winapi) i32;
pub extern "mswsock" fn GetTypeByNameW(
lpServiceName: [*:0]u16,
lpServiceType: *GUID,
) callconv(.winapi) i32;
pub extern "mswsock" fn GetNameByTypeA(
lpServiceType: *GUID,
lpServiceName: [*:0]u8,
dwNameLength: u32,
) callconv(.winapi) i32;
pub extern "mswsock" fn GetNameByTypeW(
lpServiceType: *GUID,
lpServiceName: [*:0]u16,
dwNameLength: u32,
) callconv(.winapi) i32;
pub extern "ws2_32" fn getaddrinfo(
pNodeName: ?[*:0]const u8,
pServiceName: ?[*:0]const u8,
pHints: ?*const addrinfoa,
ppResult: *?*addrinfoa,
) callconv(.winapi) i32;
pub extern "ws2_32" fn GetAddrInfoExA(
pName: ?[*:0]const u8,
pServiceName: ?[*:0]const u8,
dwNameSapce: u32,
pub extern "ws2_32" fn GetAddrInfoExW(
pName: ?[*:0]const u16,
pServiceName: ?[*:0]const u16,
dwNameSpace: NS,
lpNspId: ?*GUID,
hints: ?*const addrinfoexA,
ppResult: **addrinfoexA,
hints: ?*const ADDRINFOEXW,
ppResult: **ADDRINFOEXW,
timeout: ?*timeval,
lpOverlapped: ?*OVERLAPPED,
lpCompletionRoutine: ?LPLOOKUPSERVICE_COMPLETION_ROUTINE,
lpNameHandle: ?*HANDLE,
) callconv(.winapi) i32;
pub extern "ws2_32" fn GetAddrInfoExCancel(
@ -2337,12 +2149,8 @@ pub extern "ws2_32" fn GetAddrInfoExOverlappedResult(
lpOverlapped: *OVERLAPPED,
) callconv(.winapi) i32;
pub extern "ws2_32" fn freeaddrinfo(
pAddrInfo: ?*addrinfoa,
) callconv(.winapi) void;
pub extern "ws2_32" fn FreeAddrInfoEx(
pAddrInfoEx: ?*addrinfoexA,
pub extern "ws2_32" fn FreeAddrInfoExW(
pAddrInfoEx: ?*ADDRINFOEXW,
) callconv(.winapi) void;
pub extern "ws2_32" fn getnameinfo(
@ -2354,7 +2162,3 @@ pub extern "ws2_32" fn getnameinfo(
ServiceBufferName: u32,
Flags: i32,
) callconv(.winapi) i32;
pub extern "iphlpapi" fn if_nametoindex(
InterfaceName: [*:0]const u8,
) callconv(.winapi) u32;

File diff suppressed because it is too large

View File

@ -109,64 +109,6 @@ test "open smoke test" {
}
}
test "openat smoke test" {
if (native_os == .windows) return error.SkipZigTest;
// TODO verify file attributes using `fstatat`
var tmp = tmpDir(.{});
defer tmp.cleanup();
var fd: posix.fd_t = undefined;
const mode: posix.mode_t = if (native_os == .windows) 0 else 0o666;
// Create some file using `openat`.
fd = try posix.openat(tmp.dir.fd, "some_file", CommonOpenFlags.lower(.{
.ACCMODE = .RDWR,
.CREAT = true,
.EXCL = true,
}), mode);
posix.close(fd);
// Try this again with the same flags. This op should fail with error.PathAlreadyExists.
try expectError(error.PathAlreadyExists, posix.openat(tmp.dir.fd, "some_file", CommonOpenFlags.lower(.{
.ACCMODE = .RDWR,
.CREAT = true,
.EXCL = true,
}), mode));
// Try opening without `EXCL` flag.
fd = try posix.openat(tmp.dir.fd, "some_file", CommonOpenFlags.lower(.{
.ACCMODE = .RDWR,
.CREAT = true,
}), mode);
posix.close(fd);
// Try opening as a directory which should fail.
try expectError(error.NotDir, posix.openat(tmp.dir.fd, "some_file", CommonOpenFlags.lower(.{
.ACCMODE = .RDWR,
.DIRECTORY = true,
}), mode));
// Create some directory
try posix.mkdirat(tmp.dir.fd, "some_dir", mode);
// Open dir using `open`
fd = try posix.openat(tmp.dir.fd, "some_dir", CommonOpenFlags.lower(.{
.ACCMODE = .RDONLY,
.DIRECTORY = true,
}), mode);
posix.close(fd);
// Try opening as file which should fail (skip on wasi+libc due to
// https://github.com/bytecodealliance/wasmtime/issues/9054)
if (native_os != .wasi or !builtin.link_libc) {
try expectError(error.IsDir, posix.openat(tmp.dir.fd, "some_dir", CommonOpenFlags.lower(.{
.ACCMODE = .RDWR,
}), mode));
}
}
test "readlink on Windows" {
if (native_os != .windows) return error.SkipZigTest;
@ -226,49 +168,6 @@ test "linkat with different directories" {
}
}
test "fstatat" {
if ((builtin.cpu.arch == .riscv32 or builtin.cpu.arch.isLoongArch()) and builtin.os.tag == .linux and !builtin.link_libc) return error.SkipZigTest; // No `fstatat()`.
// enable when `fstat` and `fstatat` are implemented on Windows
if (native_os == .windows) return error.SkipZigTest;
var tmp = tmpDir(.{});
defer tmp.cleanup();
// create dummy file
const contents = "nonsense";
try tmp.dir.writeFile(.{ .sub_path = "file.txt", .data = contents });
// fetch file's info on the opened fd directly
const file = try tmp.dir.openFile("file.txt", .{});
const stat = try posix.fstat(file.handle);
defer file.close();
// now repeat but using `fstatat` instead
const statat = try posix.fstatat(tmp.dir.fd, "file.txt", posix.AT.SYMLINK_NOFOLLOW);
try expectEqual(stat.dev, statat.dev);
try expectEqual(stat.ino, statat.ino);
try expectEqual(stat.nlink, statat.nlink);
try expectEqual(stat.mode, statat.mode);
try expectEqual(stat.uid, statat.uid);
try expectEqual(stat.gid, statat.gid);
try expectEqual(stat.rdev, statat.rdev);
try expectEqual(stat.size, statat.size);
try expectEqual(stat.blksize, statat.blksize);
// The stat.blocks/statat.blocks count is managed by the filesystem and may
// change if the file is stored in a journal or "inline".
// try expectEqual(stat.blocks, statat.blocks);
// s390x-linux does not have nanosecond precision for fstat(), but it does for
// fstatat(). As a result, comparing the timestamps isn't worth the effort
if (!(builtin.cpu.arch == .s390x and builtin.os.tag == .linux)) {
try expectEqual(stat.atime(), statat.atime());
try expectEqual(stat.mtime(), statat.mtime());
try expectEqual(stat.ctime(), statat.ctime());
}
}
test "readlinkat" {
var tmp = tmpDir(.{});
defer tmp.cleanup();
@ -621,25 +520,6 @@ test "getrlimit and setrlimit" {
}
}
test "shutdown socket" {
if (native_os == .wasi)
return error.SkipZigTest;
if (native_os == .windows) {
_ = try std.os.windows.WSAStartup(2, 2);
}
defer {
if (native_os == .windows) {
std.os.windows.WSACleanup() catch unreachable;
}
}
const sock = try posix.socket(posix.AF.INET, posix.SOCK.STREAM, 0);
posix.shutdown(sock, .both) catch |err| switch (err) {
error.SocketNotConnected => {},
else => |e| return e,
};
std.net.Stream.close(.{ .handle = sock });
}
test "sigrtmin/max" {
if (native_os == .wasi or native_os == .windows or native_os == .macos) {
return error.SkipZigTest;
@ -656,14 +536,15 @@ test "sigset empty/full" {
var set: posix.sigset_t = posix.sigemptyset();
for (1..posix.NSIG) |i| {
try expectEqual(false, posix.sigismember(&set, @truncate(i)));
const sig = std.meta.intToEnum(posix.SIG, i) catch continue;
try expectEqual(false, posix.sigismember(&set, sig));
}
// The C library can reserve some (unnamed) signals, so we can't check that the
// full NSIG set is defined; just test a couple:
set = posix.sigfillset();
try expectEqual(true, posix.sigismember(&set, @truncate(posix.SIG.CHLD)));
try expectEqual(true, posix.sigismember(&set, @truncate(posix.SIG.INT)));
try expectEqual(true, posix.sigismember(&set, .CHLD));
try expectEqual(true, posix.sigismember(&set, .INT));
}
// Some signals (i.e., 32 - 34 on glibc/musl) are not allowed to be added to a
@ -684,25 +565,30 @@ test "sigset add/del" {
// See that none are set, then set each one, see that they're all set, then
// remove them all, and then see that none are set.
for (1..posix.NSIG) |i| {
try expectEqual(false, posix.sigismember(&sigset, @truncate(i)));
const sig = std.meta.intToEnum(posix.SIG, i) catch continue;
try expectEqual(false, posix.sigismember(&sigset, sig));
}
for (1..posix.NSIG) |i| {
if (!reserved_signo(i)) {
posix.sigaddset(&sigset, @truncate(i));
const sig = std.meta.intToEnum(posix.SIG, i) catch continue;
posix.sigaddset(&sigset, sig);
}
}
for (1..posix.NSIG) |i| {
if (!reserved_signo(i)) {
try expectEqual(true, posix.sigismember(&sigset, @truncate(i)));
const sig = std.meta.intToEnum(posix.SIG, i) catch continue;
try expectEqual(true, posix.sigismember(&sigset, sig));
}
}
for (1..posix.NSIG) |i| {
if (!reserved_signo(i)) {
posix.sigdelset(&sigset, @truncate(i));
const sig = std.meta.intToEnum(posix.SIG, i) catch continue;
posix.sigdelset(&sigset, sig);
}
}
for (1..posix.NSIG) |i| {
try expectEqual(false, posix.sigismember(&sigset, @truncate(i)));
const sig = std.meta.intToEnum(posix.SIG, i) catch continue;
try expectEqual(false, posix.sigismember(&sigset, sig));
}
}
@ -731,11 +617,8 @@ test "dup & dup2" {
try dup2ed.writeAll("dup2");
}
var file = try tmp.dir.openFile("os_dup_test", .{});
defer file.close();
var buf: [7]u8 = undefined;
try testing.expectEqualStrings("dupdup2", buf[0..try file.readAll(&buf)]);
var buffer: [8]u8 = undefined;
try testing.expectEqualStrings("dupdup2", try tmp.dir.readFile("os_dup_test", &buffer));
}
test "writev longer than IOV_MAX" {
@ -966,20 +849,6 @@ test "isatty" {
try expectEqual(posix.isatty(file.handle), false);
}
test "read with empty buffer" {
var tmp = tmpDir(.{});
defer tmp.cleanup();
var file = try tmp.dir.createFile("read_empty", .{ .read = true });
defer file.close();
const bytes = try a.alloc(u8, 0);
defer a.free(bytes);
const rc = try posix.read(file.handle, bytes);
try expectEqual(rc, 0);
}
test "pread with empty buffer" {
var tmp = tmpDir(.{});
defer tmp.cleanup();

View File

@ -1,5 +1,9 @@
const std = @import("../std.zig");
const ChildProcess = @This();
const builtin = @import("builtin");
const native_os = builtin.os.tag;
const std = @import("../std.zig");
const unicode = std.unicode;
const fs = std.fs;
const process = std.process;
@ -11,9 +15,7 @@ const mem = std.mem;
const EnvMap = std.process.EnvMap;
const maxInt = std.math.maxInt;
const assert = std.debug.assert;
const native_os = builtin.os.tag;
const Allocator = std.mem.Allocator;
const ChildProcess = @This();
const ArrayList = std.ArrayList;
pub const Id = switch (native_os) {
@ -317,16 +319,23 @@ pub fn waitForSpawn(self: *ChildProcess) SpawnError!void {
const err_pipe = self.err_pipe orelse return;
self.err_pipe = null;
// Wait for the child to report any errors in or before `execvpe`.
if (readIntFd(err_pipe)) |child_err_int| {
const report = readIntFd(err_pipe);
posix.close(err_pipe);
if (report) |child_err_int| {
const child_err: SpawnError = @errorCast(@errorFromInt(child_err_int));
self.term = child_err;
return child_err;
} else |_| {
// Write end closed by CLOEXEC at the time of the `execvpe` call, indicating success!
posix.close(err_pipe);
} else |read_err| switch (read_err) {
error.EndOfStream => {
// Write end closed by CLOEXEC at the time of the `execvpe` call,
// indicating success.
},
else => {
// Problem reading the error from the error reporting pipe. We
// don't know if the child is alive or dead. Better to assume it is
// alive so the resource does not risk being leaked.
},
}
}
@ -563,6 +572,10 @@ fn spawnPosix(self: *ChildProcess) SpawnError!void {
error.BadPathName => unreachable, // Windows-only
error.WouldBlock => unreachable,
error.NetworkNotFound => unreachable, // Windows-only
error.Canceled => unreachable, // temporarily in the posix error set
error.SharingViolation => unreachable, // Windows-only
error.PipeBusy => unreachable, // not a pipe
error.AntivirusInterference => unreachable, // Windows-only
else => |e| return e,
}
else
@ -1014,8 +1027,14 @@ fn writeIntFd(fd: i32, value: ErrInt) !void {
fn readIntFd(fd: i32) !ErrInt {
var buffer: [8]u8 = undefined;
var fr: std.fs.File.Reader = .initStreaming(.{ .handle = fd }, &buffer);
return @intCast(fr.interface.takeInt(u64, .little) catch return error.SystemResources);
var i: usize = 0;
while (i < buffer.len) {
const n = try std.posix.read(fd, buffer[i..]);
if (n == 0) return error.EndOfStream;
i += n;
}
const int = mem.readInt(u64, &buffer, .little);
return @intCast(int);
}
const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8);
@ -1065,16 +1084,24 @@ fn windowsCreateProcessPathExt(
// or a version with a supported PATHEXT appended. We then try calling CreateProcessW
// with the found versions in the appropriate order.
// In the future, child process execution needs to move to the Io implementation.
// Under those conditions, this code will have access to a lower-level directory
// opening function and will know which implementation it is running in. Here, we
// imitate that scenario.
var threaded: std.Io.Threaded = .init_single_threaded;
const io = threaded.ioBasic();
var dir = dir: {
// needs to be null-terminated
try dir_buf.append(allocator, 0);
defer dir_buf.shrinkRetainingCapacity(dir_path_len);
const dir_path_z = dir_buf.items[0 .. dir_buf.items.len - 1 :0];
const prefixed_path = try windows.wToPrefixedFileW(null, dir_path_z);
break :dir fs.cwd().openDirW(prefixed_path.span().ptr, .{ .iterate = true }) catch
return error.FileNotFound;
break :dir threaded.dirOpenDirWindows(.cwd(), prefixed_path.span(), .{
.iterate = true,
}) catch return error.FileNotFound;
};
defer dir.close();
defer dir.close(io);
// Add wildcard and null-terminator
try app_buf.append(allocator, '*');
@ -1108,7 +1135,7 @@ fn windowsCreateProcessPathExt(
.Buffer = @constCast(app_name_wildcard.ptr),
};
const rc = windows.ntdll.NtQueryDirectoryFile(
dir.fd,
dir.handle,
null,
null,
null,

View File

@ -652,7 +652,6 @@ inline fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 {
std.os.environ = envp;
std.debug.maybeEnableSegfaultHandler();
maybeIgnoreSigpipe();
return callMain();
}
@ -756,39 +755,3 @@ pub fn call_wWinMain() std.os.windows.INT {
// second parameter hPrevInstance, MSDN: "This parameter is always NULL"
return root.wWinMain(hInstance, null, lpCmdLine, nCmdShow);
}
fn maybeIgnoreSigpipe() void {
const have_sigpipe_support = switch (builtin.os.tag) {
.linux,
.plan9,
.illumos,
.netbsd,
.openbsd,
.haiku,
.macos,
.ios,
.watchos,
.tvos,
.visionos,
.dragonfly,
.freebsd,
.serenity,
=> true,
else => false,
};
if (have_sigpipe_support and !std.options.keep_sigpipe) {
const posix = std.posix;
const act: posix.Sigaction = .{
// Set handler to a noop function instead of `SIG.IGN` to prevent
// leaking signal disposition to a child process.
.handler = .{ .handler = noopSigHandler },
.mask = posix.sigemptyset(),
.flags = 0,
};
posix.sigaction(posix.SIG.PIPE, &act, null);
}
}
fn noopSigHandler(_: i32) callconv(.c) void {}

View File

@ -85,7 +85,6 @@ pub const macho = @import("macho.zig");
pub const math = @import("math.zig");
pub const mem = @import("mem.zig");
pub const meta = @import("meta.zig");
pub const net = @import("net.zig");
pub const os = @import("os.zig");
pub const once = @import("once.zig").once;
pub const pdb = @import("pdb.zig");
@ -145,19 +144,6 @@ pub const Options = struct {
crypto_fork_safety: bool = true,
/// By default Zig disables SIGPIPE by setting a "no-op" handler for it. Set this option
/// to `true` to prevent that.
///
/// Note that we use a "no-op" handler instead of SIG_IGN because it will not be inherited by
/// any child process.
///
/// SIGPIPE is triggered when a process attempts to write to a broken pipe. By default, SIGPIPE
/// will terminate the process instead of exiting. It doesn't trigger the panic handler so in many
/// cases it's unclear why the process was terminated. By capturing SIGPIPE instead, functions that
/// write to broken pipes will return the EPIPE error (error.BrokenPipe) and the program can handle
/// it like any other error.
keep_sigpipe: bool = false,
/// By default, std.http.Client will support HTTPS connections. Set this option to `true` to
/// disable TLS support.
///

View File

@ -977,7 +977,7 @@ test pipeToFileSystem {
const data = @embedFile("tar/testdata/example.tar");
var reader: std.Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{ .no_follow = true });
var tmp = testing.tmpDir(.{ .follow_symlinks = false });
defer tmp.cleanup();
const dir = tmp.dir;
@ -1010,7 +1010,7 @@ test "pipeToFileSystem root_dir" {
// with strip_components = 1
{
var tmp = testing.tmpDir(.{ .no_follow = true });
var tmp = testing.tmpDir(.{ .follow_symlinks = false });
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
@ -1032,7 +1032,7 @@ test "pipeToFileSystem root_dir" {
// with strip_components = 0
{
reader = .fixed(data);
var tmp = testing.tmpDir(.{ .no_follow = true });
var tmp = testing.tmpDir(.{ .follow_symlinks = false });
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
@ -1084,7 +1084,7 @@ test "pipeToFileSystem strip_components" {
const data = @embedFile("tar/testdata/example.tar");
var reader: std.Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{ .no_follow = true });
var tmp = testing.tmpDir(.{ .follow_symlinks = false });
defer tmp.cleanup();
var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
defer diagnostics.deinit();
@ -1145,7 +1145,7 @@ test "executable bit" {
for ([_]PipeOptions.ModeMode{ .ignore, .executable_bit_only }) |opt| {
var reader: std.Io.Reader = .fixed(data);
var tmp = testing.tmpDir(.{ .no_follow = true });
var tmp = testing.tmpDir(.{ .follow_symlinks = false });
//defer tmp.cleanup();
pipeToFileSystem(tmp.dir, &reader, .{

View File

@ -1,7 +1,9 @@
const Writer = @This();
const std = @import("std");
const Io = std.Io;
const assert = std.debug.assert;
const testing = std.testing;
const Writer = @This();
const block_size = @sizeOf(Header);
@ -14,9 +16,8 @@ pub const Options = struct {
mtime: u64 = 0,
};
underlying_writer: *std.Io.Writer,
underlying_writer: *Io.Writer,
prefix: []const u8 = "",
mtime_now: u64 = 0,
const Error = error{
WriteFailed,
@ -36,16 +37,27 @@ pub fn writeDir(w: *Writer, sub_path: []const u8, options: Options) Error!void {
try w.writeHeader(.directory, sub_path, "", 0, options);
}
pub const WriteFileError = std.Io.Writer.FileError || Error || std.fs.File.Reader.SizeError;
pub const WriteFileError = Io.Writer.FileError || Error || Io.File.Reader.SizeError;
pub fn writeFileTimestamp(
w: *Writer,
sub_path: []const u8,
file_reader: *Io.File.Reader,
mtime: Io.Timestamp,
) WriteFileError!void {
return writeFile(w, sub_path, file_reader, @intCast(mtime.toSeconds()));
}
pub fn writeFile(
w: *Writer,
sub_path: []const u8,
file_reader: *std.fs.File.Reader,
stat_mtime: i128,
file_reader: *Io.File.Reader,
/// The file format expects the number of seconds since the POSIX epoch. Zero is
/// also a good choice here, as it makes generated tarballs more reproducible.
mtime: u64,
) WriteFileError!void {
const size = try file_reader.getSize();
const mtime: u64 = @intCast(@divFloor(stat_mtime, std.time.ns_per_s));
var header: Header = .{};
try w.setPath(&header, sub_path);
@ -58,7 +70,7 @@ pub fn writeFile(
try w.writePadding64(size);
}
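For illustration only, a hedged sketch (caller names assumed) of adapting a call site that previously passed a nanosecond stat mtime, reusing the conversion the old body performed; passing 0 also works and keeps archives reproducible:
const mtime_s: u64 = @intCast(@divFloor(stat_mtime_ns, std.time.ns_per_s));
try tar_writer.writeFile(sub_path, &file_reader, mtime_s);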
pub const WriteFileStreamError = Error || std.Io.Reader.StreamError;
pub const WriteFileStreamError = Error || Io.Reader.StreamError;
/// Writes file reading file content from `reader`. Reads exactly `size` bytes
/// from `reader`, or returns `error.EndOfStream`.
@ -66,7 +78,7 @@ pub fn writeFileStream(
w: *Writer,
sub_path: []const u8,
size: u64,
reader: *std.Io.Reader,
reader: *Io.Reader,
options: Options,
) WriteFileStreamError!void {
try w.writeHeader(.regular, sub_path, "", size, options);
@ -136,15 +148,15 @@ fn writeExtendedHeader(w: *Writer, typeflag: Header.FileType, buffers: []const [
try w.writePadding(len);
}
fn writePadding(w: *Writer, bytes: usize) std.Io.Writer.Error!void {
fn writePadding(w: *Writer, bytes: usize) Io.Writer.Error!void {
return writePaddingPos(w, bytes % block_size);
}
fn writePadding64(w: *Writer, bytes: u64) std.Io.Writer.Error!void {
fn writePadding64(w: *Writer, bytes: u64) Io.Writer.Error!void {
return writePaddingPos(w, @intCast(bytes % block_size));
}
fn writePaddingPos(w: *Writer, pos: usize) std.Io.Writer.Error!void {
fn writePaddingPos(w: *Writer, pos: usize) Io.Writer.Error!void {
if (pos == 0) return;
try w.underlying_writer.splatByteAll(0, block_size - pos);
}
@ -153,7 +165,7 @@ fn writePaddingPos(w: *Writer, pos: usize) std.Io.Writer.Error!void {
/// "reasonable system must not assume that such a block exists when reading an
/// archive". Therefore, the Zig standard library recommends to not call this
/// function.
pub fn finishPedantically(w: *Writer) std.Io.Writer.Error!void {
pub fn finishPedantically(w: *Writer) Io.Writer.Error!void {
try w.underlying_writer.splatByteAll(0, block_size * 2);
}
@ -236,7 +248,6 @@ pub const Header = extern struct {
}
// Integer number of seconds since January 1, 1970, 00:00 Coordinated Universal Time.
// mtime == 0 will use current time
pub fn setMtime(w: *Header, mtime: u64) error{OctalOverflow}!void {
try octal(&w.mtime, mtime);
}
@ -248,7 +259,7 @@ pub const Header = extern struct {
try octal(&w.checksum, checksum);
}
pub fn write(h: *Header, bw: *std.Io.Writer) error{ OctalOverflow, WriteFailed }!void {
pub fn write(h: *Header, bw: *Io.Writer) error{ OctalOverflow, WriteFailed }!void {
try h.updateChecksum();
try bw.writeAll(std.mem.asBytes(h));
}
@ -396,14 +407,14 @@ test "write files" {
{
const root = "root";
var output: std.Io.Writer.Allocating = .init(testing.allocator);
var output: Io.Writer.Allocating = .init(testing.allocator);
var w: Writer = .{ .underlying_writer = &output.writer };
defer output.deinit();
try w.setRoot(root);
for (files) |file|
try w.writeFileBytes(file.path, file.content, .{});
var input: std.Io.Reader = .fixed(output.written());
var input: Io.Reader = .fixed(output.written());
var it: std.tar.Iterator = .init(&input, .{
.file_name_buffer = &file_name_buffer,
.link_name_buffer = &link_name_buffer,
@ -424,7 +435,7 @@ test "write files" {
try testing.expectEqual('/', actual.name[root.len..][0]);
try testing.expectEqualStrings(expected.path, actual.name[root.len + 1 ..]);
var content: std.Io.Writer.Allocating = .init(testing.allocator);
var content: Io.Writer.Allocating = .init(testing.allocator);
defer content.deinit();
try it.streamRemaining(actual, &content.writer);
try testing.expectEqualSlices(u8, expected.content, content.written());
@ -432,15 +443,15 @@ test "write files" {
}
// without root
{
var output: std.Io.Writer.Allocating = .init(testing.allocator);
var output: Io.Writer.Allocating = .init(testing.allocator);
var w: Writer = .{ .underlying_writer = &output.writer };
defer output.deinit();
for (files) |file| {
var content: std.Io.Reader = .fixed(file.content);
var content: Io.Reader = .fixed(file.content);
try w.writeFileStream(file.path, file.content.len, &content, .{});
}
var input: std.Io.Reader = .fixed(output.written());
var input: Io.Reader = .fixed(output.written());
var it: std.tar.Iterator = .init(&input, .{
.file_name_buffer = &file_name_buffer,
.link_name_buffer = &link_name_buffer,
@ -452,7 +463,7 @@ test "write files" {
const expected = files[i];
try testing.expectEqualStrings(expected.path, actual.name);
var content: std.Io.Writer.Allocating = .init(testing.allocator);
var content: Io.Writer.Allocating = .init(testing.allocator);
defer content.deinit();
try it.streamRemaining(actual, &content.writer);
try testing.expectEqualSlices(u8, expected.content, content.written());

View File

@ -28,6 +28,9 @@ pub var allocator_instance: std.heap.GeneralPurposeAllocator(.{
break :b .init;
};
pub var io_instance: std.Io.Threaded = undefined;
pub const io = io_instance.io();
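For illustration only, a hedged sketch of a test obtaining an Io handle through the new std.testing.io constant, mirroring the Timer test later in this diff:
test "sleep via testing io" {
    const io = std.testing.io;
    try std.Io.Clock.Duration.sleep(.{ .clock = .awake, .raw = .fromMilliseconds(1) }, io);
}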
/// TODO https://github.com/ziglang/zig/issues/5738
pub var log_level = std.log.Level.warn;
@ -1145,6 +1148,7 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
} else |err| switch (err) {
error.OutOfMemory => {
if (failing_allocator_inst.allocated_bytes != failing_allocator_inst.freed_bytes) {
const tty_config = std.Io.tty.detectConfig(.stderr());
print(
"\nfail_index: {d}/{d}\nallocated bytes: {d}\nfreed bytes: {d}\nallocations: {d}\ndeallocations: {d}\nallocation that was made to fail: {f}",
.{
@ -1154,7 +1158,10 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
failing_allocator_inst.freed_bytes,
failing_allocator_inst.allocations,
failing_allocator_inst.deallocations,
failing_allocator_inst.getStackTrace(),
std.debug.FormatStackTrace{
.stack_trace = failing_allocator_inst.getStackTrace(),
.tty_config = tty_config,
},
},
);
return error.MemoryLeakDetected;

View File

@ -8,74 +8,6 @@ const posix = std.posix;
pub const epoch = @import("time/epoch.zig");
/// Get a calendar timestamp, in seconds, relative to UTC 1970-01-01.
/// Precision of timing depends on the hardware and operating system.
/// The return value is signed because it is possible to have a date that is
/// before the epoch.
/// See `posix.clock_gettime` for a POSIX timestamp.
pub fn timestamp() i64 {
return @divFloor(milliTimestamp(), ms_per_s);
}
/// Get a calendar timestamp, in milliseconds, relative to UTC 1970-01-01.
/// Precision of timing depends on the hardware and operating system.
/// The return value is signed because it is possible to have a date that is
/// before the epoch.
/// See `posix.clock_gettime` for a POSIX timestamp.
pub fn milliTimestamp() i64 {
return @as(i64, @intCast(@divFloor(nanoTimestamp(), ns_per_ms)));
}
/// Get a calendar timestamp, in microseconds, relative to UTC 1970-01-01.
/// Precision of timing depends on the hardware and operating system.
/// The return value is signed because it is possible to have a date that is
/// before the epoch.
/// See `posix.clock_gettime` for a POSIX timestamp.
pub fn microTimestamp() i64 {
return @as(i64, @intCast(@divFloor(nanoTimestamp(), ns_per_us)));
}
/// Get a calendar timestamp, in nanoseconds, relative to UTC 1970-01-01.
/// Precision of timing depends on the hardware and operating system.
/// On Windows this has a maximum granularity of 100 nanoseconds.
/// The return value is signed because it is possible to have a date that is
/// before the epoch.
/// See `posix.clock_gettime` for a POSIX timestamp.
pub fn nanoTimestamp() i128 {
switch (builtin.os.tag) {
.windows => {
// RtlGetSystemTimePrecise() has a granularity of 100 nanoseconds and uses the NTFS/Windows epoch,
// which is 1601-01-01.
const epoch_adj = epoch.windows * (ns_per_s / 100);
return @as(i128, windows.ntdll.RtlGetSystemTimePrecise() + epoch_adj) * 100;
},
.wasi => {
var ns: std.os.wasi.timestamp_t = undefined;
const err = std.os.wasi.clock_time_get(.REALTIME, 1, &ns);
assert(err == .SUCCESS);
return ns;
},
.uefi => {
const value, _ = std.os.uefi.system_table.runtime_services.getTime() catch return 0;
return value.toEpoch();
},
else => {
const ts = posix.clock_gettime(.REALTIME) catch |err| switch (err) {
error.UnsupportedClock, error.Unexpected => return 0, // "Precision of timing depends on hardware and OS".
};
return (@as(i128, ts.sec) * ns_per_s) + ts.nsec;
},
}
}
test milliTimestamp {
const time_0 = milliTimestamp();
std.Thread.sleep(ns_per_ms);
const time_1 = milliTimestamp();
const interval = time_1 - time_0;
try testing.expect(interval > 0);
}
// Divisions of a nanosecond.
pub const ns_per_us = 1000;
pub const ns_per_ms = 1000 * ns_per_us;
@ -268,9 +200,11 @@ pub const Timer = struct {
};
test Timer {
const io = std.testing.io;
var timer = try Timer.start();
std.Thread.sleep(10 * ns_per_ms);
try std.Io.Clock.Duration.sleep(.{ .clock = .awake, .raw = .fromMilliseconds(10) }, io);
const time_0 = timer.read();
try testing.expect(time_0 > 0);

View File

@ -1809,30 +1809,6 @@ pub fn wtf8ToWtf16Le(wtf16le: []u16, wtf8: []const u8) error{InvalidWtf8}!usize
return utf8ToUtf16LeImpl(wtf16le, wtf8, .can_encode_surrogate_half);
}
fn checkUtf8ToUtf16LeOverflowImpl(utf8: []const u8, utf16le: []const u16, comptime surrogates: Surrogates) !bool {
// Each u8 in UTF-8/WTF-8 correlates to at most one u16 in UTF-16LE/WTF-16LE.
if (utf16le.len >= utf8.len) return false;
const utf16_len = calcUtf16LeLenImpl(utf8, surrogates) catch {
return switch (surrogates) {
.cannot_encode_surrogate_half => error.InvalidUtf8,
.can_encode_surrogate_half => error.InvalidWtf8,
};
};
return utf16_len > utf16le.len;
}
/// Checks if calling `utf8ToUtf16Le` would overflow. Might fail if utf8 is not
/// valid UTF-8.
pub fn checkUtf8ToUtf16LeOverflow(utf8: []const u8, utf16le: []const u16) error{InvalidUtf8}!bool {
return checkUtf8ToUtf16LeOverflowImpl(utf8, utf16le, .cannot_encode_surrogate_half);
}
/// Checks if calling `utf8ToUtf16Le` would overflow. Might fail if wtf8 is not
/// valid WTF-8.
pub fn checkWtf8ToWtf16LeOverflow(wtf8: []const u8, wtf16le: []const u16) error{InvalidWtf8}!bool {
return checkUtf8ToUtf16LeOverflowImpl(wtf8, wtf16le, .can_encode_surrogate_half);
}
/// Surrogate codepoints (U+D800 to U+DFFF) are replaced by the Unicode replacement
/// character (U+FFFD).
/// All surrogate codepoints and the replacement character are encoded as three
@ -2039,7 +2015,6 @@ fn testRoundtripWtf8(wtf8: []const u8) !void {
var wtf16_buf: [32]u16 = undefined;
const wtf16_len = try wtf8ToWtf16Le(&wtf16_buf, wtf8);
try testing.expectEqual(wtf16_len, calcWtf16LeLen(wtf8));
try testing.expectEqual(false, checkWtf8ToWtf16LeOverflow(wtf8, &wtf16_buf));
const wtf16 = wtf16_buf[0..wtf16_len];
var roundtripped_buf: [32]u8 = undefined;

View File

@ -6,6 +6,7 @@ const std = @import("std.zig");
const tokenizer = @import("zig/tokenizer.zig");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Io = std.Io;
const Writer = std.Io.Writer;
pub const ErrorBundle = @import("zig/ErrorBundle.zig");
@ -52,9 +53,9 @@ pub const Color = enum {
/// Assume stderr is a terminal.
on,
pub fn get_tty_conf(color: Color) std.Io.tty.Config {
pub fn get_tty_conf(color: Color) Io.tty.Config {
return switch (color) {
.auto => std.Io.tty.detectConfig(std.fs.File.stderr()),
.auto => Io.tty.detectConfig(std.fs.File.stderr()),
.on => .escape_codes,
.off => .no_color,
};
@ -323,7 +324,7 @@ pub const BuildId = union(enum) {
try std.testing.expectError(error.InvalidBuildIdStyle, parse("yaddaxxx"));
}
pub fn format(id: BuildId, writer: *std.Io.Writer) std.Io.Writer.Error!void {
pub fn format(id: BuildId, writer: *Writer) Writer.Error!void {
switch (id) {
.none, .fast, .uuid, .sha1, .md5 => {
try writer.writeAll(@tagName(id));
@ -558,7 +559,7 @@ test isUnderscore {
/// If the source can be UTF-16LE encoded, this function asserts that `gpa`
/// will align a byte-sized allocation to at least 2. Allocators that don't do
/// this are rare.
pub fn readSourceFileToEndAlloc(gpa: Allocator, file_reader: *std.fs.File.Reader) ![:0]u8 {
pub fn readSourceFileToEndAlloc(gpa: Allocator, file_reader: *Io.File.Reader) ![:0]u8 {
var buffer: std.ArrayList(u8) = .empty;
defer buffer.deinit(gpa);
@ -620,8 +621,8 @@ pub fn putAstErrorsIntoBundle(
try wip_errors.addZirErrorMessages(zir, tree, tree.source, path);
}
pub fn resolveTargetQueryOrFatal(target_query: std.Target.Query) std.Target {
return std.zig.system.resolveTargetQuery(target_query) catch |err|
pub fn resolveTargetQueryOrFatal(io: Io, target_query: std.Target.Query) std.Target {
return std.zig.system.resolveTargetQuery(io, target_query) catch |err|
std.process.fatal("unable to resolve target: {s}", .{@errorName(err)});
}
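// Target resolution now threads an `Io` handle through explicitly. A minimal
// sketch of resolving the fully native target via the fatal wrapper above,
// with `std.testing.io` standing in for a real event loop's `Io` handle and an
// empty query meaning "detect everything natively":
const io = std.testing.io;
const native_target = std.zig.resolveTargetQueryOrFatal(io, .{});
_ = native_target;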

View File

@ -6,12 +6,13 @@
//! There is one special encoding for this data structure. If both arrays are
//! empty, it means there are no errors. This special encoding exists so that
//! heap allocation is not needed in the common case of no errors.
const ErrorBundle = @This();
const std = @import("std");
const ErrorBundle = @This();
const Io = std.Io;
const Writer = std.Io.Writer;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Writer = std.Io.Writer;
string_bytes: []const u8,
/// The first thing in this array is an `ErrorMessageList`.
@ -156,7 +157,7 @@ pub fn nullTerminatedString(eb: ErrorBundle, index: String) [:0]const u8 {
}
pub const RenderOptions = struct {
ttyconf: std.Io.tty.Config,
ttyconf: Io.tty.Config,
include_reference_trace: bool = true,
include_source_line: bool = true,
include_log_text: bool = true,
@ -190,7 +191,7 @@ fn renderErrorMessageToWriter(
err_msg_index: MessageIndex,
w: *Writer,
kind: []const u8,
color: std.Io.tty.Color,
color: Io.tty.Color,
indent: usize,
) (Writer.Error || std.posix.UnexpectedError)!void {
const ttyconf = options.ttyconf;
@ -806,7 +807,7 @@ pub const Wip = struct {
};
defer bundle.deinit(std.testing.allocator);
const ttyconf: std.Io.tty.Config = .no_color;
const ttyconf: Io.tty.Config = .no_color;
var bundle_buf: Writer.Allocating = .init(std.testing.allocator);
const bundle_bw = &bundle_buf.interface;
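// For context, a hedged sketch of rendering a bundle through the aliased types,
// assuming `ErrorBundle.renderToWriter(options, writer)` keeps its existing
// shape and `bundle` is an already-built `ErrorBundle`:
var aw: Writer.Allocating = .init(std.testing.allocator);
defer aw.deinit();
try bundle.renderToWriter(.{ .ttyconf = .no_color }, &aw.interface);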

View File

@ -329,7 +329,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
defer search_dir.close();
if (self.include_dir == null) {
if (search_dir.accessZ(include_dir_example_file, .{})) |_| {
if (search_dir.access(include_dir_example_file, .{})) |_| {
self.include_dir = try allocator.dupeZ(u8, search_path);
} else |err| switch (err) {
error.FileNotFound => {},
@ -338,7 +338,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, args: FindNativeOptions) F
}
if (self.sys_include_dir == null) {
if (search_dir.accessZ(sys_include_dir_example_file, .{})) |_| {
if (search_dir.access(sys_include_dir_example_file, .{})) |_| {
self.sys_include_dir = try allocator.dupeZ(u8, search_path);
} else |err| switch (err) {
error.FileNotFound => {},
@ -382,7 +382,7 @@ fn findNativeIncludeDirWindows(
};
defer dir.close();
dir.accessZ("stdlib.h", .{}) catch |err| switch (err) {
dir.access("stdlib.h", .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => return error.FileSystem,
};
@ -429,7 +429,7 @@ fn findNativeCrtDirWindows(
};
defer dir.close();
dir.accessZ("ucrt.lib", .{}) catch |err| switch (err) {
dir.access("ucrt.lib", .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => return error.FileSystem,
};
@ -496,7 +496,7 @@ fn findNativeKernel32LibDir(
};
defer dir.close();
dir.accessZ("kernel32.lib", .{}) catch |err| switch (err) {
dir.access("kernel32.lib", .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => return error.FileSystem,
};
@ -531,7 +531,7 @@ fn findNativeMsvcIncludeDir(
};
defer dir.close();
dir.accessZ("vcruntime.h", .{}) catch |err| switch (err) {
dir.access("vcruntime.h", .{}) catch |err| switch (err) {
error.FileNotFound => return error.LibCStdLibHeaderNotFound,
else => return error.FileSystem,
};
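// Each probe above follows the same slice-based `Dir.access` pattern; a
// minimal stand-alone version (the directory handle and file name are
// illustrative):
fn hasHeader(dir: std.fs.Dir, name: []const u8) !bool {
    dir.access(name, .{}) catch |err| switch (err) {
        error.FileNotFound => return false,
        else => return error.FileSystem,
    };
    return true;
}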

View File

@ -1,3 +1,14 @@
const builtin = @import("builtin");
const std = @import("../std.zig");
const mem = std.mem;
const elf = std.elf;
const fs = std.fs;
const assert = std.debug.assert;
const Target = std.Target;
const native_endian = builtin.cpu.arch.endian();
const posix = std.posix;
const Io = std.Io;
pub const NativePaths = @import("system/NativePaths.zig");
pub const windows = @import("system/windows.zig");
@ -199,14 +210,14 @@ pub const DetectError = error{
OSVersionDetectionFail,
Unexpected,
ProcessNotFound,
};
} || Io.Cancelable;
/// Given a `Target.Query`, which specifies in detail which parts of the
/// target should be detected natively, which should be standard or default,
/// and which are provided explicitly, this function resolves the native
/// components by detecting the native system, and then resolves
/// standard/default parts relative to that.
pub fn resolveTargetQuery(query: Target.Query) DetectError!Target {
pub fn resolveTargetQuery(io: Io, query: Target.Query) DetectError!Target {
// Until https://github.com/ziglang/zig/issues/4592 is implemented (support detecting the
// native CPU architecture as being different than the current target), we use this:
const query_cpu_arch = query.cpu_arch orelse builtin.cpu.arch;
@ -356,10 +367,10 @@ pub fn resolveTargetQuery(query: Target.Query) DetectError!Target {
}
var cpu = switch (query.cpu_model) {
.native => detectNativeCpuAndFeatures(query_cpu_arch, os, query),
.native => detectNativeCpuAndFeatures(io, query_cpu_arch, os, query),
.baseline => Target.Cpu.baseline(query_cpu_arch, os),
.determined_by_arch_os => if (query.cpu_arch == null)
detectNativeCpuAndFeatures(query_cpu_arch, os, query)
detectNativeCpuAndFeatures(io, query_cpu_arch, os, query)
else
Target.Cpu.baseline(query_cpu_arch, os),
.explicit => |model| model.toCpu(query_cpu_arch),
@ -411,7 +422,34 @@ pub fn resolveTargetQuery(query: Target.Query) DetectError!Target {
query.cpu_features_sub,
);
var result = try detectAbiAndDynamicLinker(cpu, os, query);
var result = detectAbiAndDynamicLinker(io, cpu, os, query) catch |err| switch (err) {
error.Canceled => |e| return e,
error.Unexpected => |e| return e,
error.WouldBlock => return error.Unexpected,
error.BrokenPipe => return error.Unexpected,
error.ConnectionResetByPeer => return error.Unexpected,
error.Timeout => return error.Unexpected,
error.NotOpenForReading => return error.Unexpected,
error.SocketUnconnected => return error.Unexpected,
error.AccessDenied,
error.ProcessNotFound,
error.SymLinkLoop,
error.ProcessFdQuotaExceeded,
error.SystemFdQuotaExceeded,
error.SystemResources,
error.IsDir,
error.DeviceBusy,
error.InputOutput,
error.LockViolation,
error.FileSystem,
error.UnableToOpenElfFile,
error.UnhelpfulFile,
error.InvalidElfFile,
error.RelativeShebang,
=> return defaultAbiAndDynamicLinker(cpu, os, query),
};
// These CPU feature hacks have to come after ABI detection.
{
@ -483,7 +521,7 @@ fn updateCpuFeatures(
set.removeFeatureSet(sub_set);
}
fn detectNativeCpuAndFeatures(cpu_arch: Target.Cpu.Arch, os: Target.Os, query: Target.Query) ?Target.Cpu {
fn detectNativeCpuAndFeatures(io: Io, cpu_arch: Target.Cpu.Arch, os: Target.Os, query: Target.Query) ?Target.Cpu {
// Here we switch on a comptime value rather than `cpu_arch`. This is valid because `cpu_arch`,
// although it is a runtime value, is guaranteed to be one of the architectures in the set
// of the respective switch prong.
@ -494,7 +532,7 @@ fn detectNativeCpuAndFeatures(cpu_arch: Target.Cpu.Arch, os: Target.Os, query: T
}
switch (builtin.os.tag) {
.linux => return linux.detectNativeCpuAndFeatures(),
.linux => return linux.detectNativeCpuAndFeatures(io),
.macos => return darwin.macos.detectNativeCpuAndFeatures(),
.windows => return windows.detectNativeCpuAndFeatures(),
else => {},
@ -506,53 +544,42 @@ fn detectNativeCpuAndFeatures(cpu_arch: Target.Cpu.Arch, os: Target.Os, query: T
}
pub const AbiAndDynamicLinkerFromFileError = error{
FileSystem,
SystemResources,
Canceled,
AccessDenied,
Unexpected,
Unseekable,
ReadFailed,
EndOfStream,
NameTooLong,
StaticElfFile,
InvalidElfFile,
StreamTooLong,
Timeout,
SymLinkLoop,
SystemResources,
ProcessFdQuotaExceeded,
SystemFdQuotaExceeded,
UnableToReadElfFile,
InvalidElfClass,
InvalidElfVersion,
InvalidElfEndian,
InvalidElfFile,
InvalidElfMagic,
Unexpected,
UnexpectedEndOfFile,
NameTooLong,
ProcessNotFound,
StaticElfFile,
IsDir,
WouldBlock,
InputOutput,
BrokenPipe,
ConnectionResetByPeer,
NotOpenForReading,
SocketUnconnected,
LockViolation,
FileSystem,
};
pub fn abiAndDynamicLinkerFromFile(
file: fs.File,
fn abiAndDynamicLinkerFromFile(
file_reader: *Io.File.Reader,
header: *const elf.Header,
cpu: Target.Cpu,
os: Target.Os,
ld_info_list: []const LdInfo,
query: Target.Query,
) AbiAndDynamicLinkerFromFileError!Target {
var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 align(@alignOf(elf.Elf64_Ehdr)) = undefined;
_ = try preadAtLeast(file, &hdr_buf, 0, hdr_buf.len);
const hdr32: *elf.Elf32_Ehdr = @ptrCast(&hdr_buf);
const hdr64: *elf.Elf64_Ehdr = @ptrCast(&hdr_buf);
if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI.DATA]) {
elf.ELFDATA2LSB => .little,
elf.ELFDATA2MSB => .big,
else => return error.InvalidElfEndian,
};
const need_bswap = elf_endian != native_endian;
if (hdr32.e_ident[elf.EI.VERSION] != 1) return error.InvalidElfVersion;
const is_64 = switch (hdr32.e_ident[elf.EI.CLASS]) {
elf.ELFCLASS32 => false,
elf.ELFCLASS64 => true,
else => return error.InvalidElfClass,
};
var phoff = elfInt(is_64, need_bswap, hdr32.e_phoff, hdr64.e_phoff);
const phentsize = elfInt(is_64, need_bswap, hdr32.e_phentsize, hdr64.e_phentsize);
const phnum = elfInt(is_64, need_bswap, hdr32.e_phnum, hdr64.e_phnum);
const io = file_reader.io;
var result: Target = .{
.cpu = cpu,
.os = os,
@ -563,36 +590,19 @@ pub fn abiAndDynamicLinkerFromFile(
var rpath_offset: ?u64 = null; // Found inside PT_DYNAMIC
const look_for_ld = query.dynamic_linker.get() == null;
var ph_buf: [16 * @sizeOf(elf.Elf64_Phdr)]u8 align(@alignOf(elf.Elf64_Phdr)) = undefined;
if (phentsize > @sizeOf(elf.Elf64_Phdr)) return error.InvalidElfFile;
var ph_i: u16 = 0;
var got_dyn_section: bool = false;
while (ph_i < phnum) {
// Reserve some bytes so that we can deref the 64-bit struct fields
// even when the ELF file is 32-bits.
const ph_reserve: usize = @sizeOf(elf.Elf64_Phdr) - @sizeOf(elf.Elf32_Phdr);
const ph_read_byte_len = try preadAtLeast(file, ph_buf[0 .. ph_buf.len - ph_reserve], phoff, phentsize);
var ph_buf_i: usize = 0;
while (ph_buf_i < ph_read_byte_len and ph_i < phnum) : ({
ph_i += 1;
phoff += phentsize;
ph_buf_i += phentsize;
}) {
const ph32: *elf.Elf32_Phdr = @ptrCast(@alignCast(&ph_buf[ph_buf_i]));
const ph64: *elf.Elf64_Phdr = @ptrCast(@alignCast(&ph_buf[ph_buf_i]));
const p_type = elfInt(is_64, need_bswap, ph32.p_type, ph64.p_type);
switch (p_type) {
{
var it = header.iterateProgramHeaders(file_reader);
while (try it.next()) |phdr| switch (phdr.p_type) {
elf.PT_INTERP => {
got_dyn_section = true;
if (look_for_ld) {
const p_offset = elfInt(is_64, need_bswap, ph32.p_offset, ph64.p_offset);
const p_filesz = elfInt(is_64, need_bswap, ph32.p_filesz, ph64.p_filesz);
const p_filesz = phdr.p_filesz;
if (p_filesz > result.dynamic_linker.buffer.len) return error.NameTooLong;
const filesz: usize = @intCast(p_filesz);
_ = try preadAtLeast(file, result.dynamic_linker.buffer[0..filesz], p_offset, filesz);
try file_reader.seekTo(phdr.p_offset);
try file_reader.interface.readSliceAll(result.dynamic_linker.buffer[0..filesz]);
// PT_INTERP includes a null byte in filesz.
const len = filesz - 1;
// dynamic_linker.max_byte is "max", not "len".
@ -614,119 +624,56 @@ pub fn abiAndDynamicLinkerFromFile(
elf.PT_DYNAMIC => {
got_dyn_section = true;
if (builtin.target.os.tag == .linux and result.isGnuLibC() and
query.glibc_version == null)
{
var dyn_off = elfInt(is_64, need_bswap, ph32.p_offset, ph64.p_offset);
const p_filesz = elfInt(is_64, need_bswap, ph32.p_filesz, ph64.p_filesz);
const dyn_size: usize = if (is_64) @sizeOf(elf.Elf64_Dyn) else @sizeOf(elf.Elf32_Dyn);
const dyn_num = p_filesz / dyn_size;
var dyn_buf: [16 * @sizeOf(elf.Elf64_Dyn)]u8 align(@alignOf(elf.Elf64_Dyn)) = undefined;
var dyn_i: usize = 0;
dyn: while (dyn_i < dyn_num) {
// Reserve some bytes so that we can deref the 64-bit struct fields
// even when the ELF file is 32-bits.
const dyn_reserve: usize = @sizeOf(elf.Elf64_Dyn) - @sizeOf(elf.Elf32_Dyn);
const dyn_read_byte_len = try preadAtLeast(
file,
dyn_buf[0 .. dyn_buf.len - dyn_reserve],
dyn_off,
dyn_size,
);
var dyn_buf_i: usize = 0;
while (dyn_buf_i < dyn_read_byte_len and dyn_i < dyn_num) : ({
dyn_i += 1;
dyn_off += dyn_size;
dyn_buf_i += dyn_size;
}) {
const dyn32: *elf.Elf32_Dyn = @ptrCast(@alignCast(&dyn_buf[dyn_buf_i]));
const dyn64: *elf.Elf64_Dyn = @ptrCast(@alignCast(&dyn_buf[dyn_buf_i]));
const tag = elfInt(is_64, need_bswap, dyn32.d_tag, dyn64.d_tag);
const val = elfInt(is_64, need_bswap, dyn32.d_val, dyn64.d_val);
if (tag == elf.DT_RUNPATH) {
rpath_offset = val;
break :dyn;
}
if (builtin.target.os.tag == .linux and result.isGnuLibC() and query.glibc_version == null) {
var dyn_it = header.iterateDynamicSection(file_reader, phdr.p_offset, phdr.p_filesz);
while (try dyn_it.next()) |dyn| {
if (dyn.d_tag == elf.DT_RUNPATH) {
rpath_offset = dyn.d_val;
break;
}
}
}
},
else => continue,
}
}
};
}
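// A minimal sketch of the same program-header walk with the streaming
// iterator, assuming a parsed `elf.Header` named `header` and a
// `*Io.File.Reader` named `file_reader` as in this function:
var ph_it = header.iterateProgramHeaders(file_reader);
while (try ph_it.next()) |phdr| {
    if (phdr.p_type == elf.PT_INTERP) {
        // phdr.p_offset / phdr.p_filesz locate the NUL-terminated interpreter path.
    }
}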
if (!got_dyn_section) {
return error.StaticElfFile;
}
if (builtin.target.os.tag == .linux and result.isGnuLibC() and
query.glibc_version == null)
{
const shstrndx = elfInt(is_64, need_bswap, hdr32.e_shstrndx, hdr64.e_shstrndx);
var shoff = elfInt(is_64, need_bswap, hdr32.e_shoff, hdr64.e_shoff);
const shentsize = elfInt(is_64, need_bswap, hdr32.e_shentsize, hdr64.e_shentsize);
const str_section_off = shoff + @as(u64, shentsize) * @as(u64, shstrndx);
var sh_buf: [16 * @sizeOf(elf.Elf64_Shdr)]u8 align(@alignOf(elf.Elf64_Shdr)) = undefined;
if (sh_buf.len < shentsize) return error.InvalidElfFile;
_ = try preadAtLeast(file, &sh_buf, str_section_off, shentsize);
const shstr32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf));
const shstr64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf));
const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset);
const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size);
var strtab_buf: [4096:0]u8 = undefined;
const shstrtab_len = @min(shstrtab_size, strtab_buf.len);
const shstrtab_read_len = try preadAtLeast(file, &strtab_buf, shstrtab_off, shstrtab_len);
const shstrtab = strtab_buf[0..shstrtab_read_len];
const shnum = elfInt(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum);
var sh_i: u16 = 0;
const dynstr: ?struct { offset: u64, size: u64 } = find_dyn_str: while (sh_i < shnum) {
// Reserve some bytes so that we can deref the 64-bit struct fields
// even when the ELF file is 32-bits.
const sh_reserve: usize = @sizeOf(elf.Elf64_Shdr) - @sizeOf(elf.Elf32_Shdr);
const sh_read_byte_len = try preadAtLeast(
file,
sh_buf[0 .. sh_buf.len - sh_reserve],
shoff,
shentsize,
);
var sh_buf_i: usize = 0;
while (sh_buf_i < sh_read_byte_len and sh_i < shnum) : ({
sh_i += 1;
shoff += shentsize;
sh_buf_i += shentsize;
}) {
const sh32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i]));
const sh64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i]));
const sh_name_off = elfInt(is_64, need_bswap, sh32.sh_name, sh64.sh_name);
const sh_name = mem.sliceTo(shstrtab[sh_name_off..], 0);
if (mem.eql(u8, sh_name, ".dynstr")) {
break :find_dyn_str .{
.offset = elfInt(is_64, need_bswap, sh32.sh_offset, sh64.sh_offset),
.size = elfInt(is_64, need_bswap, sh32.sh_size, sh64.sh_size),
if (builtin.target.os.tag == .linux and result.isGnuLibC() and query.glibc_version == null) {
const str_section_off = header.shoff + @as(u64, header.shentsize) * @as(u64, header.shstrndx);
try file_reader.seekTo(str_section_off);
const shstr = try elf.takeSectionHeader(&file_reader.interface, header.is_64, header.endian);
var strtab_buf: [4096]u8 = undefined;
const shstrtab = strtab_buf[0..@min(shstr.sh_size, strtab_buf.len)];
try file_reader.seekTo(shstr.sh_offset);
try file_reader.interface.readSliceAll(shstrtab);
const dynstr: ?struct { offset: u64, size: u64 } = find_dyn_str: {
var it = header.iterateSectionHeaders(file_reader);
while (try it.next()) |shdr| {
const end = mem.findScalarPos(u8, shstrtab, shdr.sh_name, 0) orelse continue;
const sh_name = shstrtab[shdr.sh_name..end :0];
if (mem.eql(u8, sh_name, ".dynstr")) break :find_dyn_str .{
.offset = shdr.sh_offset,
.size = shdr.sh_size,
};
} else break :find_dyn_str null;
};
}
}
} else null;
if (dynstr) |ds| {
if (rpath_offset) |rpoff| {
if (rpoff > ds.size) return error.InvalidElfFile;
const rpoff_file = ds.offset + rpoff;
const rp_max_size = ds.size - rpoff;
const strtab_len = @min(rp_max_size, strtab_buf.len);
const strtab_read_len = try preadAtLeast(file, &strtab_buf, rpoff_file, strtab_len);
const strtab = strtab_buf[0..strtab_read_len];
try file_reader.seekTo(rpoff_file);
const rpath_list = try file_reader.interface.takeSentinel(0);
if (rpath_list.len > rp_max_size) return error.StreamTooLong;
const rpath_list = mem.sliceTo(strtab, 0);
var it = mem.tokenizeScalar(u8, rpath_list, ':');
while (it.next()) |rpath| {
if (glibcVerFromRPath(rpath)) |ver| {
if (glibcVerFromRPath(io, rpath)) |ver| {
result.os.version_range.linux.glibc = ver;
return result;
} else |err| switch (err) {
@ -741,7 +688,7 @@ pub fn abiAndDynamicLinkerFromFile(
// There is no DT_RUNPATH so we try to find libc.so.6 inside the same
// directory as the dynamic linker.
if (fs.path.dirname(dl_path)) |rpath| {
if (glibcVerFromRPath(rpath)) |ver| {
if (glibcVerFromRPath(io, rpath)) |ver| {
result.os.version_range.linux.glibc = ver;
return result;
} else |err| switch (err) {
@ -755,8 +702,6 @@ pub fn abiAndDynamicLinkerFromFile(
var link_buf: [posix.PATH_MAX]u8 = undefined;
const link_name = posix.readlink(dl_path, &link_buf) catch |err| switch (err) {
error.NameTooLong => unreachable,
error.InvalidUtf8 => unreachable, // WASI only
error.InvalidWtf8 => unreachable, // Windows only
error.BadPathName => unreachable, // Windows only
error.UnsupportedReparsePointType => unreachable, // Windows only
error.NetworkNotFound => unreachable, // Windows only
@ -806,7 +751,7 @@ pub fn abiAndDynamicLinkerFromFile(
@memcpy(path_buf[index..][0..abi.len], abi);
index += abi.len;
const rpath = path_buf[0..index];
if (glibcVerFromRPath(rpath)) |ver| {
if (glibcVerFromRPath(io, rpath)) |ver| {
result.os.version_range.linux.glibc = ver;
return result;
} else |err| switch (err) {
@ -845,29 +790,25 @@ test glibcVerFromLinkName {
try std.testing.expectError(error.InvalidGnuLibCVersion, glibcVerFromLinkName("ld-2.37.4.5.so", "ld-"));
}
fn glibcVerFromRPath(rpath: []const u8) !std.SemanticVersion {
fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion {
var dir = fs.cwd().openDir(rpath, .{}) catch |err| switch (err) {
error.NameTooLong => unreachable,
error.InvalidUtf8 => unreachable, // WASI only
error.InvalidWtf8 => unreachable, // Windows-only
error.BadPathName => unreachable,
error.DeviceBusy => unreachable,
error.NetworkNotFound => unreachable, // Windows-only
error.NameTooLong => return error.Unexpected,
error.BadPathName => return error.Unexpected,
error.DeviceBusy => return error.Unexpected,
error.NetworkNotFound => return error.Unexpected, // Windows-only
error.FileNotFound,
error.NotDir,
error.AccessDenied,
error.PermissionDenied,
error.NoDevice,
=> return error.GLibCNotFound,
error.FileNotFound => return error.GLibCNotFound,
error.NotDir => return error.GLibCNotFound,
error.AccessDenied => return error.GLibCNotFound,
error.PermissionDenied => return error.GLibCNotFound,
error.NoDevice => return error.GLibCNotFound,
error.ProcessNotFound,
error.ProcessFdQuotaExceeded,
error.SystemFdQuotaExceeded,
error.SystemResources,
error.SymLinkLoop,
error.Unexpected,
=> |e| return e,
error.ProcessFdQuotaExceeded => |e| return e,
error.SystemFdQuotaExceeded => |e| return e,
error.SystemResources => |e| return e,
error.SymLinkLoop => |e| return e,
error.Unexpected => |e| return e,
error.Canceled => |e| return e,
};
defer dir.close();
@ -879,143 +820,85 @@ fn glibcVerFromRPath(rpath: []const u8) !std.SemanticVersion {
// .dynstr section, and finding the max version number of symbols
// that start with "GLIBC_2.".
const glibc_so_basename = "libc.so.6";
var f = dir.openFile(glibc_so_basename, .{}) catch |err| switch (err) {
error.NameTooLong => unreachable,
error.InvalidUtf8 => unreachable, // WASI only
error.InvalidWtf8 => unreachable, // Windows only
error.BadPathName => unreachable, // Windows only
error.PipeBusy => unreachable, // Windows-only
error.SharingViolation => unreachable, // Windows-only
error.NetworkNotFound => unreachable, // Windows-only
error.AntivirusInterference => unreachable, // Windows-only
error.FileLocksNotSupported => unreachable, // No lock requested.
error.NoSpaceLeft => unreachable, // read-only
error.PathAlreadyExists => unreachable, // read-only
error.DeviceBusy => unreachable, // read-only
error.FileBusy => unreachable, // read-only
error.WouldBlock => unreachable, // not using O_NONBLOCK
error.NoDevice => unreachable, // not asking for a special device
error.AccessDenied,
error.PermissionDenied,
error.FileNotFound,
error.NotDir,
error.IsDir,
=> return error.GLibCNotFound,
var file = dir.openFile(glibc_so_basename, .{}) catch |err| switch (err) {
error.NameTooLong => return error.Unexpected,
error.BadPathName => return error.Unexpected,
error.PipeBusy => return error.Unexpected, // Windows-only
error.SharingViolation => return error.Unexpected, // Windows-only
error.NetworkNotFound => return error.Unexpected, // Windows-only
error.AntivirusInterference => return error.Unexpected, // Windows-only
error.FileLocksNotSupported => return error.Unexpected, // No lock requested.
error.NoSpaceLeft => return error.Unexpected, // read-only
error.PathAlreadyExists => return error.Unexpected, // read-only
error.DeviceBusy => return error.Unexpected, // read-only
error.FileBusy => return error.Unexpected, // read-only
error.NoDevice => return error.Unexpected, // not asking for a special device
error.FileTooBig => return error.Unexpected,
error.WouldBlock => return error.Unexpected, // not opened in non-blocking
error.ProcessNotFound,
error.ProcessFdQuotaExceeded,
error.SystemFdQuotaExceeded,
error.SystemResources,
error.SymLinkLoop,
error.Unexpected,
=> |e| return e,
error.AccessDenied => return error.GLibCNotFound,
error.PermissionDenied => return error.GLibCNotFound,
error.FileNotFound => return error.GLibCNotFound,
error.NotDir => return error.GLibCNotFound,
error.IsDir => return error.GLibCNotFound,
error.ProcessNotFound => |e| return e,
error.ProcessFdQuotaExceeded => |e| return e,
error.SystemFdQuotaExceeded => |e| return e,
error.SystemResources => |e| return e,
error.SymLinkLoop => |e| return e,
error.Unexpected => |e| return e,
error.Canceled => |e| return e,
};
defer f.close();
defer file.close();
return glibcVerFromSoFile(f) catch |err| switch (err) {
// Empirically, glibc 2.34 libc.so .dynstr section is 32441 bytes on my system.
var buffer: [8000]u8 = undefined;
var file_reader: Io.File.Reader = .initAdapted(file, io, &buffer);
return glibcVerFromSoFile(&file_reader) catch |err| switch (err) {
error.InvalidElfMagic,
error.InvalidElfEndian,
error.InvalidElfClass,
error.InvalidElfFile,
error.InvalidElfVersion,
error.InvalidGnuLibCVersion,
error.UnexpectedEndOfFile,
error.EndOfStream,
=> return error.GLibCNotFound,
error.SystemResources,
error.UnableToReadElfFile,
error.Unexpected,
error.FileSystem,
error.ProcessNotFound,
=> |e| return e,
error.ReadFailed => return file_reader.err.?,
else => |e| return e,
};
}
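// A minimal sketch of wrapping a blocking `std.fs.File` in the new streaming
// reader, mirroring the `initAdapted` call above (the path, buffer size, and
// `io` handle are illustrative):
var file = try fs.cwd().openFile("libc.so.6", .{});
defer file.close();
var reader_buffer: [8000]u8 = undefined;
var adapted_reader: Io.File.Reader = .initAdapted(file, io, &reader_buffer);
_ = &adapted_reader;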
fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 align(@alignOf(elf.Elf64_Ehdr)) = undefined;
_ = try preadAtLeast(file, &hdr_buf, 0, hdr_buf.len);
const hdr32: *elf.Elf32_Ehdr = @ptrCast(&hdr_buf);
const hdr64: *elf.Elf64_Ehdr = @ptrCast(&hdr_buf);
if (!mem.eql(u8, hdr32.e_ident[0..4], elf.MAGIC)) return error.InvalidElfMagic;
const elf_endian: std.builtin.Endian = switch (hdr32.e_ident[elf.EI.DATA]) {
elf.ELFDATA2LSB => .little,
elf.ELFDATA2MSB => .big,
else => return error.InvalidElfEndian,
fn glibcVerFromSoFile(file_reader: *Io.File.Reader) !std.SemanticVersion {
const header = try elf.Header.read(&file_reader.interface);
const str_section_off = header.shoff + @as(u64, header.shentsize) * @as(u64, header.shstrndx);
try file_reader.seekTo(str_section_off);
const shstr = try elf.takeSectionHeader(&file_reader.interface, header.is_64, header.endian);
var strtab_buf: [4096]u8 = undefined;
const shstrtab = strtab_buf[0..@min(shstr.sh_size, strtab_buf.len)];
try file_reader.seekTo(shstr.sh_offset);
try file_reader.interface.readSliceAll(shstrtab);
const dynstr: struct { offset: u64, size: u64 } = find_dyn_str: {
var it = header.iterateSectionHeaders(file_reader);
while (try it.next()) |shdr| {
const end = mem.findScalarPos(u8, shstrtab, shdr.sh_name, 0) orelse continue;
const sh_name = shstrtab[shdr.sh_name..end :0];
if (mem.eql(u8, sh_name, ".dynstr")) break :find_dyn_str .{
.offset = shdr.sh_offset,
.size = shdr.sh_size,
};
const need_bswap = elf_endian != native_endian;
if (hdr32.e_ident[elf.EI.VERSION] != 1) return error.InvalidElfVersion;
const is_64 = switch (hdr32.e_ident[elf.EI.CLASS]) {
elf.ELFCLASS32 => false,
elf.ELFCLASS64 => true,
else => return error.InvalidElfClass,
};
const shstrndx = elfInt(is_64, need_bswap, hdr32.e_shstrndx, hdr64.e_shstrndx);
var shoff = elfInt(is_64, need_bswap, hdr32.e_shoff, hdr64.e_shoff);
const shentsize = elfInt(is_64, need_bswap, hdr32.e_shentsize, hdr64.e_shentsize);
const str_section_off = shoff + @as(u64, shentsize) * @as(u64, shstrndx);
var sh_buf: [16 * @sizeOf(elf.Elf64_Shdr)]u8 align(@alignOf(elf.Elf64_Shdr)) = undefined;
if (sh_buf.len < shentsize) return error.InvalidElfFile;
_ = try preadAtLeast(file, &sh_buf, str_section_off, shentsize);
const shstr32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf));
const shstr64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf));
const shstrtab_off = elfInt(is_64, need_bswap, shstr32.sh_offset, shstr64.sh_offset);
const shstrtab_size = elfInt(is_64, need_bswap, shstr32.sh_size, shstr64.sh_size);
var strtab_buf: [4096:0]u8 = undefined;
const shstrtab_len = @min(shstrtab_size, strtab_buf.len);
const shstrtab_read_len = try preadAtLeast(file, &strtab_buf, shstrtab_off, shstrtab_len);
const shstrtab = strtab_buf[0..shstrtab_read_len];
const shnum = elfInt(is_64, need_bswap, hdr32.e_shnum, hdr64.e_shnum);
var sh_i: u16 = 0;
const dynstr: struct { offset: u64, size: u64 } = find_dyn_str: while (sh_i < shnum) {
// Reserve some bytes so that we can deref the 64-bit struct fields
// even when the ELF file is 32-bits.
const sh_reserve: usize = @sizeOf(elf.Elf64_Shdr) - @sizeOf(elf.Elf32_Shdr);
const sh_read_byte_len = try preadAtLeast(
file,
sh_buf[0 .. sh_buf.len - sh_reserve],
shoff,
shentsize,
);
var sh_buf_i: usize = 0;
while (sh_buf_i < sh_read_byte_len and sh_i < shnum) : ({
sh_i += 1;
shoff += shentsize;
sh_buf_i += shentsize;
}) {
const sh32: *elf.Elf32_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i]));
const sh64: *elf.Elf64_Shdr = @ptrCast(@alignCast(&sh_buf[sh_buf_i]));
const sh_name_off = elfInt(is_64, need_bswap, sh32.sh_name, sh64.sh_name);
const sh_name = mem.sliceTo(shstrtab[sh_name_off..], 0);
if (mem.eql(u8, sh_name, ".dynstr")) {
break :find_dyn_str .{
.offset = elfInt(is_64, need_bswap, sh32.sh_offset, sh64.sh_offset),
.size = elfInt(is_64, need_bswap, sh32.sh_size, sh64.sh_size),
};
}
}
} else return error.InvalidGnuLibCVersion;
};
// Here we loop over all the strings in the dynstr string table, assuming that any
// strings that start with "GLIBC_2." indicate the existence of such a glibc version,
// and furthermore, that the system-installed glibc is at minimum that version.
// Empirically, glibc 2.34 libc.so .dynstr section is 32441 bytes on my system.
// Here I use double this value plus some headroom. This makes it only need
// a single read syscall here.
var buf: [80000]u8 = undefined;
if (buf.len < dynstr.size) return error.InvalidGnuLibCVersion;
const dynstr_size: usize = @intCast(dynstr.size);
const dynstr_bytes = buf[0..dynstr_size];
_ = try preadAtLeast(file, dynstr_bytes, dynstr.offset, dynstr_bytes.len);
var it = mem.splitScalar(u8, dynstr_bytes, 0);
var max_ver: std.SemanticVersion = .{ .major = 2, .minor = 2, .patch = 5 };
while (it.next()) |s| {
var offset: u64 = 0;
try file_reader.seekTo(dynstr.offset);
while (offset < dynstr.size) {
if (file_reader.interface.takeSentinel(0)) |s| {
if (mem.startsWith(u8, s, "GLIBC_2.")) {
const chopped = s["GLIBC_".len..];
const ver = Target.Query.parseVersion(chopped) catch |err| switch (err) {
@ -1027,7 +910,13 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
.lt, .eq => continue,
}
}
offset += s.len + 1;
} else |err| switch (err) {
error.EndOfStream, error.StreamTooLong => break,
error.ReadFailed => |e| return e,
}
}
return max_ver;
}
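// A minimal sketch exercising the scan above on an in-memory dynstr blob
// (the sample bytes are illustrative; the 2.2.5 floor and
// `Target.Query.parseVersion` mirror the function above):
const dynstr_bytes = "GLIBC_2.17\x00GLIBC_2.34\x00_rtld_global\x00";
var max_ver: std.SemanticVersion = .{ .major = 2, .minor = 2, .patch = 5 };
var name_it = mem.splitScalar(u8, dynstr_bytes, 0);
while (name_it.next()) |s| {
    if (!mem.startsWith(u8, s, "GLIBC_2.")) continue;
    const ver = Target.Query.parseVersion(s["GLIBC_".len..]) catch continue;
    if (ver.order(max_ver) == .gt) max_ver = ver;
}
// max_ver is now 2.34.0 for this sample input.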
@ -1044,11 +933,7 @@ fn glibcVerFromSoFile(file: fs.File) !std.SemanticVersion {
/// answer to these questions, or if there is a shebang line, then it chases the referenced
/// file recursively. If that does not provide the answer, then the function falls back to
/// defaults.
fn detectAbiAndDynamicLinker(
cpu: Target.Cpu,
os: Target.Os,
query: Target.Query,
) DetectError!Target {
fn detectAbiAndDynamicLinker(io: Io, cpu: Target.Cpu, os: Target.Os, query: Target.Query) !Target {
const native_target_has_ld = comptime Target.DynamicLinker.kind(builtin.os.tag) != .none;
const is_linux = builtin.target.os.tag == .linux;
const is_illumos = builtin.target.os.tag == .illumos;
@ -1111,21 +996,7 @@ fn detectAbiAndDynamicLinker(
const ld_info_list = ld_info_list_buffer[0..ld_info_list_len];
// Best case scenario: the executable is dynamically linked, and we can iterate
// over our own shared objects and find a dynamic linker.
const elf_file = elf_file: {
// This block looks for a shebang line in /usr/bin/env,
// if it finds one, then instead of using /usr/bin/env as the ELF file to examine, it uses the file it references instead,
// doing the same logic recursively in case it finds another shebang line.
var file_name: []const u8 = switch (os.tag) {
// Since /usr/bin/env is hard-coded into the shebang line of many portable scripts, it's a
// reasonably reliable path to start with.
else => "/usr/bin/env",
// Haiku does not have a /usr root directory.
.haiku => "/bin/env",
};
var file_reader: Io.File.Reader = undefined;
// According to `man 2 execve`:
//
// The kernel imposes a maximum length on the text
@ -1137,23 +1008,37 @@ fn detectAbiAndDynamicLinker(
// Tests show that bash and zsh consider 255 as total limit,
// *including* "#!" characters and ignoring newline.
// For safety, we set max length as 255 + \n (1).
var buffer: [255 + 1]u8 = undefined;
const max_shebang_line_size = 256;
var file_reader_buffer: [4096]u8 = undefined;
comptime assert(file_reader_buffer.len >= max_shebang_line_size);
// Best case scenario: the executable is dynamically linked, and we can iterate
// over our own shared objects and find a dynamic linker.
const header = elf_file: {
// This block looks for a shebang line in "/usr/bin/env". If it finds
// one, then instead of examining "/usr/bin/env" as the ELF file, it
// examines the file the shebang references, applying the same logic
// recursively in case that file also starts with a shebang line.
var file_name: []const u8 = switch (os.tag) {
// Since /usr/bin/env is hard-coded into the shebang line of many
// portable scripts, it's a reasonably reliable path to start with.
else => "/usr/bin/env",
// Haiku does not have a /usr root directory.
.haiku => "/bin/env",
};
while (true) {
// Interpreter path can be relative on Linux, but for simplicity we
// require it to be absolute; a relative shebang path is rejected below.
const file = fs.openFileAbsolute(file_name, .{}) catch |err| switch (err) {
error.NoSpaceLeft => unreachable,
error.NameTooLong => unreachable,
error.PathAlreadyExists => unreachable,
error.SharingViolation => unreachable,
error.InvalidUtf8 => unreachable, // WASI only
error.InvalidWtf8 => unreachable, // Windows only
error.BadPathName => unreachable,
error.PipeBusy => unreachable,
error.FileLocksNotSupported => unreachable,
error.WouldBlock => unreachable,
error.FileBusy => unreachable, // opened without write permissions
error.AntivirusInterference => unreachable, // Windows-only error
error.NoSpaceLeft => return error.Unexpected,
error.NameTooLong => return error.Unexpected,
error.PathAlreadyExists => return error.Unexpected,
error.SharingViolation => return error.Unexpected,
error.BadPathName => return error.Unexpected,
error.PipeBusy => return error.Unexpected,
error.FileLocksNotSupported => return error.Unexpected,
error.FileBusy => return error.Unexpected, // opened without write permissions
error.AntivirusInterference => return error.Unexpected, // Windows-only error
error.IsDir,
error.NotDir,
@ -1164,43 +1049,30 @@ fn detectAbiAndDynamicLinker(
error.NetworkNotFound,
error.FileTooBig,
error.Unexpected,
=> |e| {
std.log.warn("Encountered error: {s}, falling back to default ABI and dynamic linker.", .{@errorName(e)});
return defaultAbiAndDynamicLinker(cpu, os, query);
},
=> return error.UnableToOpenElfFile,
else => |e| return e,
};
var is_elf_file = false;
defer if (is_elf_file == false) file.close();
defer if (!is_elf_file) file.close();
// Shortest working interpreter path is "#!/i" (4)
// (interpreter is "/i", assuming all paths are absolute, like in above comment).
// ELF magic number length is also 4.
//
// If file is shorter than that, it is definitely not ELF file
// nor file with "shebang" line.
const min_len: usize = 4;
file_reader = .initAdapted(file, io, &file_reader_buffer);
file_name = undefined; // it aliases file_reader_buffer
const len = preadAtLeast(file, &buffer, 0, min_len) catch |err| switch (err) {
error.UnexpectedEndOfFile,
error.UnableToReadElfFile,
error.ProcessNotFound,
=> return defaultAbiAndDynamicLinker(cpu, os, query),
else => |e| return e,
const header = elf.Header.read(&file_reader.interface) catch |hdr_err| switch (hdr_err) {
error.EndOfStream,
error.InvalidElfMagic,
=> {
const shebang_line = file_reader.interface.takeSentinel('\n') catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
// It's neither an ELF file nor file with shebang line.
error.EndOfStream, error.StreamTooLong => return error.UnhelpfulFile,
};
const content = buffer[0..len];
if (mem.eql(u8, content[0..4], std.elf.MAGIC)) {
// It is very likely ELF file!
is_elf_file = true;
break :elf_file file;
} else if (mem.eql(u8, content[0..2], "#!")) {
if (!mem.startsWith(u8, shebang_line, "#!")) return error.UnhelpfulFile;
// We detected shebang, now parse entire line.
// Trim leading "#!", spaces and tabs.
const trimmed_line = mem.trimStart(u8, content[2..], &.{ ' ', '\t' });
const trimmed_line = mem.trimStart(u8, shebang_line[2..], &.{ ' ', '\t' });
// This line can have:
// * Interpreter path only,
@ -1210,41 +1082,38 @@ fn detectAbiAndDynamicLinker(
// Separate path and args.
const path_end = mem.indexOfAny(u8, path_maybe_args, &.{ ' ', '\t', 0 }) orelse path_maybe_args.len;
file_name = path_maybe_args[0..path_end];
const unvalidated_path = path_maybe_args[0..path_end];
file_name = if (fs.path.isAbsolute(unvalidated_path)) unvalidated_path else return error.RelativeShebang;
continue;
} else {
// Not a ELF file, not a shell script with "shebang line", invalid duck.
return defaultAbiAndDynamicLinker(cpu, os, query);
}
},
error.InvalidElfVersion,
error.InvalidElfClass,
error.InvalidElfEndian,
=> return error.InvalidElfFile,
error.ReadFailed => return file_reader.err.?,
};
is_elf_file = true;
break :elf_file header;
}
};
defer elf_file.close();
defer file_reader.file.close(io);
// TODO: inline this function and combine the buffer we already read above to find
// the possible shebang line with the buffer we use for the ELF header.
return abiAndDynamicLinkerFromFile(elf_file, cpu, os, ld_info_list, query) catch |err| switch (err) {
return abiAndDynamicLinkerFromFile(&file_reader, &header, cpu, os, ld_info_list, query) catch |err| switch (err) {
error.FileSystem,
error.SystemResources,
error.SymLinkLoop,
error.ProcessFdQuotaExceeded,
error.SystemFdQuotaExceeded,
error.ProcessNotFound,
error.Canceled,
=> |e| return e,
error.UnableToReadElfFile,
error.InvalidElfClass,
error.InvalidElfVersion,
error.InvalidElfEndian,
error.InvalidElfFile,
error.InvalidElfMagic,
error.Unexpected,
error.UnexpectedEndOfFile,
error.NameTooLong,
error.StaticElfFile,
// Finally, we fall back on the standard path.
=> |e| {
std.log.warn("Encountered error: {s}, falling back to default ABI and dynamic linker.", .{@errorName(e)});
error.ReadFailed => return file_reader.err.?,
else => |e| {
std.log.warn("encountered {t}; falling back to default ABI and dynamic linker", .{e});
return defaultAbiAndDynamicLinker(cpu, os, query);
},
};
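// A minimal sketch of the shebang-line parsing described above (the sample
// line is illustrative; a real caller takes it from `takeSentinel('\n')`):
const line = "#!/usr/bin/env -S something";
const trimmed = mem.trimStart(u8, line[2..], &.{ ' ', '\t' });
const path_end = mem.indexOfAny(u8, trimmed, &.{ ' ', '\t', 0 }) orelse trimmed.len;
const interp = trimmed[0..path_end]; // "/usr/bin/env"
_ = interp;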
@ -1269,59 +1138,6 @@ const LdInfo = struct {
abi: Target.Abi,
};
fn preadAtLeast(file: fs.File, buf: []u8, offset: u64, min_read_len: usize) !usize {
var i: usize = 0;
while (i < min_read_len) {
const len = file.pread(buf[i..], offset + i) catch |err| switch (err) {
error.OperationAborted => unreachable, // Windows-only
error.WouldBlock => unreachable, // Did not request blocking mode
error.Canceled => unreachable, // timerfd is unseekable
error.NotOpenForReading => unreachable,
error.SystemResources => return error.SystemResources,
error.IsDir => return error.UnableToReadElfFile,
error.BrokenPipe => return error.UnableToReadElfFile,
error.Unseekable => return error.UnableToReadElfFile,
error.ConnectionResetByPeer => return error.UnableToReadElfFile,
error.ConnectionTimedOut => return error.UnableToReadElfFile,
error.SocketNotConnected => return error.UnableToReadElfFile,
error.Unexpected => return error.Unexpected,
error.InputOutput => return error.FileSystem,
error.AccessDenied => return error.Unexpected,
error.ProcessNotFound => return error.ProcessNotFound,
error.LockViolation => return error.UnableToReadElfFile,
};
if (len == 0) return error.UnexpectedEndOfFile;
i += len;
}
return i;
}
fn elfInt(is_64: bool, need_bswap: bool, int_32: anytype, int_64: anytype) @TypeOf(int_64) {
if (is_64) {
if (need_bswap) {
return @byteSwap(int_64);
} else {
return int_64;
}
} else {
if (need_bswap) {
return @byteSwap(int_32);
} else {
return int_32;
}
}
}
const builtin = @import("builtin");
const std = @import("../std.zig");
const mem = std.mem;
const elf = std.elf;
const fs = std.fs;
const assert = std.debug.assert;
const Target = std.Target;
const native_endian = builtin.cpu.arch.endian();
const posix = std.posix;
test {
_ = NativePaths;

View File

@ -1,5 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
const std = @import("std");
const Io = std.Io;
const mem = std.mem;
const fs = std.fs;
const fmt = std.fmt;
@ -344,7 +346,7 @@ fn testParser(
expected_model: *const Target.Cpu.Model,
input: []const u8,
) !void {
var r: std.Io.Reader = .fixed(input);
var r: Io.Reader = .fixed(input);
const result = try parser.parse(arch, &r);
try testing.expectEqual(expected_model, result.?.model);
try testing.expect(expected_model.features.eql(result.?.features));
@ -357,7 +359,7 @@ fn testParser(
// When all the lines have been analyzed the finalize method is called.
fn CpuinfoParser(comptime impl: anytype) type {
return struct {
fn parse(arch: Target.Cpu.Arch, reader: *std.Io.Reader) !?Target.Cpu {
fn parse(arch: Target.Cpu.Arch, reader: *Io.Reader) !?Target.Cpu {
var obj: impl = .{};
while (try reader.takeDelimiter('\n')) |line| {
const colon_pos = mem.indexOfScalar(u8, line, ':') orelse continue;
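// A minimal sketch of the same line loop against an in-memory reader
// (the input text is illustrative):
var fixed_reader: Io.Reader = .fixed("processor\t: 0\nmodel name\t: example\n");
while (try fixed_reader.takeDelimiter('\n')) |cpuinfo_line| {
    const colon = mem.indexOfScalar(u8, cpuinfo_line, ':') orelse continue;
    _ = colon;
}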
@ -376,14 +378,14 @@ inline fn getAArch64CpuFeature(comptime feat_reg: []const u8) u64 {
);
}
pub fn detectNativeCpuAndFeatures() ?Target.Cpu {
pub fn detectNativeCpuAndFeatures(io: Io) ?Target.Cpu {
var file = fs.openFileAbsolute("/proc/cpuinfo", .{}) catch |err| switch (err) {
else => return null,
};
defer file.close();
var buffer: [4096]u8 = undefined; // "flags" lines can get pretty long.
var file_reader = file.reader(&buffer);
var file_reader = file.reader(io, &buffer);
const current_arch = builtin.cpu.arch;
switch (current_arch) {

View File

@ -360,7 +360,7 @@ pub fn updateFileOnDisk(file: *File, comp: *Compilation) !void {
file.stat = .{
.size = file.source.?.len,
.inode = 0, // dummy value
.mtime = 0, // dummy value
.mtime = .zero, // dummy value
};
}

Some files were not shown because too many files have changed in this diff.