std: updating to std.Io interface
got the build runner compiling
parent 066864a0bf
commit 47aa5a70a5
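The diff below applies one pattern over and over: an explicit `std.Io` instance is threaded from `b.graph.io` (or `std.testing.io` in tests) into file readers, the cache, the fuzzer, and the web server, instead of relying on implicit global I/O. For orientation, a minimal sketch of the new call shape; the helper name and sizes are illustrative, and only the `b.graph.io`, `file.reader(io, ...)`, and `allocRemaining` forms are taken from the diff itself:

fn readOutputFile(b: *std.Build, file: std.fs.File) ![]u8 {
    const io = b.graph.io; // the Io instance now lives on the build graph
    var buf: [1024]u8 = undefined;
    var file_reader = file.reader(io, &buf); // readers take the Io explicitly now
    // Illustrative size cap, mirroring the 400 KiB limit used in runAllowFail below.
    return file_reader.interface.allocRemaining(b.allocator, .limited(400 * 1024));
}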
@@ -1,5 +1,8 @@
-const std = @import("std");
+const runner = @This();
 const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
 const assert = std.debug.assert;
 const fmt = std.fmt;
 const mem = std.mem;
@@ -11,7 +14,6 @@ const WebServer = std.Build.WebServer;
 const Allocator = std.mem.Allocator;
 const fatal = std.process.fatal;
 const Writer = std.Io.Writer;
-const runner = @This();
 const tty = std.Io.tty;

 pub const root = @import("@build");
@@ -75,6 +77,7 @@ pub fn main() !void {
 .io = io,
 .arena = arena,
 .cache = .{
+.io = io,
 .gpa = arena,
 .manifest_dir = try local_cache_directory.handle.makeOpenPath("h", .{}),
 },
@@ -84,7 +87,7 @@ pub fn main() !void {
 .zig_lib_directory = zig_lib_directory,
 .host = .{
 .query = .{},
-.result = try std.zig.system.resolveTargetQuery(.{}),
+.result = try std.zig.system.resolveTargetQuery(io, .{}),
 },
 .time_report = false,
 };
@@ -121,7 +124,7 @@ pub fn main() !void {
 var watch = false;
 var fuzz: ?std.Build.Fuzz.Mode = null;
 var debounce_interval_ms: u16 = 50;
-var webui_listen: ?std.net.Address = null;
+var webui_listen: ?Io.net.IpAddress = null;

 if (try std.zig.EnvVar.ZIG_BUILD_ERROR_STYLE.get(arena)) |str| {
 if (std.meta.stringToEnum(ErrorStyle, str)) |style| {
@@ -288,11 +291,11 @@ pub fn main() !void {
 });
 };
 } else if (mem.eql(u8, arg, "--webui")) {
-webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
+if (webui_listen == null) webui_listen = .{ .ip6 = .loopback(0) };
 } else if (mem.startsWith(u8, arg, "--webui=")) {
 const addr_str = arg["--webui=".len..];
 if (std.mem.eql(u8, addr_str, "-")) fatal("web interface cannot listen on stdio", .{});
-webui_listen = std.net.Address.parseIpAndPort(addr_str) catch |err| {
+webui_listen = Io.net.IpAddress.parseLiteral(addr_str) catch |err| {
 fatal("invalid web UI address '{s}': {s}", .{ addr_str, @errorName(err) });
 };
 } else if (mem.eql(u8, arg, "--debug-log")) {
@@ -334,14 +337,10 @@ pub fn main() !void {
 watch = true;
 } else if (mem.eql(u8, arg, "--time-report")) {
 graph.time_report = true;
-if (webui_listen == null) {
-webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
-}
+if (webui_listen == null) webui_listen = .{ .ip6 = .loopback(0) };
 } else if (mem.eql(u8, arg, "--fuzz")) {
 fuzz = .{ .forever = undefined };
-if (webui_listen == null) {
-webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
-}
+if (webui_listen == null) webui_listen = .{ .ip6 = .loopback(0) };
 } else if (mem.startsWith(u8, arg, "--fuzz=")) {
 const value = arg["--fuzz=".len..];
 if (value.len == 0) fatal("missing argument to --fuzz", .{});
@@ -550,13 +549,15 @@ pub fn main() !void {

 var w: Watch = w: {
 if (!watch) break :w undefined;
-if (!Watch.have_impl) fatal("--watch not yet implemented for {s}", .{@tagName(builtin.os.tag)});
+if (!Watch.have_impl) fatal("--watch not yet implemented for {t}", .{builtin.os.tag});
 break :w try .init();
 };

 try run.thread_pool.init(thread_pool_options);
 defer run.thread_pool.deinit();

+const now = Io.Timestamp.now(io, .awake) catch |err| fatal("failed to collect timestamp: {t}", .{err});
+
 run.web_server = if (webui_listen) |listen_address| ws: {
 if (builtin.single_threaded) unreachable; // `fatal` above
 break :ws .init(.{
@@ -568,11 +569,12 @@ pub fn main() !void {
 .root_prog_node = main_progress_node,
 .watch = watch,
 .listen_address = listen_address,
+.base_timestamp = now,
 });
 } else null;

 if (run.web_server) |*ws| {
-ws.start() catch |err| fatal("failed to start web server: {s}", .{@errorName(err)});
+ws.start() catch |err| fatal("failed to start web server: {t}", .{err});
 }

 rebuild: while (true) : (if (run.error_style.clearOnUpdate()) {
@@ -755,6 +757,7 @@ fn runStepNames(
 fuzz: ?std.Build.Fuzz.Mode,
 ) !void {
 const gpa = run.gpa;
+const io = b.graph.io;
 const step_stack = &run.step_stack;
 const thread_pool = &run.thread_pool;

@@ -858,6 +861,7 @@ fn runStepNames(
 assert(mode == .limit);
 var f = std.Build.Fuzz.init(
 gpa,
+io,
 thread_pool,
 step_stack.keys(),
 parent_prog_node,
@@ -2,6 +2,7 @@
 const builtin = @import("builtin");

 const std = @import("std");
+const Io = std.Io;
 const fatal = std.process.fatal;
 const testing = std.testing;
 const assert = std.debug.assert;
@@ -16,6 +17,7 @@ var fba: std.heap.FixedBufferAllocator = .init(&fba_buffer);
 var fba_buffer: [8192]u8 = undefined;
 var stdin_buffer: [4096]u8 = undefined;
 var stdout_buffer: [4096]u8 = undefined;
+var runner_threaded_io: Io.Threaded = .init_single_threaded;

 /// Keep in sync with logic in `std.Build.addRunArtifact` which decides whether
 /// the test runner will communicate with the build runner via `std.zig.Server`.
@@ -63,8 +65,6 @@ pub fn main() void {
 fuzz_abi.fuzzer_init(.fromSlice(cache_dir));
 }

-fba.reset();
-
 if (listen) {
 return mainServer() catch @panic("internal test runner failure");
 } else {
@@ -74,7 +74,7 @@ pub fn main() void {

 fn mainServer() !void {
 @disableInstrumentation();
-var stdin_reader = std.fs.File.stdin().readerStreaming(&stdin_buffer);
+var stdin_reader = std.fs.File.stdin().readerStreaming(runner_threaded_io.io(), &stdin_buffer);
 var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
 var server = try std.zig.Server.init(.{
 .in = &stdin_reader.interface,
@@ -131,7 +131,7 @@ fn mainServer() !void {

 .run_test => {
 testing.allocator_instance = .{};
-testing.io_instance = .init(fba.allocator());
+testing.io_instance = .init(testing.allocator);
 log_err_count = 0;
 const index = try server.receiveBody_u32();
 const test_fn = builtin.test_functions[index];
@@ -154,7 +154,6 @@ fn mainServer() !void {
 },
 };
 testing.io_instance.deinit();
-fba.reset();
 const leak_count = testing.allocator_instance.detectLeaks();
 testing.allocator_instance.deinitWithoutLeakChecks();
 try server.serveTestResults(.{
@@ -234,10 +233,10 @@ fn mainTerminal() void {
 var leaks: usize = 0;
 for (test_fn_list, 0..) |test_fn, i| {
 testing.allocator_instance = .{};
-testing.io_instance = .init(fba.allocator());
+testing.io_instance = .init(testing.allocator);
 defer {
-if (testing.allocator_instance.deinit() == .leak) leaks += 1;
 testing.io_instance.deinit();
+if (testing.allocator_instance.deinit() == .leak) leaks += 1;
 }
 testing.log_level = .warn;

@@ -324,7 +323,7 @@ pub fn mainSimple() anyerror!void {
 .stage2_aarch64, .stage2_riscv64 => true,
 else => false,
 };
-// is the backend capable of calling `std.Io.Writer.print`?
+// is the backend capable of calling `Io.Writer.print`?
 const enable_print = switch (builtin.zig_backend) {
 .stage2_aarch64, .stage2_riscv64 => true,
 else => false,
@@ -1837,6 +1837,8 @@ pub fn runAllowFail(
 if (!process.can_spawn)
 return error.ExecNotSupported;

+const io = b.graph.io;
+
 const max_output_size = 400 * 1024;
 var child = std.process.Child.init(argv, b.allocator);
 child.stdin_behavior = .Ignore;
@@ -1847,7 +1849,7 @@ pub fn runAllowFail(
 try Step.handleVerbose2(b, null, child.env_map, argv);
 try child.spawn();

-var stdout_reader = child.stdout.?.readerStreaming(&.{});
+var stdout_reader = child.stdout.?.readerStreaming(io, &.{});
 const stdout = stdout_reader.interface.allocRemaining(b.allocator, .limited(max_output_size)) catch {
 return error.ReadFailure;
 };
@@ -3,8 +3,10 @@
 //! not to withstand attacks using specially-crafted input.

 const Cache = @This();
-const std = @import("std");
 const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
 const crypto = std.crypto;
 const fs = std.fs;
 const assert = std.debug.assert;
@@ -15,6 +17,7 @@ const Allocator = std.mem.Allocator;
 const log = std.log.scoped(.cache);

 gpa: Allocator,
+io: Io,
 manifest_dir: fs.Dir,
 hash: HashHelper = .{},
 /// This value is accessed from multiple threads, protected by mutex.
@@ -661,9 +664,10 @@ pub const Manifest = struct {
 },
 } {
 const gpa = self.cache.gpa;
+const io = self.cache.io;
 const input_file_count = self.files.entries.len;
 var tiny_buffer: [1]u8 = undefined; // allows allocRemaining to detect limit exceeded
-var manifest_reader = self.manifest_file.?.reader(&tiny_buffer); // Reads positionally from zero.
+var manifest_reader = self.manifest_file.?.reader(io, &tiny_buffer); // Reads positionally from zero.
 const limit: std.Io.Limit = .limited(manifest_file_size_max);
 const file_contents = manifest_reader.interface.allocRemaining(gpa, limit) catch |err| switch (err) {
 error.OutOfMemory => return error.OutOfMemory,
@@ -1337,7 +1341,8 @@ test "cache file and then recall it" {
 var digest2: HexDigest = undefined;

 {
-var cache = Cache{
+var cache: Cache = .{
+.io = io,
 .gpa = testing.allocator,
 .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
 };
@@ -1402,7 +1407,8 @@ test "check that changing a file makes cache fail" {
 var digest2: HexDigest = undefined;

 {
-var cache = Cache{
+var cache: Cache = .{
+.io = io,
 .gpa = testing.allocator,
 .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
 };
@@ -1451,6 +1457,8 @@ test "check that changing a file makes cache fail" {
 }

 test "no file inputs" {
+const io = testing.io;
+
 var tmp = testing.tmpDir(.{});
 defer tmp.cleanup();

@@ -1459,7 +1467,8 @@ test "no file inputs" {
 var digest1: HexDigest = undefined;
 var digest2: HexDigest = undefined;

-var cache = Cache{
+var cache: Cache = .{
+.io = io,
 .gpa = testing.allocator,
 .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
 };
@@ -1517,7 +1526,8 @@ test "Manifest with files added after initial hash work" {
 var digest3: HexDigest = undefined;

 {
-var cache = Cache{
+var cache: Cache = .{
+.io = io,
 .gpa = testing.allocator,
 .manifest_dir = try tmp.dir.makeOpenPath(temp_manifest_dir, .{}),
 };
@@ -1,4 +1,5 @@
 const std = @import("../std.zig");
+const Io = std.Io;
 const Build = std.Build;
 const Cache = Build.Cache;
 const Step = std.Build.Step;
@@ -14,6 +15,7 @@ const Fuzz = @This();
 const build_runner = @import("root");

 gpa: Allocator,
+io: Io,
 mode: Mode,

 /// Allocated into `gpa`.
@@ -75,6 +77,7 @@ const CoverageMap = struct {

 pub fn init(
 gpa: Allocator,
+io: Io,
 thread_pool: *std.Thread.Pool,
 all_steps: []const *Build.Step,
 root_prog_node: std.Progress.Node,
@@ -111,6 +114,7 @@ pub fn init(

 return .{
 .gpa = gpa,
+.io = io,
 .mode = mode,
 .run_steps = run_steps,
 .wait_group = .{},
@@ -484,6 +488,7 @@ fn addEntryPoint(fuzz: *Fuzz, coverage_id: u64, addr: u64) error{ AlreadyReporte

 pub fn waitAndPrintReport(fuzz: *Fuzz) void {
 assert(fuzz.mode == .limit);
+const io = fuzz.io;

 fuzz.wait_group.wait();
 fuzz.wait_group.reset();
@@ -506,7 +511,7 @@ pub fn waitAndPrintReport(fuzz: *Fuzz) void {

 const fuzz_abi = std.Build.abi.fuzz;
 var rbuf: [0x1000]u8 = undefined;
-var r = coverage_file.reader(&rbuf);
+var r = coverage_file.reader(io, &rbuf);

 var header: fuzz_abi.SeenPcsHeader = undefined;
 r.interface.readSliceAll(std.mem.asBytes(&header)) catch |err| {
@@ -1,9 +1,11 @@
 const Step = @This();
+const builtin = @import("builtin");
+
 const std = @import("../std.zig");
+const Io = std.Io;
 const Build = std.Build;
 const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
-const builtin = @import("builtin");
 const Cache = Build.Cache;
 const Path = Cache.Path;
 const ArrayList = std.ArrayList;
@@ -327,7 +329,7 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
 }

 /// For debugging purposes, prints identifying information about this Step.
-pub fn dump(step: *Step, w: *std.Io.Writer, tty_config: std.Io.tty.Config) void {
+pub fn dump(step: *Step, w: *Io.Writer, tty_config: Io.tty.Config) void {
 if (step.debug_stack_trace.instruction_addresses.len > 0) {
 w.print("name: '{s}'. creation stack trace:\n", .{step.name}) catch {};
 std.debug.writeStackTrace(&step.debug_stack_trace, w, tty_config) catch {};
@@ -382,7 +384,7 @@ pub fn addError(step: *Step, comptime fmt: []const u8, args: anytype) error{OutO

 pub const ZigProcess = struct {
 child: std.process.Child,
-poller: std.Io.Poller(StreamEnum),
+poller: Io.Poller(StreamEnum),
 progress_ipc_fd: if (std.Progress.have_ipc) ?std.posix.fd_t else void,

 pub const StreamEnum = enum { stdout, stderr };
@@ -458,7 +460,7 @@ pub fn evalZigProcess(
 const zp = try gpa.create(ZigProcess);
 zp.* = .{
 .child = child,
-.poller = std.Io.poll(gpa, ZigProcess.StreamEnum, .{
+.poller = Io.poll(gpa, ZigProcess.StreamEnum, .{
 .stdout = child.stdout.?,
 .stderr = child.stderr.?,
 }),
@@ -505,11 +507,12 @@ pub fn evalZigProcess(
 }

 /// Wrapper around `std.fs.Dir.updateFile` that handles verbose and error output.
-pub fn installFile(s: *Step, src_lazy_path: Build.LazyPath, dest_path: []const u8) !std.fs.Dir.PrevStatus {
+pub fn installFile(s: *Step, src_lazy_path: Build.LazyPath, dest_path: []const u8) !Io.Dir.PrevStatus {
 const b = s.owner;
+const io = b.graph.io;
 const src_path = src_lazy_path.getPath3(b, s);
 try handleVerbose(b, null, &.{ "install", "-C", b.fmt("{f}", .{src_path}), dest_path });
-return src_path.root_dir.handle.updateFile(src_path.sub_path, std.fs.cwd(), dest_path, .{}) catch |err| {
+return Io.Dir.updateFile(src_path.root_dir.handle.adaptToNewApi(), io, src_path.sub_path, .cwd(), dest_path, .{}) catch |err| {
 return s.fail("unable to update file from '{f}' to '{s}': {s}", .{
 src_path, dest_path, @errorName(err),
 });
@@ -738,7 +741,7 @@ pub fn allocPrintCmd2(
 argv: []const []const u8,
 ) Allocator.Error![]u8 {
 const shell = struct {
-fn escape(writer: *std.Io.Writer, string: []const u8, is_argv0: bool) !void {
+fn escape(writer: *Io.Writer, string: []const u8, is_argv0: bool) !void {
 for (string) |c| {
 if (switch (c) {
 else => true,
@@ -772,7 +775,7 @@ pub fn allocPrintCmd2(
 }
 };

-var aw: std.Io.Writer.Allocating = .init(gpa);
+var aw: Io.Writer.Allocating = .init(gpa);
 defer aw.deinit();
 const writer = &aw.writer;
 if (opt_cwd) |cwd| writer.print("cd {s} && ", .{cwd}) catch return error.OutOfMemory;
@@ -538,8 +538,10 @@ test Options
 defer arena.deinit();

 var graph: std.Build.Graph = .{
+.io = io,
 .arena = arena.allocator(),
 .cache = .{
+.io = io,
 .gpa = arena.allocator(),
 .manifest_dir = std.fs.cwd(),
 },
@@ -761,6 +761,7 @@ const IndexedOutput = struct {
 };
 fn make(step: *Step, options: Step.MakeOptions) !void {
 const b = step.owner;
+const io = b.graph.io;
 const arena = b.allocator;
 const run: *Run = @fieldParentPtr("step", step);
 const has_side_effects = run.hasSideEffects();
@@ -834,7 +835,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
 defer file.close();

 var buf: [1024]u8 = undefined;
-var file_reader = file.reader(&buf);
+var file_reader = file.reader(io, &buf);
 _ = file_reader.interface.streamRemaining(&result.writer) catch |err| switch (err) {
 error.ReadFailed => return step.fail(
 "failed to read from '{f}': {t}",
@@ -1067,6 +1068,7 @@ pub fn rerunInFuzzMode(
 ) !void {
 const step = &run.step;
 const b = step.owner;
+const io = b.graph.io;
 const arena = b.allocator;
 var argv_list: std.ArrayList([]const u8) = .empty;
 for (run.argv.items) |arg| {
@@ -1093,7 +1095,7 @@ pub fn rerunInFuzzMode(
 defer file.close();

 var buf: [1024]u8 = undefined;
-var file_reader = file.reader(&buf);
+var file_reader = file.reader(io, &buf);
 _ = file_reader.interface.streamRemaining(&result.writer) catch |err| switch (err) {
 error.ReadFailed => return file_reader.err.?,
 error.WriteFailed => return error.OutOfMemory,
@@ -2090,6 +2092,7 @@ fn sendRunFuzzTestMessage(

 fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
 const b = run.step.owner;
+const io = b.graph.io;
 const arena = b.allocator;

 try child.spawn();
@@ -2113,7 +2116,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
 defer file.close();
 // TODO https://github.com/ziglang/zig/issues/23955
 var read_buffer: [1024]u8 = undefined;
-var file_reader = file.reader(&read_buffer);
+var file_reader = file.reader(io, &read_buffer);
 var write_buffer: [1024]u8 = undefined;
 var stdin_writer = child.stdin.?.writer(&write_buffer);
 _ = stdin_writer.interface.sendFileAll(&file_reader, .unlimited) catch |err| switch (err) {
@@ -2159,7 +2162,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
 stdout_bytes = try poller.toOwnedSlice(.stdout);
 stderr_bytes = try poller.toOwnedSlice(.stderr);
 } else {
-var stdout_reader = stdout.readerStreaming(&.{});
+var stdout_reader = stdout.readerStreaming(io, &.{});
 stdout_bytes = stdout_reader.interface.allocRemaining(arena, run.stdio_limit) catch |err| switch (err) {
 error.OutOfMemory => return error.OutOfMemory,
 error.ReadFailed => return stdout_reader.err.?,
@@ -2167,7 +2170,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
 };
 }
 } else if (child.stderr) |stderr| {
-var stderr_reader = stderr.readerStreaming(&.{});
+var stderr_reader = stderr.readerStreaming(io, &.{});
 stderr_bytes = stderr_reader.interface.allocRemaining(arena, run.stdio_limit) catch |err| switch (err) {
 error.OutOfMemory => return error.OutOfMemory,
 error.ReadFailed => return stderr_reader.err.?,
@@ -3,11 +3,13 @@
 //! not be used during the normal build process, but as a utility run by a
 //! developer with intention to update source files, which will then be
 //! committed to version control.
+const UpdateSourceFiles = @This();
+
 const std = @import("std");
+const Io = std.Io;
 const Step = std.Build.Step;
 const fs = std.fs;
 const ArrayList = std.ArrayList;
-const UpdateSourceFiles = @This();

 step: Step,
 output_source_files: std.ArrayListUnmanaged(OutputSourceFile),
@@ -70,22 +72,21 @@ pub fn addBytesToSource(usf: *UpdateSourceFiles, bytes: []const u8, sub_path: []
 fn make(step: *Step, options: Step.MakeOptions) !void {
 _ = options;
 const b = step.owner;
+const io = b.graph.io;
 const usf: *UpdateSourceFiles = @fieldParentPtr("step", step);

 var any_miss = false;
 for (usf.output_source_files.items) |output_source_file| {
 if (fs.path.dirname(output_source_file.sub_path)) |dirname| {
 b.build_root.handle.makePath(dirname) catch |err| {
-return step.fail("unable to make path '{f}{s}': {s}", .{
-b.build_root, dirname, @errorName(err),
-});
+return step.fail("unable to make path '{f}{s}': {t}", .{ b.build_root, dirname, err });
 };
 }
 switch (output_source_file.contents) {
 .bytes => |bytes| {
 b.build_root.handle.writeFile(.{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| {
-return step.fail("unable to write file '{f}{s}': {s}", .{
-b.build_root, output_source_file.sub_path, @errorName(err),
+return step.fail("unable to write file '{f}{s}': {t}", .{
+b.build_root, output_source_file.sub_path, err,
 });
 };
 any_miss = true;
@@ -94,15 +95,16 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
 if (!step.inputs.populated()) try step.addWatchInput(file_source);

 const source_path = file_source.getPath2(b, step);
-const prev_status = fs.Dir.updateFile(
-fs.cwd(),
+const prev_status = Io.Dir.updateFile(
+.cwd(),
+io,
 source_path,
-b.build_root.handle,
+b.build_root.handle.adaptToNewApi(),
 output_source_file.sub_path,
 .{},
 ) catch |err| {
-return step.fail("unable to update file from '{s}' to '{f}{s}': {s}", .{
-source_path, b.build_root, output_source_file.sub_path, @errorName(err),
+return step.fail("unable to update file from '{s}' to '{f}{s}': {t}", .{
+source_path, b.build_root, output_source_file.sub_path, err,
 });
 };
 any_miss = any_miss or prev_status == .stale;
@@ -2,6 +2,7 @@
 //! the local cache which has a set of files that have either been generated
 //! during the build, or are copied from the source package.
 const std = @import("std");
+const Io = std.Io;
 const Step = std.Build.Step;
 const fs = std.fs;
 const ArrayList = std.ArrayList;
@@ -174,6 +175,7 @@ fn maybeUpdateName(write_file: *WriteFile) void {
 fn make(step: *Step, options: Step.MakeOptions) !void {
 _ = options;
 const b = step.owner;
+const io = b.graph.io;
 const arena = b.allocator;
 const gpa = arena;
 const write_file: *WriteFile = @fieldParentPtr("step", step);
@@ -264,40 +266,27 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
 };
 defer cache_dir.close();

-const cwd = fs.cwd();
-
 for (write_file.files.items) |file| {
 if (fs.path.dirname(file.sub_path)) |dirname| {
 cache_dir.makePath(dirname) catch |err| {
-return step.fail("unable to make path '{f}{s}{c}{s}': {s}", .{
-b.cache_root, cache_path, fs.path.sep, dirname, @errorName(err),
+return step.fail("unable to make path '{f}{s}{c}{s}': {t}", .{
+b.cache_root, cache_path, fs.path.sep, dirname, err,
 });
 };
 }
 switch (file.contents) {
 .bytes => |bytes| {
 cache_dir.writeFile(.{ .sub_path = file.sub_path, .data = bytes }) catch |err| {
-return step.fail("unable to write file '{f}{s}{c}{s}': {s}", .{
-b.cache_root, cache_path, fs.path.sep, file.sub_path, @errorName(err),
+return step.fail("unable to write file '{f}{s}{c}{s}': {t}", .{
+b.cache_root, cache_path, fs.path.sep, file.sub_path, err,
 });
 };
 },
 .copy => |file_source| {
 const source_path = file_source.getPath2(b, step);
-const prev_status = fs.Dir.updateFile(
-cwd,
-source_path,
-cache_dir,
-file.sub_path,
-.{},
-) catch |err| {
-return step.fail("unable to update file from '{s}' to '{f}{s}{c}{s}': {s}", .{
-source_path,
-b.cache_root,
-cache_path,
-fs.path.sep,
-file.sub_path,
-@errorName(err),
+const prev_status = Io.Dir.updateFile(.cwd(), io, source_path, cache_dir.adaptToNewApi(), file.sub_path, .{}) catch |err| {
+return step.fail("unable to update file from '{s}' to '{f}{s}{c}{s}': {t}", .{
+source_path, b.cache_root, cache_path, fs.path.sep, file.sub_path, err,
 });
 };
 // At this point we already will mark the step as a cache miss.
@@ -331,10 +320,11 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
 switch (entry.kind) {
 .directory => try cache_dir.makePath(dest_path),
 .file => {
-const prev_status = fs.Dir.updateFile(
-src_entry_path.root_dir.handle,
+const prev_status = Io.Dir.updateFile(
+src_entry_path.root_dir.handle.adaptToNewApi(),
+io,
 src_entry_path.sub_path,
-cache_dir,
+cache_dir.adaptToNewApi(),
 dest_path,
 .{},
 ) catch |err| {
@@ -3,14 +3,15 @@ thread_pool: *std.Thread.Pool,
 graph: *const Build.Graph,
 all_steps: []const *Build.Step,
 listen_address: net.IpAddress,
-ttyconf: std.Io.tty.Config,
+ttyconf: Io.tty.Config,
 root_prog_node: std.Progress.Node,
 watch: bool,

 tcp_server: ?net.Server,
 serve_thread: ?std.Thread,

-base_timestamp: i128,
+/// Uses `Io.Clock.awake`.
+base_timestamp: i96,
 /// The "step name" data which trails `abi.Hello`, for the steps in `all_steps`.
 step_names_trailing: []u8,

@@ -53,15 +54,17 @@ pub const Options = struct {
 thread_pool: *std.Thread.Pool,
 graph: *const std.Build.Graph,
 all_steps: []const *Build.Step,
-ttyconf: std.Io.tty.Config,
+ttyconf: Io.tty.Config,
 root_prog_node: std.Progress.Node,
 watch: bool,
 listen_address: net.IpAddress,
+base_timestamp: Io.Timestamp,
 };
 pub fn init(opts: Options) WebServer {
-// The upcoming `std.Io` interface should allow us to use `Io.async` and `Io.concurrent`
+// The upcoming `Io` interface should allow us to use `Io.async` and `Io.concurrent`
 // instead of threads, so that the web server can function in single-threaded builds.
 comptime assert(!builtin.single_threaded);
+assert(opts.base_timestamp.clock == .awake);

 const all_steps = opts.all_steps;

@@ -106,7 +109,7 @@ pub fn init(opts: Options) WebServer {
 .tcp_server = null,
 .serve_thread = null,

-.base_timestamp = std.time.nanoTimestamp(),
+.base_timestamp = opts.base_timestamp.nanoseconds,
 .step_names_trailing = step_names_trailing,

 .step_status_bits = step_status_bits,
@@ -147,32 +150,34 @@ pub fn deinit(ws: *WebServer) void {
 pub fn start(ws: *WebServer) error{AlreadyReported}!void {
 assert(ws.tcp_server == null);
 assert(ws.serve_thread == null);
+const io = ws.graph.io;

-ws.tcp_server = ws.listen_address.listen(.{ .reuse_address = true }) catch |err| {
+ws.tcp_server = ws.listen_address.listen(io, .{ .reuse_address = true }) catch |err| {
 log.err("failed to listen to port {d}: {s}", .{ ws.listen_address.getPort(), @errorName(err) });
 return error.AlreadyReported;
 };
 ws.serve_thread = std.Thread.spawn(.{}, serve, .{ws}) catch |err| {
 log.err("unable to spawn web server thread: {s}", .{@errorName(err)});
-ws.tcp_server.?.deinit();
+ws.tcp_server.?.deinit(io);
 ws.tcp_server = null;
 return error.AlreadyReported;
 };

-log.info("web interface listening at http://{f}/", .{ws.tcp_server.?.listen_address});
+log.info("web interface listening at http://{f}/", .{ws.tcp_server.?.socket.address});
 if (ws.listen_address.getPort() == 0) {
-log.info("hint: pass '--webui={f}' to use the same port next time", .{ws.tcp_server.?.listen_address});
+log.info("hint: pass '--webui={f}' to use the same port next time", .{ws.tcp_server.?.socket.address});
 }
 }
 fn serve(ws: *WebServer) void {
+const io = ws.graph.io;
 while (true) {
-const connection = ws.tcp_server.?.accept() catch |err| {
+var stream = ws.tcp_server.?.accept(io) catch |err| {
 log.err("failed to accept connection: {s}", .{@errorName(err)});
 return;
 };
-_ = std.Thread.spawn(.{}, accept, .{ ws, connection }) catch |err| {
+_ = std.Thread.spawn(.{}, accept, .{ ws, stream }) catch |err| {
 log.err("unable to spawn connection thread: {s}", .{@errorName(err)});
-connection.stream.close();
+stream.close(io);
 continue;
 };
 }
@@ -227,6 +232,7 @@ pub fn finishBuild(ws: *WebServer, opts: struct {

 ws.fuzz = Fuzz.init(
 ws.gpa,
+ws.graph.io,
 ws.thread_pool,
 ws.all_steps,
 ws.root_prog_node,
@@ -241,17 +247,25 @@ pub fn finishBuild(ws: *WebServer, opts: struct {
 }

 pub fn now(s: *const WebServer) i64 {
-return @intCast(std.time.nanoTimestamp() - s.base_timestamp);
+const io = s.graph.io;
+const base: Io.Timestamp = .{ .nanoseconds = s.base_timestamp, .clock = .awake };
+const ts = Io.Timestamp.now(io, base.clock) catch base;
+return @intCast(base.durationTo(ts).toNanoseconds());
 }

-fn accept(ws: *WebServer, connection: net.Server.Connection) void {
-defer connection.stream.close();
+fn accept(ws: *WebServer, stream: net.Stream) void {
+const io = ws.graph.io;
+defer {
+// `net.Stream.close` wants to helpfully overwrite `stream` with
+// `undefined`, but it cannot do so since it is an immutable parameter.
+var copy = stream;
+copy.close(io);
+}
 var send_buffer: [4096]u8 = undefined;
 var recv_buffer: [4096]u8 = undefined;
-var connection_reader = connection.stream.reader(&recv_buffer);
-var connection_writer = connection.stream.writer(&send_buffer);
-var server: http.Server = .init(connection_reader.interface(), &connection_writer.interface);
+var connection_reader = stream.reader(io, &recv_buffer);
+var connection_writer = stream.writer(io, &send_buffer);
+var server: http.Server = .init(&connection_reader.interface, &connection_writer.interface);

 while (true) {
 var request = server.receiveHead() catch |err| switch (err) {
@@ -466,12 +480,9 @@ pub fn serveFile(
 },
 });
 }
-pub fn serveTarFile(
-ws: *WebServer,
-request: *http.Server.Request,
-paths: []const Cache.Path,
-) !void {
+pub fn serveTarFile(ws: *WebServer, request: *http.Server.Request, paths: []const Cache.Path) !void {
 const gpa = ws.gpa;
+const io = ws.graph.io;

 var send_buffer: [0x4000]u8 = undefined;
 var response = try request.respondStreaming(&send_buffer, .{
@@ -496,7 +507,7 @@ pub fn serveTarFile(
 defer file.close();
 const stat = try file.stat();
 var read_buffer: [1024]u8 = undefined;
-var file_reader: std.fs.File.Reader = .initSize(file, &read_buffer, stat.size);
+var file_reader: Io.File.Reader = .initSize(file.adaptToNewApi(), io, &read_buffer, stat.size);

 // TODO: this logic is completely bogus -- obviously so, because `path.root_dir.path` can
 // be cwd-relative. This is also related to why linkification doesn't work in the fuzzer UI:
@@ -566,7 +577,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim
 child.stderr_behavior = .Pipe;
 try child.spawn();

-var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{
+var poller = Io.poll(gpa, enum { stdout, stderr }, .{
 .stdout = child.stdout.?,
 .stderr = child.stderr.?,
 });
@@ -842,7 +853,10 @@ const cache_control_header: http.Header = .{
 };

 const builtin = @import("builtin");
+
 const std = @import("std");
+const Io = std.Io;
+const net = std.Io.net;
 const assert = std.debug.assert;
 const mem = std.mem;
 const log = std.log.scoped(.web_server);
@@ -852,6 +866,5 @@ const Cache = Build.Cache;
 const Fuzz = Build.Fuzz;
 const abi = Build.abi;
 const http = std.http;
-const net = std.Io.net;

 const WebServer = @This();
@@ -654,6 +654,10 @@ pub const VTable = struct {
 conditionWait: *const fn (?*anyopaque, cond: *Condition, mutex: *Mutex) Cancelable!void,
 conditionWake: *const fn (?*anyopaque, cond: *Condition, wake: Condition.Wake) void,

+dirMake: *const fn (?*anyopaque, dir: Dir, sub_path: []const u8, mode: Dir.Mode) Dir.MakeError!void,
+dirStat: *const fn (?*anyopaque, dir: Dir) Dir.StatError!Dir.Stat,
+dirStatPath: *const fn (?*anyopaque, dir: Dir, sub_path: []const u8) Dir.StatError!File.Stat,
+fileStat: *const fn (?*anyopaque, file: File) File.StatError!File.Stat,
 createFile: *const fn (?*anyopaque, dir: Dir, sub_path: []const u8, flags: File.CreateFlags) File.OpenError!File,
 fileOpen: *const fn (?*anyopaque, dir: Dir, sub_path: []const u8, flags: File.OpenFlags) File.OpenError!File,
 fileClose: *const fn (?*anyopaque, File) void,
@@ -804,6 +808,10 @@ pub const Timestamp = struct {
 assert(lhs.clock == rhs.clock);
 return std.math.compare(lhs.nanoseconds, op, rhs.nanoseconds);
 }
+
+pub fn toSeconds(t: Timestamp) i64 {
+return @intCast(@divTrunc(t.nanoseconds, std.time.ns_per_s));
+}
 };

 pub const Duration = struct {
@@ -831,6 +839,10 @@ pub const Duration = struct {
 return @intCast(@divTrunc(d.nanoseconds, std.time.ns_per_s));
 }
+
+pub fn toNanoseconds(d: Duration) i96 {
+return d.nanoseconds;
+}

 pub fn sleep(duration: Duration, io: Io) SleepError!void {
 return io.vtable.sleep(io.userdata, .{ .duration = .{ .duration = duration, .clock = .awake } });
 }
@ -6,6 +6,9 @@ const File = Io.File;
|
|||||||
|
|
||||||
handle: Handle,
|
handle: Handle,
|
||||||
|
|
||||||
|
pub const Mode = Io.File.Mode;
|
||||||
|
pub const default_mode: Mode = 0o755;
|
||||||
|
|
||||||
pub fn cwd() Dir {
|
pub fn cwd() Dir {
|
||||||
return .{ .handle = std.fs.cwd().fd };
|
return .{ .handle = std.fs.cwd().fd };
|
||||||
}
|
}
|
||||||
@ -47,8 +50,9 @@ pub const UpdateFileError = File.OpenError;
|
|||||||
|
|
||||||
/// Check the file size, mtime, and mode of `source_path` and `dest_path`. If
|
/// Check the file size, mtime, and mode of `source_path` and `dest_path`. If
|
||||||
/// they are equal, does nothing. Otherwise, atomically copies `source_path` to
|
/// they are equal, does nothing. Otherwise, atomically copies `source_path` to
|
||||||
/// `dest_path`. The destination file gains the mtime, atime, and mode of the
|
/// `dest_path`, creating the parent directory hierarchy as needed. The
|
||||||
/// source file so that the next call to `updateFile` will not need a copy.
|
/// destination file gains the mtime, atime, and mode of the source file so
|
||||||
|
/// that the next call to `updateFile` will not need a copy.
|
||||||
///
|
///
|
||||||
/// Returns the previous status of the file before updating.
|
/// Returns the previous status of the file before updating.
|
||||||
///
|
///
|
||||||
@ -65,7 +69,7 @@ pub fn updateFile(
|
|||||||
options: std.fs.Dir.CopyFileOptions,
|
options: std.fs.Dir.CopyFileOptions,
|
||||||
) !PrevStatus {
|
) !PrevStatus {
|
||||||
var src_file = try source_dir.openFile(io, source_path, .{});
|
var src_file = try source_dir.openFile(io, source_path, .{});
|
||||||
defer src_file.close();
|
defer src_file.close(io);
|
||||||
|
|
||||||
const src_stat = try src_file.stat(io);
|
const src_stat = try src_file.stat(io);
|
||||||
const actual_mode = options.override_mode orelse src_stat.mode;
|
const actual_mode = options.override_mode orelse src_stat.mode;
|
||||||
@ -93,13 +97,13 @@ pub fn updateFile(
|
|||||||
}
|
}
|
||||||
|
|
||||||
var buffer: [1000]u8 = undefined; // Used only when direct fd-to-fd is not available.
|
var buffer: [1000]u8 = undefined; // Used only when direct fd-to-fd is not available.
|
||||||
var atomic_file = try dest_dir.atomicFile(io, dest_path, .{
|
var atomic_file = try std.fs.Dir.atomicFile(.adaptFromNewApi(dest_dir), dest_path, .{
|
||||||
.mode = actual_mode,
|
.mode = actual_mode,
|
||||||
.write_buffer = &buffer,
|
.write_buffer = &buffer,
|
||||||
});
|
});
|
||||||
defer atomic_file.deinit();
|
defer atomic_file.deinit();
|
||||||
|
|
||||||
var src_reader: File.Reader = .initSize(io, src_file, &.{}, src_stat.size);
|
var src_reader: File.Reader = .initSize(src_file, io, &.{}, src_stat.size);
|
||||||
const dest_writer = &atomic_file.file_writer.interface;
|
const dest_writer = &atomic_file.file_writer.interface;
|
||||||
|
|
||||||
_ = dest_writer.sendFileAll(&src_reader, .unlimited) catch |err| switch (err) {
|
_ = dest_writer.sendFileAll(&src_reader, .unlimited) catch |err| switch (err) {
|
||||||
@ -111,3 +115,154 @@ pub fn updateFile(
|
|||||||
try atomic_file.renameIntoPlace();
|
try atomic_file.renameIntoPlace();
|
||||||
return .stale;
|
return .stale;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub const ReadFileError = File.OpenError || File.Reader.Error;
|
||||||
|
|
||||||
|
/// Read all of file contents using a preallocated buffer.
|
||||||
|
///
|
||||||
|
/// The returned slice has the same pointer as `buffer`. If the length matches `buffer.len`
|
||||||
|
/// the situation is ambiguous. It could either mean that the entire file was read, and
|
||||||
|
/// it exactly fits the buffer, or it could mean the buffer was not big enough for the
|
||||||
|
/// entire file.
|
||||||
|
///
|
||||||
|
/// * On Windows, `file_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
|
||||||
|
/// * On WASI, `file_path` should be encoded as valid UTF-8.
|
||||||
|
/// * On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
|
||||||
|
pub fn readFile(dir: Dir, io: Io, file_path: []const u8, buffer: []u8) ReadFileError![]u8 {
|
||||||
|
var file = try dir.openFile(io, file_path, .{});
|
||||||
|
defer file.close(io);
|
||||||
|
|
||||||
|
var reader = file.reader(io, &.{});
|
||||||
|
const n = reader.interface.readSliceShort(buffer) catch |err| switch (err) {
|
||||||
|
error.ReadFailed => return reader.err.?,
|
||||||
|
};
|
||||||
|
|
||||||
|
return buffer[0..n];
|
||||||
|
}
|
||||||
|
|
||||||
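A minimal usage sketch of the `readFile` signature above, showing one way a caller can handle the documented ambiguity when the result exactly fills the buffer. The file name and the `error.StreamTooLong` choice are illustrative only.

const std = @import("std");
const Io = std.Io;

fn printVersion(io: Io, dir: Io.Dir, out: *Io.Writer) !void {
    var buf: [128]u8 = undefined;
    const contents = try dir.readFile(io, "version.txt", &buf);
    // A full buffer may mean the file is longer than 128 bytes.
    if (contents.len == buf.len) return error.StreamTooLong;
    try out.print("version: {s}\n", .{contents});
}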
|
pub const MakeError = error{
|
||||||
|
/// In WASI, this error may occur when the file descriptor does
|
||||||
|
/// not hold the required rights to create a new directory relative to it.
|
||||||
|
AccessDenied,
|
||||||
|
PermissionDenied,
|
||||||
|
DiskQuota,
|
||||||
|
PathAlreadyExists,
|
||||||
|
SymLinkLoop,
|
||||||
|
LinkQuotaExceeded,
|
||||||
|
NameTooLong,
|
||||||
|
FileNotFound,
|
||||||
|
SystemResources,
|
||||||
|
NoSpaceLeft,
|
||||||
|
NotDir,
|
||||||
|
ReadOnlyFileSystem,
|
||||||
|
/// WASI-only; file paths must be valid UTF-8.
|
||||||
|
InvalidUtf8,
|
||||||
|
/// Windows-only; file paths provided by the user must be valid WTF-8.
|
||||||
|
/// https://simonsapin.github.io/wtf-8/
|
||||||
|
InvalidWtf8,
|
||||||
|
BadPathName,
|
||||||
|
NoDevice,
|
||||||
|
/// On Windows, `\\server` or `\\server\share` was not found.
|
||||||
|
NetworkNotFound,
|
||||||
|
} || Io.Cancelable || Io.UnexpectedError;
|
||||||
|
|
||||||
|
/// Creates a single directory with a relative or absolute path.
|
||||||
|
///
|
||||||
|
/// * On Windows, `sub_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
|
||||||
|
/// * On WASI, `sub_path` should be encoded as valid UTF-8.
|
||||||
|
/// * On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
|
||||||
|
///
|
||||||
|
/// Related:
|
||||||
|
/// * `makePath`
|
||||||
|
/// * `makeDirAbsolute`
|
||||||
|
pub fn makeDir(dir: Dir, io: Io, sub_path: []const u8) MakeError!void {
|
||||||
|
return io.vtable.dirMake(io.userdata, dir, sub_path, default_mode);
|
||||||
|
}
|
||||||
|
|
||||||
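A short sketch against the `makeDir` signature above: create one directory and treat `error.PathAlreadyExists` (part of `MakeError`) as success, propagating everything else. The directory name is a placeholder.

const std = @import("std");
const Io = std.Io;

fn ensureCacheDir(io: Io, dir: Io.Dir) !void {
    dir.makeDir(io, "cache") catch |err| switch (err) {
        error.PathAlreadyExists => {}, // already there; nothing to do
        else => |e| return e,
    };
}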
|
pub const MakePathError = MakeError || StatPathError;
|
||||||
|
|
||||||
|
/// Calls makeDir iteratively to make an entire path, creating any parent
|
||||||
|
/// directories that do not exist.
|
||||||
|
///
|
||||||
|
/// Returns success if the path already exists and is a directory.
|
||||||
|
///
|
||||||
|
/// This function is not atomic, and if it returns an error, the file system
|
||||||
|
/// may have been modified regardless.
|
||||||
|
///
|
||||||
|
/// Fails on an empty path with `error.BadPathName` as that is not a path that
|
||||||
|
/// can be created.
|
||||||
|
///
|
||||||
|
/// On Windows, `sub_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
|
||||||
|
/// On WASI, `sub_path` should be encoded as valid UTF-8.
|
||||||
|
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
|
||||||
|
///
|
||||||
|
/// Paths containing `..` components are handled differently depending on the platform:
|
||||||
|
/// - On Windows, `..` are resolved before the path is passed to NtCreateFile, meaning
|
||||||
|
/// a `sub_path` like "first/../second" will resolve to "second" and only a
|
||||||
|
/// `./second` directory will be created.
|
||||||
|
/// - On other platforms, `..` are not resolved before the path is passed to `mkdirat`,
|
||||||
|
/// meaning a `sub_path` like "first/../second" will create both a `./first`
|
||||||
|
/// and a `./second` directory.
|
||||||
|
pub fn makePath(dir: Dir, io: Io, sub_path: []const u8) MakePathError!void {
|
||||||
|
_ = try makePathStatus(dir, io, sub_path);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const MakePathStatus = enum { existed, created };
|
||||||
|
|
||||||
|
/// Same as `makePath` except returns whether the path already existed or was
|
||||||
|
/// successfully created.
|
||||||
|
pub fn makePathStatus(dir: Dir, io: Io, sub_path: []const u8) MakePathError!MakePathStatus {
|
||||||
|
var it = try std.fs.path.componentIterator(sub_path);
|
||||||
|
var status: MakePathStatus = .existed;
|
||||||
|
var component = it.last() orelse return error.BadPathName;
|
||||||
|
while (true) {
|
||||||
|
if (makeDir(dir, io, component.path)) |_| {
|
||||||
|
status = .created;
|
||||||
|
} else |err| switch (err) {
|
||||||
|
error.PathAlreadyExists => {
|
||||||
|
// stat the file and return an error if it's not a directory
|
||||||
|
// this is important because otherwise a dangling symlink
|
||||||
|
// could cause an infinite loop
|
||||||
|
check_dir: {
|
||||||
|
// workaround for windows, see https://github.com/ziglang/zig/issues/16738
|
||||||
|
const fstat = statPath(dir, io, component.path) catch |stat_err| switch (stat_err) {
|
||||||
|
error.IsDir => break :check_dir,
|
||||||
|
else => |e| return e,
|
||||||
|
};
|
||||||
|
if (fstat.kind != .directory) return error.NotDir;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
error.FileNotFound => |e| {
|
||||||
|
component = it.previous() orelse return e;
|
||||||
|
continue;
|
||||||
|
},
|
||||||
|
else => |e| return e,
|
||||||
|
}
|
||||||
|
component = it.next() orelse return status;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
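A sketch contrasting `makePath` with `makePathStatus` as documented above; the path names are placeholders.

const std = @import("std");
const Io = std.Io;

fn prepareOutputTree(io: Io, dir: Io.Dir) !void {
    try dir.makePath(io, "zig-out/bin"); // succeeds if it already exists
    switch (try dir.makePathStatus(io, "zig-out/lib")) {
        .existed => {}, // every component was already present
        .created => {}, // at least one component had to be created
    }
}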
|
pub const Stat = File.Stat;
|
||||||
|
pub const StatError = File.StatError;
|
||||||
|
|
||||||
|
pub fn stat(dir: Dir, io: Io) StatError!Stat {
|
||||||
|
return io.vtable.dirStat(io.userdata, dir);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub const StatPathError = File.OpenError || File.StatError;
|
||||||
|
|
||||||
|
/// Returns metadata for a file inside the directory.
|
||||||
|
///
|
||||||
|
/// On Windows, this requires three syscalls. On other operating systems, it
|
||||||
|
/// only takes one.
|
||||||
|
///
|
||||||
|
/// Symlinks are followed.
|
||||||
|
///
|
||||||
|
/// `sub_path` may be absolute, in which case `self` is ignored.
|
||||||
|
///
|
||||||
|
/// * On Windows, `sub_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
|
||||||
|
/// * On WASI, `sub_path` should be encoded as valid UTF-8.
|
||||||
|
/// * On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
|
||||||
|
pub fn statPath(dir: Dir, io: Io, sub_path: []const u8) StatPathError!File.Stat {
|
||||||
|
return io.vtable.dirStatPath(io.userdata, dir, sub_path);
|
||||||
|
}
|
||||||
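A sketch using `statPath` as described above (symlinks followed, a single syscall outside Windows); the `.directory` check mirrors the one inside `makePathStatus`.

const std = @import("std");
const Io = std.Io;

fn isDirectory(io: Io, dir: Io.Dir, sub_path: []const u8) !bool {
    const st = dir.statPath(io, sub_path) catch |err| switch (err) {
        error.FileNotFound => return false,
        else => |e| return e,
    };
    return st.kind == .directory;
}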
|
|||||||
@ -446,7 +446,11 @@ pub const Reader = struct {
|
|||||||
|
|
||||||
fn stream(io_reader: *Io.Reader, w: *Io.Writer, limit: Io.Limit) Io.Reader.StreamError!usize {
|
fn stream(io_reader: *Io.Reader, w: *Io.Writer, limit: Io.Limit) Io.Reader.StreamError!usize {
|
||||||
const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
|
const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
|
||||||
switch (r.mode) {
|
return streamMode(r, w, limit, r.mode);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn streamMode(r: *Reader, w: *Io.Writer, limit: Io.Limit, mode: Reader.Mode) Io.Reader.StreamError!usize {
|
||||||
|
switch (mode) {
|
||||||
.positional, .streaming => return w.sendFile(r, limit) catch |write_err| switch (write_err) {
|
.positional, .streaming => return w.sendFile(r, limit) catch |write_err| switch (write_err) {
|
||||||
error.Unimplemented => {
|
error.Unimplemented => {
|
||||||
r.mode = r.mode.toReading();
|
r.mode = r.mode.toReading();
|
||||||
|
|||||||
@ -63,7 +63,17 @@ const Closure = struct {
|
|||||||
|
|
||||||
pub const InitError = std.Thread.CpuCountError || Allocator.Error;
|
pub const InitError = std.Thread.CpuCountError || Allocator.Error;
|
||||||
|
|
||||||
pub fn init(gpa: Allocator) Pool {
|
/// Related:
|
||||||
|
/// * `init_single_threaded`
|
||||||
|
pub fn init(
|
||||||
|
/// Must be threadsafe. Only used for the following functions:
|
||||||
|
/// * `Io.VTable.async`
|
||||||
|
/// * `Io.VTable.concurrent`
|
||||||
|
/// * `Io.VTable.groupAsync`
|
||||||
|
/// If these functions are avoided, then `Allocator.failing` may be passed
|
||||||
|
/// here.
|
||||||
|
gpa: Allocator,
|
||||||
|
) Pool {
|
||||||
var pool: Pool = .{
|
var pool: Pool = .{
|
||||||
.allocator = gpa,
|
.allocator = gpa,
|
||||||
.threads = .empty,
|
.threads = .empty,
|
||||||
@ -77,6 +87,20 @@ pub fn init(gpa: Allocator) Pool {
|
|||||||
return pool;
|
return pool;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Statically initialize such that any call to the following functions will
|
||||||
|
/// fail with `error.OutOfMemory`:
|
||||||
|
/// * `Io.VTable.async`
|
||||||
|
/// * `Io.VTable.concurrent`
|
||||||
|
/// * `Io.VTable.groupAsync`
|
||||||
|
/// When initialized this way, `deinit` is safe, but unnecessary to call.
|
||||||
|
pub const init_single_threaded: Pool = .{
|
||||||
|
.allocator = .failing,
|
||||||
|
.threads = .empty,
|
||||||
|
.stack_size = std.Thread.SpawnConfig.default_stack_size,
|
||||||
|
.cpu_count = 1,
|
||||||
|
.concurrent_count = 0,
|
||||||
|
};
|
||||||
|
|
||||||
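A sketch of the single-threaded initialization documented above, assuming this Pool is the `std.Io.Threaded` type that the `std.fs.Dir` deprecation shims elsewhere in this commit instantiate the same way.

const std = @import("std");

fn blockingIo() void {
    var threaded: std.Io.Threaded = .init_single_threaded;
    defer threaded.deinit(); // safe, though unnecessary per the doc comment
    const io = threaded.io();
    _ = io; // hand this to std.Io.Dir / std.Io.File calls
}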
pub fn deinit(pool: *Pool) void {
|
pub fn deinit(pool: *Pool) void {
|
||||||
const gpa = pool.allocator;
|
const gpa = pool.allocator;
|
||||||
pool.join();
|
pool.join();
|
||||||
@ -136,6 +160,10 @@ pub fn io(pool: *Pool) Io {
|
|||||||
.conditionWait = conditionWait,
|
.conditionWait = conditionWait,
|
||||||
.conditionWake = conditionWake,
|
.conditionWake = conditionWake,
|
||||||
|
|
||||||
|
.dirMake = dirMake,
|
||||||
|
.dirStat = dirStat,
|
||||||
|
.dirStatPath = dirStatPath,
|
||||||
|
.fileStat = fileStat,
|
||||||
.createFile = createFile,
|
.createFile = createFile,
|
||||||
.fileOpen = fileOpen,
|
.fileOpen = fileOpen,
|
||||||
.fileClose = fileClose,
|
.fileClose = fileClose,
|
||||||
@ -520,10 +548,11 @@ fn groupAsync(
|
|||||||
|
|
||||||
fn groupWait(userdata: ?*anyopaque, group: *Io.Group, token: *anyopaque) void {
|
fn groupWait(userdata: ?*anyopaque, group: *Io.Group, token: *anyopaque) void {
|
||||||
const pool: *Pool = @ptrCast(@alignCast(userdata));
|
const pool: *Pool = @ptrCast(@alignCast(userdata));
|
||||||
_ = pool;
|
const gpa = pool.allocator;
|
||||||
|
|
||||||
if (builtin.single_threaded) return;
|
if (builtin.single_threaded) return;
|
||||||
|
|
||||||
|
// TODO these primitives are too high level, need to check cancel on EINTR
|
||||||
const group_state: *std.atomic.Value(usize) = @ptrCast(&group.state);
|
const group_state: *std.atomic.Value(usize) = @ptrCast(&group.state);
|
||||||
const reset_event: *ResetEvent = @ptrCast(&group.context);
|
const reset_event: *ResetEvent = @ptrCast(&group.context);
|
||||||
std.Thread.WaitGroup.waitStateless(group_state, reset_event);
|
std.Thread.WaitGroup.waitStateless(group_state, reset_event);
|
||||||
@ -531,8 +560,9 @@ fn groupWait(userdata: ?*anyopaque, group: *Io.Group, token: *anyopaque) void {
|
|||||||
var node: *std.SinglyLinkedList.Node = @ptrCast(@alignCast(token));
|
var node: *std.SinglyLinkedList.Node = @ptrCast(@alignCast(token));
|
||||||
while (true) {
|
while (true) {
|
||||||
const gc: *GroupClosure = @fieldParentPtr("node", node);
|
const gc: *GroupClosure = @fieldParentPtr("node", node);
|
||||||
gc.closure.requestCancel();
|
const node_next = node.next;
|
||||||
node = node.next orelse break;
|
gc.free(gpa);
|
||||||
|
node = node_next orelse break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
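The `groupWait` change above captures `node.next` before freeing the closure that owns the node; a generic sketch of that pattern, since reading `next` after the free would touch released memory.

const std = @import("std");

fn freeAll(gpa: std.mem.Allocator, first: ?*std.SinglyLinkedList.Node) void {
    var node = first orelse return;
    while (true) {
        const next = node.next; // capture before destroying the node
        gpa.destroy(node);
        node = next orelse break;
    }
}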
@ -724,6 +754,41 @@ fn conditionWake(userdata: ?*anyopaque, cond: *Io.Condition, wake: Io.Condition.
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn dirMake(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8, mode: Io.Dir.Mode) Io.Dir.MakeError!void {
|
||||||
|
const pool: *Pool = @ptrCast(@alignCast(userdata));
|
||||||
|
try pool.checkCancel();
|
||||||
|
|
||||||
|
_ = dir;
|
||||||
|
_ = sub_path;
|
||||||
|
_ = mode;
|
||||||
|
@panic("TODO");
|
||||||
|
}
|
||||||
|
|
||||||
|
fn dirStat(userdata: ?*anyopaque, dir: Io.Dir) Io.Dir.StatError!Io.Dir.Stat {
|
||||||
|
const pool: *Pool = @ptrCast(@alignCast(userdata));
|
||||||
|
try pool.checkCancel();
|
||||||
|
|
||||||
|
_ = dir;
|
||||||
|
@panic("TODO");
|
||||||
|
}
|
||||||
|
|
||||||
|
fn dirStatPath(userdata: ?*anyopaque, dir: Io.Dir, sub_path: []const u8) Io.Dir.StatError!Io.File.Stat {
|
||||||
|
const pool: *Pool = @ptrCast(@alignCast(userdata));
|
||||||
|
try pool.checkCancel();
|
||||||
|
|
||||||
|
_ = dir;
|
||||||
|
_ = sub_path;
|
||||||
|
@panic("TODO");
|
||||||
|
}
|
||||||
|
|
||||||
|
fn fileStat(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.File.Stat {
|
||||||
|
const pool: *Pool = @ptrCast(@alignCast(userdata));
|
||||||
|
try pool.checkCancel();
|
||||||
|
|
||||||
|
_ = file;
|
||||||
|
@panic("TODO");
|
||||||
|
}
|
||||||
|
|
||||||
fn createFile(
|
fn createFile(
|
||||||
userdata: ?*anyopaque,
|
userdata: ?*anyopaque,
|
||||||
dir: Io.Dir,
|
dir: Io.Dir,
|
||||||
|
|||||||
@ -2827,6 +2827,8 @@ pub const Allocating = struct {
|
|||||||
};
|
};
|
||||||
|
|
||||||
test "discarding sendFile" {
|
test "discarding sendFile" {
|
||||||
|
const io = testing.io;
|
||||||
|
|
||||||
var tmp_dir = testing.tmpDir(.{});
|
var tmp_dir = testing.tmpDir(.{});
|
||||||
defer tmp_dir.cleanup();
|
defer tmp_dir.cleanup();
|
||||||
|
|
||||||
@ -2837,7 +2839,7 @@ test "discarding sendFile" {
|
|||||||
try file_writer.interface.writeByte('h');
|
try file_writer.interface.writeByte('h');
|
||||||
try file_writer.interface.flush();
|
try file_writer.interface.flush();
|
||||||
|
|
||||||
var file_reader = file_writer.moveToReader();
|
var file_reader = file_writer.moveToReader(io);
|
||||||
try file_reader.seekTo(0);
|
try file_reader.seekTo(0);
|
||||||
|
|
||||||
var w_buffer: [256]u8 = undefined;
|
var w_buffer: [256]u8 = undefined;
|
||||||
@ -2847,6 +2849,8 @@ test "discarding sendFile" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
test "allocating sendFile" {
|
test "allocating sendFile" {
|
||||||
|
const io = testing.io;
|
||||||
|
|
||||||
var tmp_dir = testing.tmpDir(.{});
|
var tmp_dir = testing.tmpDir(.{});
|
||||||
defer tmp_dir.cleanup();
|
defer tmp_dir.cleanup();
|
||||||
|
|
||||||
@ -2857,7 +2861,7 @@ test "allocating sendFile" {
|
|||||||
try file_writer.interface.writeAll("abcd");
|
try file_writer.interface.writeAll("abcd");
|
||||||
try file_writer.interface.flush();
|
try file_writer.interface.flush();
|
||||||
|
|
||||||
var file_reader = file_writer.moveToReader();
|
var file_reader = file_writer.moveToReader(io);
|
||||||
try file_reader.seekTo(0);
|
try file_reader.seekTo(0);
|
||||||
try file_reader.interface.fill(2);
|
try file_reader.interface.fill(2);
|
||||||
|
|
||||||
|
|||||||
@ -57,6 +57,39 @@ pub const IpAddress = union(enum) {
|
|||||||
|
|
||||||
pub const Family = @typeInfo(IpAddress).@"union".tag_type.?;
|
pub const Family = @typeInfo(IpAddress).@"union".tag_type.?;
|
||||||
|
|
||||||
|
pub const ParseLiteralError = error{ InvalidAddress, InvalidPort };
|
||||||
|
|
||||||
|
/// Parse an IP address which may include a port.
|
||||||
|
///
|
||||||
|
/// For IPv4, this is written `address:port`.
|
||||||
|
///
|
||||||
|
/// For IPv6, RFC 3986 defines this as an "IP literal", and the port is
|
||||||
|
/// differentiated from the address by surrounding the address part in
|
||||||
|
/// brackets "[addr]:port". Even if the port is not given, the brackets are
|
||||||
|
/// mandatory.
|
||||||
|
pub fn parseLiteral(text: []const u8) ParseLiteralError!IpAddress {
|
||||||
|
if (text.len == 0) return error.InvalidAddress;
|
||||||
|
if (text[0] == '[') {
|
||||||
|
const addr_end = std.mem.indexOfScalar(u8, text, ']') orelse
|
||||||
|
return error.InvalidAddress;
|
||||||
|
const addr_text = text[1..addr_end];
|
||||||
|
const port: u16 = p: {
|
||||||
|
if (addr_end == text.len - 1) break :p 0;
|
||||||
|
if (text[addr_end + 1] != ':') return error.InvalidAddress;
|
||||||
|
break :p std.fmt.parseInt(u16, text[addr_end + 2 ..], 10) catch return error.InvalidPort;
|
||||||
|
};
|
||||||
|
return parseIp6(addr_text, port) catch error.InvalidAddress;
|
||||||
|
}
|
||||||
|
if (std.mem.indexOfScalar(u8, text, ':')) |i| {
|
||||||
|
const addr = Ip4Address.parse(text[0..i], 0) catch return error.InvalidAddress;
|
||||||
|
return .{ .ip4 = .{
|
||||||
|
.bytes = addr.bytes,
|
||||||
|
.port = std.fmt.parseInt(u16, text[i + 1 ..], 10) catch return error.InvalidPort,
|
||||||
|
} };
|
||||||
|
}
|
||||||
|
return parseIp4(text, 0) catch error.InvalidAddress;
|
||||||
|
}
|
||||||
|
|
||||||
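A few literal forms accepted by `parseLiteral` above, assuming the union is exposed as `std.Io.net.IpAddress`; a missing port parses as port 0, and the brackets are mandatory for IPv6 even without a port.

const std = @import("std");
const IpAddress = std.Io.net.IpAddress;

test "parseLiteral accepts the documented forms" {
    _ = try IpAddress.parseLiteral("127.0.0.1"); // port defaults to 0
    const with_port = try IpAddress.parseLiteral("127.0.0.1:8080");
    try std.testing.expectEqual(@as(u16, 8080), with_port.ip4.port);
    _ = try IpAddress.parseLiteral("[::1]:443");
    try std.testing.expectError(error.InvalidAddress, IpAddress.parseLiteral(""));
    try std.testing.expectError(error.InvalidAddress, IpAddress.parseLiteral("::1")); // brackets required
}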
/// Parse the given IP address string into an `IpAddress` value.
|
/// Parse the given IP address string into an `IpAddress` value.
|
||||||
///
|
///
|
||||||
/// This is a pure function but it cannot handle IPv6 addresses that have
|
/// This is a pure function but it cannot handle IPv6 addresses that have
|
||||||
|
|||||||
@ -77,7 +77,9 @@ pub const LookupError = error{
|
|||||||
InvalidDnsAAAARecord,
|
InvalidDnsAAAARecord,
|
||||||
InvalidDnsCnameRecord,
|
InvalidDnsCnameRecord,
|
||||||
NameServerFailure,
|
NameServerFailure,
|
||||||
} || Io.Timestamp.Error || IpAddress.BindError || Io.File.OpenError || Io.File.Reader.Error || Io.Cancelable;
|
/// Failed to open or read "/etc/hosts" or "/etc/resolv.conf".
|
||||||
|
DetectingNetworkConfigurationFailed,
|
||||||
|
} || Io.Timestamp.Error || IpAddress.BindError || Io.Cancelable;
|
||||||
|
|
||||||
pub const LookupResult = struct {
|
pub const LookupResult = struct {
|
||||||
/// How many `LookupOptions.addresses_buffer` elements are populated.
|
/// How many `LookupOptions.addresses_buffer` elements are populated.
|
||||||
@ -428,14 +430,25 @@ fn lookupHosts(host_name: HostName, io: Io, options: LookupOptions) !LookupResul
|
|||||||
error.AccessDenied,
|
error.AccessDenied,
|
||||||
=> return .empty,
|
=> return .empty,
|
||||||
|
|
||||||
else => |e| return e,
|
error.Canceled => |e| return e,
|
||||||
|
|
||||||
|
else => {
|
||||||
|
// TODO populate optional diagnostic struct
|
||||||
|
return error.DetectingNetworkConfigurationFailed;
|
||||||
|
},
|
||||||
};
|
};
|
||||||
defer file.close(io);
|
defer file.close(io);
|
||||||
|
|
||||||
var line_buf: [512]u8 = undefined;
|
var line_buf: [512]u8 = undefined;
|
||||||
var file_reader = file.reader(io, &line_buf);
|
var file_reader = file.reader(io, &line_buf);
|
||||||
return lookupHostsReader(host_name, options, &file_reader.interface) catch |err| switch (err) {
|
return lookupHostsReader(host_name, options, &file_reader.interface) catch |err| switch (err) {
|
||||||
error.ReadFailed => return file_reader.err.?,
|
error.ReadFailed => switch (file_reader.err.?) {
|
||||||
|
error.Canceled => |e| return e,
|
||||||
|
else => {
|
||||||
|
// TODO populate optional diagnostic struct
|
||||||
|
return error.DetectingNetworkConfigurationFailed;
|
||||||
|
},
|
||||||
|
},
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -211,11 +211,11 @@ test "listen on a port, send bytes, receive bytes" {
|
|||||||
const t = try std.Thread.spawn(.{}, S.clientFn, .{server.socket.address});
|
const t = try std.Thread.spawn(.{}, S.clientFn, .{server.socket.address});
|
||||||
defer t.join();
|
defer t.join();
|
||||||
|
|
||||||
var client = try server.accept(io);
|
var stream = try server.accept(io);
|
||||||
defer client.stream.close(io);
|
defer stream.close(io);
|
||||||
var buf: [16]u8 = undefined;
|
var buf: [16]u8 = undefined;
|
||||||
var stream_reader = client.stream.reader(io, &.{});
|
var stream_reader = stream.reader(io, &.{});
|
||||||
const n = try stream_reader.interface().readSliceShort(&buf);
|
const n = try stream_reader.interface.readSliceShort(&buf);
|
||||||
|
|
||||||
try testing.expectEqual(@as(usize, 12), n);
|
try testing.expectEqual(@as(usize, 12), n);
|
||||||
try testing.expectEqualSlices(u8, "Hello world!", buf[0..n]);
|
try testing.expectEqualSlices(u8, "Hello world!", buf[0..n]);
|
||||||
@ -267,10 +267,9 @@ fn testServer(server: *net.Server) anyerror!void {
|
|||||||
|
|
||||||
const io = testing.io;
|
const io = testing.io;
|
||||||
|
|
||||||
var client = try server.accept(io);
|
var stream = try server.accept(io);
|
||||||
|
var writer = stream.writer(io, &.{});
|
||||||
const stream = client.stream.writer(io);
|
try writer.interface.print("hello from server\n", .{});
|
||||||
try stream.print("hello from server\n", .{});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
test "listen on a unix socket, send bytes, receive bytes" {
|
test "listen on a unix socket, send bytes, receive bytes" {
|
||||||
@ -310,11 +309,11 @@ test "listen on a unix socket, send bytes, receive bytes" {
|
|||||||
const t = try std.Thread.spawn(.{}, S.clientFn, .{socket_path});
|
const t = try std.Thread.spawn(.{}, S.clientFn, .{socket_path});
|
||||||
defer t.join();
|
defer t.join();
|
||||||
|
|
||||||
var client = try server.accept(io);
|
var stream = try server.accept(io);
|
||||||
defer client.stream.close(io);
|
defer stream.close(io);
|
||||||
var buf: [16]u8 = undefined;
|
var buf: [16]u8 = undefined;
|
||||||
var stream_reader = client.stream.reader(io, &.{});
|
var stream_reader = stream.reader(io, &.{});
|
||||||
const n = try stream_reader.interface().readSliceShort(&buf);
|
const n = try stream_reader.interface.readSliceShort(&buf);
|
||||||
|
|
||||||
try testing.expectEqual(@as(usize, 12), n);
|
try testing.expectEqual(@as(usize, 12), n);
|
||||||
try testing.expectEqualSlices(u8, "Hello world!", buf[0..n]);
|
try testing.expectEqualSlices(u8, "Hello world!", buf[0..n]);
|
||||||
@ -366,10 +365,10 @@ test "non-blocking tcp server" {
|
|||||||
const socket_file = try net.tcpConnectToAddress(server.socket.address);
|
const socket_file = try net.tcpConnectToAddress(server.socket.address);
|
||||||
defer socket_file.close();
|
defer socket_file.close();
|
||||||
|
|
||||||
var client = try server.accept(io);
|
var stream = try server.accept(io);
|
||||||
defer client.stream.close(io);
|
defer stream.close(io);
|
||||||
const stream = client.stream.writer(io);
|
var writer = stream.writer(io, .{});
|
||||||
try stream.print("hello from server\n", .{});
|
try writer.interface.print("hello from server\n", .{});
|
||||||
|
|
||||||
var buf: [100]u8 = undefined;
|
var buf: [100]u8 = undefined;
|
||||||
const len = try socket_file.read(&buf);
|
const len = try socket_file.read(&buf);
|
||||||
|
|||||||
@ -105,6 +105,14 @@ pub const Options = struct {
|
|||||||
/// Verify that the server certificate is authorized by a given ca bundle.
|
/// Verify that the server certificate is authorized by a given ca bundle.
|
||||||
bundle: Certificate.Bundle,
|
bundle: Certificate.Bundle,
|
||||||
},
|
},
|
||||||
|
write_buffer: []u8,
|
||||||
|
read_buffer: []u8,
|
||||||
|
/// Cryptographically secure random bytes. The pointer is not captured; data is only
|
||||||
|
/// read during `init`.
|
||||||
|
entropy: *const [176]u8,
|
||||||
|
/// Current time according to the wall clock / calendar, in seconds.
|
||||||
|
realtime_now_seconds: i64,
|
||||||
|
|
||||||
/// If non-null, ssl secrets are logged to this stream. Creating such a log file allows
|
/// If non-null, ssl secrets are logged to this stream. Creating such a log file allows
|
||||||
/// other programs with access to that file to decrypt all traffic over this connection.
|
/// other programs with access to that file to decrypt all traffic over this connection.
|
||||||
///
|
///
|
||||||
@ -120,8 +128,6 @@ pub const Options = struct {
|
|||||||
/// application layer itself verifies that the amount of data received equals
|
/// application layer itself verifies that the amount of data received equals
|
||||||
/// the amount of data expected, such as HTTP with the Content-Length header.
|
/// the amount of data expected, such as HTTP with the Content-Length header.
|
||||||
allow_truncation_attacks: bool = false,
|
allow_truncation_attacks: bool = false,
|
||||||
write_buffer: []u8,
|
|
||||||
read_buffer: []u8,
|
|
||||||
/// Populated when `error.TlsAlert` is returned from `init`.
|
/// Populated when `error.TlsAlert` is returned from `init`.
|
||||||
alert: ?*tls.Alert = null,
|
alert: ?*tls.Alert = null,
|
||||||
};
|
};
|
||||||
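The new `entropy` and `realtime_now_seconds` fields move randomness and wall-clock reads out of `init` (see the next hunk, where they replace `crypto.random` and `std.time.timestamp`). A sketch of how a caller might produce them; the remaining required `Options` fields are not shown here.

const std = @import("std");

fn handshakeInputs() struct { entropy: [176]u8, realtime_now_seconds: i64 } {
    var entropy: [176]u8 = undefined;
    // 32 hello-random + 32 legacy-session-id + 112 key-share bytes, per the init hunk.
    std.crypto.random.bytes(&entropy);
    return .{
        .entropy = entropy,
        .realtime_now_seconds = std.time.timestamp(),
    };
}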
@ -189,14 +195,12 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
|
|||||||
};
|
};
|
||||||
const host_len: u16 = @intCast(host.len);
|
const host_len: u16 = @intCast(host.len);
|
||||||
|
|
||||||
var random_buffer: [176]u8 = undefined;
|
const client_hello_rand = options.entropy[0..32].*;
|
||||||
crypto.random.bytes(&random_buffer);
|
|
||||||
const client_hello_rand = random_buffer[0..32].*;
|
|
||||||
var key_seq: u64 = 0;
|
var key_seq: u64 = 0;
|
||||||
var server_hello_rand: [32]u8 = undefined;
|
var server_hello_rand: [32]u8 = undefined;
|
||||||
const legacy_session_id = random_buffer[32..64].*;
|
const legacy_session_id = options.entropy[32..64].*;
|
||||||
|
|
||||||
var key_share = KeyShare.init(random_buffer[64..176].*) catch |err| switch (err) {
|
var key_share = KeyShare.init(options.entropy[64..176].*) catch |err| switch (err) {
|
||||||
// Only possible to happen if the seed is all zeroes.
|
// Only possible to happen if the seed is all zeroes.
|
||||||
error.IdentityElement => return error.InsufficientEntropy,
|
error.IdentityElement => return error.InsufficientEntropy,
|
||||||
};
|
};
|
||||||
@ -321,7 +325,7 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
|
|||||||
var handshake_cipher: tls.HandshakeCipher = undefined;
|
var handshake_cipher: tls.HandshakeCipher = undefined;
|
||||||
var main_cert_pub_key: CertificatePublicKey = undefined;
|
var main_cert_pub_key: CertificatePublicKey = undefined;
|
||||||
var tls12_negotiated_group: ?tls.NamedGroup = null;
|
var tls12_negotiated_group: ?tls.NamedGroup = null;
|
||||||
const now_sec = std.time.timestamp();
|
const now_sec = options.realtime_now_seconds;
|
||||||
|
|
||||||
var cleartext_fragment_start: usize = 0;
|
var cleartext_fragment_start: usize = 0;
|
||||||
var cleartext_fragment_end: usize = 0;
|
var cleartext_fragment_end: usize = 0;
|
||||||
|
|||||||
@ -434,7 +434,7 @@ const Module = struct {
|
|||||||
};
|
};
|
||||||
errdefer pdb_file.close();
|
errdefer pdb_file.close();
|
||||||
|
|
||||||
const pdb_reader = try arena.create(std.fs.File.Reader);
|
const pdb_reader = try arena.create(Io.File.Reader);
|
||||||
pdb_reader.* = pdb_file.reader(try arena.alloc(u8, 4096));
|
pdb_reader.* = pdb_file.reader(try arena.alloc(u8, 4096));
|
||||||
|
|
||||||
var pdb = Pdb.init(gpa, pdb_reader) catch |err| switch (err) {
|
var pdb = Pdb.init(gpa, pdb_reader) catch |err| switch (err) {
|
||||||
@ -544,6 +544,7 @@ fn findModule(si: *SelfInfo, gpa: Allocator, address: usize) error{ MissingDebug
|
|||||||
}
|
}
|
||||||
|
|
||||||
const std = @import("std");
|
const std = @import("std");
|
||||||
|
const Io = std.Io;
|
||||||
const Allocator = std.mem.Allocator;
|
const Allocator = std.mem.Allocator;
|
||||||
const Dwarf = std.debug.Dwarf;
|
const Dwarf = std.debug.Dwarf;
|
||||||
const Pdb = std.debug.Pdb;
|
const Pdb = std.debug.Pdb;
|
||||||
|
|||||||
@ -710,7 +710,7 @@ pub const ProgramHeaderIterator = struct {
|
|||||||
const offset = it.phoff + size * it.index;
|
const offset = it.phoff + size * it.index;
|
||||||
try it.file_reader.seekTo(offset);
|
try it.file_reader.seekTo(offset);
|
||||||
|
|
||||||
return takeProgramHeader(&it.file_reader.interface, it.is_64, it.endian);
|
return try takeProgramHeader(&it.file_reader.interface, it.is_64, it.endian);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -731,7 +731,7 @@ pub const ProgramHeaderBufferIterator = struct {
|
|||||||
const offset = it.phoff + size * it.index;
|
const offset = it.phoff + size * it.index;
|
||||||
var reader = Io.Reader.fixed(it.buf[offset..]);
|
var reader = Io.Reader.fixed(it.buf[offset..]);
|
||||||
|
|
||||||
return takeProgramHeader(&reader, it.is_64, it.endian);
|
return try takeProgramHeader(&reader, it.is_64, it.endian);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -771,7 +771,7 @@ pub const SectionHeaderIterator = struct {
|
|||||||
const offset = it.shoff + size * it.index;
|
const offset = it.shoff + size * it.index;
|
||||||
try it.file_reader.seekTo(offset);
|
try it.file_reader.seekTo(offset);
|
||||||
|
|
||||||
return takeSectionHeader(&it.file_reader.interface, it.is_64, it.endian);
|
return try takeSectionHeader(&it.file_reader.interface, it.is_64, it.endian);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -793,7 +793,7 @@ pub const SectionHeaderBufferIterator = struct {
|
|||||||
if (offset > it.buf.len) return error.EndOfStream;
|
if (offset > it.buf.len) return error.EndOfStream;
|
||||||
var reader = Io.Reader.fixed(it.buf[@intCast(offset)..]);
|
var reader = Io.Reader.fixed(it.buf[@intCast(offset)..]);
|
||||||
|
|
||||||
return takeSectionHeader(&reader, it.is_64, it.endian);
|
return try takeSectionHeader(&reader, it.is_64, it.endian);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -826,12 +826,12 @@ pub const DynamicSectionIterator = struct {
|
|||||||
|
|
||||||
file_reader: *Io.File.Reader,
|
file_reader: *Io.File.Reader,
|
||||||
|
|
||||||
pub fn next(it: *SectionHeaderIterator) !?Elf64_Dyn {
|
pub fn next(it: *DynamicSectionIterator) !?Elf64_Dyn {
|
||||||
if (it.offset >= it.end_offset) return null;
|
if (it.offset >= it.end_offset) return null;
|
||||||
const size: u64 = if (it.is_64) @sizeOf(Elf64_Dyn) else @sizeOf(Elf32_Dyn);
|
const size: u64 = if (it.is_64) @sizeOf(Elf64_Dyn) else @sizeOf(Elf32_Dyn);
|
||||||
defer it.offset += size;
|
defer it.offset += size;
|
||||||
try it.file_reader.seekTo(it.offset);
|
try it.file_reader.seekTo(it.offset);
|
||||||
return takeDynamicSection(&it.file_reader.interface, it.is_64, it.endian);
|
return try takeDynamicSection(&it.file_reader.interface, it.is_64, it.endian);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@ -1,6 +1,11 @@
|
|||||||
|
//! Deprecated in favor of `Io.Dir`.
|
||||||
const Dir = @This();
|
const Dir = @This();
|
||||||
|
|
||||||
const builtin = @import("builtin");
|
const builtin = @import("builtin");
|
||||||
|
const native_os = builtin.os.tag;
|
||||||
|
|
||||||
const std = @import("../std.zig");
|
const std = @import("../std.zig");
|
||||||
|
const Io = std.Io;
|
||||||
const File = std.fs.File;
|
const File = std.fs.File;
|
||||||
const AtomicFile = std.fs.AtomicFile;
|
const AtomicFile = std.fs.AtomicFile;
|
||||||
const base64_encoder = fs.base64_encoder;
|
const base64_encoder = fs.base64_encoder;
|
||||||
@ -12,7 +17,6 @@ const Allocator = std.mem.Allocator;
|
|||||||
const assert = std.debug.assert;
|
const assert = std.debug.assert;
|
||||||
const linux = std.os.linux;
|
const linux = std.os.linux;
|
||||||
const windows = std.os.windows;
|
const windows = std.os.windows;
|
||||||
const native_os = builtin.os.tag;
|
|
||||||
const have_flock = @TypeOf(posix.system.flock) != void;
|
const have_flock = @TypeOf(posix.system.flock) != void;
|
||||||
|
|
||||||
fd: Handle,
|
fd: Handle,
|
||||||
@ -1189,84 +1193,41 @@ pub fn createFileW(self: Dir, sub_path_w: []const u16, flags: File.CreateFlags)
|
|||||||
return file;
|
return file;
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const MakeError = posix.MakeDirError;
|
/// Deprecated in favor of `Io.Dir.MakeError`.
|
||||||
|
pub const MakeError = Io.Dir.MakeError;
|
||||||
|
|
||||||
/// Creates a single directory with a relative or absolute path.
|
/// Deprecated in favor of `Io.Dir.makeDir`.
|
||||||
/// To create multiple directories to make an entire path, see `makePath`.
|
|
||||||
/// To operate on only absolute paths, see `makeDirAbsolute`.
|
|
||||||
/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
|
|
||||||
/// On WASI, `sub_path` should be encoded as valid UTF-8.
|
|
||||||
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
|
|
||||||
pub fn makeDir(self: Dir, sub_path: []const u8) MakeError!void {
|
pub fn makeDir(self: Dir, sub_path: []const u8) MakeError!void {
|
||||||
try posix.mkdirat(self.fd, sub_path, default_mode);
|
var threaded: Io.Threaded = .init_single_threaded;
|
||||||
|
const io = threaded.io();
|
||||||
|
return Io.Dir.makeDir(.{ .handle = self.fd }, io, sub_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Same as `makeDir`, but `sub_path` is null-terminated.
|
/// Deprecated in favor of `Io.Dir.makeDir`.
|
||||||
/// To create multiple directories to make an entire path, see `makePath`.
|
|
||||||
/// To operate on only absolute paths, see `makeDirAbsoluteZ`.
|
|
||||||
pub fn makeDirZ(self: Dir, sub_path: [*:0]const u8) MakeError!void {
|
pub fn makeDirZ(self: Dir, sub_path: [*:0]const u8) MakeError!void {
|
||||||
try posix.mkdiratZ(self.fd, sub_path, default_mode);
|
try posix.mkdiratZ(self.fd, sub_path, default_mode);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates a single directory with a relative or absolute null-terminated WTF-16 LE-encoded path.
|
/// Deprecated in favor of `Io.Dir.makeDir`.
|
||||||
/// To create multiple directories to make an entire path, see `makePath`.
|
|
||||||
/// To operate on only absolute paths, see `makeDirAbsoluteW`.
|
|
||||||
pub fn makeDirW(self: Dir, sub_path: [*:0]const u16) MakeError!void {
|
pub fn makeDirW(self: Dir, sub_path: [*:0]const u16) MakeError!void {
|
||||||
try posix.mkdiratW(self.fd, mem.span(sub_path), default_mode);
|
try posix.mkdiratW(self.fd, mem.span(sub_path), default_mode);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Calls makeDir iteratively to make an entire path
|
/// Deprecated in favor of `Io.Dir.makePath`.
|
||||||
/// (i.e. creating any parent directories that do not exist).
|
pub fn makePath(self: Dir, sub_path: []const u8) MakePathError!void {
|
||||||
/// Returns success if the path already exists and is a directory.
|
|
||||||
/// This function is not atomic, and if it returns an error, the file system may
|
|
||||||
/// have been modified regardless.
|
|
||||||
/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
|
|
||||||
/// On WASI, `sub_path` should be encoded as valid UTF-8.
|
|
||||||
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
|
|
||||||
/// Fails on an empty path with `error.BadPathName` as that is not a path that can be created.
|
|
||||||
///
|
|
||||||
/// Paths containing `..` components are handled differently depending on the platform:
|
|
||||||
/// - On Windows, `..` are resolved before the path is passed to NtCreateFile, meaning
|
|
||||||
/// a `sub_path` like "first/../second" will resolve to "second" and only a
|
|
||||||
/// `./second` directory will be created.
|
|
||||||
/// - On other platforms, `..` are not resolved before the path is passed to `mkdirat`,
|
|
||||||
/// meaning a `sub_path` like "first/../second" will create both a `./first`
|
|
||||||
/// and a `./second` directory.
|
|
||||||
pub fn makePath(self: Dir, sub_path: []const u8) (MakeError || StatFileError)!void {
|
|
||||||
_ = try self.makePathStatus(sub_path);
|
_ = try self.makePathStatus(sub_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const MakePathStatus = enum { existed, created };
|
/// Deprecated in favor of `Io.Dir.MakePathStatus`.
|
||||||
/// Same as `makePath` except returns whether the path already existed or was successfully created.
|
pub const MakePathStatus = Io.Dir.MakePathStatus;
|
||||||
pub fn makePathStatus(self: Dir, sub_path: []const u8) (MakeError || StatFileError)!MakePathStatus {
|
/// Deprecated in favor of `Io.Dir.MakePathError`.
|
||||||
var it = try fs.path.componentIterator(sub_path);
|
pub const MakePathError = Io.Dir.MakePathError;
|
||||||
var status: MakePathStatus = .existed;
|
|
||||||
var component = it.last() orelse return error.BadPathName;
|
/// Deprecated in favor of `Io.Dir.makePathStatus`.
|
||||||
while (true) {
|
pub fn makePathStatus(self: Dir, sub_path: []const u8) MakePathError!MakePathStatus {
|
||||||
if (self.makeDir(component.path)) |_| {
|
var threaded: Io.Threaded = .init_single_threaded;
|
||||||
status = .created;
|
const io = threaded.io();
|
||||||
} else |err| switch (err) {
|
return Io.Dir.makePathStatus(.{ .handle = self.fd }, io, sub_path);
|
||||||
error.PathAlreadyExists => {
|
|
||||||
// stat the file and return an error if it's not a directory
|
|
||||||
// this is important because otherwise a dangling symlink
|
|
||||||
// could cause an infinite loop
|
|
||||||
check_dir: {
|
|
||||||
// workaround for windows, see https://github.com/ziglang/zig/issues/16738
|
|
||||||
const fstat = self.statFile(component.path) catch |stat_err| switch (stat_err) {
|
|
||||||
error.IsDir => break :check_dir,
|
|
||||||
else => |e| return e,
|
|
||||||
};
|
|
||||||
if (fstat.kind != .directory) return error.NotDir;
|
|
||||||
}
|
|
||||||
},
|
|
||||||
error.FileNotFound => |e| {
|
|
||||||
component = it.previous() orelse return e;
|
|
||||||
continue;
|
|
||||||
},
|
|
||||||
else => |e| return e,
|
|
||||||
}
|
|
||||||
component = it.next() orelse return status;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Windows only. Calls makeOpenDirAccessMaskW iteratively to make an entire path
|
/// Windows only. Calls makeOpenDirAccessMaskW iteratively to make an entire path
|
||||||
@ -2052,20 +2013,11 @@ pub fn readLinkW(self: Dir, sub_path_w: []const u16, buffer: []u8) ![]u8 {
|
|||||||
return windows.ReadLink(self.fd, sub_path_w, buffer);
|
return windows.ReadLink(self.fd, sub_path_w, buffer);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Read all of file contents using a preallocated buffer.
|
/// Deprecated in favor of `Io.Dir.readFile`.
|
||||||
/// The returned slice has the same pointer as `buffer`. If the length matches `buffer.len`
|
|
||||||
/// the situation is ambiguous. It could either mean that the entire file was read, and
|
|
||||||
/// it exactly fits the buffer, or it could mean the buffer was not big enough for the
|
|
||||||
/// entire file.
|
|
||||||
/// On Windows, `file_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
|
|
||||||
/// On WASI, `file_path` should be encoded as valid UTF-8.
|
|
||||||
/// On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
|
|
||||||
pub fn readFile(self: Dir, file_path: []const u8, buffer: []u8) ![]u8 {
|
pub fn readFile(self: Dir, file_path: []const u8, buffer: []u8) ![]u8 {
|
||||||
var file = try self.openFile(file_path, .{});
|
var threaded: Io.Threaded = .init_single_threaded;
|
||||||
defer file.close();
|
const io = threaded.io();
|
||||||
|
return Io.Dir.readFile(.{ .handle = self.fd }, io, file_path, buffer);
|
||||||
const end_index = try file.readAll(buffer);
|
|
||||||
return buffer[0..end_index];
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const ReadFileAllocError = File.OpenError || File.ReadError || Allocator.Error || error{
|
pub const ReadFileAllocError = File.OpenError || File.ReadError || Allocator.Error || error{
|
||||||
@ -2091,7 +2043,7 @@ pub fn readFileAlloc(
|
|||||||
/// Used to allocate the result.
|
/// Used to allocate the result.
|
||||||
gpa: Allocator,
|
gpa: Allocator,
|
||||||
/// If reached or exceeded, `error.StreamTooLong` is returned instead.
|
/// If reached or exceeded, `error.StreamTooLong` is returned instead.
|
||||||
limit: std.Io.Limit,
|
limit: Io.Limit,
|
||||||
) ReadFileAllocError![]u8 {
|
) ReadFileAllocError![]u8 {
|
||||||
return readFileAllocOptions(dir, sub_path, gpa, limit, .of(u8), null);
|
return readFileAllocOptions(dir, sub_path, gpa, limit, .of(u8), null);
|
||||||
}
|
}
|
||||||
@ -2101,6 +2053,8 @@ pub fn readFileAlloc(
|
|||||||
///
|
///
|
||||||
/// If the file size is already known, a better alternative is to initialize a
|
/// If the file size is already known, a better alternative is to initialize a
|
||||||
/// `File.Reader`.
|
/// `File.Reader`.
|
||||||
|
///
|
||||||
|
/// TODO move this function to Io.Dir
|
||||||
pub fn readFileAllocOptions(
|
pub fn readFileAllocOptions(
|
||||||
dir: Dir,
|
dir: Dir,
|
||||||
/// On Windows, should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
|
/// On Windows, should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
|
||||||
@ -2110,13 +2064,16 @@ pub fn readFileAllocOptions(
|
|||||||
/// Used to allocate the result.
|
/// Used to allocate the result.
|
||||||
gpa: Allocator,
|
gpa: Allocator,
|
||||||
/// If reached or exceeded, `error.StreamTooLong` is returned instead.
|
/// If reached or exceeded, `error.StreamTooLong` is returned instead.
|
||||||
limit: std.Io.Limit,
|
limit: Io.Limit,
|
||||||
comptime alignment: std.mem.Alignment,
|
comptime alignment: std.mem.Alignment,
|
||||||
comptime sentinel: ?u8,
|
comptime sentinel: ?u8,
|
||||||
) ReadFileAllocError!(if (sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
|
) ReadFileAllocError!(if (sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
|
||||||
|
var threaded: Io.Threaded = .init_single_threaded;
|
||||||
|
const io = threaded.io();
|
||||||
|
|
||||||
var file = try dir.openFile(sub_path, .{});
|
var file = try dir.openFile(sub_path, .{});
|
||||||
defer file.close();
|
defer file.close();
|
||||||
var file_reader = file.reader(&.{});
|
var file_reader = file.reader(io, &.{});
|
||||||
return file_reader.interface.allocRemainingAlignedSentinel(gpa, limit, alignment, sentinel) catch |err| switch (err) {
|
return file_reader.interface.allocRemainingAlignedSentinel(gpa, limit, alignment, sentinel) catch |err| switch (err) {
|
||||||
error.ReadFailed => return file_reader.err.?,
|
error.ReadFailed => return file_reader.err.?,
|
||||||
error.OutOfMemory, error.StreamTooLong => |e| return e,
|
error.OutOfMemory, error.StreamTooLong => |e| return e,
|
||||||
@ -2647,6 +2604,8 @@ pub const CopyFileError = File.OpenError || File.StatError ||
|
|||||||
/// [WTF-8](https://wtf-8.codeberg.page/). On WASI, both paths should be
|
/// [WTF-8](https://wtf-8.codeberg.page/). On WASI, both paths should be
|
||||||
/// encoded as valid UTF-8. On other platforms, both paths are an opaque
|
/// encoded as valid UTF-8. On other platforms, both paths are an opaque
|
||||||
/// sequence of bytes with no particular encoding.
|
/// sequence of bytes with no particular encoding.
|
||||||
|
///
|
||||||
|
/// TODO move this function to Io.Dir
|
||||||
pub fn copyFile(
|
pub fn copyFile(
|
||||||
source_dir: Dir,
|
source_dir: Dir,
|
||||||
source_path: []const u8,
|
source_path: []const u8,
|
||||||
@ -2654,11 +2613,15 @@ pub fn copyFile(
|
|||||||
dest_path: []const u8,
|
dest_path: []const u8,
|
||||||
options: CopyFileOptions,
|
options: CopyFileOptions,
|
||||||
) CopyFileError!void {
|
) CopyFileError!void {
|
||||||
var file_reader: File.Reader = .init(try source_dir.openFile(source_path, .{}), &.{});
|
var threaded: Io.Threaded = .init_single_threaded;
|
||||||
defer file_reader.file.close();
|
const io = threaded.io();
|
||||||
|
|
||||||
|
const file = try source_dir.openFile(source_path, .{});
|
||||||
|
var file_reader: File.Reader = .init(.{ .handle = file.handle }, io, &.{});
|
||||||
|
defer file_reader.file.close(io);
|
||||||
|
|
||||||
const mode = options.override_mode orelse blk: {
|
const mode = options.override_mode orelse blk: {
|
||||||
const st = try file_reader.file.stat();
|
const st = try file_reader.file.stat(io);
|
||||||
file_reader.size = st.size;
|
file_reader.size = st.size;
|
||||||
break :blk st.mode;
|
break :blk st.mode;
|
||||||
};
|
};
|
||||||
@ -2708,6 +2671,7 @@ pub fn atomicFile(self: Dir, dest_path: []const u8, options: AtomicFileOptions)
|
|||||||
pub const Stat = File.Stat;
|
pub const Stat = File.Stat;
|
||||||
pub const StatError = File.StatError;
|
pub const StatError = File.StatError;
|
||||||
|
|
||||||
|
/// Deprecated in favor of `Io.Dir.stat`.
|
||||||
pub fn stat(self: Dir) StatError!Stat {
|
pub fn stat(self: Dir) StatError!Stat {
|
||||||
const file: File = .{ .handle = self.fd };
|
const file: File = .{ .handle = self.fd };
|
||||||
return file.stat();
|
return file.stat();
|
||||||
@ -2715,17 +2679,7 @@ pub fn stat(self: Dir) StatError!Stat {
|
|||||||
|
|
||||||
pub const StatFileError = File.OpenError || File.StatError || posix.FStatAtError;
|
pub const StatFileError = File.OpenError || File.StatError || posix.FStatAtError;
|
||||||
|
|
||||||
/// Returns metadata for a file inside the directory.
|
/// Deprecated in favor of `Io.Dir.statPath`.
|
||||||
///
|
|
||||||
/// On Windows, this requires three syscalls. On other operating systems, it
|
|
||||||
/// only takes one.
|
|
||||||
///
|
|
||||||
/// Symlinks are followed.
|
|
||||||
///
|
|
||||||
/// `sub_path` may be absolute, in which case `self` is ignored.
|
|
||||||
/// On Windows, `sub_path` should be encoded as [WTF-8](https://wtf-8.codeberg.page/).
|
|
||||||
/// On WASI, `sub_path` should be encoded as valid UTF-8.
|
|
||||||
/// On other platforms, `sub_path` is an opaque sequence of bytes with no particular encoding.
|
|
||||||
pub fn statFile(self: Dir, sub_path: []const u8) StatFileError!Stat {
|
pub fn statFile(self: Dir, sub_path: []const u8) StatFileError!Stat {
|
||||||
if (native_os == .windows) {
|
if (native_os == .windows) {
|
||||||
var file = try self.openFile(sub_path, .{});
|
var file = try self.openFile(sub_path, .{});
|
||||||
@ -2799,3 +2753,11 @@ pub fn setPermissions(self: Dir, permissions: Permissions) SetPermissionsError!v
|
|||||||
const file: File = .{ .handle = self.fd };
|
const file: File = .{ .handle = self.fd };
|
||||||
try file.setPermissions(permissions);
|
try file.setPermissions(permissions);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn adaptToNewApi(dir: Dir) Io.Dir {
|
||||||
|
return .{ .handle = dir.fd };
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn adaptFromNewApi(dir: Io.Dir) Dir {
|
||||||
|
return .{ .fd = dir.handle };
|
||||||
|
}
|
||||||
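The two adapters above bridge the old and new directory types during the migration; a brief sketch of going from a `std.fs.Dir` to the `std.Io.Dir` API.

const std = @import("std");

fn statCwd(io: std.Io) !std.Io.Dir.Stat {
    const legacy: std.fs.Dir = std.fs.cwd();
    const dir: std.Io.Dir = legacy.adaptToNewApi();
    return dir.stat(io);
}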
|
|||||||
@ -858,10 +858,12 @@ pub const Writer = struct {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn moveToReader(w: *Writer) Reader {
|
/// TODO when this logic moves from fs.File to Io.File the io parameter should be deleted
|
||||||
|
pub fn moveToReader(w: *Writer, io: std.Io) Reader {
|
||||||
defer w.* = undefined;
|
defer w.* = undefined;
|
||||||
return .{
|
return .{
|
||||||
.file = w.file,
|
.io = io,
|
||||||
|
.file = .{ .handle = w.file.handle },
|
||||||
.mode = w.mode,
|
.mode = w.mode,
|
||||||
.pos = w.pos,
|
.pos = w.pos,
|
||||||
.interface = Reader.initInterface(w.interface.buffer),
|
.interface = Reader.initInterface(w.interface.buffer),
|
||||||
@ -1350,15 +1352,15 @@ pub const Writer = struct {
|
|||||||
///
|
///
|
||||||
/// Positional is more threadsafe, since the global seek position is not
|
/// Positional is more threadsafe, since the global seek position is not
|
||||||
/// affected.
|
/// affected.
|
||||||
pub fn reader(file: File, buffer: []u8) Reader {
|
pub fn reader(file: File, io: std.Io, buffer: []u8) Reader {
|
||||||
return .init(file, buffer);
|
return .init(.{ .handle = file.handle }, io, buffer);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Positional is more threadsafe, since the global seek position is not
|
/// Positional is more threadsafe, since the global seek position is not
|
||||||
/// affected, but when such syscalls are not available, preemptively
|
/// affected, but when such syscalls are not available, preemptively
|
||||||
/// initializing in streaming mode skips a failed syscall.
|
/// initializing in streaming mode skips a failed syscall.
|
||||||
pub fn readerStreaming(file: File, buffer: []u8) Reader {
|
pub fn readerStreaming(file: File, io: std.Io, buffer: []u8) Reader {
|
||||||
return .initStreaming(file, buffer);
|
return .initStreaming(.{ .handle = file.handle }, io, buffer);
|
||||||
}
|
}
|
||||||
|
|
||||||
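A sketch of the updated `reader` signature above, written in the same style as the tests later in this commit (`testing.io`, `readSliceShort`); the file name and contents are placeholders.

const std = @import("std");

test "fs.File.reader takes an Io" {
    const io = std.testing.io;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try tmp.dir.writeFile(.{ .sub_path = "greeting.txt", .data = "hello\n" });

    var file = try tmp.dir.openFile("greeting.txt", .{});
    defer file.close();

    var rbuf: [64]u8 = undefined;
    var file_reader = file.reader(io, &rbuf);
    var out: [16]u8 = undefined;
    const n = try file_reader.interface.readSliceShort(&out);
    try std.testing.expectEqualStrings("hello\n", out[0..n]);
}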
/// Defaults to positional reading; falls back to streaming.
|
/// Defaults to positional reading; falls back to streaming.
|
||||||
@ -1538,3 +1540,11 @@ pub fn downgradeLock(file: File) LockError!void {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn adaptToNewApi(file: File) std.Io.File {
|
||||||
|
return .{ .handle = file.handle };
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn adaptFromNewApi(file: std.Io.File) File {
|
||||||
|
return .{ .handle = file.handle };
|
||||||
|
}
|
||||||
|
|||||||
@ -1,10 +1,12 @@
|
|||||||
const std = @import("../std.zig");
|
|
||||||
const builtin = @import("builtin");
|
const builtin = @import("builtin");
|
||||||
|
const native_os = builtin.os.tag;
|
||||||
|
|
||||||
|
const std = @import("../std.zig");
|
||||||
|
const Io = std.Io;
|
||||||
const testing = std.testing;
|
const testing = std.testing;
|
||||||
const fs = std.fs;
|
const fs = std.fs;
|
||||||
const mem = std.mem;
|
const mem = std.mem;
|
||||||
const wasi = std.os.wasi;
|
const wasi = std.os.wasi;
|
||||||
const native_os = builtin.os.tag;
|
|
||||||
const windows = std.os.windows;
|
const windows = std.os.windows;
|
||||||
const posix = std.posix;
|
const posix = std.posix;
|
||||||
|
|
||||||
@ -73,6 +75,7 @@ const PathType = enum {
|
|||||||
};
|
};
|
||||||
|
|
||||||
const TestContext = struct {
|
const TestContext = struct {
|
||||||
|
io: Io,
|
||||||
path_type: PathType,
|
path_type: PathType,
|
||||||
path_sep: u8,
|
path_sep: u8,
|
||||||
arena: ArenaAllocator,
|
arena: ArenaAllocator,
|
||||||
@ -83,6 +86,7 @@ const TestContext = struct {
|
|||||||
pub fn init(path_type: PathType, path_sep: u8, allocator: mem.Allocator, transform_fn: *const PathType.TransformFn) TestContext {
|
pub fn init(path_type: PathType, path_sep: u8, allocator: mem.Allocator, transform_fn: *const PathType.TransformFn) TestContext {
|
||||||
const tmp = tmpDir(.{ .iterate = true });
|
const tmp = tmpDir(.{ .iterate = true });
|
||||||
return .{
|
return .{
|
||||||
|
.io = testing.io,
|
||||||
.path_type = path_type,
|
.path_type = path_type,
|
||||||
.path_sep = path_sep,
|
.path_sep = path_sep,
|
||||||
.arena = ArenaAllocator.init(allocator),
|
.arena = ArenaAllocator.init(allocator),
|
||||||
@ -1319,6 +1323,8 @@ test "max file name component lengths" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
test "writev, readv" {
|
test "writev, readv" {
|
||||||
|
const io = testing.io;
|
||||||
|
|
||||||
var tmp = tmpDir(.{});
|
var tmp = tmpDir(.{});
|
||||||
defer tmp.cleanup();
|
defer tmp.cleanup();
|
||||||
|
|
||||||
@ -1327,78 +1333,55 @@ test "writev, readv" {
|
|||||||
|
|
||||||
var buf1: [line1.len]u8 = undefined;
|
var buf1: [line1.len]u8 = undefined;
|
||||||
var buf2: [line2.len]u8 = undefined;
|
var buf2: [line2.len]u8 = undefined;
|
||||||
var write_vecs = [_]posix.iovec_const{
|
var write_vecs: [2][]const u8 = .{ line1, line2 };
|
||||||
.{
|
var read_vecs: [2][]u8 = .{ &buf2, &buf1 };
|
||||||
.base = line1,
|
|
||||||
.len = line1.len,
|
|
||||||
},
|
|
||||||
.{
|
|
||||||
.base = line2,
|
|
||||||
.len = line2.len,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
var read_vecs = [_]posix.iovec{
|
|
||||||
.{
|
|
||||||
.base = &buf2,
|
|
||||||
.len = buf2.len,
|
|
||||||
},
|
|
||||||
.{
|
|
||||||
.base = &buf1,
|
|
||||||
.len = buf1.len,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
var src_file = try tmp.dir.createFile("test.txt", .{ .read = true });
|
var src_file = try tmp.dir.createFile("test.txt", .{ .read = true });
|
||||||
defer src_file.close();
|
defer src_file.close();
|
||||||
|
|
||||||
try src_file.writevAll(&write_vecs);
|
var writer = src_file.writerStreaming(&.{});
|
||||||
|
|
||||||
|
try writer.interface.writeVecAll(&write_vecs);
|
||||||
|
try writer.interface.flush();
|
||||||
try testing.expectEqual(@as(u64, line1.len + line2.len), try src_file.getEndPos());
|
try testing.expectEqual(@as(u64, line1.len + line2.len), try src_file.getEndPos());
|
||||||
try src_file.seekTo(0);
|
|
||||||
const read = try src_file.readvAll(&read_vecs);
|
var reader = writer.moveToReader(io);
|
||||||
try testing.expectEqual(@as(usize, line1.len + line2.len), read);
|
try reader.seekTo(0);
|
||||||
|
try reader.interface.readVecAll(&read_vecs);
|
||||||
try testing.expectEqualStrings(&buf1, "line2\n");
|
try testing.expectEqualStrings(&buf1, "line2\n");
|
||||||
try testing.expectEqualStrings(&buf2, "line1\n");
|
try testing.expectEqualStrings(&buf2, "line1\n");
|
||||||
|
try testing.expectError(error.EndOfStream, reader.interface.readSliceAll(&buf1));
|
||||||
}
|
}
|
||||||
|
|
||||||
test "pwritev, preadv" {
|
test "pwritev, preadv" {
|
||||||
|
const io = testing.io;
|
||||||
|
|
||||||
var tmp = tmpDir(.{});
|
var tmp = tmpDir(.{});
|
||||||
defer tmp.cleanup();
|
defer tmp.cleanup();
|
||||||
|
|
||||||
const line1 = "line1\n";
|
const line1 = "line1\n";
|
||||||
const line2 = "line2\n";
|
const line2 = "line2\n";
|
||||||
|
var lines: [2][]const u8 = .{ line1, line2 };
|
||||||
var buf1: [line1.len]u8 = undefined;
|
var buf1: [line1.len]u8 = undefined;
|
||||||
var buf2: [line2.len]u8 = undefined;
|
var buf2: [line2.len]u8 = undefined;
|
||||||
var write_vecs = [_]posix.iovec_const{
|
var read_vecs: [2][]u8 = .{ &buf2, &buf1 };
|
||||||
.{
|
|
||||||
.base = line1,
|
|
||||||
.len = line1.len,
|
|
||||||
},
|
|
||||||
.{
|
|
||||||
.base = line2,
|
|
||||||
.len = line2.len,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
var read_vecs = [_]posix.iovec{
|
|
||||||
.{
|
|
||||||
-.base = &buf2,
-.len = buf2.len,
-},
-.{
-.base = &buf1,
-.len = buf1.len,
-},
-};

var src_file = try tmp.dir.createFile("test.txt", .{ .read = true });
defer src_file.close();

-try src_file.pwritevAll(&write_vecs, 16);
+var writer = src_file.writer(&.{});

+try writer.seekTo(16);
+try writer.interface.writeVecAll(&lines);
+try writer.interface.flush();
try testing.expectEqual(@as(u64, 16 + line1.len + line2.len), try src_file.getEndPos());
-const read = try src_file.preadvAll(&read_vecs, 16);
-try testing.expectEqual(@as(usize, line1.len + line2.len), read);
+var reader = writer.moveToReader(io);
+try reader.seekTo(16);
+try reader.interface.readVecAll(&read_vecs);
try testing.expectEqualStrings(&buf1, "line2\n");
try testing.expectEqualStrings(&buf2, "line1\n");
+try testing.expectError(error.EndOfStream, reader.interface.readSliceAll(&buf1));
}

test "setEndPos" {
@@ -1406,6 +1389,8 @@ test "setEndPos" {
if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest;
if (builtin.cpu.arch.isMIPS64() and (builtin.abi == .gnuabin32 or builtin.abi == .muslabin32)) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/23806

+const io = testing.io;

var tmp = tmpDir(.{});
defer tmp.cleanup();

@@ -1416,11 +1401,13 @@ test "setEndPos" {

const initial_size = try f.getEndPos();
var buffer: [32]u8 = undefined;
+var reader = f.reader(io, &.{});

{
try f.setEndPos(initial_size);
try testing.expectEqual(initial_size, try f.getEndPos());
-try testing.expectEqual(initial_size, try f.preadAll(&buffer, 0));
+try reader.seekTo(0);
+try testing.expectEqual(initial_size, reader.interface.readSliceShort(&buffer));
try testing.expectEqualStrings("ninebytes", buffer[0..@intCast(initial_size)]);
}

@@ -1428,7 +1415,8 @@ test "setEndPos" {
const larger = initial_size + 4;
try f.setEndPos(larger);
try testing.expectEqual(larger, try f.getEndPos());
-try testing.expectEqual(larger, try f.preadAll(&buffer, 0));
+try reader.seekTo(0);
+try testing.expectEqual(larger, reader.interface.readSliceShort(&buffer));
try testing.expectEqualStrings("ninebytes\x00\x00\x00\x00", buffer[0..@intCast(larger)]);
}

@@ -1436,25 +1424,21 @@ test "setEndPos" {
const smaller = initial_size - 5;
try f.setEndPos(smaller);
try testing.expectEqual(smaller, try f.getEndPos());
-try testing.expectEqual(smaller, try f.preadAll(&buffer, 0));
+try reader.seekTo(0);
+try testing.expectEqual(smaller, try reader.interface.readSliceShort(&buffer));
try testing.expectEqualStrings("nine", buffer[0..@intCast(smaller)]);
}

try f.setEndPos(0);
try testing.expectEqual(0, try f.getEndPos());
-try testing.expectEqual(0, try f.preadAll(&buffer, 0));
+try reader.seekTo(0);
+try testing.expectEqual(0, try reader.interface.readSliceShort(&buffer));

// Invalid file length should error gracefully. Actual limit is host
// and file-system dependent, but 1PB should fail on filesystems like
// EXT4 and NTFS. But XFS or Btrfs support up to 8EiB files.
-f.setEndPos(0x4_0000_0000_0000) catch |err| if (err != error.FileTooBig) {
-return err;
-};
+try testing.expectError(error.FileTooBig, f.setEndPos(0x4_0000_0000_0000));
+try testing.expectError(error.FileTooBig, f.setEndPos(std.math.maxInt(u63)));

-f.setEndPos(std.math.maxInt(u63)) catch |err| if (err != error.FileTooBig) {
-return err;
-};
-
try testing.expectError(error.FileTooBig, f.setEndPos(std.math.maxInt(u63) + 1));
try testing.expectError(error.FileTooBig, f.setEndPos(std.math.maxInt(u64)));
}
@@ -1560,31 +1544,6 @@ test "sendfile with buffered data" {
try std.testing.expectEqualSlices(u8, "AAAA", written_buf[0..amt]);
}

-test "copyRangeAll" {
-var tmp = tmpDir(.{});
-defer tmp.cleanup();
-
-try tmp.dir.makePath("os_test_tmp");
-
-var dir = try tmp.dir.openDir("os_test_tmp", .{});
-defer dir.close();
-
-var src_file = try dir.createFile("file1.txt", .{ .read = true });
-defer src_file.close();
-
-const data = "u6wj+JmdF3qHsFPE BUlH2g4gJCmEz0PP";
-try src_file.writeAll(data);
-
-var dest_file = try dir.createFile("file2.txt", .{ .read = true });
-defer dest_file.close();
-
-var written_buf: [100]u8 = undefined;
-_ = try src_file.copyRangeAll(0, dest_file, 0, data.len);
-
-const amt = try dest_file.preadAll(&written_buf, 0);
-try testing.expectEqualStrings(data, written_buf[0..amt]);
-}
-
test "copyFile" {
try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
@@ -1708,8 +1667,8 @@ test "open file with exclusive lock twice, make sure second lock waits" {
}
};

-var started = std.Thread.ResetEvent{};
-var locked = std.Thread.ResetEvent{};
+var started: std.Thread.ResetEvent = .unset;
+var locked: std.Thread.ResetEvent = .unset;

const t = try std.Thread.spawn(.{}, S.checkFn, .{
&ctx.dir,
@@ -1773,7 +1732,7 @@ test "read from locked file" {
const f = try ctx.dir.createFile(filename, .{ .read = true });
defer f.close();
var buffer: [1]u8 = undefined;
-_ = try f.readAll(&buffer);
+_ = try f.read(&buffer);
}
{
const f = try ctx.dir.createFile(filename, .{
@@ -1785,9 +1744,9 @@ test "read from locked file" {
defer f2.close();
var buffer: [1]u8 = undefined;
if (builtin.os.tag == .windows) {
-try std.testing.expectError(error.LockViolation, f2.readAll(&buffer));
+try std.testing.expectError(error.LockViolation, f2.read(&buffer));
} else {
-try std.testing.expectEqual(0, f2.readAll(&buffer));
+try std.testing.expectEqual(0, f2.read(&buffer));
}
}
}
@@ -1944,6 +1903,7 @@ test "'.' and '..' in fs.Dir functions" {

try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+const io = ctx.io;
const subdir_path = try ctx.transformPath("./subdir");
const file_path = try ctx.transformPath("./subdir/../file");
const copy_path = try ctx.transformPath("./subdir/../copy");
@@ -1966,7 +1926,8 @@ test "'.' and '..' in fs.Dir functions" {
try ctx.dir.deleteFile(rename_path);

try ctx.dir.writeFile(.{ .sub_path = update_path, .data = "something" });
-const prev_status = try ctx.dir.updateFile(file_path, ctx.dir, update_path, .{});
+var dir = ctx.dir.adaptToNewApi();
+const prev_status = try dir.updateFile(io, file_path, dir, update_path, .{});
try testing.expectEqual(fs.Dir.PrevStatus.stale, prev_status);

try ctx.dir.deleteDir(subdir_path);
@@ -2005,13 +1966,6 @@ test "'.' and '..' in absolute functions" {
renamed_file.close();
try fs.deleteFileAbsolute(renamed_file_path);

-const update_file_path = try fs.path.join(allocator, &.{ subdir_path, "../update" });
-const update_file = try fs.createFileAbsolute(update_file_path, .{});
-try update_file.writeAll("something");
-update_file.close();
-const prev_status = try fs.updateFileAbsolute(created_file_path, update_file_path, .{});
-try testing.expectEqual(fs.Dir.PrevStatus.stale, prev_status);
-
try fs.deleteDirAbsolute(subdir_path);
}

@@ -2079,6 +2033,7 @@ test "invalid UTF-8/WTF-8 paths" {

try testWithAllSupportedPathTypes(struct {
fn impl(ctx: *TestContext) !void {
+const io = ctx.io;
// This is both invalid UTF-8 and WTF-8, since \xFF is an invalid start byte
const invalid_path = try ctx.transformPath("\xFF");

@@ -2129,7 +2084,8 @@ test "invalid UTF-8/WTF-8 paths" {
try testing.expectError(expected_err, ctx.dir.access(invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.accessZ(invalid_path, .{}));

-try testing.expectError(expected_err, ctx.dir.updateFile(invalid_path, ctx.dir, invalid_path, .{}));
+var dir = ctx.dir.adaptToNewApi();
+try testing.expectError(expected_err, dir.updateFile(io, invalid_path, dir, invalid_path, .{}));
try testing.expectError(expected_err, ctx.dir.copyFile(invalid_path, ctx.dir, invalid_path, .{}));

try testing.expectError(expected_err, ctx.dir.statFile(invalid_path));
@@ -2144,7 +2100,6 @@ test "invalid UTF-8/WTF-8 paths" {
try testing.expectError(expected_err, fs.renameZ(ctx.dir, invalid_path, ctx.dir, invalid_path));

if (native_os != .wasi and ctx.path_type != .relative) {
-try testing.expectError(expected_err, fs.updateFileAbsolute(invalid_path, invalid_path, .{}));
try testing.expectError(expected_err, fs.copyFileAbsolute(invalid_path, invalid_path, .{}));
try testing.expectError(expected_err, fs.makeDirAbsolute(invalid_path));
try testing.expectError(expected_err, fs.makeDirAbsoluteZ(invalid_path));
@@ -2175,6 +2130,8 @@ test "invalid UTF-8/WTF-8 paths" {
}

test "read file non vectored" {
+const io = std.testing.io;

var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();

@@ -2188,7 +2145,7 @@ test "read file non vectored" {
try file_writer.interface.flush();
}

-var file_reader: std.fs.File.Reader = .init(file, &.{});
+var file_reader: std.Io.File.Reader = .initAdapted(file, io, &.{});

var write_buffer: [100]u8 = undefined;
var w: std.Io.Writer = .fixed(&write_buffer);
@@ -2205,6 +2162,8 @@ test "read file non vectored" {
}

test "seek keeping partial buffer" {
+const io = std.testing.io;

var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();

@@ -2219,7 +2178,7 @@ test "seek keeping partial buffer" {
}

var read_buffer: [3]u8 = undefined;
-var file_reader: std.fs.File.Reader = .init(file, &read_buffer);
+var file_reader: Io.File.Reader = .initAdapted(file, io, &read_buffer);

try testing.expectEqual(0, file_reader.logicalPos());

@@ -2246,13 +2205,15 @@ test "seek keeping partial buffer" {
}

test "seekBy" {
+const io = testing.io;

var tmp_dir = testing.tmpDir(.{});
defer tmp_dir.cleanup();

try tmp_dir.dir.writeFile(.{ .sub_path = "blah.txt", .data = "let's test seekBy" });
const f = try tmp_dir.dir.openFile("blah.txt", .{ .mode = .read_only });
defer f.close();
-var reader = f.readerStreaming(&.{});
+var reader = f.readerStreaming(io, &.{});
try reader.seekBy(2);

var buffer: [20]u8 = undefined;
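For reference, a minimal standalone sketch of the read pattern these tests migrate to. It only uses the calls already visible above (`testing.io`, `File.reader(io, buffer)`, `reader.seekTo`, `reader.interface.readSliceShort`) and is illustrative, not part of the diff:

const std = @import("std");
const testing = std.testing;

test "read a file through the new std.Io reader (sketch)" {
    const io = testing.io;

    var tmp = testing.tmpDir(.{});
    defer tmp.cleanup();

    try tmp.dir.writeFile(.{ .sub_path = "example.txt", .data = "ninebytes" });
    const f = try tmp.dir.openFile("example.txt", .{ .mode = .read_only });
    defer f.close();

    // Buffered reader bound to the Io instance; seeking and short reads go
    // through the reader instead of positional pread calls on the File.
    var reader = f.reader(io, &.{});
    var buffer: [32]u8 = undefined;
    try reader.seekTo(0);
    const n = try reader.interface.readSliceShort(&buffer);
    try testing.expectEqualStrings("ninebytes", buffer[0..n]);
}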
@@ -247,6 +247,7 @@ pub const Connection = struct {
port: u16,
stream: Io.net.Stream,
) error{OutOfMemory}!*Plain {
+const io = client.io;
const gpa = client.allocator;
const alloc_len = allocLen(client, remote_host.bytes.len);
const base = try gpa.alignedAlloc(u8, .of(Plain), alloc_len);
@@ -260,8 +261,8 @@ pub const Connection = struct {
plain.* = .{
.connection = .{
.client = client,
-.stream_writer = stream.writer(socket_write_buffer),
-.stream_reader = stream.reader(socket_read_buffer),
+.stream_writer = stream.writer(io, socket_write_buffer),
+.stream_reader = stream.reader(io, socket_read_buffer),
.pool_node = .{},
.port = port,
.host_len = @intCast(remote_host.bytes.len),
@@ -300,6 +301,7 @@ pub const Connection = struct {
port: u16,
stream: Io.net.Stream,
) error{ OutOfMemory, TlsInitializationFailed }!*Tls {
+const io = client.io;
const gpa = client.allocator;
const alloc_len = allocLen(client, remote_host.bytes.len);
const base = try gpa.alignedAlloc(u8, .of(Tls), alloc_len);
@@ -316,11 +318,14 @@ pub const Connection = struct {
assert(base.ptr + alloc_len == socket_read_buffer.ptr + socket_read_buffer.len);
@memcpy(host_buffer, remote_host.bytes);
const tls: *Tls = @ptrCast(base);
+var random_buffer: [176]u8 = undefined;
+std.crypto.random.bytes(&random_buffer);
+const now_ts = if (Io.Timestamp.now(io, .real)) |ts| ts.toSeconds() else |_| return error.TlsInitializationFailed;
tls.* = .{
.connection = .{
.client = client,
-.stream_writer = stream.writer(tls_write_buffer),
-.stream_reader = stream.reader(socket_read_buffer),
+.stream_writer = stream.writer(io, tls_write_buffer),
+.stream_reader = stream.reader(io, socket_read_buffer),
.pool_node = .{},
.port = port,
.host_len = @intCast(remote_host.bytes.len),
@@ -338,6 +343,8 @@ pub const Connection = struct {
.ssl_key_log = client.ssl_key_log,
.read_buffer = tls_read_buffer,
.write_buffer = socket_write_buffer,
+.entropy = &random_buffer,
+.realtime_now_seconds = now_ts,
// This is appropriate for HTTPS because the HTTP headers contain
// the content length which is used to detect truncation attacks.
.allow_truncation_attacks = true,
@@ -1390,16 +1397,8 @@ pub const basic_authorization = struct {
};

pub const ConnectTcpError = error{
-ConnectionRefused,
-NetworkUnreachable,
-ConnectionTimedOut,
-ConnectionResetByPeer,
-TemporaryNameServerFailure,
-NameServerFailure,
-UnknownHostName,
-UnexpectedConnectFailure,
TlsInitializationFailed,
-} || Allocator.Error || Io.Cancelable;
+} || Allocator.Error || HostName.ConnectError;

/// Reuses a `Connection` if one matching `host` and `port` is already open.
///
@@ -1424,6 +1423,7 @@ pub const ConnectTcpOptions = struct {
};

pub fn connectTcpOptions(client: *Client, options: ConnectTcpOptions) ConnectTcpError!*Connection {
+const io = client.io;
const host = options.host;
const port = options.port;
const protocol = options.protocol;
@@ -1437,22 +1437,17 @@ pub fn connectTcpOptions(client: *Client, options: ConnectTcpOptions) ConnectTcp
.protocol = protocol,
})) |conn| return conn;

-const stream = host.connect(client.io, port, .{ .mode = .stream }) catch |err| switch (err) {
-error.ConnectionRefused => return error.ConnectionRefused,
-error.NetworkUnreachable => return error.NetworkUnreachable,
-error.ConnectionTimedOut => return error.ConnectionTimedOut,
-error.ConnectionResetByPeer => return error.ConnectionResetByPeer,
-error.NameServerFailure => return error.NameServerFailure,
-error.UnknownHostName => return error.UnknownHostName,
-error.Canceled => return error.Canceled,
-//else => return error.UnexpectedConnectFailure,
-};
-errdefer stream.close();
+var stream = try host.connect(io, port, .{ .mode = .stream });
+errdefer stream.close(io);

switch (protocol) {
.tls => {
if (disable_tls) return error.TlsInitializationFailed;
-const tc = try Connection.Tls.create(client, proxied_host, proxied_port, stream);
+const tc = Connection.Tls.create(client, proxied_host, proxied_port, stream) catch |err| switch (err) {
+error.OutOfMemory => |e| return e,
+error.Unexpected => |e| return e,
+error.UnsupportedClock => return error.TlsInitializationFailed,
+};
client.connection_pool.addUsed(&tc.connection);
return &tc.connection;
},
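A condensed sketch of the new connect flow shown above, assuming a `client: *Client` and an already-resolved `host: HostName` (how the `HostName` value is obtained is not shown in this diff); illustrative only:

// Sketch only; mirrors the calls used in connectTcpOptions above.
fn openStream(client: *Client, host: HostName, port: u16) !Io.net.Stream {
    const io = client.io;
    // Connect failures now surface through HostName.ConnectError, which is
    // folded into ConnectTcpError, instead of being re-mapped case by case.
    const stream = try host.connect(io, port, .{ .mode = .stream });
    // Callers that keep working with the stream close it with
    // `errdefer stream.close(io)` as in connectTcpOptions above.
    return stream;
}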
@@ -3,7 +3,7 @@ const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
-const net = std.net;
+const net = std.Io.net;
const posix = std.posix;
const linux = std.os.linux;
const testing = std.testing;
@@ -2361,19 +2361,22 @@ test "sendmsg/recvmsg" {
};
defer ring.deinit();

-var address_server = try net.Address.parseIp4("127.0.0.1", 0);
+var address_server: linux.sockaddr.in = .{
+.port = 0,
+.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+};

-const server = try posix.socket(address_server.any.family, posix.SOCK.DGRAM, 0);
+const server = try posix.socket(address_server.family, posix.SOCK.DGRAM, 0);
defer posix.close(server);
try posix.setsockopt(server, posix.SOL.SOCKET, posix.SO.REUSEPORT, &mem.toBytes(@as(c_int, 1)));
try posix.setsockopt(server, posix.SOL.SOCKET, posix.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
-try posix.bind(server, &address_server.any, address_server.getOsSockLen());
+try posix.bind(server, addrAny(&address_server), @sizeOf(linux.sockaddr.in));

// set address_server to the OS-chosen IP/port.
-var slen: posix.socklen_t = address_server.getOsSockLen();
-try posix.getsockname(server, &address_server.any, &slen);
+var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
+try posix.getsockname(server, addrAny(&address_server), &slen);

-const client = try posix.socket(address_server.any.family, posix.SOCK.DGRAM, 0);
+const client = try posix.socket(address_server.family, posix.SOCK.DGRAM, 0);
defer posix.close(client);

const buffer_send = [_]u8{42} ** 128;
@@ -2381,8 +2384,8 @@ test "sendmsg/recvmsg" {
posix.iovec_const{ .base = &buffer_send, .len = buffer_send.len },
};
const msg_send: posix.msghdr_const = .{
-.name = &address_server.any,
-.namelen = address_server.getOsSockLen(),
+.name = addrAny(&address_server),
+.namelen = @sizeOf(linux.sockaddr.in),
.iov = &iovecs_send,
.iovlen = 1,
.control = null,
@@ -2398,11 +2401,13 @@ test "sendmsg/recvmsg" {
var iovecs_recv = [_]posix.iovec{
posix.iovec{ .base = &buffer_recv, .len = buffer_recv.len },
};
-const addr = [_]u8{0} ** 4;
-var address_recv = net.Address.initIp4(addr, 0);
+var address_recv: linux.sockaddr.in = .{
+.port = 0,
+.addr = 0,
+};
var msg_recv: posix.msghdr = .{
-.name = &address_recv.any,
-.namelen = address_recv.getOsSockLen(),
+.name = addrAny(&address_recv),
+.namelen = @sizeOf(linux.sockaddr.in),
.iov = &iovecs_recv,
.iovlen = 1,
.control = null,
@@ -2441,6 +2446,8 @@ test "sendmsg/recvmsg" {
test "timeout (after a relative time)" {
if (!is_linux) return error.SkipZigTest;

+const io = testing.io;

var ring = IoUring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
@@ -2452,12 +2459,12 @@ test "timeout (after a relative time)" {
const margin = 5;
const ts: linux.kernel_timespec = .{ .sec = 0, .nsec = ms * 1000000 };

-const started = std.time.milliTimestamp();
+const started = try std.Io.Timestamp.now(io, .awake);
const sqe = try ring.timeout(0x55555555, &ts, 0, 0);
try testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe.opcode);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
-const stopped = std.time.milliTimestamp();
+const stopped = try std.Io.Timestamp.now(io, .awake);

try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x55555555,
@@ -2466,7 +2473,8 @@ test "timeout (after a relative time)" {
}, cqe);

// Tests should not depend on timings: skip test if outside margin.
-if (!std.math.approxEqAbs(f64, ms, @as(f64, @floatFromInt(stopped - started)), margin)) return error.SkipZigTest;
+const ms_elapsed = started.durationTo(stopped).toMilliseconds();
+if (ms_elapsed > margin) return error.SkipZigTest;
}

test "timeout (after a number of completions)" {
@@ -2861,19 +2869,22 @@ test "shutdown" {
};
defer ring.deinit();

-var address = try net.Address.parseIp4("127.0.0.1", 0);
+var address: linux.sockaddr.in = .{
+.port = 0,
+.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+};

// Socket bound, expect shutdown to work
{
-const server = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+const server = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
defer posix.close(server);
try posix.setsockopt(server, posix.SOL.SOCKET, posix.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
-try posix.bind(server, &address.any, address.getOsSockLen());
+try posix.bind(server, addrAny(&address), @sizeOf(linux.sockaddr.in));
try posix.listen(server, 1);

// set address to the OS-chosen IP/port.
-var slen: posix.socklen_t = address.getOsSockLen();
-try posix.getsockname(server, &address.any, &slen);
+var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
+try posix.getsockname(server, addrAny(&address), &slen);

const shutdown_sqe = try ring.shutdown(0x445445445, server, linux.SHUT.RD);
try testing.expectEqual(linux.IORING_OP.SHUTDOWN, shutdown_sqe.opcode);
@@ -2898,7 +2909,7 @@ test "shutdown" {

// Socket not bound, expect to fail with ENOTCONN
{
-const server = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+const server = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
defer posix.close(server);

const shutdown_sqe = ring.shutdown(0x445445445, server, linux.SHUT.RD) catch |err| switch (err) {
@@ -2966,22 +2977,11 @@ test "renameat" {
}, cqe);

// Validate that the old file doesn't exist anymore
-{
-_ = tmp.dir.openFile(old_path, .{}) catch |err| switch (err) {
-error.FileNotFound => {},
-else => std.debug.panic("unexpected error: {}", .{err}),
-};
-}
+try testing.expectError(error.FileNotFound, tmp.dir.openFile(old_path, .{}));

// Validate that the new file exists with the proper content
-{
-const new_file = try tmp.dir.openFile(new_path, .{});
-defer new_file.close();
-
-var new_file_data: [16]u8 = undefined;
-const bytes_read = try new_file.readAll(&new_file_data);
-try testing.expectEqualStrings("hello", new_file_data[0..bytes_read]);
-}
+var new_file_data: [16]u8 = undefined;
+try testing.expectEqualStrings("hello", try tmp.dir.readFile(new_path, &new_file_data));
}

test "unlinkat" {
@@ -3179,12 +3179,8 @@ test "linkat" {
}, cqe);

// Validate the second file
-const second_file = try tmp.dir.openFile(second_path, .{});
-defer second_file.close();
-
var second_file_data: [16]u8 = undefined;
-const bytes_read = try second_file.readAll(&second_file_data);
-try testing.expectEqualStrings("hello", second_file_data[0..bytes_read]);
+try testing.expectEqualStrings("hello", try tmp.dir.readFile(second_path, &second_file_data));
}

test "provide_buffers: read" {
@@ -3588,7 +3584,10 @@ const SocketTestHarness = struct {

fn createSocketTestHarness(ring: *IoUring) !SocketTestHarness {
// Create a TCP server socket
-var address = try net.Address.parseIp4("127.0.0.1", 0);
+var address: linux.sockaddr.in = .{
+.port = 0,
+.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+};
const listener_socket = try createListenerSocket(&address);
errdefer posix.close(listener_socket);

@@ -3598,9 +3597,9 @@ fn createSocketTestHarness(ring: *IoUring) !SocketTestHarness {
_ = try ring.accept(0xaaaaaaaa, listener_socket, &accept_addr, &accept_addr_len, 0);

// Create a TCP client socket
-const client = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
errdefer posix.close(client);
-_ = try ring.connect(0xcccccccc, client, &address.any, address.getOsSockLen());
+_ = try ring.connect(0xcccccccc, client, addrAny(&address), @sizeOf(linux.sockaddr.in));

try testing.expectEqual(@as(u32, 2), try ring.submit());

@@ -3636,18 +3635,18 @@ fn createSocketTestHarness(ring: *IoUring) !SocketTestHarness {
};
}

-fn createListenerSocket(address: *net.Address) !posix.socket_t {
+fn createListenerSocket(address: *linux.sockaddr.in) !posix.socket_t {
const kernel_backlog = 1;
-const listener_socket = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+const listener_socket = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
errdefer posix.close(listener_socket);

try posix.setsockopt(listener_socket, posix.SOL.SOCKET, posix.SO.REUSEADDR, &mem.toBytes(@as(c_int, 1)));
-try posix.bind(listener_socket, &address.any, address.getOsSockLen());
+try posix.bind(listener_socket, addrAny(address), @sizeOf(linux.sockaddr.in));
try posix.listen(listener_socket, kernel_backlog);

// set address to the OS-chosen IP/port.
-var slen: posix.socklen_t = address.getOsSockLen();
-try posix.getsockname(listener_socket, &address.any, &slen);
+var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
+try posix.getsockname(listener_socket, addrAny(address), &slen);

return listener_socket;
}
@@ -3662,7 +3661,10 @@ test "accept multishot" {
};
defer ring.deinit();

-var address = try net.Address.parseIp4("127.0.0.1", 0);
+var address: linux.sockaddr.in = .{
+.port = 0,
+.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+};
const listener_socket = try createListenerSocket(&address);
defer posix.close(listener_socket);

@@ -3676,9 +3678,9 @@ test "accept multishot" {
var nr: usize = 4; // number of clients to connect
while (nr > 0) : (nr -= 1) {
// connect client
-const client = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
errdefer posix.close(client);
-try posix.connect(client, &address.any, address.getOsSockLen());
+try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));

// test accept completion
var cqe = try ring.copy_cqe();
@@ -3756,7 +3758,10 @@ test "accept_direct" {
else => return err,
};
defer ring.deinit();
-var address = try net.Address.parseIp4("127.0.0.1", 0);
+var address: linux.sockaddr.in = .{
+.port = 0,
+.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+};

// register direct file descriptors
var registered_fds = [_]posix.fd_t{-1} ** 2;
@@ -3779,8 +3784,8 @@ test "accept_direct" {
try testing.expectEqual(@as(u32, 1), try ring.submit());

// connect
-const client = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
-try posix.connect(client, &address.any, address.getOsSockLen());
+const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
defer posix.close(client);

// accept completion
@@ -3813,8 +3818,8 @@ test "accept_direct" {
_ = try ring.accept_direct(accept_userdata, listener_socket, null, null, 0);
try testing.expectEqual(@as(u32, 1), try ring.submit());
// connect
-const client = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
-try posix.connect(client, &address.any, address.getOsSockLen());
+const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
defer posix.close(client);
// completion with error
const cqe_accept = try ring.copy_cqe();
@@ -3837,7 +3842,10 @@ test "accept_multishot_direct" {
};
defer ring.deinit();

-var address = try net.Address.parseIp4("127.0.0.1", 0);
+var address: linux.sockaddr.in = .{
+.port = 0,
+.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+};

var registered_fds = [_]posix.fd_t{-1} ** 2;
try ring.register_files(registered_fds[0..]);
@@ -3855,8 +3863,8 @@ test "accept_multishot_direct" {

for (registered_fds) |_| {
// connect
-const client = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
-try posix.connect(client, &address.any, address.getOsSockLen());
+const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
defer posix.close(client);

// accept completion
@@ -3870,8 +3878,8 @@ test "accept_multishot_direct" {
// Multishot is terminated (more flag is not set).
{
// connect
-const client = try posix.socket(address.any.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
-try posix.connect(client, &address.any, address.getOsSockLen());
+const client = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
+try posix.connect(client, addrAny(&address), @sizeOf(linux.sockaddr.in));
defer posix.close(client);
// completion with error
const cqe_accept = try ring.copy_cqe();
@@ -3944,7 +3952,10 @@ test "socket_direct/socket_direct_alloc/close_direct" {
try testing.expect(cqe_socket.res == 2); // returns registered file index

// use sockets from registered_fds in connect operation
-var address = try net.Address.parseIp4("127.0.0.1", 0);
+var address: linux.sockaddr.in = .{
+.port = 0,
+.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+};
const listener_socket = try createListenerSocket(&address);
defer posix.close(listener_socket);
const accept_userdata: u64 = 0xaaaaaaaa;
@@ -3954,7 +3965,7 @@ test "socket_direct/socket_direct_alloc/close_direct" {
// prepare accept
_ = try ring.accept(accept_userdata, listener_socket, null, null, 0);
// prepare connect with fixed socket
-const connect_sqe = try ring.connect(connect_userdata, @intCast(fd_index), &address.any, address.getOsSockLen());
+const connect_sqe = try ring.connect(connect_userdata, @intCast(fd_index), addrAny(&address), @sizeOf(linux.sockaddr.in));
connect_sqe.flags |= linux.IOSQE_FIXED_FILE; // fd is fixed file index
// submit both
try testing.expectEqual(@as(u32, 2), try ring.submit());
@@ -4483,12 +4494,15 @@ test "bind/listen/connect" {
// LISTEN is higher required operation
if (!probe.is_supported(.LISTEN)) return error.SkipZigTest;

-var addr = net.Address.initIp4([4]u8{ 127, 0, 0, 1 }, 0);
-const proto: u32 = if (addr.any.family == linux.AF.UNIX) 0 else linux.IPPROTO.TCP;
+var addr: linux.sockaddr.in = .{
+.port = 0,
+.addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
+};
+const proto: u32 = if (addr.family == linux.AF.UNIX) 0 else linux.IPPROTO.TCP;

const listen_fd = brk: {
// Create socket
-_ = try ring.socket(1, addr.any.family, linux.SOCK.STREAM | linux.SOCK.CLOEXEC, proto, 0);
+_ = try ring.socket(1, addr.family, linux.SOCK.STREAM | linux.SOCK.CLOEXEC, proto, 0);
try testing.expectEqual(1, try ring.submit());
var cqe = try ring.copy_cqe();
try testing.expectEqual(1, cqe.user_data);
@@ -4500,7 +4514,7 @@ test "bind/listen/connect" {
var optval: u32 = 1;
(try ring.setsockopt(2, listen_fd, linux.SOL.SOCKET, linux.SO.REUSEADDR, mem.asBytes(&optval))).link_next();
(try ring.setsockopt(3, listen_fd, linux.SOL.SOCKET, linux.SO.REUSEPORT, mem.asBytes(&optval))).link_next();
-(try ring.bind(4, listen_fd, &addr.any, addr.getOsSockLen(), 0)).link_next();
+(try ring.bind(4, listen_fd, addrAny(&addr), @sizeOf(linux.sockaddr.in), 0)).link_next();
_ = try ring.listen(5, listen_fd, 1, 0);
// Submit 4 operations
try testing.expectEqual(4, try ring.submit());
@@ -4521,15 +4535,15 @@ test "bind/listen/connect" {
try testing.expectEqual(1, optval);

// Read system assigned port into addr
-var addr_len: posix.socklen_t = addr.getOsSockLen();
-try posix.getsockname(listen_fd, &addr.any, &addr_len);
+var addr_len: posix.socklen_t = @sizeOf(linux.sockaddr.in);
+try posix.getsockname(listen_fd, addrAny(&addr), &addr_len);

break :brk listen_fd;
};

const connect_fd = brk: {
// Create connect socket
-_ = try ring.socket(6, addr.any.family, linux.SOCK.STREAM | linux.SOCK.CLOEXEC, proto, 0);
+_ = try ring.socket(6, addr.family, linux.SOCK.STREAM | linux.SOCK.CLOEXEC, proto, 0);
try testing.expectEqual(1, try ring.submit());
const cqe = try ring.copy_cqe();
try testing.expectEqual(6, cqe.user_data);
@@ -4542,7 +4556,7 @@ test "bind/listen/connect" {

// Prepare accept/connect operations
_ = try ring.accept(7, listen_fd, null, null, 0);
-_ = try ring.connect(8, connect_fd, &addr.any, addr.getOsSockLen());
+_ = try ring.connect(8, connect_fd, addrAny(&addr), @sizeOf(linux.sockaddr.in));
try testing.expectEqual(2, try ring.submit());
// Get listener accepted socket
var accept_fd: posix.socket_t = 0;
@@ -4604,3 +4618,7 @@ fn testSendRecv(ring: *IoUring, send_fd: posix.socket_t, recv_fd: posix.socket_t
try testing.expectEqualSlices(u8, buffer_send, buffer_recv[0..buffer_send.len]);
try testing.expectEqualSlices(u8, buffer_send, buffer_recv[buffer_send.len..]);
}
+
+fn addrAny(addr: *linux.sockaddr.in) *linux.sockaddr {
+return @ptrCast(addr);
+}
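The tests above replace `net.Address` with a raw `linux.sockaddr.in` plus the small `addrAny` cast helper added at the end of the file. A self-contained sketch of that pattern (Linux-only, illustrative rather than part of the diff):

const std = @import("std");
const builtin = @import("builtin");
const posix = std.posix;
const linux = std.os.linux;

// Same helper as the one added above: view the concrete sockaddr as a generic one.
fn addrAny(addr: *linux.sockaddr.in) *linux.sockaddr {
    return @ptrCast(addr);
}

test "bind to an OS-chosen port with a raw sockaddr (sketch)" {
    if (builtin.os.tag != .linux) return error.SkipZigTest;

    // family and the zero padding use their struct defaults.
    var address: linux.sockaddr.in = .{
        .port = 0,
        .addr = @bitCast([4]u8{ 127, 0, 0, 1 }),
    };
    const sock = try posix.socket(address.family, posix.SOCK.STREAM | posix.SOCK.CLOEXEC, 0);
    defer posix.close(sock);
    try posix.bind(sock, addrAny(&address), @sizeOf(linux.sockaddr.in));
    // Read the OS-assigned port back into the same struct.
    var slen: posix.socklen_t = @sizeOf(linux.sockaddr.in);
    try posix.getsockname(sock, addrAny(&address), &slen);
}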
@@ -3000,31 +3000,7 @@ pub fn mkdiratW(dir_fd: fd_t, sub_path_w: []const u16, mode: mode_t) MakeDirErro
windows.CloseHandle(sub_dir_handle);
}

-pub const MakeDirError = error{
-/// In WASI, this error may occur when the file descriptor does
-/// not hold the required rights to create a new directory relative to it.
-AccessDenied,
-PermissionDenied,
-DiskQuota,
-PathAlreadyExists,
-SymLinkLoop,
-LinkQuotaExceeded,
-NameTooLong,
-FileNotFound,
-SystemResources,
-NoSpaceLeft,
-NotDir,
-ReadOnlyFileSystem,
-/// WASI-only; file paths must be valid UTF-8.
-InvalidUtf8,
-/// Windows-only; file paths provided by the user must be valid WTF-8.
-/// https://wtf-8.codeberg.page/
-InvalidWtf8,
-BadPathName,
-NoDevice,
-/// On Windows, `\\server` or `\\server\share` was not found.
-NetworkNotFound,
-} || UnexpectedError;
+pub const MakeDirError = std.Io.Dir.MakeError;

/// Create a directory.
/// `mode` is ignored on Windows and WASI.
@@ -731,11 +731,8 @@ test "dup & dup2" {
try dup2ed.writeAll("dup2");
}

-var file = try tmp.dir.openFile("os_dup_test", .{});
-defer file.close();
-
-var buf: [7]u8 = undefined;
-try testing.expectEqualStrings("dupdup2", buf[0..try file.readAll(&buf)]);
+var buffer: [8]u8 = undefined;
+try testing.expectEqualStrings("dupdup2", try tmp.dir.readFile("os_dup_test", &buffer));
}

test "writev longer than IOV_MAX" {
@@ -1,5 +1,9 @@
-const std = @import("../std.zig");
+const ChildProcess = @This();
+
const builtin = @import("builtin");
+const native_os = builtin.os.tag;
+
+const std = @import("../std.zig");
const unicode = std.unicode;
const fs = std.fs;
const process = std.process;
@@ -11,9 +15,7 @@ const mem = std.mem;
const EnvMap = std.process.EnvMap;
const maxInt = std.math.maxInt;
const assert = std.debug.assert;
-const native_os = builtin.os.tag;
const Allocator = std.mem.Allocator;
-const ChildProcess = @This();
const ArrayList = std.ArrayList;

pub const Id = switch (native_os) {
@@ -317,16 +319,23 @@ pub fn waitForSpawn(self: *ChildProcess) SpawnError!void {

const err_pipe = self.err_pipe orelse return;
self.err_pipe = null;

// Wait for the child to report any errors in or before `execvpe`.
-if (readIntFd(err_pipe)) |child_err_int| {
+const report = readIntFd(err_pipe);
posix.close(err_pipe);
+if (report) |child_err_int| {
const child_err: SpawnError = @errorCast(@errorFromInt(child_err_int));
self.term = child_err;
return child_err;
-} else |_| {
-// Write end closed by CLOEXEC at the time of the `execvpe` call, indicating success!
-posix.close(err_pipe);
+} else |read_err| switch (read_err) {
+error.EndOfStream => {
+// Write end closed by CLOEXEC at the time of the `execvpe` call,
+// indicating success.
+},
+else => {
+// Problem reading the error from the error reporting pipe. We
+// don't know if the child is alive or dead. Better to assume it is
+// alive so the resource does not risk being leaked.
+},
}
}

@@ -1014,8 +1023,14 @@ fn writeIntFd(fd: i32, value: ErrInt) !void {

fn readIntFd(fd: i32) !ErrInt {
var buffer: [8]u8 = undefined;
-var fr: std.fs.File.Reader = .initStreaming(.{ .handle = fd }, &buffer);
-return @intCast(fr.interface.takeInt(u64, .little) catch return error.SystemResources);
+var i: usize = 0;
+while (i < buffer.len) {
+const n = try std.posix.read(fd, buffer[i..]);
+if (n == 0) return error.EndOfStream;
+i += n;
+}
+const int = mem.readInt(u64, &buffer, .little);
+return @intCast(int);
}

const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8);
|||||||
@ -1,7 +1,9 @@
|
|||||||
|
const Writer = @This();
|
||||||
|
|
||||||
const std = @import("std");
|
const std = @import("std");
|
||||||
|
const Io = std.Io;
|
||||||
const assert = std.debug.assert;
|
const assert = std.debug.assert;
|
||||||
const testing = std.testing;
|
const testing = std.testing;
|
||||||
const Writer = @This();
|
|
||||||
|
|
||||||
const block_size = @sizeOf(Header);
|
const block_size = @sizeOf(Header);
|
||||||
|
|
||||||
@ -14,7 +16,7 @@ pub const Options = struct {
|
|||||||
mtime: u64 = 0,
|
mtime: u64 = 0,
|
||||||
};
|
};
|
||||||
|
|
||||||
underlying_writer: *std.Io.Writer,
|
underlying_writer: *Io.Writer,
|
||||||
prefix: []const u8 = "",
|
prefix: []const u8 = "",
|
||||||
mtime_now: u64 = 0,
|
mtime_now: u64 = 0,
|
||||||
|
|
||||||
@ -36,12 +38,12 @@ pub fn writeDir(w: *Writer, sub_path: []const u8, options: Options) Error!void {
|
|||||||
try w.writeHeader(.directory, sub_path, "", 0, options);
|
try w.writeHeader(.directory, sub_path, "", 0, options);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const WriteFileError = std.Io.Writer.FileError || Error || std.fs.File.Reader.SizeError;
|
pub const WriteFileError = Io.Writer.FileError || Error || Io.File.Reader.SizeError;
|
||||||
|
|
||||||
pub fn writeFile(
|
pub fn writeFile(
|
||||||
w: *Writer,
|
w: *Writer,
|
||||||
sub_path: []const u8,
|
sub_path: []const u8,
|
||||||
file_reader: *std.fs.File.Reader,
|
file_reader: *Io.File.Reader,
|
||||||
stat_mtime: i128,
|
stat_mtime: i128,
|
||||||
) WriteFileError!void {
|
) WriteFileError!void {
|
||||||
const size = try file_reader.getSize();
|
const size = try file_reader.getSize();
|
||||||
@ -58,7 +60,7 @@ pub fn writeFile(
|
|||||||
try w.writePadding64(size);
|
try w.writePadding64(size);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub const WriteFileStreamError = Error || std.Io.Reader.StreamError;
|
+pub const WriteFileStreamError = Error || Io.Reader.StreamError;

 /// Writes file reading file content from `reader`. Reads exactly `size` bytes
 /// from `reader`, or returns `error.EndOfStream`.
@@ -66,7 +68,7 @@ pub fn writeFileStream(
 w: *Writer,
 sub_path: []const u8,
 size: u64,
-reader: *std.Io.Reader,
+reader: *Io.Reader,
 options: Options,
 ) WriteFileStreamError!void {
 try w.writeHeader(.regular, sub_path, "", size, options);
@@ -136,15 +138,15 @@ fn writeExtendedHeader(w: *Writer, typeflag: Header.FileType, buffers: []const [
 try w.writePadding(len);
 }

-fn writePadding(w: *Writer, bytes: usize) std.Io.Writer.Error!void {
+fn writePadding(w: *Writer, bytes: usize) Io.Writer.Error!void {
 return writePaddingPos(w, bytes % block_size);
 }

-fn writePadding64(w: *Writer, bytes: u64) std.Io.Writer.Error!void {
+fn writePadding64(w: *Writer, bytes: u64) Io.Writer.Error!void {
 return writePaddingPos(w, @intCast(bytes % block_size));
 }

-fn writePaddingPos(w: *Writer, pos: usize) std.Io.Writer.Error!void {
+fn writePaddingPos(w: *Writer, pos: usize) Io.Writer.Error!void {
 if (pos == 0) return;
 try w.underlying_writer.splatByteAll(0, block_size - pos);
 }
@@ -153,7 +155,7 @@ fn writePaddingPos(w: *Writer, pos: usize) std.Io.Writer.Error!void {
 /// "reasonable system must not assume that such a block exists when reading an
 /// archive". Therefore, the Zig standard library recommends to not call this
 /// function.
-pub fn finishPedantically(w: *Writer) std.Io.Writer.Error!void {
+pub fn finishPedantically(w: *Writer) Io.Writer.Error!void {
 try w.underlying_writer.splatByteAll(0, block_size * 2);
 }

@@ -248,7 +250,7 @@ pub const Header = extern struct {
 try octal(&w.checksum, checksum);
 }

-pub fn write(h: *Header, bw: *std.Io.Writer) error{ OctalOverflow, WriteFailed }!void {
+pub fn write(h: *Header, bw: *Io.Writer) error{ OctalOverflow, WriteFailed }!void {
 try h.updateChecksum();
 try bw.writeAll(std.mem.asBytes(h));
 }
@@ -396,14 +398,14 @@ test "write files" {
 {
 const root = "root";

-var output: std.Io.Writer.Allocating = .init(testing.allocator);
+var output: Io.Writer.Allocating = .init(testing.allocator);
 var w: Writer = .{ .underlying_writer = &output.writer };
 defer output.deinit();
 try w.setRoot(root);
 for (files) |file|
 try w.writeFileBytes(file.path, file.content, .{});

-var input: std.Io.Reader = .fixed(output.written());
+var input: Io.Reader = .fixed(output.written());
 var it: std.tar.Iterator = .init(&input, .{
 .file_name_buffer = &file_name_buffer,
 .link_name_buffer = &link_name_buffer,
@@ -424,7 +426,7 @@ test "write files" {
 try testing.expectEqual('/', actual.name[root.len..][0]);
 try testing.expectEqualStrings(expected.path, actual.name[root.len + 1 ..]);

-var content: std.Io.Writer.Allocating = .init(testing.allocator);
+var content: Io.Writer.Allocating = .init(testing.allocator);
 defer content.deinit();
 try it.streamRemaining(actual, &content.writer);
 try testing.expectEqualSlices(u8, expected.content, content.written());
@@ -432,15 +434,15 @@ test "write files" {
 }
 // without root
 {
-var output: std.Io.Writer.Allocating = .init(testing.allocator);
+var output: Io.Writer.Allocating = .init(testing.allocator);
 var w: Writer = .{ .underlying_writer = &output.writer };
 defer output.deinit();
 for (files) |file| {
-var content: std.Io.Reader = .fixed(file.content);
+var content: Io.Reader = .fixed(file.content);
 try w.writeFileStream(file.path, file.content.len, &content, .{});
 }

-var input: std.Io.Reader = .fixed(output.written());
+var input: Io.Reader = .fixed(output.written());
 var it: std.tar.Iterator = .init(&input, .{
 .file_name_buffer = &file_name_buffer,
 .link_name_buffer = &link_name_buffer,
@@ -452,7 +454,7 @@ test "write files" {
 const expected = files[i];
 try testing.expectEqualStrings(expected.path, actual.name);

-var content: std.Io.Writer.Allocating = .init(testing.allocator);
+var content: Io.Writer.Allocating = .init(testing.allocator);
 defer content.deinit();
 try it.streamRemaining(actual, &content.writer);
 try testing.expectEqualSlices(u8, expected.content, content.written());
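For orientation, a minimal sketch of the in-memory round trip these updated tests rely on, built only from the calls visible in the hunks above; `Io`, `Writer`, and `testing` are assumed to be this module's file-level declarations, the buffer sizes are arbitrary, and the iterator loop is assumed to follow the same `next`/`streamRemaining` pattern as the existing test:

    var file_name_buffer: [256]u8 = undefined;
    var link_name_buffer: [256]u8 = undefined;

    // Build an archive into memory.
    var output: Io.Writer.Allocating = .init(testing.allocator);
    defer output.deinit();
    var w: Writer = .{ .underlying_writer = &output.writer };
    try w.writeFileBytes("example.txt", "hello tar", .{});

    // Read it back through a fixed reader.
    var input: Io.Reader = .fixed(output.written());
    var it: std.tar.Iterator = .init(&input, .{
        .file_name_buffer = &file_name_buffer,
        .link_name_buffer = &link_name_buffer,
    });
    while (try it.next()) |entry| {
        var content: Io.Writer.Allocating = .init(testing.allocator);
        defer content.deinit();
        try it.streamRemaining(entry, &content.writer);
    }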
@@ -559,7 +559,7 @@ test isUnderscore
 /// If the source can be UTF-16LE encoded, this function asserts that `gpa`
 /// will align a byte-sized allocation to at least 2. Allocators that don't do
 /// this are rare.
-pub fn readSourceFileToEndAlloc(gpa: Allocator, file_reader: *std.fs.File.Reader) ![:0]u8 {
+pub fn readSourceFileToEndAlloc(gpa: Allocator, file_reader: *Io.File.Reader) ![:0]u8 {
 var buffer: std.ArrayList(u8) = .empty;
 defer buffer.deinit(gpa);

@@ -442,6 +442,7 @@ pub fn resolveTargetQuery(io: Io, query: Target.Query) DetectError!Target {
 error.DeviceBusy,
 error.InputOutput,
 error.LockViolation,
+error.FileSystem,

 error.UnableToOpenElfFile,
 error.UnhelpfulFile,
@@ -542,16 +543,15 @@ fn detectNativeCpuAndFeatures(cpu_arch: Target.Cpu.Arch, os: Target.Os, query: T
 return null;
 }

-pub const AbiAndDynamicLinkerFromFileError = error{};
-
-pub fn abiAndDynamicLinkerFromFile(
+fn abiAndDynamicLinkerFromFile(
 file_reader: *Io.File.Reader,
 header: *const elf.Header,
 cpu: Target.Cpu,
 os: Target.Os,
 ld_info_list: []const LdInfo,
 query: Target.Query,
-) AbiAndDynamicLinkerFromFileError!Target {
+) !Target {
+const io = file_reader.io;
 var result: Target = .{
 .cpu = cpu,
 .os = os,
@@ -623,8 +623,8 @@ pub fn abiAndDynamicLinkerFromFile(
 try file_reader.seekTo(shstr.sh_offset);
 try file_reader.interface.readSliceAll(shstrtab);
 const dynstr: ?struct { offset: u64, size: u64 } = find_dyn_str: {
-var it = header.iterateSectionHeaders(&file_reader.interface);
-while (it.next()) |shdr| {
+var it = header.iterateSectionHeaders(file_reader);
+while (try it.next()) |shdr| {
 const end = mem.findScalarPos(u8, shstrtab, shdr.sh_name, 0) orelse continue;
 const sh_name = shstrtab[shdr.sh_name..end :0];
 if (mem.eql(u8, sh_name, ".dynstr")) break :find_dyn_str .{
@@ -645,7 +645,7 @@ pub fn abiAndDynamicLinkerFromFile(

 var it = mem.tokenizeScalar(u8, rpath_list, ':');
 while (it.next()) |rpath| {
-if (glibcVerFromRPath(rpath)) |ver| {
+if (glibcVerFromRPath(io, rpath)) |ver| {
 result.os.version_range.linux.glibc = ver;
 return result;
 } else |err| switch (err) {
@@ -660,7 +660,7 @@ pub fn abiAndDynamicLinkerFromFile(
 // There is no DT_RUNPATH so we try to find libc.so.6 inside the same
 // directory as the dynamic linker.
 if (fs.path.dirname(dl_path)) |rpath| {
-if (glibcVerFromRPath(rpath)) |ver| {
+if (glibcVerFromRPath(io, rpath)) |ver| {
 result.os.version_range.linux.glibc = ver;
 return result;
 } else |err| switch (err) {
@@ -725,7 +725,7 @@ pub fn abiAndDynamicLinkerFromFile(
 @memcpy(path_buf[index..][0..abi.len], abi);
 index += abi.len;
 const rpath = path_buf[0..index];
-if (glibcVerFromRPath(rpath)) |ver| {
+if (glibcVerFromRPath(io, rpath)) |ver| {
 result.os.version_range.linux.glibc = ver;
 return result;
 } else |err| switch (err) {
@@ -842,18 +842,13 @@ fn glibcVerFromRPath(io: Io, rpath: []const u8) !std.SemanticVersion {
 error.InvalidElfMagic,
 error.InvalidElfEndian,
 error.InvalidElfClass,
-error.InvalidElfFile,
 error.InvalidElfVersion,
 error.InvalidGnuLibCVersion,
 error.EndOfStream,
 => return error.GLibCNotFound,

-error.SystemResources,
-error.UnableToReadElfFile,
-error.Unexpected,
-error.FileSystem,
-error.ProcessNotFound,
-=> |e| return e,
+error.ReadFailed => return file_reader.err.?,
+else => |e| return e,
 };
 }

@@ -867,8 +862,8 @@ fn glibcVerFromSoFile(file_reader: *Io.File.Reader) !std.SemanticVersion {
 try file_reader.seekTo(shstr.sh_offset);
 try file_reader.interface.readSliceAll(shstrtab);
 const dynstr: struct { offset: u64, size: u64 } = find_dyn_str: {
-var it = header.iterateSectionHeaders(&file_reader.interface);
-while (it.next()) |shdr| {
+var it = header.iterateSectionHeaders(file_reader);
+while (try it.next()) |shdr| {
 const end = mem.findScalarPos(u8, shstrtab, shdr.sh_name, 0) orelse continue;
 const sh_name = shstrtab[shdr.sh_name..end :0];
 if (mem.eql(u8, sh_name, ".dynstr")) break :find_dyn_str .{
@@ -882,19 +877,25 @@ fn glibcVerFromSoFile(file_reader: *Io.File.Reader) !std.SemanticVersion {
 // strings that start with "GLIBC_2." indicate the existence of such a glibc version,
 // and furthermore, that the system-installed glibc is at minimum that version.
 var max_ver: std.SemanticVersion = .{ .major = 2, .minor = 2, .patch = 5 };
+var offset: u64 = 0;
 try file_reader.seekTo(dynstr.offset);
-while (file_reader.interface.takeSentinel(0)) |s| {
-if (mem.startsWith(u8, s, "GLIBC_2.")) {
-const chopped = s["GLIBC_".len..];
-const ver = Target.Query.parseVersion(chopped) catch |err| switch (err) {
-error.Overflow => return error.InvalidGnuLibCVersion,
-error.InvalidVersion => return error.InvalidGnuLibCVersion,
-};
-switch (ver.order(max_ver)) {
-.gt => max_ver = ver,
-.lt, .eq => continue,
+while (offset < dynstr.size) {
+if (file_reader.interface.takeSentinel(0)) |s| {
+if (mem.startsWith(u8, s, "GLIBC_2.")) {
+const chopped = s["GLIBC_".len..];
+const ver = Target.Query.parseVersion(chopped) catch |err| switch (err) {
+error.Overflow => return error.InvalidGnuLibCVersion,
+error.InvalidVersion => return error.InvalidGnuLibCVersion,
+};
+switch (ver.order(max_ver)) {
+.gt => max_ver = ver,
+.lt, .eq => continue,
+}
 }
+offset += s.len + 1;
+} else |err| switch (err) {
+error.EndOfStream, error.StreamTooLong => break,
+error.ReadFailed => |e| return e,
+}
 }
 }

@@ -1091,22 +1092,12 @@ fn detectAbiAndDynamicLinker(io: Io, cpu: Target.Cpu, os: Target.Os, query: Targ
 error.ProcessFdQuotaExceeded,
 error.SystemFdQuotaExceeded,
 error.ProcessNotFound,
+error.Canceled,
 => |e| return e,

 error.ReadFailed => return file_reader.err.?,

-error.UnableToReadElfFile,
-error.InvalidElfClass,
-error.InvalidElfVersion,
-error.InvalidElfEndian,
-error.InvalidElfFile,
-error.InvalidElfMagic,
-error.Unexpected,
-error.EndOfStream,
-error.NameTooLong,
-error.StaticElfFile,
-// Finally, we fall back on the standard path.
-=> |e| {
+else => |e| {
 std.log.warn("encountered {t}; falling back to default ABI and dynamic linker", .{e});
 return defaultAbiAndDynamicLinker(cpu, os, query);
 },
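As a usage sketch of the signature shown in the first hunk above, native target detection now takes an explicit `Io` handle; `io` is assumed to already be in scope (supplied by the caller), and the empty query means "detect the native host":

    // Hypothetical call site; `io: Io` is provided by the caller.
    const native: std.Target = try std.zig.system.resolveTargetQuery(io, .{});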
@@ -455,8 +455,7 @@ pub fn lowerToBuildSteps(
 parent_step: *std.Build.Step,
 options: CaseTestOptions,
 ) void {
-const host = std.zig.system.resolveTargetQuery(.{}) catch |err|
-std.debug.panic("unable to detect native host: {s}\n", .{@errorName(err)});
+const host = b.resolveTargetQuery(.{});
 const cases_dir_path = b.build_root.join(b.allocator, &.{ "test", "cases" }) catch @panic("OOM");

 for (self.cases.items) |case| {
@@ -587,7 +586,7 @@ pub fn lowerToBuildSteps(
 },
 .Execution => |expected_stdout| no_exec: {
 const run = if (case.target.result.ofmt == .c) run_step: {
-if (getExternalExecutor(&host, &case.target.result, .{ .link_libc = true }) != .native) {
+if (getExternalExecutor(&host.result, &case.target.result, .{ .link_libc = true }) != .native) {
 // We wouldn't be able to run the compiled C code.
 break :no_exec;
 }
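A short illustrative sketch of the pattern used after this change: the host target is resolved through the `std.Build` graph rather than by probing the system directly, and the executor check reads the resolved `Target` out of it. `b`, `case`, and `getExternalExecutor` are assumed to be the surrounding declarations of this file:

    // host: std.Build.ResolvedTarget, resolved via the build graph.
    const host = b.resolveTargetQuery(.{});
    if (getExternalExecutor(&host.result, &case.target.result, .{ .link_libc = true }) != .native) {
        // The compiled C code could not be executed on this machine.
    }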
@@ -972,14 +971,6 @@ const TestManifest = struct {
 }
 };

-fn resolveTargetQuery(query: std.Target.Query) std.Build.ResolvedTarget {
-return .{
-.query = query,
-.target = std.zig.system.resolveTargetQuery(query) catch
-@panic("unable to resolve target query"),
-};
-}
-
 fn knownFileExtension(filename: []const u8) bool {
 // List taken from `Compilation.classifyFileExt` in the compiler.
 for ([_][]const u8{