std.Io: fix error handling and asyncParallel docs

This commit is contained in:
Andrew Kelley 2025-07-17 20:53:39 -07:00
parent 33b10abaf6
commit 363d7deb8a
3 changed files with 20 additions and 20 deletions

View File

@@ -946,7 +946,7 @@ pub const VTable = struct {
context: []const u8,
context_alignment: std.mem.Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
) ?*AnyFuture,
) error{OutOfMemory}!*AnyFuture,
/// Returning `error.OutOfMemory` indicates resource allocation failed.
///
/// Thread-safe.
@@ -959,7 +959,7 @@ pub const VTable = struct {
context: []const u8,
context_alignment: std.mem.Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
) ?*AnyFuture,
) error{OutOfMemory}!*AnyFuture,
/// Executes `start` asynchronously in a manner such that it cleans itself
/// up. This mode does not support results, await, or cancel.
///
@@ -1573,7 +1573,7 @@ pub fn asyncConcurrent(
}
};
var future: Future(Result) = undefined;
future.any_future = io.vtable.asyncConcurrent(
future.any_future = try io.vtable.asyncConcurrent(
io.userdata,
@sizeOf(Result),
.of(Result),
@@ -1584,14 +1584,14 @@ pub fn asyncParallel(
return future;
}
/// Calls `function` with `args`, such that the return value of the function is
/// not guaranteed to be available until `await` is called, while simultaneously
/// passing control flow back to the caller.
/// Simultaneously calls `function` with `args` while passing control flow back
/// to the caller. The return value of the function is not guaranteed to be
/// available until `await` is called.
///
/// This has the strongest guarantees of all async family functions, placing
/// the most restrictions on what kind of `Io` implementations are supported.
/// By calling `asyncConcurrent` instead, one allows, for example,
/// stackful single-threaded non-blocking I/O.
/// By calling `asyncConcurrent` instead, one allows, for example, stackful
/// single-threaded non-blocking I/O.
///
/// See also:
/// * `asyncConcurrent`
@@ -1611,9 +1611,9 @@ pub fn asyncParallel(
}
};
var future: Future(Result) = undefined;
future.any_future = io.vtable.asyncConcurrent(
future.any_future = try io.vtable.asyncParallel(
io.userdata,
@ptrCast((&future.result)[0..1]),
@sizeOf(Result),
.of(Result),
@ptrCast((&args)[0..1]),
.of(Args),

View File

@@ -879,7 +879,7 @@ fn async(
context_alignment: Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
) ?*std.Io.AnyFuture {
return asyncConcurrent(userdata, result.len, result_alignment, context, context_alignment, start) orelse {
return asyncConcurrent(userdata, result.len, result_alignment, context, context_alignment, start) catch {
start(context.ptr, result.ptr);
return null;
};
@@ -892,14 +892,14 @@ fn asyncConcurrent(
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
) ?*std.Io.AnyFuture {
) error{OutOfMemory}!*std.Io.AnyFuture {
assert(result_alignment.compare(.lte, Fiber.max_result_align)); // TODO
assert(context_alignment.compare(.lte, Fiber.max_context_align)); // TODO
assert(result_len <= Fiber.max_result_size); // TODO
assert(context.len <= Fiber.max_context_size); // TODO
const event_loop: *EventLoop = @alignCast(@ptrCast(userdata));
const fiber = Fiber.allocate(event_loop) catch return null;
const fiber = try Fiber.allocate(event_loop);
std.log.debug("allocated {*}", .{fiber});
const closure: *AsyncClosure = .fromFiber(fiber);
@@ -945,7 +945,7 @@ fn asyncParallel(
context: []const u8,
context_alignment: Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
) ?*std.Io.AnyFuture {
) error{OutOfMemory}!*std.Io.AnyFuture {
_ = userdata;
_ = result_len;
_ = result_alignment;

View File

@@ -220,7 +220,7 @@ fn async(
}
const pool: *Pool = @alignCast(@ptrCast(userdata));
const cpu_count = pool.cpu_count catch {
return asyncParallel(userdata, result.len, result_alignment, context, context_alignment, start) orelse {
return asyncParallel(userdata, result.len, result_alignment, context, context_alignment, start) catch {
start(context.ptr, result.ptr);
return null;
};
@@ -291,8 +291,8 @@ fn asyncParallel(
context: []const u8,
context_alignment: std.mem.Alignment,
start: *const fn (context: *const anyopaque, result: *anyopaque) void,
) ?*Io.AnyFuture {
if (builtin.single_threaded) return null;
) error{OutOfMemory}!*Io.AnyFuture {
if (builtin.single_threaded) unreachable;
const pool: *Pool = @alignCast(@ptrCast(userdata));
const cpu_count = pool.cpu_count catch 1;
@@ -300,7 +300,7 @@ fn asyncParallel(
const context_offset = context_alignment.forward(@sizeOf(AsyncClosure));
const result_offset = result_alignment.forward(context_offset + context.len);
const n = result_offset + result_len;
const closure: *AsyncClosure = @alignCast(@ptrCast(gpa.alignedAlloc(u8, .of(AsyncClosure), n) catch return null));
const closure: *AsyncClosure = @alignCast(@ptrCast(try gpa.alignedAlloc(u8, .of(AsyncClosure), n)));
closure.* = .{
.func = start,
@@ -324,7 +324,7 @@ fn asyncParallel(
pool.threads.ensureTotalCapacity(gpa, thread_capacity) catch {
pool.mutex.unlock();
closure.free(gpa, result_len);
return null;
return error.OutOfMemory;
};
pool.run_queue.prepend(&closure.runnable.node);
@@ -334,7 +334,7 @@ fn asyncParallel(
assert(pool.run_queue.popFirst() == &closure.runnable.node);
pool.mutex.unlock();
closure.free(gpa, result_len);
return null;
return error.OutOfMemory;
};
pool.threads.appendAssumeCapacity(thread);
}