std.Io: introduce cancellation

Andrew Kelley 2025-03-29 20:58:07 -07:00
parent dab5dd286f
commit 988f58341b
3 changed files with 239 additions and 117 deletions


@@ -922,6 +922,8 @@ vtable: *const VTable,
 pub const VTable = struct {
     /// If it returns `null` it means `result` has already been populated and
     /// `await` will be a no-op.
+    ///
+    /// Thread-safe.
     async: *const fn (
         /// Corresponds to `Io.userdata`.
         userdata: ?*anyopaque,
@@ -937,6 +939,8 @@ pub const VTable = struct {
     ) ?*AnyFuture,
     /// This function is only called when `async` returns a non-null value.
+    ///
+    /// Thread-safe.
     await: *const fn (
         /// Corresponds to `Io.userdata`.
         userdata: ?*anyopaque,
@@ -947,13 +951,41 @@ pub const VTable = struct {
         result: []u8,
     ) void,
-    createFile: *const fn (?*anyopaque, dir: fs.Dir, sub_path: []const u8, flags: fs.File.CreateFlags) fs.File.OpenError!fs.File,
-    openFile: *const fn (?*anyopaque, dir: fs.Dir, sub_path: []const u8, flags: fs.File.OpenFlags) fs.File.OpenError!fs.File,
+    /// Equivalent to `await` but initiates a cancel request.
+    ///
+    /// This function is only called when `async` returns a non-null value.
+    ///
+    /// Thread-safe.
+    cancel: *const fn (
+        /// Corresponds to `Io.userdata`.
+        userdata: ?*anyopaque,
+        /// The same value that was returned from `async`.
+        any_future: *AnyFuture,
+        /// Points to a buffer where the result is written.
+        /// The length is equal to the size in bytes of the result type.
+        result: []u8,
+    ) void,
+    /// Returns whether the current thread of execution is known to have
+    /// been requested to cancel.
+    ///
+    /// Thread-safe.
+    cancelRequested: *const fn (?*anyopaque) bool,
+    createFile: *const fn (?*anyopaque, dir: fs.Dir, sub_path: []const u8, flags: fs.File.CreateFlags) FileOpenError!fs.File,
+    openFile: *const fn (?*anyopaque, dir: fs.Dir, sub_path: []const u8, flags: fs.File.OpenFlags) FileOpenError!fs.File,
     closeFile: *const fn (?*anyopaque, fs.File) void,
-    read: *const fn (?*anyopaque, file: fs.File, buffer: []u8) fs.File.ReadError!usize,
-    write: *const fn (?*anyopaque, file: fs.File, buffer: []const u8) fs.File.WriteError!usize,
+    read: *const fn (?*anyopaque, file: fs.File, buffer: []u8) FileReadError!usize,
+    write: *const fn (?*anyopaque, file: fs.File, buffer: []const u8) FileWriteError!usize,
 };

+pub const OpenFlags = fs.File.OpenFlags;
+pub const CreateFlags = fs.File.CreateFlags;
+
+pub const FileOpenError = fs.File.OpenError || error{AsyncCancel};
+pub const FileReadError = fs.File.ReadError || error{AsyncCancel};
+pub const FileWriteError = fs.File.WriteError || error{AsyncCancel};
+
 pub const AnyFuture = opaque {};

 pub fn Future(Result: type) type {
@@ -961,6 +993,17 @@ pub fn Future(Result: type) type {
         any_future: ?*AnyFuture,
         result: Result,

+        /// Equivalent to `await` but sets a flag observable to application
+        /// code that cancellation has been requested.
+        ///
+        /// Idempotent.
+        pub fn cancel(f: *@This(), io: Io) Result {
+            const any_future = f.any_future orelse return f.result;
+            io.vtable.cancel(io.userdata, any_future, @ptrCast((&f.result)[0..1]));
+            f.any_future = null;
+            return f.result;
+        }
+
         pub fn await(f: *@This(), io: Io) Result {
             const any_future = f.any_future orelse return f.result;
             io.vtable.await(io.userdata, any_future, @ptrCast((&f.result)[0..1]));
@@ -994,11 +1037,11 @@ pub fn async(io: Io, function: anytype, args: anytype) Future(@typeInfo(@TypeOf(
     return future;
 }

-pub fn openFile(io: Io, dir: fs.Dir, sub_path: []const u8, flags: fs.File.OpenFlags) fs.File.OpenError!fs.File {
+pub fn openFile(io: Io, dir: fs.Dir, sub_path: []const u8, flags: fs.File.OpenFlags) FileOpenError!fs.File {
     return io.vtable.openFile(io.userdata, dir, sub_path, flags);
 }

-pub fn createFile(io: Io, dir: fs.Dir, sub_path: []const u8, flags: fs.File.CreateFlags) fs.File.OpenError!fs.File {
+pub fn createFile(io: Io, dir: fs.Dir, sub_path: []const u8, flags: fs.File.CreateFlags) FileOpenError!fs.File {
     return io.vtable.createFile(io.userdata, dir, sub_path, flags);
 }
@@ -1006,22 +1049,22 @@ pub fn closeFile(io: Io, file: fs.File) void {
     return io.vtable.closeFile(io.userdata, file);
 }

-pub fn read(io: Io, file: fs.File, buffer: []u8) fs.File.ReadError!usize {
+pub fn read(io: Io, file: fs.File, buffer: []u8) FileReadError!usize {
     return io.vtable.read(io.userdata, file, buffer);
 }

-pub fn write(io: Io, file: fs.File, buffer: []const u8) fs.File.WriteError!usize {
+pub fn write(io: Io, file: fs.File, buffer: []const u8) FileWriteError!usize {
     return io.vtable.write(io.userdata, file, buffer);
 }

-pub fn writeAll(io: Io, file: fs.File, bytes: []const u8) fs.File.WriteError!void {
+pub fn writeAll(io: Io, file: fs.File, bytes: []const u8) FileWriteError!void {
     var index: usize = 0;
     while (index < bytes.len) {
         index += try io.write(file, bytes[index..]);
     }
 }

-pub fn readAll(io: Io, file: fs.File, buffer: []u8) fs.File.ReadError!usize {
+pub fn readAll(io: Io, file: fs.File, buffer: []u8) FileReadError!usize {
     var index: usize = 0;
     while (index != buffer.len) {
         const amt = try io.read(file, buffer[index..]);
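With these additions, application code can request cancellation of an in-flight operation through the same vtable that runs it: `Future.cancel` forwards to the implementation's `cancel`, and blocked or subsequent IO surfaces as `error.AsyncCancel`. A minimal sketch of the intended call pattern, assuming an `io` obtained from one of the implementations below (`fetch` and `example` are invented for illustration):

    const std = @import("std");
    const Io = std.Io;

    // Invented long-running operation: every `io` call inside it can now
    // fail with error.AsyncCancel once cancellation has been requested.
    fn fetch(io: Io, dir: std.fs.Dir) anyerror!usize {
        const file = try io.openFile(dir, "data.bin", .{});
        defer io.closeFile(file);
        var buf: [4096]u8 = undefined;
        return io.readAll(file, &buf);
    }

    fn example(io: Io, dir: std.fs.Dir) void {
        var future = io.async(fetch, .{ io, dir });
        // Later, the result is no longer needed. `cancel` requests
        // cancellation, then awaits the task; it is idempotent.
        if (future.cancel(io)) |len| {
            _ = len; // completed before observing the request
        } else |err| switch (err) {
            error.AsyncCancel => {}, // the task observed the request
            else => {}, // some other IO error occurred first
        }
    }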


@@ -7,12 +7,13 @@ const EventLoop = @This();
 const Alignment = std.mem.Alignment;
 const IoUring = std.os.linux.IoUring;

+/// Must be a thread-safe allocator.
 gpa: Allocator,
 mutex: std.Thread.Mutex,
-queue: std.DoublyLinkedList(void),
+queue: std.DoublyLinkedList,
 /// Atomic copy of queue.len
 queue_len: u32,
-free: std.DoublyLinkedList(void),
+free: std.DoublyLinkedList,
 main_fiber: Fiber,
 idle_count: usize,
 threads: std.ArrayListUnmanaged(Thread),
@@ -39,7 +40,7 @@ const Thread = struct {
 const Fiber = struct {
     context: Context,
     awaiter: ?*Fiber,
-    queue_node: std.DoublyLinkedList(void).Node,
+    queue_node: std.DoublyLinkedList.Node,
     result_align: Alignment,

     const finished: ?*Fiber = @ptrFromInt(std.mem.alignBackward(usize, std.math.maxInt(usize), @alignOf(Fiber)));
@@ -447,6 +448,15 @@ pub fn @"await"(userdata: ?*anyopaque, any_future: *std.Io.AnyFuture, result: []
     event_loop.recycle(future_fiber);
 }

+pub fn cancel(userdata: ?*anyopaque, any_future: *std.Io.AnyFuture, result: []u8) void {
+    const event_loop: *EventLoop = @alignCast(@ptrCast(userdata));
+    const future_fiber: *Fiber = @alignCast(@ptrCast(any_future));
+    // TODO set a flag that makes all IO operations for this fiber return error.Canceled
+    if (@atomicLoad(?*Fiber, &future_fiber.awaiter, .acquire) != Fiber.finished) event_loop.yield(null, .{ .register_awaiter = &future_fiber.awaiter });
+    @memcpy(result, future_fiber.resultPointer());
+    event_loop.recycle(future_fiber);
+}
+
 pub fn createFile(userdata: ?*anyopaque, dir: std.fs.Dir, sub_path: []const u8, flags: std.fs.File.CreateFlags) std.fs.File.OpenError!std.fs.File {
     const el: *EventLoop = @ptrCast(@alignCast(userdata));
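Aside from the stubbed `cancel`, this file also picks up the `std.DoublyLinkedList` migration visible in the first two hunks: the list is no longer generic over a payload type, the node is embedded in the owning struct, and the owner is recovered with `@fieldParentPtr`. A self-contained sketch of that pattern, assuming the post-migration API (`Task` and the test are invented for illustration):

    const std = @import("std");

    const Task = struct {
        id: u32,
        // The node lives inside the owning struct; the list links nodes only.
        queue_node: std.DoublyLinkedList.Node = .{},
    };

    test "intrusive doubly linked list node" {
        var list: std.DoublyLinkedList = .{};
        var task: Task = .{ .id = 1 };
        list.append(&task.queue_node);
        // Recover the owning Task from the embedded node.
        const node = list.pop().?;
        const recovered: *Task = @fieldParentPtr("queue_node", node);
        try std.testing.expectEqual(@as(u32, 1), recovered.id);
    }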


@@ -1,22 +1,27 @@
 const builtin = @import("builtin");
 const std = @import("std");
+const Allocator = std.mem.Allocator;
 const assert = std.debug.assert;
 const WaitGroup = @import("WaitGroup.zig");
+const Io = std.Io;
 const Pool = @This();

+/// Must be a thread-safe allocator.
+allocator: std.mem.Allocator,
 mutex: std.Thread.Mutex = .{},
 cond: std.Thread.Condition = .{},
 run_queue: std.SinglyLinkedList = .{},
 is_running: bool = true,
-/// Must be a thread-safe allocator.
-allocator: std.mem.Allocator,
-threads: if (builtin.single_threaded) [0]std.Thread else []std.Thread,
+threads: std.ArrayListUnmanaged(std.Thread),
 ids: if (builtin.single_threaded) struct {
     inline fn deinit(_: @This(), _: std.mem.Allocator) void {}
     fn getIndex(_: @This(), _: std.Thread.Id) usize {
         return 0;
     }
 } else std.AutoArrayHashMapUnmanaged(std.Thread.Id, void),
+stack_size: usize,
+
+threadlocal var current_closure: ?*AsyncClosure = null;

 pub const Runnable = struct {
     runFn: RunProto,
@@ -33,48 +38,36 @@ pub const Options = struct {
 };

 pub fn init(pool: *Pool, options: Options) !void {
-    const allocator = options.allocator;
+    const gpa = options.allocator;
+
+    const thread_count = options.n_jobs orelse @max(1, std.Thread.getCpuCount() catch 1);
+    const threads = try gpa.alloc(std.Thread, thread_count);
+    errdefer gpa.free(threads);

     pool.* = .{
-        .allocator = allocator,
-        .threads = if (builtin.single_threaded) .{} else &.{},
+        .allocator = gpa,
+        .threads = .initBuffer(threads),
         .ids = .{},
+        .stack_size = options.stack_size,
     };

-    if (builtin.single_threaded) {
-        return;
-    }
-
-    const thread_count = options.n_jobs orelse @max(1, std.Thread.getCpuCount() catch 1);
+    if (builtin.single_threaded) return;

     if (options.track_ids) {
-        try pool.ids.ensureTotalCapacity(allocator, 1 + thread_count);
+        try pool.ids.ensureTotalCapacity(gpa, 1 + thread_count);
         pool.ids.putAssumeCapacityNoClobber(std.Thread.getCurrentId(), {});
     }
-
-    // kill and join any threads we spawned and free memory on error.
-    pool.threads = try allocator.alloc(std.Thread, thread_count);
-    var spawned: usize = 0;
-    errdefer pool.join(spawned);
-
-    for (pool.threads) |*thread| {
-        thread.* = try std.Thread.spawn(.{
-            .stack_size = options.stack_size,
-            .allocator = allocator,
-        }, worker, .{pool});
-        spawned += 1;
-    }
 }

 pub fn deinit(pool: *Pool) void {
-    pool.join(pool.threads.len); // kill and join all threads.
-    pool.ids.deinit(pool.allocator);
+    const gpa = pool.allocator;
+    pool.join();
+    pool.threads.deinit(gpa);
+    pool.ids.deinit(gpa);
     pool.* = undefined;
 }

-fn join(pool: *Pool, spawned: usize) void {
-    if (builtin.single_threaded) {
-        return;
-    }
+fn join(pool: *Pool) void {
+    if (builtin.single_threaded) return;

     {
         pool.mutex.lock();
@@ -87,11 +80,7 @@ fn join(pool: *Pool, spawned: usize) void {
     // wake up any sleeping threads (this can be done outside the mutex)
     // then wait for all the threads we know are spawned to complete.
     pool.cond.broadcast();
-    for (pool.threads[0..spawned]) |thread| {
-        thread.join();
-    }
-
-    pool.allocator.free(pool.threads);
+    for (pool.threads.items) |thread| thread.join();
 }

 /// Runs `func` in the thread pool, calling `WaitGroup.start` beforehand, and
@@ -123,26 +112,34 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args
         }
     };

-    {
-        pool.mutex.lock();
-
-        const closure = pool.allocator.create(Closure) catch {
-            pool.mutex.unlock();
-            @call(.auto, func, args);
-            wait_group.finish();
-            return;
-        };
-
-        closure.* = .{
-            .arguments = args,
-            .pool = pool,
-            .wait_group = wait_group,
-        };
-
-        pool.run_queue.prepend(&closure.runnable.node);
-        pool.mutex.unlock();
-    }
-
-    // Notify waiting threads outside the lock to try and keep the critical section small.
+    pool.mutex.lock();
+
+    const gpa = pool.allocator;
+    const closure = gpa.create(Closure) catch {
+        pool.mutex.unlock();
+        @call(.auto, func, args);
+        wait_group.finish();
+        return;
+    };
+
+    closure.* = .{
+        .arguments = args,
+        .pool = pool,
+        .wait_group = wait_group,
+    };
+
+    pool.run_queue.prepend(&closure.runnable.node);
+
+    if (pool.threads.items.len < pool.threads.capacity) {
+        pool.threads.addOneAssumeCapacity().* = std.Thread.spawn(.{
+            .stack_size = pool.stack_size,
+            .allocator = gpa,
+        }, worker, .{pool}) catch t: {
+            pool.threads.items.len -= 1;
+            break :t undefined;
+        };
+    }
+
+    pool.mutex.unlock();
     pool.cond.signal();
 }
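With `init` now preallocating thread handles and `spawnWg` spawning workers lazily (falling back to running the task inline when allocation fails), caller-side usage is unchanged. For reference, a typical pattern against this API (`work` and the test are invented for illustration):

    const std = @import("std");

    // Invented worker function.
    fn work(out: *u32) void {
        out.* = 42;
    }

    test "spawnWg usage" {
        var pool: std.Thread.Pool = undefined;
        try pool.init(.{ .allocator = std.testing.allocator });
        defer pool.deinit();

        var result: u32 = 0;
        var wait_group: std.Thread.WaitGroup = .{};
        // spawnWg calls wait_group.start() itself, per the doc comment above.
        pool.spawnWg(&wait_group, work, .{&result});
        pool.waitAndWork(&wait_group);
        try std.testing.expectEqual(@as(u32, 42), result);
    }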
@@ -179,31 +176,39 @@ pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, ar
         }
     };

-    {
-        pool.mutex.lock();
-
-        const closure = pool.allocator.create(Closure) catch {
-            const id: ?usize = pool.ids.getIndex(std.Thread.getCurrentId());
-            pool.mutex.unlock();
-            @call(.auto, func, .{id.?} ++ args);
-            wait_group.finish();
-            return;
-        };
-
-        closure.* = .{
-            .arguments = args,
-            .pool = pool,
-            .wait_group = wait_group,
-        };
-
-        pool.run_queue.prepend(&closure.runnable.node);
-        pool.mutex.unlock();
-    }
-
-    // Notify waiting threads outside the lock to try and keep the critical section small.
+    pool.mutex.lock();
+
+    const gpa = pool.allocator;
+    const closure = gpa.create(Closure) catch {
+        const id: ?usize = pool.ids.getIndex(std.Thread.getCurrentId());
+        pool.mutex.unlock();
+        @call(.auto, func, .{id.?} ++ args);
+        wait_group.finish();
+        return;
+    };
+
+    closure.* = .{
+        .arguments = args,
+        .pool = pool,
+        .wait_group = wait_group,
+    };
+
+    pool.run_queue.prepend(&closure.runnable.node);
+
+    if (pool.threads.items.len < pool.threads.capacity) {
+        pool.threads.addOneAssumeCapacity().* = std.Thread.spawn(.{
+            .stack_size = pool.stack_size,
+            .allocator = gpa,
+        }, worker, .{pool}) catch t: {
+            pool.threads.items.len -= 1;
+            break :t undefined;
+        };
+    }
+
+    pool.mutex.unlock();
     pool.cond.signal();
 }
-pub fn spawn(pool: *Pool, comptime func: anytype, args: anytype) !void {
+pub fn spawn(pool: *Pool, comptime func: anytype, args: anytype) void {
     if (builtin.single_threaded) {
         @call(.auto, func, args);
         return;
@@ -222,20 +227,32 @@ pub fn spawn(pool: *Pool, comptime func: anytype, args: anytype) !void {
         }
     };

-    {
-        pool.mutex.lock();
-        defer pool.mutex.unlock();
-
-        const closure = try pool.allocator.create(Closure);
-        closure.* = .{
-            .arguments = args,
-            .pool = pool,
-        };
-
-        pool.run_queue.prepend(&closure.runnable.node);
-    }
-
-    // Notify waiting threads outside the lock to try and keep the critical section small.
+    pool.mutex.lock();
+
+    const gpa = pool.allocator;
+    const closure = gpa.create(Closure) catch {
+        pool.mutex.unlock();
+        @call(.auto, func, args);
+        return;
+    };
+
+    closure.* = .{
+        .arguments = args,
+        .pool = pool,
+    };
+
+    pool.run_queue.prepend(&closure.runnable.node);
+
+    if (pool.threads.items.len < pool.threads.capacity) {
+        pool.threads.addOneAssumeCapacity().* = std.Thread.spawn(.{
+            .stack_size = pool.stack_size,
+            .allocator = gpa,
+        }, worker, .{pool}) catch t: {
+            pool.threads.items.len -= 1;
+            break :t undefined;
+        };
+    }
+
+    pool.mutex.unlock();
     pool.cond.signal();
 }
@@ -254,7 +271,7 @@ test spawn {
         .allocator = std.testing.allocator,
     });
     defer pool.deinit();

-    try pool.spawn(TestFn.checkRun, .{&completed});
+    pool.spawn(TestFn.checkRun, .{&completed});
 }

 try std.testing.expectEqual(true, completed);
@@ -306,15 +323,17 @@ pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void {
 }

 pub fn getIdCount(pool: *Pool) usize {
-    return @intCast(1 + pool.threads.len);
+    return @intCast(1 + pool.threads.items.len);
 }

-pub fn io(pool: *Pool) std.Io {
+pub fn io(pool: *Pool) Io {
     return .{
         .userdata = pool,
         .vtable = &.{
             .@"async" = @"async",
             .@"await" = @"await",
+            .cancel = cancel,
+            .cancelRequested = cancelRequested,
             .createFile = createFile,
             .openFile = openFile,
             .closeFile = closeFile,
@@ -326,15 +345,17 @@ pub fn io(pool: *Pool) std.Io {
 const AsyncClosure = struct {
     func: *const fn (context: *anyopaque, result: *anyopaque) void,
-    run_node: std.Thread.Pool.RunQueue.Node = .{ .data = .{ .runFn = runFn } },
+    runnable: Runnable = .{ .runFn = runFn },
     reset_event: std.Thread.ResetEvent,
+    cancel_flag: bool,
     context_offset: usize,
     result_offset: usize,

     fn runFn(runnable: *std.Thread.Pool.Runnable, _: ?usize) void {
-        const run_node: *std.Thread.Pool.RunQueue.Node = @fieldParentPtr("data", runnable);
-        const closure: *AsyncClosure = @alignCast(@fieldParentPtr("run_node", run_node));
+        const closure: *AsyncClosure = @alignCast(@fieldParentPtr("runnable", runnable));
+        current_closure = closure;
         closure.func(closure.contextPointer(), closure.resultPointer());
+        current_closure = null;
         closure.reset_event.set();
     }
@@ -359,16 +380,23 @@ const AsyncClosure = struct {
         const base: [*]u8 = @ptrCast(closure);
         return base + closure.context_offset;
     }
+
+    fn waitAndFree(closure: *AsyncClosure, gpa: Allocator, result: []u8) void {
+        closure.reset_event.wait();
+        const base: [*]align(@alignOf(AsyncClosure)) u8 = @ptrCast(closure);
+        @memcpy(result, closure.resultPointer()[0..result.len]);
+        gpa.free(base[0 .. closure.result_offset + result.len]);
+    }
 };

-pub fn @"async"(
+fn @"async"(
     userdata: ?*anyopaque,
     result: []u8,
     result_alignment: std.mem.Alignment,
     context: []const u8,
     context_alignment: std.mem.Alignment,
     start: *const fn (context: *const anyopaque, result: *anyopaque) void,
-) ?*std.Io.AnyFuture {
+) ?*Io.AnyFuture {
     const pool: *std.Thread.Pool = @alignCast(@ptrCast(userdata));

     pool.mutex.lock();
@@ -386,46 +414,87 @@ pub fn @"async"(
         .context_offset = context_offset,
         .result_offset = result_offset,
         .reset_event = .{},
+        .cancel_flag = false,
     };

     @memcpy(closure.contextPointer()[0..context.len], context);
-    pool.run_queue.prepend(&closure.run_node);
-    pool.mutex.unlock();
+    pool.run_queue.prepend(&closure.runnable.node);
+
+    if (pool.threads.items.len < pool.threads.capacity) {
+        pool.threads.addOneAssumeCapacity().* = std.Thread.spawn(.{
+            .stack_size = pool.stack_size,
+            .allocator = gpa,
+        }, worker, .{pool}) catch t: {
+            pool.threads.items.len -= 1;
+            break :t undefined;
+        };
+    }
+
+    pool.mutex.unlock();
     pool.cond.signal();

     return @ptrCast(closure);
 }

-pub fn @"await"(userdata: ?*anyopaque, any_future: *std.Io.AnyFuture, result: []u8) void {
-    const thread_pool: *std.Thread.Pool = @alignCast(@ptrCast(userdata));
+fn @"await"(userdata: ?*anyopaque, any_future: *Io.AnyFuture, result: []u8) void {
+    const pool: *std.Thread.Pool = @alignCast(@ptrCast(userdata));
     const closure: *AsyncClosure = @ptrCast(@alignCast(any_future));
-    closure.reset_event.wait();
-    const base: [*]align(@alignOf(AsyncClosure)) u8 = @ptrCast(closure);
-    @memcpy(result, closure.resultPointer()[0..result.len]);
-    thread_pool.allocator.free(base[0 .. closure.result_offset + result.len]);
+    closure.waitAndFree(pool.allocator, result);
 }

+fn cancel(userdata: ?*anyopaque, any_future: *Io.AnyFuture, result: []u8) void {
+    const pool: *std.Thread.Pool = @alignCast(@ptrCast(userdata));
+    const closure: *AsyncClosure = @ptrCast(@alignCast(any_future));
+    @atomicStore(bool, &closure.cancel_flag, true, .seq_cst);
+    closure.waitAndFree(pool.allocator, result);
+}
+
+fn cancelRequested(userdata: ?*anyopaque) bool {
+    const pool: *std.Thread.Pool = @alignCast(@ptrCast(userdata));
+    _ = pool;
+    const closure = current_closure orelse return false;
+    return @atomicLoad(bool, &closure.cancel_flag, .unordered);
+}
+
+fn checkCancel(pool: *Pool) error{AsyncCancel}!void {
+    if (cancelRequested(pool)) return error.AsyncCancel;
+}
+
-pub fn createFile(userdata: ?*anyopaque, dir: std.fs.Dir, sub_path: []const u8, flags: std.fs.File.CreateFlags) std.fs.File.OpenError!std.fs.File {
-    _ = userdata;
+pub fn createFile(
+    userdata: ?*anyopaque,
+    dir: std.fs.Dir,
+    sub_path: []const u8,
+    flags: std.fs.File.CreateFlags,
+) Io.FileOpenError!std.fs.File {
+    const pool: *std.Thread.Pool = @alignCast(@ptrCast(userdata));
+    try pool.checkCancel();
     return dir.createFile(sub_path, flags);
 }

-pub fn openFile(userdata: ?*anyopaque, dir: std.fs.Dir, sub_path: []const u8, flags: std.fs.File.OpenFlags) std.fs.File.OpenError!std.fs.File {
-    _ = userdata;
+pub fn openFile(
+    userdata: ?*anyopaque,
+    dir: std.fs.Dir,
+    sub_path: []const u8,
+    flags: std.fs.File.OpenFlags,
+) Io.FileOpenError!std.fs.File {
+    const pool: *std.Thread.Pool = @alignCast(@ptrCast(userdata));
+    try pool.checkCancel();
     return dir.openFile(sub_path, flags);
 }

 pub fn closeFile(userdata: ?*anyopaque, file: std.fs.File) void {
-    _ = userdata;
+    const pool: *std.Thread.Pool = @alignCast(@ptrCast(userdata));
+    _ = pool;
     return file.close();
 }

-pub fn read(userdata: ?*anyopaque, file: std.fs.File, buffer: []u8) std.fs.File.ReadError!usize {
-    _ = userdata;
+pub fn read(userdata: ?*anyopaque, file: std.fs.File, buffer: []u8) Io.FileReadError!usize {
+    const pool: *std.Thread.Pool = @alignCast(@ptrCast(userdata));
+    try pool.checkCancel();
     return file.read(buffer);
 }

-pub fn write(userdata: ?*anyopaque, file: std.fs.File, buffer: []const u8) std.fs.File.WriteError!usize {
-    _ = userdata;
+pub fn write(userdata: ?*anyopaque, file: std.fs.File, buffer: []const u8) Io.FileWriteError!usize {
+    const pool: *std.Thread.Pool = @alignCast(@ptrCast(userdata));
+    try pool.checkCancel();
     return file.write(buffer);
 }