// zig/lib/std/io/Reader.zig
// Commit: Andrew Kelley 6ac7931bec "std: hacking around with buffered reader / writer semantics"
// Author's note: "I think I'm going to back out these vtable changes in the next commit"
// Date: 2025-07-01 16:35:26 -07:00
//
// 202 lines
// 6.3 KiB
// Zig
const std = @import("../std.zig");
const Reader = @This();
const assert = std.debug.assert;
/// Opaque pointer to the implementation's state; passed back as the first
/// argument of every `VTable` function.
context: ?*anyopaque,
/// Dispatch table providing the read operations for this reader.
vtable: *const VTable,
pub const VTable = struct {
/// Writes bytes starting from `offset` to `bw`, or returns
/// `error.Unseekable`, indicating `streamRead` should be used instead.
///
/// Returns the number of bytes written, which will be at minimum `0` and at
/// most `limit`. The number of bytes read, including zero, does not
/// indicate end of stream.
///
/// If the reader has an internal seek position, it is not mutated.
///
/// The implementation should do a maximum of one underlying read call.
///
/// If this is `null` it is equivalent to always returning
/// `error.Unseekable`.
posRead: ?*const fn (ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Limit, offset: u64) RwResult,
/// Vectored variant of `posRead`.
/// NOTE(review): `data` is `[]const []u8` — an immutable slice of mutable
/// buffers — presumably filled by the implementation starting at `offset`;
/// no doc comment was provided, so confirm against `std.io.Writer.Result`.
posReadVec: ?*const fn (ctx: ?*anyopaque, data: []const []u8, offset: u64) Result,
/// Writes bytes from the internally tracked stream position to `bw`, or
/// returns `error.Unstreamable`, indicating `posRead` should be used
/// instead.
///
/// Returns the number of bytes written, which will be at minimum `0` and at
/// most `limit`. The number of bytes read, including zero, does not
/// indicate end of stream.
///
/// If the reader has an internal seek position, it moves forward in accordance
/// with the number of bytes return from this function.
///
/// The implementation should do a maximum of one underlying read call.
///
/// If this is `null` it is equivalent to always returning
/// `error.Unstreamable`.
streamRead: ?*const fn (ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Limit) RwResult,
/// Vectored variant of `streamRead`; advances the internal seek position.
/// NOTE(review): undocumented in the original — see `posReadVec`.
streamReadVec: ?*const fn (ctx: ?*anyopaque, data: []const []u8) Result,
/// A vtable whose every operation immediately reports end-of-stream;
/// useful as a default or for permanently-empty readers.
pub const eof: VTable = .{
.posRead = eof_posRead,
.posReadVec = eof_posReadVec,
.streamRead = eof_streamRead,
.streamReadVec = eof_streamReadVec,
};
};
/// Result type of the vectored read operations; aliases the writer-side
/// result type. NOTE(review): its field set is declared in std.io.Writer and
/// is not visible in this file — the `eof_*Vec` stubs below construct it
/// with `.end`; verify that field exists.
pub const Result = std.io.Writer.Result;
/// Combined outcome of a read-into-writer operation. Errors are reported
/// in-band through the `*_err` fields rather than as an error union return,
/// so `len` bytes may have been transferred even when an error occurred.
pub const RwResult = struct {
// Number of bytes transferred to the writer during this call.
len: usize = 0,
// Error from the reading side, if any; `{}` means success.
read_err: anyerror!void = {},
// Error from the writing side, if any; `{}` means success.
write_err: anyerror!void = {},
// True when the reader reached end of stream.
read_end: bool = false,
// True when the writer will accept no further bytes.
write_end: bool = false,
};
/// An upper bound on how many bytes an operation may transfer, encoded as a
/// non-exhaustive enum so that `.none` (no limit) costs nothing to represent.
pub const Limit = enum(usize) {
    /// No limit: effectively `maxInt(usize)` bytes.
    none = std.math.maxInt(usize),
    _,

    /// Creates a limit of exactly `n` bytes. Added because callers in this
    /// file (e.g. `streamReadAlloc`) construct limits with `.init(n)`.
    pub fn init(n: usize) Limit {
        return @enumFromInt(n);
    }

    /// Returns the smaller of `int` and this limit, as a plain integer.
    pub fn min(l: Limit, int: usize) usize {
        return @min(int, @intFromEnum(l));
    }
};
/// Returns total number of bytes written to `w`.
///
/// Prefers positional reads when the vtable provides them, falling back to
/// streaming when the reader turns out to be unseekable.
pub fn readAll(r: Reader, w: *std.io.BufferedWriter) anyerror!usize {
    // Fixed: original checked the nonexistent field `pread` and called
    // `posReadAll` without its `start_offset` argument; the Unseekable arm
    // also yielded `void` where a `usize` was required.
    if (r.vtable.posRead != null) {
        return posReadAll(r, w, 0) catch |err| switch (err) {
            // The reader advertised posRead but cannot actually seek;
            // fall back to the streaming path.
            error.Unseekable => streamReadAll(r, w),
            else => err,
        };
    }
    return streamReadAll(r, w);
}
/// Returns total number of bytes written to `w`.
///
/// May return `error.Unseekable`, indicating this function cannot be used to
/// read from the reader.
pub fn posReadAll(r: Reader, w: *std.io.BufferedWriter, start_offset: u64) anyerror!usize {
    // A null posRead is documented as equivalent to always returning
    // error.Unseekable (was an unchecked `.?` unwrap).
    const vtable_posRead = r.vtable.posRead orelse return error.Unseekable;
    var offset: u64 = start_offset;
    while (true) {
        // The vtable function returns RwResult directly; errors arrive
        // in-band via read_err/write_err, not as an error union (the
        // original applied `try` to the struct and read a nonexistent
        // `end` field).
        const status = vtable_posRead(r.context, w, .none, offset);
        // Count bytes that made it through before surfacing any error.
        offset += status.len;
        try status.read_err;
        try status.write_err;
        if (status.read_end or status.write_end) return @intCast(offset - start_offset);
    }
}
/// Returns total number of bytes written to `w`.
///
/// May return `error.Unstreamable`, indicating this function cannot be used
/// to read from the reader.
pub fn streamReadAll(r: Reader, w: *std.io.BufferedWriter) anyerror!usize {
    // A null streamRead is documented as equivalent to always returning
    // error.Unstreamable (was an unchecked `.?` unwrap).
    const vtable_streamRead = r.vtable.streamRead orelse return error.Unstreamable;
    var total: usize = 0;
    while (true) {
        // RwResult is returned by value; errors are carried in-band (the
        // original applied `try` to the struct and read a nonexistent
        // `end` field).
        const status = vtable_streamRead(r.context, w, .none);
        total += status.len;
        try status.read_err;
        try status.write_err;
        if (status.read_end or status.write_end) return total;
    }
}
/// Allocates enough memory to hold all the contents of the stream. If the allocated
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
///
/// Caller owns returned memory.
///
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn streamReadAlloc(r: Reader, gpa: std.mem.Allocator, max_size: usize) anyerror![]u8 {
    // A null streamRead is documented as equivalent to always returning
    // error.Unstreamable (was an unchecked `.?` unwrap).
    const vtable_streamRead = r.vtable.streamRead orelse return error.Unstreamable;
    var bw: std.io.BufferedWriter = .{
        .buffer = .empty,
        .mode = .{ .allocator = gpa },
    };
    const list = &bw.buffer;
    // Frees partial contents on the error path; after toOwnedSlice the list
    // is empty, making this a no-op on success.
    defer list.deinit(gpa);
    var remaining = max_size;
    while (true) {
        // Errors arrive in-band via the result fields (the original applied
        // `try` to the struct, read a nonexistent `end` field, and could
        // fall off the end of the loop with no return).
        const status = vtable_streamRead(r.context, &bw, @enumFromInt(remaining));
        try status.read_err;
        try status.write_err;
        if (status.read_end or status.write_end) return list.toOwnedSlice(gpa);
        // `limit` guarantees status.len <= remaining, so no underflow.
        remaining -= status.len;
        // Budget exhausted without observing end-of-stream: contents exceed
        // `max_size`. (A stream of exactly `max_size` bytes must signal end
        // on its final read to succeed.)
        if (remaining == 0) return error.StreamTooLong;
    }
}
/// Reads the stream until the end, ignoring all the data.
/// Returns the number of bytes discarded.
pub fn discardUntilEnd(r: Reader) anyerror!usize {
    // Drain everything into a writer that throws the bytes away; the
    // byte count reported by streamReadAll is the amount discarded.
    var sink = std.io.null_writer.unbuffered();
    return r.streamReadAll(&sink);
}
/// Wraps this reader in a `BufferedReader` whose buffer grows on demand
/// using `gpa`.
pub fn allocating(r: Reader, gpa: std.mem.Allocator) std.io.BufferedReader {
    const growable_writer: std.io.BufferedWriter = .{
        .buffer = .empty,
        .mode = .{ .allocator = gpa },
    };
    return .{
        .reader = r,
        .buffered_writer = growable_writer,
    };
}
// NOTE(review): this test cannot compile as written — see the three notes
// below. It appears to predate the vtable design above and needs rewiring.
test "when the backing reader provides one byte at a time" {
const OneByteReader = struct {
str: []const u8,
curr: usize,
// Reads at most one byte per call; returns 0 at end of input or when
// `dest` is empty. NOTE(review): never referenced by any vtable, so
// nothing actually calls it through `std.io.Reader`.
fn read(self: *@This(), dest: []u8) anyerror!usize {
if (self.str.len <= self.curr or dest.len == 0)
return 0;
dest[0] = self.str[self.curr];
self.curr += 1;
return 1;
}
// NOTE(review): the returned literal omits the required `vtable`
// field of `std.io.Reader` — this does not compile.
fn reader(self: *@This()) std.io.Reader {
return .{
.context = self,
};
}
};
const str = "This is a test";
// NOTE(review): `OneByteReader` declares no `init`; `.init(str)` fails.
var one_byte_stream: OneByteReader = .init(str);
const res = try one_byte_stream.reader().streamReadAlloc(std.testing.allocator, str.len + 1);
defer std.testing.allocator.free(res);
try std.testing.expectEqualStrings(str, res);
}
/// `posRead` stub for `VTable.eof`: transfers nothing and immediately
/// reports end of stream.
fn eof_posRead(ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Limit, offset: u64) RwResult {
    _ = ctx;
    _ = bw;
    _ = limit;
    _ = offset;
    // Fixed: RwResult has no `end` field; the reader-side end flag is
    // `read_end` (len defaults to 0, both error fields to success).
    return .{ .read_end = true };
}
/// `posReadVec` stub for `VTable.eof`: fills nothing and reports end of
/// stream.
fn eof_posReadVec(ctx: ?*anyopaque, data: []const []u8, offset: u64) Result {
_ = ctx;
_ = data;
_ = offset;
// NOTE(review): assumes std.io.Writer.Result has an `end` field — that
// type is not visible in this file (the local RwResult does not have one).
return .{ .end = true };
}
/// `streamRead` stub for `VTable.eof`: transfers nothing and immediately
/// reports end of stream.
fn eof_streamRead(ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Limit) RwResult {
    _ = ctx;
    _ = bw;
    _ = limit;
    // Fixed: RwResult has no `end` field; the reader-side end flag is
    // `read_end` (len defaults to 0, both error fields to success).
    return .{ .read_end = true };
}
/// `streamReadVec` stub for `VTable.eof`: fills nothing and reports end of
/// stream.
fn eof_streamReadVec(ctx: ?*anyopaque, data: []const []u8) Result {
_ = ctx;
_ = data;
// NOTE(review): assumes std.io.Writer.Result has an `end` field — that
// type is not visible in this file (the local RwResult does not have one).
return .{ .end = true };
}