Mirror of https://github.com/ziglang/zig.git
redo reader
This commit is contained in: commit 5aa8573f2b (parent 2fb6ce2f92)
@@ -452,12 +452,6 @@ set(ZIG_STAGE2_SOURCES
    lib/std/io.zig
    lib/std/io/Reader.zig
    lib/std/io/Writer.zig
    lib/std/io/buffered_atomic_file.zig
    lib/std/io/change_detection_stream.zig
    lib/std/io/counting_reader.zig
    lib/std/io/find_byte_writer.zig
    lib/std/io/limited_reader.zig
    lib/std/io/seekable_stream.zig
    lib/std/json.zig
    lib/std/leb128.zig
    lib/std/log.zig
@@ -374,25 +374,6 @@ pub fn LinearFifo(
        return self.buf[index];
    }

    /// Pump data from a reader into a writer.
    /// Stops when reader returns 0 bytes (EOF).
    /// Buffer size must be set before calling; a buffer length of 0 is invalid.
    pub fn pump(self: *Self, src_reader: anytype, dest_writer: anytype) !void {
        assert(self.buf.len > 0);
        while (true) {
            if (self.writableLength() > 0) {
                const n = try src_reader.read(self.writableSlice(0));
                if (n == 0) break; // EOF
                self.update(n);
            }
            self.discard(try dest_writer.write(self.readableSlice(0)));
        }
        // flush remaining data
        while (self.readableLength() > 0) {
            self.discard(try dest_writer.write(self.readableSlice(0)));
        }
    }

    pub fn toOwnedSlice(self: *Self) Allocator.Error![]T {
        if (self.head != 0) self.realign();
        assert(self.head == 0);
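For context, a minimal sketch of the kind of call site the removed `pump` helper served, using the pre-commit generic reader/writer API that this change is retiring (`copyAll` is a made-up name for illustration):

const std = @import("std");

// Hypothetical call site for the removed helper (old generic reader/writer API).
fn copyAll(in_file: std.fs.File, out_file: std.fs.File) !void {
    var buf: [4096]u8 = undefined;
    var fifo = std.fifo.LinearFifo(u8, .Slice).init(&buf);
    // Reads from `in_file` until EOF, flushing buffered bytes into `out_file`.
    try fifo.pump(in_file.reader(), out_file.writer());
}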
@@ -1586,10 +1586,14 @@ fn writeFileAllSendfile(self: File, in_file: File, args: WriteFileOptions) posix
    }
}

pub const Reader = io.Reader(File, ReadError, read);

pub fn reader(file: File) Reader {
    return .{ .context = file };
pub fn reader(file: File) std.io.Reader {
    return .{
        .context = handleToOpaque(file.handle),
        .vtable = .{
            .seekRead = reader_seekRead,
            .streamRead = reader_streamRead,
        },
    };
}

pub fn writer(file: File) std.io.Writer {
@@ -1606,6 +1610,23 @@ pub fn writer(file: File) std.io.Writer {
/// vectors through the underlying write calls as possible.
const max_buffers_len = 16;

pub fn reader_seekRead(
    context: *anyopaque,
    bw: *std.io.BufferedWriter,
    limit: std.io.Reader.Limit,
    offset: u64,
) anyerror!usize {
    const file = opaqueToHandle(context);
    const len: std.io.Writer.Len = if (limit.unwrap()) |l| .init(l) else .entire_file;
    return writer.writeFile(bw, file, .init(offset), len, &.{}, 0);
}

pub fn reader_streamRead(context: *anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Reader.Limit) anyerror!usize {
    const file = opaqueToHandle(context);
    const len: std.io.Writer.Len = if (limit.unwrap()) |l| .init(l) else .entire_file;
    return writer.writeFile(bw, file, .none, len, &.{}, 0);
}

pub fn writer_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
    const file = opaqueToHandle(context);
    var splat_buffer: [256]u8 = undefined;
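A sketch (not part of the diff) of how a call site might drive the new vtable-based file reader, assuming the `std.io.Reader` and `std.io.BufferedWriter` types introduced elsewhere in this commit; `dumpFile` is a hypothetical name:

const std = @import("std");

// Hypothetical usage of the new interface (names as introduced by this commit).
fn dumpFile(file: std.fs.File, out: *std.io.BufferedWriter) !usize {
    const r: std.io.Reader = file.reader(); // vtable: reader_seekRead / reader_streamRead
    // streamReadAll loops over vtable.streamRead until `Status.end` is set,
    // returning the total number of bytes written to `out`.
    return r.streamReadAll(out);
}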
lib/std/io.zig (205 lines changed)
@ -62,195 +62,14 @@ pub fn getStdIn() File {
|
||||
return .{ .handle = getStdInHandle() };
|
||||
}
|
||||
|
||||
pub fn GenericReader(
|
||||
comptime Context: type,
|
||||
comptime ReadError: type,
|
||||
/// Returns the number of bytes read. It may be less than buffer.len.
|
||||
/// If the number of bytes read is 0, it means end of stream.
|
||||
/// End of stream is not an error condition.
|
||||
comptime readFn: fn (context: Context, buffer: []u8) ReadError!usize,
|
||||
) type {
|
||||
return struct {
|
||||
context: Context,
|
||||
|
||||
pub const Error = ReadError;
|
||||
pub const NoEofError = ReadError || error{
|
||||
EndOfStream,
|
||||
};
|
||||
|
||||
pub inline fn read(self: Self, buffer: []u8) Error!usize {
|
||||
return readFn(self.context, buffer);
|
||||
}
|
||||
|
||||
pub inline fn readAll(self: Self, buffer: []u8) Error!usize {
|
||||
return @errorCast(self.any().readAll(buffer));
|
||||
}
|
||||
|
||||
pub inline fn readAtLeast(self: Self, buffer: []u8, len: usize) Error!usize {
|
||||
return @errorCast(self.any().readAtLeast(buffer, len));
|
||||
}
|
||||
|
||||
pub inline fn readNoEof(self: Self, buf: []u8) NoEofError!void {
|
||||
return @errorCast(self.any().readNoEof(buf));
|
||||
}
|
||||
|
||||
pub inline fn readAllArrayList(
|
||||
self: Self,
|
||||
array_list: *std.ArrayList(u8),
|
||||
max_append_size: usize,
|
||||
) (error{StreamTooLong} || Allocator.Error || Error)!void {
|
||||
return @errorCast(self.any().readAllArrayList(array_list, max_append_size));
|
||||
}
|
||||
|
||||
pub inline fn readAllArrayListAligned(
|
||||
self: Self,
|
||||
comptime alignment: ?Alignment,
|
||||
array_list: *std.ArrayListAligned(u8, alignment),
|
||||
max_append_size: usize,
|
||||
) (error{StreamTooLong} || Allocator.Error || Error)!void {
|
||||
return @errorCast(self.any().readAllArrayListAligned(
|
||||
alignment,
|
||||
array_list,
|
||||
max_append_size,
|
||||
));
|
||||
}
|
||||
|
||||
pub inline fn readAllAlloc(
|
||||
self: Self,
|
||||
allocator: Allocator,
|
||||
max_size: usize,
|
||||
) (Error || Allocator.Error || error{StreamTooLong})![]u8 {
|
||||
return @errorCast(self.any().readAllAlloc(allocator, max_size));
|
||||
}
|
||||
|
||||
pub inline fn streamUntilDelimiter(
|
||||
self: Self,
|
||||
writer: *std.io.BufferedWriter,
|
||||
delimiter: u8,
|
||||
optional_max_size: ?usize,
|
||||
) anyerror!void {
|
||||
return self.any().streamUntilDelimiter(
|
||||
writer,
|
||||
delimiter,
|
||||
optional_max_size,
|
||||
);
|
||||
}
|
||||
|
||||
pub inline fn skipUntilDelimiterOrEof(self: Self, delimiter: u8) Error!void {
|
||||
return @errorCast(self.any().skipUntilDelimiterOrEof(delimiter));
|
||||
}
|
||||
|
||||
pub inline fn readByte(self: Self) NoEofError!u8 {
|
||||
return @errorCast(self.any().readByte());
|
||||
}
|
||||
|
||||
pub inline fn readByteSigned(self: Self) NoEofError!i8 {
|
||||
return @errorCast(self.any().readByteSigned());
|
||||
}
|
||||
|
||||
pub inline fn readBytesNoEof(
|
||||
self: Self,
|
||||
comptime num_bytes: usize,
|
||||
) NoEofError![num_bytes]u8 {
|
||||
return @errorCast(self.any().readBytesNoEof(num_bytes));
|
||||
}
|
||||
|
||||
pub inline fn readIntoBoundedBytes(
|
||||
self: Self,
|
||||
comptime num_bytes: usize,
|
||||
bounded: *std.BoundedArray(u8, num_bytes),
|
||||
) Error!void {
|
||||
return @errorCast(self.any().readIntoBoundedBytes(num_bytes, bounded));
|
||||
}
|
||||
|
||||
pub inline fn readBoundedBytes(
|
||||
self: Self,
|
||||
comptime num_bytes: usize,
|
||||
) Error!std.BoundedArray(u8, num_bytes) {
|
||||
return @errorCast(self.any().readBoundedBytes(num_bytes));
|
||||
}
|
||||
|
||||
pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) NoEofError!T {
|
||||
return @errorCast(self.any().readInt(T, endian));
|
||||
}
|
||||
|
||||
pub inline fn readVarInt(
|
||||
self: Self,
|
||||
comptime ReturnType: type,
|
||||
endian: std.builtin.Endian,
|
||||
size: usize,
|
||||
) NoEofError!ReturnType {
|
||||
return @errorCast(self.any().readVarInt(ReturnType, endian, size));
|
||||
}
|
||||
|
||||
pub const SkipBytesOptions = AnyReader.SkipBytesOptions;
|
||||
|
||||
pub inline fn skipBytes(
|
||||
self: Self,
|
||||
num_bytes: u64,
|
||||
comptime options: SkipBytesOptions,
|
||||
) NoEofError!void {
|
||||
return @errorCast(self.any().skipBytes(num_bytes, options));
|
||||
}
|
||||
|
||||
pub inline fn isBytes(self: Self, slice: []const u8) NoEofError!bool {
|
||||
return @errorCast(self.any().isBytes(slice));
|
||||
}
|
||||
|
||||
pub inline fn readStruct(self: Self, comptime T: type) NoEofError!T {
|
||||
return @errorCast(self.any().readStruct(T));
|
||||
}
|
||||
|
||||
pub inline fn readStructEndian(self: Self, comptime T: type, endian: std.builtin.Endian) NoEofError!T {
|
||||
return @errorCast(self.any().readStructEndian(T, endian));
|
||||
}
|
||||
|
||||
pub const ReadEnumError = NoEofError || error{
|
||||
/// An integer was read, but it did not match any of the tags in the supplied enum.
|
||||
InvalidValue,
|
||||
};
|
||||
|
||||
pub inline fn readEnum(
|
||||
self: Self,
|
||||
comptime Enum: type,
|
||||
endian: std.builtin.Endian,
|
||||
) ReadEnumError!Enum {
|
||||
return @errorCast(self.any().readEnum(Enum, endian));
|
||||
}
|
||||
|
||||
pub inline fn any(self: *const Self) AnyReader {
|
||||
return .{
|
||||
.context = @ptrCast(&self.context),
|
||||
.readFn = typeErasedReadFn,
|
||||
};
|
||||
}
|
||||
|
||||
const Self = @This();
|
||||
|
||||
fn typeErasedReadFn(context: *const anyopaque, buffer: []u8) anyerror!usize {
|
||||
const ptr: *const Context = @alignCast(@ptrCast(context));
|
||||
return readFn(ptr.*, buffer);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Deprecated; consider switching to `AnyReader` or use `GenericReader`
/// to use previous API. To be removed after 0.14.0 is tagged.
pub const Reader = GenericReader;
pub const Reader = @import("io/Reader.zig");
pub const Writer = @import("io/Writer.zig");

pub const AnyReader = @import("io/Reader.zig");

pub const SeekableStream = @import("io/seekable_stream.zig").SeekableStream;

pub const BufferedReader = @import("io/BufferedReader.zig");
pub const BufferedWriter = @import("io/BufferedWriter.zig");
pub const AllocatingWriter = @import("io/AllocatingWriter.zig");

pub const BufferedReader = @import("io/buffered_reader.zig").BufferedReader;
pub const bufferedReader = @import("io/buffered_reader.zig").bufferedReader;
pub const bufferedReaderSize = @import("io/buffered_reader.zig").bufferedReaderSize;

pub const FixedBufferStream = @import("io/FixedBufferStream.zig");
pub const CountingWriter = @import("io/CountingWriter.zig");
pub const CountingReader = @import("io/CountingReader.zig");

pub const CWriter = @import("io/c_writer.zig").CWriter;
pub const cWriter = @import("io/c_writer.zig").cWriter;
@@ -258,10 +77,6 @@ pub const cWriter = @import("io/c_writer.zig").cWriter;
pub const LimitedReader = @import("io/limited_reader.zig").LimitedReader;
pub const limitedReader = @import("io/limited_reader.zig").limitedReader;

pub const CountingWriter = @import("io/CountingWriter.zig");
pub const CountingReader = @import("io/counting_reader.zig").CountingReader;
pub const countingReader = @import("io/counting_reader.zig").countingReader;

pub const MultiWriter = @import("io/multi_writer.zig").MultiWriter;
pub const multiWriter = @import("io/multi_writer.zig").multiWriter;

@@ -279,8 +94,6 @@ pub const findByteWriter = @import("io/find_byte_writer.zig").findByteWriter;

pub const BufferedAtomicFile = @import("io/buffered_atomic_file.zig").BufferedAtomicFile;

pub const StreamSource = @import("io/stream_source.zig").StreamSource;

pub const tty = @import("io/tty.zig");

/// A `Writer` that discards all data.
@@ -725,18 +538,16 @@ pub fn PollFiles(comptime StreamEnum: type) type {
}

test {
    _ = AnyReader;
    _ = BufferedWriter;
    _ = BufferedReader;
    _ = Reader;
    _ = Writer;
    _ = CountingWriter;
    _ = FixedBufferStream;
    _ = CountingReader;
    _ = AllocatingWriter;
    _ = @import("io/bit_reader.zig");
    _ = @import("io/bit_writer.zig");
    _ = @import("io/buffered_atomic_file.zig");
    _ = @import("io/buffered_reader.zig");
    _ = @import("io/c_writer.zig");
    _ = @import("io/counting_reader.zig");
    _ = @import("io/seekable_stream.zig");
    _ = @import("io/stream_source.zig");
    _ = @import("io/test.zig");
}
lib/std/io/BufferedReader.zig (new file, 561 lines)
@@ -0,0 +1,561 @@
const builtin = @import("builtin");
const native_endian = builtin.target.cpu.arch.endian();

const std = @import("../std.zig");
const assert = std.debug.assert;
const testing = std.testing;
const BufferedWriter = std.io.BufferedWriter;
const Reader = std.io.Reader;

const BufferedReader = @This();

/// Number of bytes which have been consumed from `storage`.
seek: usize,
storage: BufferedWriter,
unbuffered_reader: Reader,

pub fn initFixed(br: *BufferedReader, buffer: []const u8) void {
    br.* = .{
        .seek = 0,
        .storage = .{
            .buffer = buffer,
            .mode = .fixed,
        },
        .reader = .{
            .context = br,
            .vtable = &.{
                .streamRead = null,
                .seekRead = null,
            },
        },
    };
}

pub fn deinit(br: *BufferedReader) void {
    br.storage.deinit();
    br.* = undefined;
}

/// Although `BufferedReader` can easily satisfy the `Reader` interface, it's
/// generally more practical to pass a `BufferedReader` instance itself around,
/// since it will result in fewer calls across vtable boundaries.
pub fn reader(br: *BufferedReader) Reader {
    return .{
        .context = br,
        .vtable = &.{
            .streamRead = passthru_streamRead,
            .seekRead = passthru_seekRead,
        },
    };
}

fn passthru_streamRead(ctx: *anyopaque, bw: *BufferedWriter, limit: Reader.Limit) anyerror!Reader.Status {
    const br: *BufferedReader = @alignCast(@ptrCast(ctx));
    const buffer = br.storage.buffer.items;
    const buffered = buffer[br.seek..];
    const limited = buffered[0..limit.min(buffered.len)];
    if (limited.len > 0) {
        const n = try bw.writeSplat(limited, 1);
        br.seek += n;
        return .{
            .end = false,
            .len = @intCast(n),
        };
    }
    return br.unbuffered_reader.streamRead(bw, limit);
}

fn passthru_seekRead(ctx: *anyopaque, bw: *BufferedWriter, limit: Reader.Limit, off: u64) anyerror!Reader.Status {
    const br: *BufferedReader = @alignCast(@ptrCast(ctx));
    const buffer = br.storage.buffer.items;
    if (off < buffer.len) {
        const send = buffer[off..limit.min(buffer.len)];
        return bw.writeSplat(send, 1);
    }
    return br.unbuffered_reader.seekRead(bw, limit, off - buffer.len);
}
|
||||
|
||||
/// Returns the next `n` bytes from `unbuffered_reader`, filling the buffer as
/// necessary.
///
/// Invalidates previously returned values from `peek`.
///
/// Asserts that the `BufferedReader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `toss`
pub fn peek(br: *BufferedReader, n: usize) anyerror![]u8 {
    const list = &br.storage.buffer;
    assert(n <= list.capacity);
    try fill(br, n);
    return list.items[br.seek..][0..n];
}

/// Skips the next `n` bytes from the stream, advancing the seek position. This
/// is typically used after `peek`, which guarantees the bytes are buffered.
///
/// Asserts that the number of bytes buffered is at least as many as `n`.
///
/// See also:
/// * `peek`.
/// * `discard`.
pub fn toss(br: *BufferedReader, n: usize) void {
    br.seek += n;
    assert(br.seek <= br.storage.buffer.items.len);
}

/// Equivalent to `peek` + `toss`.
pub fn take(br: *BufferedReader, n: usize) anyerror![]u8 {
    const result = try peek(br, n);
    toss(br, n);
    return result;
}

/// Returns the next `n` bytes from `unbuffered_reader` as an array, filling
/// the buffer as necessary.
///
/// Asserts that the `BufferedReader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `take`
pub fn takeArray(br: *BufferedReader, comptime n: usize) anyerror!*[n]u8 {
    return (try take(br, n))[0..n];
}
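A small sketch (not part of the diff) of the peek/toss/take pattern described above, assuming a `BufferedReader` with enough buffer capacity; the "ZIG!" magic, `readHeader` name, and `error.BadMagic` are made up for illustration:

// Hypothetical: parse a 4-byte magic followed by a little-endian length prefix.
fn readHeader(br: *BufferedReader) !u32 {
    const magic = try br.peek(4); // look ahead without consuming
    if (!std.mem.eql(u8, magic, "ZIG!")) return error.BadMagic;
    br.toss(4); // consume the bytes that peek guaranteed are buffered
    const len_bytes = try br.takeArray(4); // peek + toss in one step
    return std.mem.readInt(u32, len_bytes, .little);
}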
|
||||
|
||||
/// Skips the next `n` bytes from the stream, advancing the seek position.
|
||||
///
|
||||
/// Unlike `toss` which is infallible, in this function `n` can be any amount.
|
||||
///
|
||||
/// Returns `error.EndOfStream` if fewer than `n` bytes could be discarded.
|
||||
///
|
||||
/// See also:
|
||||
/// * `toss`
|
||||
/// * `discardAll`
|
||||
pub fn discard(br: *BufferedReader, n: usize) anyerror!void {
|
||||
const list = &br.storage.buffer;
|
||||
var remaining = n;
|
||||
while (remaining > 0) {
|
||||
const proposed_seek = br.seek + remaining;
|
||||
if (proposed_seek <= list.items.len) {
|
||||
br.seek = proposed_seek;
|
||||
return;
|
||||
}
|
||||
remaining -= (list.items.len - br.seek);
|
||||
list.items.len = 0;
|
||||
br.seek = 0;
|
||||
const status = try br.unbuffered_reader.streamRead(&br.storage, .none);
|
||||
if (remaining <= list.items.len) continue;
|
||||
if (status.end) return error.EndOfStream;
|
||||
}
|
||||
}
|
||||
|
||||
/// Reads the stream until the end, ignoring all the data.
|
||||
/// Returns the number of bytes discarded.
|
||||
pub fn discardAll(br: *BufferedReader) anyerror!usize {
|
||||
const list = &br.storage.buffer;
|
||||
var total: usize = list.items.len;
|
||||
list.items.len = 0;
|
||||
total += try br.unbuffered_reader.discardAll();
|
||||
return total;
|
||||
}
|
||||
|
||||
/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
|
||||
/// the seek position.
|
||||
///
|
||||
/// Invalidates previously returned values from `peek`.
|
||||
///
|
||||
/// If the provided buffer cannot be filled completely, `error.EndOfStream` is
|
||||
/// returned instead.
|
||||
///
|
||||
/// See also:
|
||||
/// * `peek`
|
||||
pub fn read(br: *BufferedReader, buffer: []u8) anyerror!void {
|
||||
const list = &br.storage.buffer;
|
||||
const in_buffer = list.items;
|
||||
const seek = br.seek;
|
||||
const proposed_seek = seek + in_buffer.len;
|
||||
if (proposed_seek <= in_buffer.len) {
|
||||
@memcpy(buffer, in_buffer[seek..proposed_seek]);
|
||||
br.seek = proposed_seek;
|
||||
return;
|
||||
}
|
||||
@memcpy(buffer[0..in_buffer.len], in_buffer);
|
||||
list.items.len = 0;
|
||||
br.seek = 0;
|
||||
var i: usize = in_buffer.len;
|
||||
while (true) {
|
||||
const status = try br.unbuffered_reader.streamRead(&br.storage, .none);
|
||||
const next_i = i + list.items.len;
|
||||
if (next_i >= buffer.len) {
|
||||
const remaining = buffer[i..];
|
||||
@memcpy(remaining, list.items[0..remaining.len]);
|
||||
br.seek = remaining.len;
|
||||
return;
|
||||
}
|
||||
if (status.end) return error.EndOfStream;
|
||||
@memcpy(buffer[i..next_i], list.items);
|
||||
list.items.len = 0;
|
||||
i = next_i;
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a slice of the next bytes of buffered data from the stream until
|
||||
/// `delimiter` is found, advancing the seek position.
|
||||
///
|
||||
/// Returned slice includes the delimiter as the last byte.
|
||||
///
|
||||
/// If the stream ends before the delimiter is found, `error.EndOfStream` is
|
||||
/// returned.
|
||||
///
|
||||
/// If the delimiter is not found within a number of bytes matching the
|
||||
/// capacity of the `BufferedReader`, `error.StreamTooLong` is returned.
|
||||
///
|
||||
/// Invalidates previously returned values from `peek`.
|
||||
///
|
||||
/// See also:
|
||||
/// * `takeDelimiterConclusive`
|
||||
/// * `peekDelimiterInclusive`
|
||||
pub fn takeDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
|
||||
const result = try peekDelimiterInclusive(br, delimiter);
|
||||
toss(result.len);
|
||||
return result;
|
||||
}
|
||||
|
||||
pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
|
||||
const list = &br.storage.buffer;
|
||||
const buffer = list.items;
|
||||
const seek = br.seek;
|
||||
if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| {
|
||||
@branchHint(.likely);
|
||||
return buffer[seek .. end + 1];
|
||||
}
|
||||
const remainder = buffer[seek..];
|
||||
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
|
||||
var i = remainder.len;
|
||||
list.items.len = i;
|
||||
br.seek = 0;
|
||||
while (i < list.capacity) {
|
||||
const status = try br.unbuffered_reader.streamRead(&br.storage, .none);
|
||||
if (std.mem.indexOfScalarPos(u8, list.items, i, delimiter)) |end| {
|
||||
return list.items[0 .. end + 1];
|
||||
}
|
||||
if (status.end) return error.EndOfStream;
|
||||
i = list.items.len;
|
||||
}
|
||||
return error.StreamTooLong;
|
||||
}
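A sketch (not part of the diff) of line-oriented reading with the inclusive delimiter API above, assuming newline-terminated input and a buffer capacity large enough for one line; `countLines` is a hypothetical name:

// Hypothetical: iterate newline-terminated lines; stops at end of stream.
fn countLines(br: *BufferedReader) !usize {
    var n: usize = 0;
    while (true) {
        const line = br.takeDelimiterInclusive('\n') catch |err| switch (err) {
            error.EndOfStream => return n,
            else => return err,
        };
        _ = line; // includes the trailing '\n'
        n += 1;
    }
}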
|
||||
|
||||
/// Returns a slice of the next bytes of buffered data from the stream until
|
||||
/// `delimiter` is found, advancing the seek position.
|
||||
///
|
||||
/// Returned slice excludes the delimiter.
|
||||
///
|
||||
/// End-of-stream is treated equivalent to a delimiter.
|
||||
///
|
||||
/// If the delimiter is not found within a number of bytes matching the
|
||||
/// capacity of the `BufferedReader`, `error.StreamTooLong` is returned.
|
||||
///
|
||||
/// Invalidates previously returned values from `peek`.
|
||||
///
|
||||
/// See also:
|
||||
/// * `takeDelimiterInclusive`
|
||||
/// * `peekDelimiterConclusive`
|
||||
pub fn takeDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
|
||||
const result = try peekDelimiterConclusive(br, delimiter);
|
||||
toss(result.len);
|
||||
return result;
|
||||
}
|
||||
|
||||
pub fn peekDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
|
||||
const list = &br.storage.buffer;
|
||||
const buffer = list.items;
|
||||
const seek = br.seek;
|
||||
if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| {
|
||||
@branchHint(.likely);
|
||||
return buffer[seek..end];
|
||||
}
|
||||
const remainder = buffer[seek..];
|
||||
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
|
||||
var i = remainder.len;
|
||||
list.items.len = i;
|
||||
br.seek = 0;
|
||||
while (i < list.capacity) {
|
||||
const status = try br.unbuffered_reader.streamRead(&br.storage, .none);
|
||||
if (std.mem.indexOfScalarPos(u8, list.items, i, delimiter)) |end| {
|
||||
return list.items[0 .. end + 1];
|
||||
}
|
||||
if (status.end) return list.items;
|
||||
i = list.items.len;
|
||||
}
|
||||
return error.StreamTooLong;
|
||||
}
|
||||
|
||||
/// Appends to `bw` contents by reading from the stream until `delimiter` is found.
|
||||
/// Does not write the delimiter itself.
|
||||
///
|
||||
/// If stream ends before delimiter found, returns `error.EndOfStream`.
|
||||
///
|
||||
/// Returns number of bytes streamed.
|
||||
pub fn streamReadDelimiter(br: *BufferedReader, bw: *std.io.BufferedWriter, delimiter: u8) anyerror!usize {
|
||||
_ = br;
|
||||
_ = bw;
|
||||
_ = delimiter;
|
||||
@panic("TODO");
|
||||
}
|
||||
|
||||
/// Appends to `bw` contents by reading from the stream until `delimiter` is found.
|
||||
/// Does not write the delimiter itself.
|
||||
///
|
||||
/// Succeeds if stream ends before delimiter found.
|
||||
///
|
||||
/// Returns number of bytes streamed as well as whether the input reached the end.
|
||||
/// The end is not signaled to the writer.
|
||||
pub fn streamReadDelimiterConclusive(
|
||||
br: *BufferedReader,
|
||||
bw: *std.io.BufferedWriter,
|
||||
delimiter: u8,
|
||||
) anyerror!Reader.Status {
|
||||
_ = br;
|
||||
_ = bw;
|
||||
_ = delimiter;
|
||||
@panic("TODO");
|
||||
}
|
||||
|
||||
/// Appends to `bw` contents by reading from the stream until `delimiter` is found.
|
||||
/// Does not write the delimiter itself.
|
||||
///
|
||||
/// If `limit` is exceeded, returns `error.StreamTooLong`.
|
||||
pub fn streamReadDelimiterLimited(
|
||||
br: *BufferedReader,
|
||||
bw: *BufferedWriter,
|
||||
delimiter: u8,
|
||||
limit: usize,
|
||||
) anyerror!void {
|
||||
_ = br;
|
||||
_ = bw;
|
||||
_ = delimiter;
|
||||
_ = limit;
|
||||
@panic("TODO");
|
||||
}
|
||||
|
||||
/// Reads from the stream until specified byte is found, discarding all data,
|
||||
/// including the delimiter.
|
||||
///
|
||||
/// If end of stream is found, this function succeeds.
|
||||
pub fn discardDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror!void {
|
||||
_ = br;
|
||||
_ = delimiter;
|
||||
@panic("TODO");
|
||||
}
|
||||
|
||||
/// Reads from the stream until specified byte is found, discarding all data,
|
||||
/// excluding the delimiter.
|
||||
///
|
||||
/// If end of stream is found, `error.EndOfStream` is returned.
|
||||
pub fn discardDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror!void {
|
||||
_ = br;
|
||||
_ = delimiter;
|
||||
@panic("TODO");
|
||||
}
|
||||
|
||||
/// Fills the buffer such that it contains at least `n` bytes, without
|
||||
/// advancing the seek position.
|
||||
///
|
||||
/// Returns `error.EndOfStream` if there are fewer than `n` bytes remaining.
|
||||
///
|
||||
/// Asserts buffer capacity is at least `n`.
|
||||
pub fn fill(br: *BufferedReader, n: usize) anyerror!void {
|
||||
assert(n <= br.storage.buffer.capacity);
|
||||
const list = &br.storage.buffer;
|
||||
const buffer = list.items;
|
||||
const seek = br.seek;
|
||||
if (seek + n <= buffer.len) {
|
||||
@branchHint(.likely);
|
||||
return;
|
||||
}
|
||||
const remainder = buffer[seek..];
|
||||
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
|
||||
list.items.len = remainder.len;
|
||||
br.seek = 0;
|
||||
while (true) {
|
||||
const status = try br.unbuffered_reader.streamRead(&br.storage, .none);
|
||||
if (n <= list.items.len) return;
|
||||
if (status.end) return error.EndOfStream;
|
||||
}
|
||||
}
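A sketch (not part of the diff) of using `fill` for bounded lookahead without consuming, for example to sniff a format before dispatching; `isGzip` is a hypothetical name, the gzip magic bytes are standard:

// Hypothetical: ensure 2 bytes are buffered, then branch on them without consuming.
fn isGzip(br: *BufferedReader) !bool {
    try br.fill(2); // error.EndOfStream if fewer than 2 bytes remain
    const bytes = try br.peek(2); // seek position is unchanged
    return bytes[0] == 0x1f and bytes[1] == 0x8b;
}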
|
||||
|
||||
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
pub fn takeByte(br: *BufferedReader) anyerror!u8 {
    if (br.seek >= br.storage.buffer.items.len) {
        @branchHint(.unlikely);
        // `fill` may compact the buffer and reset `seek`, so reload both after it.
        try fill(br, 1);
    }
    const seek = br.seek;
    br.seek = seek + 1;
    return br.storage.buffer.items[seek];
}

/// Same as `takeByte` except the returned byte is signed.
pub fn takeByteSigned(br: *BufferedReader) anyerror!i8 {
    return @bitCast(try br.takeByte());
}

/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
pub inline fn takeInt(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) anyerror!T {
    const n = @divExact(@typeInfo(T).int.bits, 8);
    return std.mem.readInt(T, try takeArray(br, n), endian);
}

/// Asserts the buffer was initialized with a capacity at least `n`.
pub fn takeVarInt(br: *BufferedReader, comptime Int: type, endian: std.builtin.Endian, n: usize) anyerror!Int {
    assert(n <= @sizeOf(Int));
    return std.mem.readVarInt(Int, try take(br, n), endian);
}

/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
pub fn takeStruct(br: *BufferedReader, comptime T: type) anyerror!*align(1) T {
    // Only extern and packed structs have defined in-memory layout.
    comptime assert(@typeInfo(T).@"struct".layout != .auto);
    return @ptrCast(try takeArray(br, @sizeOf(T)));
}

/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
pub fn takeStructEndian(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) anyerror!T {
    var res = (try br.takeStruct(T)).*;
    if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
    return res;
}

/// Reads an integer with the same size as the given enum's tag type. If the
/// integer matches an enum tag, casts the integer to the enum tag and returns
/// it. Otherwise, returns `error.InvalidEnumTag`.
///
/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`.
pub fn takeEnum(br: *BufferedReader, comptime Enum: type, endian: std.builtin.Endian) anyerror!Enum {
    const Tag = @typeInfo(Enum).@"enum".tag_type;
    const int = try takeInt(br, Tag, endian);
    return std.meta.intToEnum(Enum, int);
}
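A sketch (not part of the diff) combining the typed take helpers above to decode a small binary record; the record layout, `Header`, `Kind`, and `readRecord` names are made up for illustration:

// Hypothetical record: a defined-layout header followed by a tagged kind byte.
const Header = extern struct { len: u32, crc: u32 };
const Kind = enum(u8) { data = 0, eof = 1 };

fn readRecord(br: *BufferedReader) !struct { header: Header, kind: Kind } {
    const header = try br.takeStructEndian(Header, .little);
    const kind = try br.takeEnum(Kind, .little);
    return .{ .header = header, .kind = kind };
}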
|
||||
|
||||
test initFixed {
|
||||
var br: BufferedReader = undefined;
|
||||
br.initFixed("a\x02");
|
||||
try testing.expect((try br.takeByte()) == 'a');
|
||||
try testing.expect((try br.takeEnum(enum(u8) {
|
||||
a = 0,
|
||||
b = 99,
|
||||
c = 2,
|
||||
d = 3,
|
||||
}, builtin.cpu.arch.endian())) == .c);
|
||||
try testing.expectError(error.EndOfStream, br.takeByte());
|
||||
}
|
||||
|
||||
test peek {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test toss {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test take {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test takeArray {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test discard {
|
||||
var br: BufferedReader = undefined;
|
||||
br.initFixed("foobar");
|
||||
try br.discard(3);
|
||||
try testing.expectEqualStrings("bar", try br.take(3));
|
||||
try br.discard(0);
|
||||
try testing.expectError(error.EndOfStream, br.discard(1));
|
||||
}
|
||||
|
||||
test discardAll {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test read {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test takeDelimiterInclusive {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test peekDelimiterInclusive {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test takeDelimiterConclusive {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test peekDelimiterConclusive {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test streamReadDelimiter {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test streamReadDelimiterConclusive {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test streamReadDelimiterLimited {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test discardDelimiterConclusive {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test discardDelimiterInclusive {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test fill {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test takeByte {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test takeByteSigned {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test takeInt {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test takeVarInt {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test takeStruct {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test takeStructEndian {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
|
||||
test takeEnum {
|
||||
return error.Unimplemented;
|
||||
}
|
||||
@@ -3,24 +3,37 @@ const BufferedWriter = @This();
const assert = std.debug.assert;
const native_endian = @import("builtin").target.cpu.arch.endian();
const Writer = std.io.Writer;
const Allocator = std.mem.Allocator;
const testing = std.testing;

/// Underlying stream to send bytes to.
///
/// A write will only be sent here if it could not fit into `buffer`, or if it
/// is a `writeFile`.
///
/// `unbuffered_writer` may modify `buffer` if the number of bytes returned
/// equals number of bytes provided. This property is exploited by
/// `std.io.AllocatingWriter` for example.
unbuffered_writer: Writer,
/// User-provided storage that must outlive this `BufferedWriter`.
///
/// If this has length zero, the writer is unbuffered, and `flush` is a no-op.
buffer: []u8,
/// Marks the end of `buffer` - before this are buffered bytes, after this is
/// undefined.
end: usize = 0,
/// If this has capacity zero, the writer is unbuffered, and `flush` is a no-op.
buffer: std.ArrayListUnmanaged(u8),
mode: union(enum) {
    /// Return `error.NoSpaceLeft` if a write could not fit into the buffer.
    fixed,
    /// Underlying stream to send bytes to.
    ///
    /// A write will only be sent here if it could not fit into `buffer`, or if
    /// it is a `writeFile`.
    ///
    /// `unbuffered_writer` may modify `buffer` if the number of bytes returned
    /// equals number of bytes provided. This property is exploited by
    /// `std.io.AllocatingWriter` for example.
    writer: Writer,
    /// If this is provided, `buffer` will grow superlinearly rather than
    /// become full.
    allocator: Allocator,
},

pub fn deinit(bw: *BufferedWriter) void {
    switch (bw.mode) {
        .allocator => |gpa| bw.buffer.deinit(gpa),
        .fixed, .writer => {},
    }
    bw.* = undefined;
}
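A sketch (not part of the diff) of the three modes described by the union above; the `.fixed` case mirrors the `initFixed` test later in this file, while the allocator-backed case follows the pattern used by `streamReadAlloc` in the new Reader.zig:

test "BufferedWriter modes (sketch)" {
    // Fixed: caller-provided storage; writes that do not fit return error.NoSpaceLeft.
    var buf: [256]u8 = undefined;
    var fixed: BufferedWriter = undefined;
    fixed.initFixed(&buf);
    try fixed.print("{s}!", .{"hi"});

    // Allocator-backed: `buffer` grows superlinearly instead of filling up.
    var growing: BufferedWriter = .{
        .buffer = .empty,
        .mode = .{ .allocator = testing.allocator },
    };
    defer growing.deinit();
    try growing.writeByte('x');

    // A `.writer` mode instance would instead flush overflow to an unbuffered Writer.
}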
|
||||
|
||||
/// Number of slices to store on the stack, when trying to send as many byte
|
||||
/// vectors through the underlying write calls as possible.
|
||||
@ -281,37 +294,44 @@ pub fn print(bw: *BufferedWriter, comptime format: []const u8, args: anytype) an
|
||||
}
|
||||
|
||||
pub fn writeByte(bw: *BufferedWriter, byte: u8) anyerror!void {
|
||||
const buffer = bw.buffer;
|
||||
const end = bw.end;
|
||||
if (end == buffer.len) {
|
||||
@branchHint(.unlikely);
|
||||
var buffers: [2][]const u8 = .{ buffer, &.{byte} };
|
||||
while (true) {
|
||||
const n = try bw.unbuffered_writer.writev(&buffers);
|
||||
if (n == 0) {
|
||||
@branchHint(.unlikely);
|
||||
continue;
|
||||
} else if (n >= buffer.len) {
|
||||
@branchHint(.likely);
|
||||
if (n > buffer.len) {
|
||||
@branchHint(.likely);
|
||||
bw.end = 0;
|
||||
return;
|
||||
} else {
|
||||
buffer[0] = byte;
|
||||
bw.end = 1;
|
||||
return;
|
||||
}
|
||||
}
|
||||
const remainder = buffer[n..];
|
||||
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
|
||||
buffer[remainder.len] = byte;
|
||||
bw.end = remainder.len + 1;
|
||||
return;
|
||||
}
|
||||
const list = &bw.buffer;
|
||||
const buffer = list.items;
|
||||
if (buffer.len < list.capacity) {
|
||||
@branchHint(.likely);
|
||||
buffer.ptr[buffer.len] = byte;
|
||||
list.items.len = buffer.len + 1;
|
||||
return;
|
||||
}
|
||||
switch (bw.mode) {
|
||||
.fixed => return error.NoSpaceLeft,
|
||||
.writer => |w| {
|
||||
var buffers: [2][]const u8 = .{ buffer, &.{byte} };
|
||||
while (true) {
|
||||
const n = try w.writev(&buffers);
|
||||
if (n == 0) {
|
||||
@branchHint(.unlikely);
|
||||
continue;
|
||||
} else if (n >= buffer.len) {
|
||||
@branchHint(.likely);
|
||||
if (n > buffer.len) {
|
||||
@branchHint(.likely);
|
||||
list.items.len = 0;
|
||||
return;
|
||||
} else {
|
||||
buffer[0] = byte;
|
||||
list.items.len = 1;
|
||||
return;
|
||||
}
|
||||
}
|
||||
const remainder = buffer[n..];
|
||||
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
|
||||
buffer[remainder.len] = byte;
|
||||
list.items.len = remainder.len + 1;
|
||||
return;
|
||||
}
|
||||
},
|
||||
.allocator => |gpa| try list.append(gpa, byte),
|
||||
}
|
||||
buffer[end] = byte;
|
||||
bw.end = end + 1;
|
||||
}
|
||||
|
||||
/// Writes the same byte many times, performing the underlying write call as
|
||||
@ -1553,3 +1573,45 @@ test "bytes.hex" {
|
||||
const bytes_with_zeros = "\x00\x0E\xBA\xBE";
|
||||
try std.testing.expectFmt("lowercase: 000ebabe\n", "lowercase: {x}\n", .{bytes_with_zeros});
|
||||
}
|
||||
|
||||
test initFixed {
|
||||
{
|
||||
var buf: [255]u8 = undefined;
|
||||
var bw: BufferedWriter = undefined;
|
||||
bw.initFixed(&buf);
|
||||
try bw.print("{s}{s}!", .{ "Hello", "World" });
|
||||
try testing.expectEqualStrings("HelloWorld!", bw.getWritten());
|
||||
}
|
||||
|
||||
comptime {
|
||||
var buf: [255]u8 = undefined;
|
||||
var bw: BufferedWriter = undefined;
|
||||
bw.initFixed(&buf);
|
||||
try bw.print("{s}{s}!", .{ "Hello", "World" });
|
||||
try testing.expectEqualStrings("HelloWorld!", bw.getWritten());
|
||||
}
|
||||
}
|
||||
|
||||
test "fixed output" {
|
||||
var buffer: [10]u8 = undefined;
|
||||
var bw: BufferedWriter = undefined;
|
||||
bw.initFixed(&buffer);
|
||||
|
||||
try bw.writeAll("Hello");
|
||||
try testing.expect(std.mem.eql(u8, bw.getWritten(), "Hello"));
|
||||
|
||||
try bw.writeAll("world");
|
||||
try testing.expect(std.mem.eql(u8, bw.getWritten(), "Helloworld"));
|
||||
|
||||
try testing.expectError(error.NoSpaceLeft, bw.writeAll("!"));
|
||||
try testing.expect(std.mem.eql(u8, bw.getWritten(), "Helloworld"));
|
||||
|
||||
bw.reset();
|
||||
try testing.expect(bw.getWritten().len == 0);
|
||||
|
||||
try testing.expectError(error.NoSpaceLeft, bw.writeAll("Hello world!"));
|
||||
try testing.expect(std.mem.eql(u8, bw.getWritten(), "Hello worl"));
|
||||
|
||||
try bw.seekTo((try bw.getEndPos()) + 1);
|
||||
try testing.expectError(error.NoSpaceLeft, bw.writeAll("H"));
|
||||
}
|
||||
|
||||
lib/std/io/CountingReader.zig (new file, 29 lines)
@@ -0,0 +1,29 @@
//! A Reader that counts how many bytes have been read from it.

const std = @import("../std.zig");
const CountingReader = @This();

child_reader: std.io.Reader,
bytes_read: u64 = 0,

pub fn read(self: *@This(), buf: []u8) anyerror!usize {
    const amt = try self.child_reader.read(buf);
    self.bytes_read += amt;
    return amt;
}

pub fn reader(self: *@This()) std.io.Reader {
    return .{ .context = self };
}

test CountingReader {
    const bytes = "yay" ** 20;
    var fbs: std.io.BufferedReader = undefined;
    fbs.initFixed(bytes);
    var counting_stream: CountingReader = .{ .child_reader = fbs.reader() };
    var stream = counting_stream.reader().unbuffered();
    while (stream.readByte()) |_| {} else |err| {
        try std.testing.expectError(error.EndOfStream, err);
    }
    try std.testing.expect(counting_stream.bytes_read == bytes.len);
}
|
||||
@ -1,148 +0,0 @@
|
||||
//! This turns a const byte buffer into an `io.Reader`, or `io.SeekableStream`.
|
||||
|
||||
const std = @import("../std.zig");
|
||||
const io = std.io;
|
||||
const testing = std.testing;
|
||||
const mem = std.mem;
|
||||
const assert = std.debug.assert;
|
||||
const FixedBufferStream = @This();
|
||||
|
||||
buffer: []const u8,
|
||||
pos: usize = 0,
|
||||
|
||||
pub const ReadError = error{};
|
||||
pub const SeekError = error{};
|
||||
pub const GetSeekPosError = error{};
|
||||
|
||||
pub const Reader = io.Reader(*Self, ReadError, read);
|
||||
|
||||
pub const SeekableStream = io.SeekableStream(
|
||||
*Self,
|
||||
SeekError,
|
||||
GetSeekPosError,
|
||||
seekTo,
|
||||
seekBy,
|
||||
getPos,
|
||||
getEndPos,
|
||||
);
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn reader(self: *Self) Reader {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
pub fn seekableStream(self: *Self) SeekableStream {
|
||||
return .{ .context = self };
|
||||
}
|
||||
|
||||
pub fn read(self: *Self, dest: []u8) ReadError!usize {
|
||||
const size = @min(dest.len, self.buffer.len - self.pos);
|
||||
const end = self.pos + size;
|
||||
|
||||
@memcpy(dest[0..size], self.buffer[self.pos..end]);
|
||||
self.pos = end;
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
pub fn seekTo(self: *Self, pos: u64) SeekError!void {
|
||||
self.pos = @min(std.math.lossyCast(usize, pos), self.buffer.len);
|
||||
}
|
||||
|
||||
pub fn seekBy(self: *Self, amt: i64) SeekError!void {
|
||||
if (amt < 0) {
|
||||
const abs_amt = @abs(amt);
|
||||
const abs_amt_usize = std.math.cast(usize, abs_amt) orelse std.math.maxInt(usize);
|
||||
if (abs_amt_usize > self.pos) {
|
||||
self.pos = 0;
|
||||
} else {
|
||||
self.pos -= abs_amt_usize;
|
||||
}
|
||||
} else {
|
||||
const amt_usize = std.math.cast(usize, amt) orelse std.math.maxInt(usize);
|
||||
const new_pos = std.math.add(usize, self.pos, amt_usize) catch std.math.maxInt(usize);
|
||||
self.pos = @min(self.buffer.len, new_pos);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn getEndPos(self: *Self) GetSeekPosError!u64 {
|
||||
return self.buffer.len;
|
||||
}
|
||||
|
||||
pub fn getPos(self: *Self) GetSeekPosError!u64 {
|
||||
return self.pos;
|
||||
}
|
||||
|
||||
pub fn getWritten(self: Self) []const u8 {
|
||||
return self.buffer[0..self.pos];
|
||||
}
|
||||
|
||||
pub fn reset(self: *Self) void {
|
||||
self.pos = 0;
|
||||
}
|
||||
|
||||
test "output" {
|
||||
var buf: [255]u8 = undefined;
|
||||
var fbs: FixedBufferStream = .{ .buffer = &buf };
|
||||
const stream = fbs.writer();
|
||||
|
||||
try stream.print("{s}{s}!", .{ "Hello", "World" });
|
||||
try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
|
||||
}
|
||||
|
||||
test "output at comptime" {
|
||||
comptime {
|
||||
var buf: [255]u8 = undefined;
|
||||
var fbs: FixedBufferStream = .{ .buffer = &buf };
|
||||
const stream = fbs.writer();
|
||||
|
||||
try stream.print("{s}{s}!", .{ "Hello", "World" });
|
||||
try testing.expectEqualSlices(u8, "HelloWorld!", fbs.getWritten());
|
||||
}
|
||||
}
|
||||
|
||||
test "output 2" {
|
||||
var buffer: [10]u8 = undefined;
|
||||
var fbs: FixedBufferStream = .{ .buffer = &buffer };
|
||||
|
||||
try fbs.writer().writeAll("Hello");
|
||||
try testing.expect(mem.eql(u8, fbs.getWritten(), "Hello"));
|
||||
|
||||
try fbs.writer().writeAll("world");
|
||||
try testing.expect(mem.eql(u8, fbs.getWritten(), "Helloworld"));
|
||||
|
||||
try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("!"));
|
||||
try testing.expect(mem.eql(u8, fbs.getWritten(), "Helloworld"));
|
||||
|
||||
fbs.reset();
|
||||
try testing.expect(fbs.getWritten().len == 0);
|
||||
|
||||
try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("Hello world!"));
|
||||
try testing.expect(mem.eql(u8, fbs.getWritten(), "Hello worl"));
|
||||
|
||||
try fbs.seekTo((try fbs.getEndPos()) + 1);
|
||||
try testing.expectError(error.NoSpaceLeft, fbs.writer().writeAll("H"));
|
||||
}
|
||||
|
||||
test "input" {
|
||||
const bytes = [_]u8{ 1, 2, 3, 4, 5, 6, 7 };
|
||||
var fbs: FixedBufferStream = .{ .buffer = &bytes };
|
||||
|
||||
var dest: [4]u8 = undefined;
|
||||
|
||||
var amt_read = try fbs.reader().read(&dest);
|
||||
try testing.expect(amt_read == 4);
|
||||
try testing.expect(mem.eql(u8, dest[0..4], bytes[0..4]));
|
||||
|
||||
amt_read = try fbs.reader().read(&dest);
|
||||
try testing.expect(amt_read == 3);
|
||||
try testing.expect(mem.eql(u8, dest[0..3], bytes[4..7]));
|
||||
|
||||
amt_read = try fbs.reader().read(&dest);
|
||||
try testing.expect(amt_read == 0);
|
||||
|
||||
try fbs.seekTo((try fbs.getEndPos()) + 1);
|
||||
amt_read = try fbs.reader().read(&dest);
|
||||
try testing.expect(amt_read == 0);
|
||||
}
|
||||
@ -1,292 +1,173 @@
|
||||
context: *const anyopaque,
|
||||
readFn: *const fn (context: *const anyopaque, buffer: []u8) anyerror!usize,
|
||||
const std = @import("../std.zig");
|
||||
const Reader = @This();
|
||||
const assert = std.debug.assert;
|
||||
|
||||
pub const Error = anyerror;
|
||||
context: *anyopaque,
|
||||
vtable: *const VTable,
|
||||
|
||||
/// Returns the number of bytes read. It may be less than buffer.len.
|
||||
/// If the number of bytes read is 0, it means end of stream.
|
||||
/// End of stream is not an error condition.
|
||||
pub fn read(self: Self, buffer: []u8) anyerror!usize {
|
||||
return self.readFn(self.context, buffer);
|
||||
}
|
||||
pub const VTable = struct {
|
||||
/// Writes bytes starting from `offset` to `bw`, or returns
|
||||
/// `error.Unseekable`, indicating `streamRead` should be used instead.
|
||||
///
|
||||
/// Returns the number of bytes written, which will be at minimum `0` and at
|
||||
/// most `limit`. The number of bytes read, including zero, does not
|
||||
/// indicate end of stream.
|
||||
///
|
||||
/// If the reader has an internal seek position, it is not mutated.
|
||||
///
|
||||
/// The implementation should do a maximum of one underlying read call.
|
||||
///
|
||||
/// If this is `null` it is equivalent to always returning
|
||||
/// `error.Unseekable`.
|
||||
seekRead: ?*const fn (ctx: *anyopaque, bw: *std.io.BufferedWriter, limit: Limit, offset: u64) anyerror!Status,
|
||||
|
||||
/// Returns the number of bytes read. If the number read is smaller than `buffer.len`, it
|
||||
/// means the stream reached the end. Reaching the end of a stream is not an error
|
||||
/// condition.
|
||||
pub fn readAll(self: Self, buffer: []u8) anyerror!usize {
|
||||
return readAtLeast(self, buffer, buffer.len);
|
||||
}
|
||||
/// Writes bytes from the internally tracked stream position to `bw`, or
|
||||
/// returns `error.Unstreamable`, indicating `seekRead` should be used
|
||||
/// instead.
|
||||
///
|
||||
/// Returns the number of bytes written, which will be at minimum `0` and at
|
||||
/// most `limit`. The number of bytes read, including zero, does not
|
||||
/// indicate end of stream.
|
||||
///
|
||||
/// If the reader has an internal seek position, it moves forward in accordance
|
||||
/// with the number of bytes returned from this function.
|
||||
///
|
||||
/// The implementation should do a maximum of one underlying read call.
|
||||
///
|
||||
/// If this is `null` it is equivalent to always returning
|
||||
/// `error.Unstreamable`.
|
||||
streamRead: ?*const fn (ctx: *anyopaque, bw: *std.io.BufferedWriter, limit: Limit) anyerror!Status,
|
||||
};
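A minimal sketch (not part of the diff) of implementing the new Reader vtable for an in-memory source, assumed to live next to Reader.zig so `Reader`, `Limit`, and `Status` are in scope; it leans on `Limit.min` and `BufferedWriter.writeSplat` exactly as the BufferedReader passthru functions elsewhere in this commit do, and `SliceSource` is a made-up type:

// Hypothetical: a Reader over a fixed byte slice, streaming-only (seekRead = null).
const SliceSource = struct {
    bytes: []const u8,
    pos: usize = 0,

    fn streamRead(ctx: *anyopaque, bw: *std.io.BufferedWriter, limit: Limit) anyerror!Status {
        const self: *SliceSource = @alignCast(@ptrCast(ctx));
        const remaining = self.bytes[self.pos..];
        const send = remaining[0..limit.min(remaining.len)];
        const n = try bw.writeSplat(send, 1);
        self.pos += n;
        return .{ .len = @intCast(n), .end = self.pos == self.bytes.len };
    }

    fn reader(self: *SliceSource) Reader {
        return .{
            .context = self,
            .vtable = &.{ .streamRead = streamRead, .seekRead = null },
        };
    }
};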
|
||||
|
||||
/// Returns the number of bytes read, calling the underlying read
|
||||
/// function the minimal number of times until the buffer has at least
|
||||
/// `len` bytes filled. If the number read is less than `len` it means
|
||||
/// the stream reached the end. Reaching the end of the stream is not
|
||||
/// an error condition.
|
||||
pub fn readAtLeast(self: Self, buffer: []u8, len: usize) anyerror!usize {
|
||||
assert(len <= buffer.len);
|
||||
var index: usize = 0;
|
||||
while (index < len) {
|
||||
const amt = try self.read(buffer[index..]);
|
||||
if (amt == 0) break;
|
||||
index += amt;
|
||||
pub const Len = @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(usize) - 1 } });
|
||||
|
||||
pub const Status = packed struct(usize) {
|
||||
/// Number of bytes that were written to `writer`.
|
||||
len: Len,
|
||||
/// Indicates end of stream.
|
||||
end: bool,
|
||||
};
|
||||
|
||||
pub const Limit = enum(usize) {
|
||||
none = std.math.maxInt(usize),
|
||||
_,
|
||||
};
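`Status` packs the byte count and the end-of-stream flag into a single `usize`, which is why `Len` gives up one bit, and `Limit.none` is encoded as `maxInt(usize)`. A small illustration (not part of the diff), with the round-trip being the point rather than any particular values:

test "Reader.Status packing (sketch)" {
    const st: Status = .{ .len = 4096, .end = true };
    // One usize round-trips both fields: the low bits carry `len`, the top bit `end`.
    const raw: usize = @bitCast(st);
    const back: Status = @bitCast(raw);
    try std.testing.expectEqual(@as(Len, 4096), back.len);
    try std.testing.expect(back.end);
    try std.testing.expectEqual(std.math.maxInt(usize), @intFromEnum(Limit.none));
}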
|
||||
|
||||
/// Returns total number of bytes written to `w`.
|
||||
pub fn readAll(r: Reader, w: *std.io.BufferedWriter) anyerror!usize {
|
||||
if (r.vtable.pread != null) {
|
||||
return seekReadAll(r, w) catch |err| switch (err) {
|
||||
error.Unseekable => {},
|
||||
else => return err,
|
||||
};
|
||||
}
|
||||
return index;
|
||||
return streamReadAll(r, w);
|
||||
}
|
||||
|
||||
/// If the number read would be smaller than `buf.len`, `error.EndOfStream` is returned instead.
|
||||
pub fn readNoEof(self: Self, buf: []u8) anyerror!void {
|
||||
const amt_read = try self.readAll(buf);
|
||||
if (amt_read < buf.len) return error.EndOfStream;
|
||||
}
|
||||
|
||||
/// Appends to the `std.ArrayList` contents by reading from the stream
|
||||
/// until end of stream is found.
|
||||
/// If the number of bytes appended would exceed `max_append_size`,
|
||||
/// `error.StreamTooLong` is returned
|
||||
/// and the `std.ArrayList` has exactly `max_append_size` bytes appended.
|
||||
pub fn readAllArrayList(
|
||||
self: Self,
|
||||
array_list: *std.ArrayList(u8),
|
||||
max_append_size: usize,
|
||||
) anyerror!void {
|
||||
return self.readAllArrayListAligned(null, array_list, max_append_size);
|
||||
}
|
||||
|
||||
pub fn readAllArrayListAligned(
|
||||
self: Self,
|
||||
comptime alignment: ?Alignment,
|
||||
array_list: *std.ArrayListAligned(u8, alignment),
|
||||
max_append_size: usize,
|
||||
) anyerror!void {
|
||||
try array_list.ensureTotalCapacity(@min(max_append_size, 4096));
|
||||
const original_len = array_list.items.len;
|
||||
var start_index: usize = original_len;
|
||||
/// Returns total number of bytes written to `w`.
|
||||
///
|
||||
/// May return `error.Unseekable`, indicating this function cannot be used to
|
||||
/// read from the reader.
|
||||
pub fn seekReadAll(r: Reader, w: *std.io.BufferedWriter, start_offset: u64) anyerror!usize {
|
||||
const vtable_seekRead = r.vtable.seekRead.?;
|
||||
var offset: u64 = start_offset;
|
||||
while (true) {
|
||||
array_list.expandToCapacity();
|
||||
const dest_slice = array_list.items[start_index..];
|
||||
const bytes_read = try self.readAll(dest_slice);
|
||||
start_index += bytes_read;
|
||||
const status = try vtable_seekRead(r.context, w, .none, offset);
|
||||
offset += status.len;
|
||||
if (status.end) return @intCast(offset - start_offset);
|
||||
}
|
||||
}
|
||||
|
||||
if (start_index - original_len > max_append_size) {
|
||||
array_list.shrinkAndFree(original_len + max_append_size);
|
||||
return error.StreamTooLong;
|
||||
}
|
||||
|
||||
if (bytes_read != dest_slice.len) {
|
||||
array_list.shrinkAndFree(start_index);
|
||||
return;
|
||||
}
|
||||
|
||||
// This will trigger ArrayList to expand superlinearly at whatever its growth rate is.
|
||||
try array_list.ensureTotalCapacity(start_index + 1);
|
||||
/// Returns total number of bytes written to `w`.
|
||||
pub fn streamReadAll(r: Reader, w: *std.io.BufferedWriter) anyerror!usize {
|
||||
const vtable_streamRead = r.vtable.streamRead.?;
|
||||
var offset: usize = 0;
|
||||
while (true) {
|
||||
const status = try vtable_streamRead(r.context, w, .none);
|
||||
offset += status.len;
|
||||
if (status.end) return offset;
|
||||
}
|
||||
}
|
||||
|
||||
/// Allocates enough memory to hold all the contents of the stream. If the allocated
|
||||
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
|
||||
/// Caller owns returned memory.
|
||||
/// If this function returns an error, the contents from the stream read so far are lost.
|
||||
pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) anyerror![]u8 {
|
||||
var array_list = std.ArrayList(u8).init(allocator);
|
||||
defer array_list.deinit();
|
||||
try self.readAllArrayList(&array_list, max_size);
|
||||
return try array_list.toOwnedSlice();
|
||||
}
|
||||
|
||||
/// Appends to `bw` contents by reading from the stream until `delimiter` is found.
|
||||
/// Does not write the delimiter itself.
|
||||
/// If `optional_max_size` is not null and amount of written bytes exceeds `optional_max_size`,
|
||||
/// returns `error.StreamTooLong` and finishes appending.
|
||||
/// If `optional_max_size` is null, appending is unbounded.
|
||||
pub fn streamUntilDelimiter(
|
||||
self: Self,
|
||||
bw: *std.io.BufferedWriter,
|
||||
delimiter: u8,
|
||||
optional_max_size: ?usize,
|
||||
) anyerror!void {
|
||||
if (optional_max_size) |max_size| {
|
||||
for (0..max_size) |_| {
|
||||
const byte: u8 = try self.readByte();
|
||||
if (byte == delimiter) return;
|
||||
try bw.writeByte(byte);
|
||||
}
|
||||
return error.StreamTooLong;
|
||||
} else {
|
||||
while (true) {
|
||||
const byte: u8 = try self.readByte();
|
||||
if (byte == delimiter) return;
|
||||
try bw.writeByte(byte);
|
||||
}
|
||||
// Can not throw `error.StreamTooLong` since there are no boundary.
|
||||
}
|
||||
}
|
||||
|
||||
/// Reads from the stream until specified byte is found, discarding all data,
|
||||
/// including the delimiter.
|
||||
/// If end-of-stream is found, this function succeeds.
|
||||
pub fn skipUntilDelimiterOrEof(self: Self, delimiter: u8) anyerror!void {
|
||||
while (true) {
|
||||
const byte = self.readByte() catch |err| switch (err) {
|
||||
error.EndOfStream => return,
|
||||
else => |e| return e,
|
||||
};
|
||||
if (byte == delimiter) return;
|
||||
}
|
||||
}
|
||||
|
||||
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
|
||||
pub fn readByte(self: Self) anyerror!u8 {
|
||||
var result: [1]u8 = undefined;
|
||||
const amt_read = try self.read(result[0..]);
|
||||
if (amt_read < 1) return error.EndOfStream;
|
||||
return result[0];
|
||||
}
|
||||
|
||||
/// Same as `readByte` except the returned byte is signed.
|
||||
pub fn readByteSigned(self: Self) anyerror!i8 {
|
||||
return @as(i8, @bitCast(try self.readByte()));
|
||||
}
|
||||
|
||||
/// Reads exactly `num_bytes` bytes and returns as an array.
|
||||
/// `num_bytes` must be comptime-known
|
||||
pub fn readBytesNoEof(self: Self, comptime num_bytes: usize) anyerror![num_bytes]u8 {
|
||||
var bytes: [num_bytes]u8 = undefined;
|
||||
try self.readNoEof(&bytes);
|
||||
return bytes;
|
||||
}
|
||||
|
||||
/// Reads bytes until `bounded.len` is equal to `num_bytes`,
|
||||
/// or the stream ends.
|
||||
///
|
||||
/// * it is assumed that `num_bytes` will not exceed `bounded.capacity()`
|
||||
pub fn readIntoBoundedBytes(
|
||||
self: Self,
|
||||
comptime num_bytes: usize,
|
||||
bounded: *std.BoundedArray(u8, num_bytes),
|
||||
) anyerror!void {
|
||||
while (bounded.len < num_bytes) {
|
||||
// get at most the number of bytes free in the bounded array
|
||||
const bytes_read = try self.read(bounded.unusedCapacitySlice());
|
||||
if (bytes_read == 0) return;
|
||||
/// Caller owns returned memory.
|
||||
///
|
||||
/// If this function returns an error, the contents from the stream read so far are lost.
|
||||
pub fn streamReadAlloc(r: Reader, gpa: std.mem.Allocator, max_size: usize) anyerror![]u8 {
|
||||
const vtable_streamRead = r.vtable.streamRead.?;
|
||||
|
||||
// bytes_read will never be larger than @TypeOf(bounded.len)
|
||||
// due to `self.read` being bounded by `bounded.unusedCapacitySlice()`
|
||||
bounded.len += @as(@TypeOf(bounded.len), @intCast(bytes_read));
|
||||
}
|
||||
}
|
||||
|
||||
/// Reads at most `num_bytes` and returns as a bounded array.
|
||||
pub fn readBoundedBytes(self: Self, comptime num_bytes: usize) anyerror!std.BoundedArray(u8, num_bytes) {
|
||||
var result = std.BoundedArray(u8, num_bytes){};
|
||||
try self.readIntoBoundedBytes(num_bytes, &result);
|
||||
return result;
|
||||
}
|
||||
|
||||
pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
|
||||
const bytes = try self.readBytesNoEof(@divExact(@typeInfo(T).int.bits, 8));
|
||||
return mem.readInt(T, &bytes, endian);
|
||||
}
|
||||
|
||||
pub fn readVarInt(
|
||||
self: Self,
|
||||
comptime ReturnType: type,
|
||||
endian: std.builtin.Endian,
|
||||
size: usize,
|
||||
) anyerror!ReturnType {
|
||||
assert(size <= @sizeOf(ReturnType));
|
||||
var bytes_buf: [@sizeOf(ReturnType)]u8 = undefined;
|
||||
const bytes = bytes_buf[0..size];
|
||||
try self.readNoEof(bytes);
|
||||
return mem.readVarInt(ReturnType, bytes, endian);
|
||||
}
|
||||
|
||||
/// Optional parameters for `skipBytes`
|
||||
pub const SkipBytesOptions = struct {
|
||||
buf_size: usize = 512,
|
||||
};
|
||||
|
||||
// `num_bytes` is a `u64` to match `off_t`
|
||||
/// Reads `num_bytes` bytes from the stream and discards them
|
||||
pub fn skipBytes(self: Self, num_bytes: u64, comptime options: SkipBytesOptions) anyerror!void {
|
||||
var buf: [options.buf_size]u8 = undefined;
|
||||
var remaining = num_bytes;
|
||||
|
||||
while (remaining > 0) {
|
||||
const amt = @min(remaining, options.buf_size);
|
||||
try self.readNoEof(buf[0..amt]);
|
||||
remaining -= amt;
|
||||
}
|
||||
}
|
||||
|
||||
/// Reads `slice.len` bytes from the stream and returns whether they are the same as the passed slice
pub fn isBytes(self: Self, slice: []const u8) anyerror!bool {
    var i: usize = 0;
    var matches = true;
    while (i < slice.len) : (i += 1) {
        if (slice[i] != try self.readByte()) {
            matches = false;
        }
    }
    return matches;
}

pub fn readStruct(self: Self, comptime T: type) anyerror!T {
    // Only extern and packed structs have defined in-memory layout.
    comptime assert(@typeInfo(T).@"struct".layout != .auto);
    var res: [1]T = undefined;
    try self.readNoEof(mem.sliceAsBytes(res[0..]));
    return res[0];
}

pub fn readStructEndian(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
    var res = try self.readStruct(T);
    if (native_endian != endian) {
        mem.byteSwapAllFields(T, &res);
    }
    return res;
}
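A small sketch of `readStructEndian` on a fixed-layout struct; `Header` is a made-up type used only for illustration:

    const Header = extern struct { magic: u32, count: u16 };
    // 8 bytes: @sizeOf(Header) includes 2 bytes of tail padding.
    var bytes = [_]u8{ 0x11, 0x22, 0x33, 0x44, 0x05, 0x00, 0x00, 0x00 };
    var fbs = std.io.fixedBufferStream(&bytes);
    // Fields are byte-swapped only when the requested endianness differs from native.
    const h = try fbs.reader().readStructEndian(Header, .little);
    try std.testing.expectEqual(@as(u32, 0x44332211), h.magic);
    try std.testing.expectEqual(@as(u16, 5), h.count);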
/// Reads an integer with the same size as the given enum's tag type. If the integer matches
/// an enum tag, casts the integer to the enum tag and returns it. Otherwise, returns `error.InvalidValue`.
/// TODO optimization taking advantage of most fields being in order
pub fn readEnum(self: Self, comptime Enum: type, endian: std.builtin.Endian) anyerror!Enum {
    const E = error{
        /// An integer was read, but it did not match any of the tags in the supplied enum.
        InvalidValue,
    var bw: std.io.BufferedWriter = .{
        .buffer = .empty,
        .mode = .{ .allocator = gpa },
    };
    const type_info = @typeInfo(Enum).@"enum";
    const tag = try self.readInt(type_info.tag_type, endian);
    const list = &bw.buffer;
    defer list.deinit(gpa);

    inline for (std.meta.fields(Enum)) |field| {
        if (tag == field.value) {
            return @field(Enum, field.name);
        }
    var remaining = max_size;
    while (remaining > 0) {
        const status = try vtable_streamRead(r.context, &bw, .init(remaining));
        if (status.end) return list.toOwnedSlice(gpa);
        remaining -= status.len;
    }

    return E.InvalidValue;
}
/// Reads the stream until the end, ignoring all the data.
/// Returns the number of bytes discarded.
pub fn discard(self: Self) anyerror!u64 {
    var trash: [4096]u8 = undefined;
    var index: u64 = 0;
    while (true) {
        const n = try self.read(&trash);
        if (n == 0) return index;
        index += n;
    }
pub fn discardAll(r: Reader) anyerror!usize {
    var bw = std.io.null_writer.unbuffered();
    return streamReadAll(r, &bw);
}
const std = @import("../std.zig");
const Self = @This();
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
const testing = std.testing;
const native_endian = @import("builtin").target.cpu.arch.endian();
const Alignment = std.mem.Alignment;

test {
    _ = @import("Reader/test.zig");
pub fn buffered(r: Reader, buffer: []u8) std.io.BufferedReader {
    return .{
        .reader = r,
        .buffered_writer = .{
            .buffer = buffer,
            .mode = .fixed,
        },
    };
}

pub fn allocating(r: Reader, gpa: std.mem.Allocator) std.io.BufferedReader {
    return .{
        .reader = r,
        .buffered_writer = .{
            .buffer = .empty,
            .mode = .{ .allocator = gpa },
        },
    };
}

pub fn unbuffered(r: Reader) std.io.BufferedReader {
    return buffered(r, &.{});
}
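As an illustrative sketch only: the adapter shape comes from the `buffered` function above, and the line-reading loop mirrors the `takeDelimiterConclusive` usage that appears in the `net.zig` hunks later in this diff; the `source: Reader` value is assumed:

    var line_buf: [512]u8 = undefined;
    // Wrap a Reader in a fixed-size buffered adapter.
    var br = source.buffered(&line_buf);
    while (br.takeDelimiterConclusive('\n')) |line| {
        // Handle one line at a time; `line` points into `line_buf`.
        _ = line;
    } else |err| return err;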
test "when the backing reader provides one byte at a time" {
|
||||
const OneByteReader = struct {
|
||||
str: []const u8,
|
||||
curr: usize,
|
||||
|
||||
fn read(self: *@This(), dest: []u8) anyerror!usize {
|
||||
if (self.str.len <= self.curr or dest.len == 0)
|
||||
return 0;
|
||||
|
||||
dest[0] = self.str[self.curr];
|
||||
self.curr += 1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
fn reader(self: *@This()) std.io.Reader {
|
||||
return .{
|
||||
.context = self,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
const str = "This is a test";
|
||||
var one_byte_stream: OneByteReader = .init(str);
|
||||
const res = try one_byte_stream.reader().streamReadAlloc(std.testing.allocator, str.len + 1);
|
||||
defer std.testing.allocator.free(res);
|
||||
try std.testing.expectEqualStrings(str, res);
|
||||
}
|
||||
|
||||
@ -1,372 +0,0 @@
const builtin = @import("builtin");
const std = @import("../../std.zig");
const testing = std.testing;

test "Reader" {
    var buf = "a\x02".*;
    var fis = std.io.fixedBufferStream(&buf);
    const reader = fis.reader();
    try testing.expect((try reader.readByte()) == 'a');
    try testing.expect((try reader.readEnum(enum(u8) {
        a = 0,
        b = 99,
        c = 2,
        d = 3,
    }, builtin.cpu.arch.endian())) == .c);
    try testing.expectError(error.EndOfStream, reader.readByte());
}

test "isBytes" {
    var fis = std.io.fixedBufferStream("foobar");
    const reader = fis.reader();
    try testing.expectEqual(true, try reader.isBytes("foo"));
    try testing.expectEqual(false, try reader.isBytes("qux"));
}

test "skipBytes" {
    var fis = std.io.fixedBufferStream("foobar");
    const reader = fis.reader();
    try reader.skipBytes(3, .{});
    try testing.expect(try reader.isBytes("bar"));
    try reader.skipBytes(0, .{});
    try testing.expectError(error.EndOfStream, reader.skipBytes(1, .{}));
}

test "readUntilDelimiterArrayList returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
    const a = std.testing.allocator;
    var list = std.ArrayList(u8).init(a);
    defer list.deinit();

    var fis = std.io.fixedBufferStream("0000\n1234\n");
    const reader = fis.reader();

    try reader.readUntilDelimiterArrayList(&list, '\n', 5);
    try std.testing.expectEqualStrings("0000", list.items);
    try reader.readUntilDelimiterArrayList(&list, '\n', 5);
    try std.testing.expectEqualStrings("1234", list.items);
    try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterArrayList(&list, '\n', 5));
}

test "readUntilDelimiterArrayList returns an empty ArrayList" {
    const a = std.testing.allocator;
    var list = std.ArrayList(u8).init(a);
    defer list.deinit();

    var fis = std.io.fixedBufferStream("\n");
    const reader = fis.reader();

    try reader.readUntilDelimiterArrayList(&list, '\n', 5);
    try std.testing.expectEqualStrings("", list.items);
}

test "readUntilDelimiterArrayList returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
    const a = std.testing.allocator;
    var list = std.ArrayList(u8).init(a);
    defer list.deinit();

    var fis = std.io.fixedBufferStream("1234567\n");
    const reader = fis.reader();

    try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterArrayList(&list, '\n', 5));
    try std.testing.expectEqualStrings("12345", list.items);
    try reader.readUntilDelimiterArrayList(&list, '\n', 5);
    try std.testing.expectEqualStrings("67", list.items);
}

test "readUntilDelimiterArrayList returns EndOfStream" {
    const a = std.testing.allocator;
    var list = std.ArrayList(u8).init(a);
    defer list.deinit();

    var fis = std.io.fixedBufferStream("1234");
    const reader = fis.reader();

    try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterArrayList(&list, '\n', 5));
    try std.testing.expectEqualStrings("1234", list.items);
}

test "readUntilDelimiterAlloc returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
    const a = std.testing.allocator;

    var fis = std.io.fixedBufferStream("0000\n1234\n");
    const reader = fis.reader();

    {
        const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
        defer a.free(result);
        try std.testing.expectEqualStrings("0000", result);
    }

    {
        const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
        defer a.free(result);
        try std.testing.expectEqualStrings("1234", result);
    }

    try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterAlloc(a, '\n', 5));
}

test "readUntilDelimiterAlloc returns an empty ArrayList" {
    const a = std.testing.allocator;

    var fis = std.io.fixedBufferStream("\n");
    const reader = fis.reader();

    {
        const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
        defer a.free(result);
        try std.testing.expectEqualStrings("", result);
    }
}

test "readUntilDelimiterAlloc returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
    const a = std.testing.allocator;

    var fis = std.io.fixedBufferStream("1234567\n");
    const reader = fis.reader();

    try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterAlloc(a, '\n', 5));

    const result = try reader.readUntilDelimiterAlloc(a, '\n', 5);
    defer a.free(result);
    try std.testing.expectEqualStrings("67", result);
}

test "readUntilDelimiterAlloc returns EndOfStream" {
    const a = std.testing.allocator;

    var fis = std.io.fixedBufferStream("1234");
    const reader = fis.reader();

    try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiterAlloc(a, '\n', 5));
}

test "readUntilDelimiter returns bytes read until the delimiter" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("0000\n1234\n");
    const reader = fis.reader();
    try std.testing.expectEqualStrings("0000", try reader.readUntilDelimiter(&buf, '\n'));
    try std.testing.expectEqualStrings("1234", try reader.readUntilDelimiter(&buf, '\n'));
}

test "readUntilDelimiter returns an empty string" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("\n");
    const reader = fis.reader();
    try std.testing.expectEqualStrings("", try reader.readUntilDelimiter(&buf, '\n'));
}

test "readUntilDelimiter returns StreamTooLong, then an empty string" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("12345\n");
    const reader = fis.reader();
    try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
    try std.testing.expectEqualStrings("", try reader.readUntilDelimiter(&buf, '\n'));
}

test "readUntilDelimiter returns StreamTooLong, then bytes read until the delimiter" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("1234567\n");
    const reader = fis.reader();
    try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
    try std.testing.expectEqualStrings("67", try reader.readUntilDelimiter(&buf, '\n'));
}

test "readUntilDelimiter returns EndOfStream" {
    {
        var buf: [5]u8 = undefined;
        var fis = std.io.fixedBufferStream("");
        const reader = fis.reader();
        try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
    }
    {
        var buf: [5]u8 = undefined;
        var fis = std.io.fixedBufferStream("1234");
        const reader = fis.reader();
        try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
    }
}

test "readUntilDelimiter returns bytes read until delimiter, then EndOfStream" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("1234\n");
    const reader = fis.reader();
    try std.testing.expectEqualStrings("1234", try reader.readUntilDelimiter(&buf, '\n'));
    try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
}

test "readUntilDelimiter returns StreamTooLong, then EndOfStream" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("12345");
    const reader = fis.reader();
    try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
    try std.testing.expectError(error.EndOfStream, reader.readUntilDelimiter(&buf, '\n'));
}

test "readUntilDelimiter writes all bytes read to the output buffer" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("0000\n12345");
    const reader = fis.reader();
    _ = try reader.readUntilDelimiter(&buf, '\n');
    try std.testing.expectEqualStrings("0000\n", &buf);
    try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiter(&buf, '\n'));
    try std.testing.expectEqualStrings("12345", &buf);
}

test "readUntilDelimiterOrEofAlloc returns ArrayLists with bytes read until the delimiter, then EndOfStream" {
    const a = std.testing.allocator;

    var fis = std.io.fixedBufferStream("0000\n1234\n");
    const reader = fis.reader();

    {
        const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
        defer a.free(result);
        try std.testing.expectEqualStrings("0000", result);
    }

    {
        const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
        defer a.free(result);
        try std.testing.expectEqualStrings("1234", result);
    }

    try std.testing.expect((try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)) == null);
}

test "readUntilDelimiterOrEofAlloc returns an empty ArrayList" {
    const a = std.testing.allocator;

    var fis = std.io.fixedBufferStream("\n");
    const reader = fis.reader();

    {
        const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
        defer a.free(result);
        try std.testing.expectEqualStrings("", result);
    }
}

test "readUntilDelimiterOrEofAlloc returns StreamTooLong, then an ArrayList with bytes read until the delimiter" {
    const a = std.testing.allocator;

    var fis = std.io.fixedBufferStream("1234567\n");
    const reader = fis.reader();

    try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEofAlloc(a, '\n', 5));

    const result = (try reader.readUntilDelimiterOrEofAlloc(a, '\n', 5)).?;
    defer a.free(result);
    try std.testing.expectEqualStrings("67", result);
}

test "readUntilDelimiterOrEof returns bytes read until the delimiter" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("0000\n1234\n");
    const reader = fis.reader();
    try std.testing.expectEqualStrings("0000", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
    try std.testing.expectEqualStrings("1234", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
}

test "readUntilDelimiterOrEof returns an empty string" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("\n");
    const reader = fis.reader();
    try std.testing.expectEqualStrings("", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
}

test "readUntilDelimiterOrEof returns StreamTooLong, then an empty string" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("12345\n");
    const reader = fis.reader();
    try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
    try std.testing.expectEqualStrings("", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
}

test "readUntilDelimiterOrEof returns StreamTooLong, then bytes read until the delimiter" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("1234567\n");
    const reader = fis.reader();
    try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
    try std.testing.expectEqualStrings("67", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
}

test "readUntilDelimiterOrEof returns null" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("");
    const reader = fis.reader();
    try std.testing.expect((try reader.readUntilDelimiterOrEof(&buf, '\n')) == null);
}

test "readUntilDelimiterOrEof returns bytes read until delimiter, then null" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("1234\n");
    const reader = fis.reader();
    try std.testing.expectEqualStrings("1234", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
    try std.testing.expect((try reader.readUntilDelimiterOrEof(&buf, '\n')) == null);
}

test "readUntilDelimiterOrEof returns bytes read until end-of-stream" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("1234");
    const reader = fis.reader();
    try std.testing.expectEqualStrings("1234", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
}

test "readUntilDelimiterOrEof returns StreamTooLong, then bytes read until end-of-stream" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("1234567");
    const reader = fis.reader();
    try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
    try std.testing.expectEqualStrings("67", (try reader.readUntilDelimiterOrEof(&buf, '\n')).?);
}

test "readUntilDelimiterOrEof writes all bytes read to the output buffer" {
    var buf: [5]u8 = undefined;
    var fis = std.io.fixedBufferStream("0000\n12345");
    const reader = fis.reader();
    _ = try reader.readUntilDelimiterOrEof(&buf, '\n');
    try std.testing.expectEqualStrings("0000\n", &buf);
    try std.testing.expectError(error.StreamTooLong, reader.readUntilDelimiterOrEof(&buf, '\n'));
    try std.testing.expectEqualStrings("12345", &buf);
}

test "streamUntilDelimiter writes all bytes without delimiter to the output" {
    const input_string = "some_string_with_delimiter!";
    var input_fbs = std.io.fixedBufferStream(input_string);
    const reader = input_fbs.reader();

    var output: [input_string.len]u8 = undefined;
    var output_fbs = std.io.fixedBufferStream(&output);
    const writer = output_fbs.writer();

    try reader.streamUntilDelimiter(writer, '!', input_fbs.buffer.len);
    try std.testing.expectEqualStrings("some_string_with_delimiter", output_fbs.getWritten());
    try std.testing.expectError(error.EndOfStream, reader.streamUntilDelimiter(writer, '!', input_fbs.buffer.len));

    input_fbs.reset();
    output_fbs.reset();

    try std.testing.expectError(error.StreamTooLong, reader.streamUntilDelimiter(writer, '!', 5));
}

test "readBoundedBytes correctly reads into a new bounded array" {
    const test_string = "abcdefg";
    var fis = std.io.fixedBufferStream(test_string);
    const reader = fis.reader();

    var array = try reader.readBoundedBytes(10000);
    try testing.expectEqualStrings(array.slice(), test_string);
}
test "readIntoBoundedBytes correctly reads into a provided bounded array" {
|
||||
const test_string = "abcdefg";
|
||||
var fis = std.io.fixedBufferStream(test_string);
|
||||
const reader = fis.reader();
|
||||
|
||||
var bounded_array = std.BoundedArray(u8, 10000){};
|
||||
|
||||
// compile time error if the size is not the same at the provided `bounded.capacity()`
|
||||
try reader.readIntoBoundedBytes(10000, &bounded_array);
|
||||
try testing.expectEqualStrings(bounded_array.slice(), test_string);
|
||||
}
|
||||
@ -17,7 +17,7 @@ pub const VTable = struct {
    /// Number of bytes returned may be zero, which does not mean
    /// end-of-stream. A subsequent call may return nonzero, or may signal end
    /// of stream via an error.
    writeSplat: *const fn (context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize,
    writeSplat: *const fn (ctx: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize,

    /// Writes contents from an open file. `headers` are written first, then `len`
    /// bytes of `file` starting from `offset`, then `trailers`.
@ -29,9 +29,9 @@
    /// end-of-stream. A subsequent call may return nonzero, or may signal end
    /// of stream via an error.
    writeFile: *const fn (
        context: *anyopaque,
        ctx: *anyopaque,
        file: std.fs.File,
        offset: u64,
        offset: Offset,
        /// When zero, it means copy until the end of the file is reached.
        len: FileLen,
        /// Headers and trailers must be passed together so that in case `len` is
@ -39,22 +39,33 @@
        headers_and_trailers: []const []const u8,
        headers_len: usize,
    ) anyerror!usize,
};

pub const FileLen = enum(u64) {
    zero = 0,
    entire_file = std.math.maxInt(u64),
    _,
pub const Offset = enum(u64) {
    none = std.math.maxInt(u64),
    _,

    pub fn init(integer: u64) FileLen {
        const result: FileLen = @enumFromInt(integer);
        assert(result != .entire_file);
        return result;
    }
    pub fn init(integer: u64) Offset {
        const result: Offset = @enumFromInt(integer);
        assert(result != .none);
        return result;
    }
};

    pub fn int(len: FileLen) u64 {
        return @intFromEnum(len);
    }
};
pub const FileLen = enum(u64) {
    zero = 0,
    entire_file = std.math.maxInt(u64),
    _,

    pub fn init(integer: u64) FileLen {
        const result: FileLen = @enumFromInt(integer);
        assert(result != .entire_file);
        return result;
    }

    pub fn int(len: FileLen) u64 {
        return @intFromEnum(len);
    }
};
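A minimal sketch of constructing these tagged integers; the values are hypothetical and the declarations are assumed to be reachable through the surrounding `Writer` namespace:

    // A real byte offset versus the "no offset" sentinel.
    const from_start: Offset = .init(128);
    const stream_from_here: Offset = .none;
    // A bounded length versus the "copy the whole file" sentinel.
    const four_kib: FileLen = .init(4096);
    const whole_file: FileLen = .entire_file;
    assert(four_kib.int() == 4096);
    _ = .{ from_start, stream_from_here, whole_file };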
pub fn writev(w: Writer, data: []const []const u8) anyerror!usize {
@ -93,34 +104,13 @@ pub fn unimplemented_writeFile(
    return error.Unimplemented;
}

pub fn write(w: Writer, bytes: []const u8) anyerror!usize {
    const single: [1][]const u8 = .{bytes};
    return w.vtable.writeSplat(w.context, &single, 1);
}

pub fn writeAll(w: Writer, bytes: []const u8) anyerror!void {
    var index: usize = 0;
    while (index < bytes.len) index += try w.vtable.writeSplat(w.context, &.{bytes[index..]}, 1);
}

/// The `data` parameter is mutable because this function needs to mutate the
/// fields in order to handle partial writes from `VTable.writev`.
pub fn writevAll(w: Writer, data: [][]const u8) anyerror!void {
    var i: usize = 0;
    while (true) {
        var n = try w.vtable.writeSplat(w.context, data[i..], 1);
        while (n >= data[i].len) {
            n -= data[i].len;
            i += 1;
            if (i >= data.len) return;
        }
        data[i] = data[i][n..];
    }
pub fn buffered(w: Writer, buffer: []u8) std.io.BufferedWriter {
    return .{
        .buffer = .initBuffer(buffer),
        .mode = .{ .writer = w },
    };
}

pub fn unbuffered(w: Writer) std.io.BufferedWriter {
    return .{
        .buffer = &.{},
        .unbuffered_writer = w,
    };
    return buffered(w, &.{});
}
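A short usage sketch for `writevAll` as declared above, assuming some `w: Writer` is in scope; the vector array must be mutable because partial writes advance the slices in place:

    // Three slices submitted as one gather write; retried until fully written.
    var vecs = [_][]const u8{ "Hello, ", "World", "!\n" };
    try w.writevAll(&vecs);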
@ -1,201 +0,0 @@
const std = @import("../std.zig");
const io = std.io;
const mem = std.mem;
const assert = std.debug.assert;
const testing = std.testing;

pub fn BufferedReader(comptime buffer_size: usize, comptime ReaderType: type) type {
    return struct {
        unbuffered_reader: ReaderType,
        buf: [buffer_size]u8 = undefined,
        start: usize = 0,
        end: usize = 0,

        pub const Error = ReaderType.Error;
        pub const Reader = io.Reader(*Self, Error, read);

        const Self = @This();

        pub fn read(self: *Self, dest: []u8) Error!usize {
            // First try reading from the already buffered data onto the destination.
            const current = self.buf[self.start..self.end];
            if (current.len != 0) {
                const to_transfer = @min(current.len, dest.len);
                @memcpy(dest[0..to_transfer], current[0..to_transfer]);
                self.start += to_transfer;
                return to_transfer;
            }

            // If dest is large, read from the unbuffered reader directly into the destination.
            if (dest.len >= buffer_size) {
                return self.unbuffered_reader.read(dest);
            }

            // If dest is small, read from the unbuffered reader into our own internal buffer,
            // and then transfer to destination.
            self.end = try self.unbuffered_reader.read(&self.buf);
            const to_transfer = @min(self.end, dest.len);
            @memcpy(dest[0..to_transfer], self.buf[0..to_transfer]);
            self.start = to_transfer;
            return to_transfer;
        }

        pub fn reader(self: *Self) Reader {
            return .{ .context = self };
        }
    };
}

pub fn bufferedReader(reader: anytype) BufferedReader(4096, @TypeOf(reader)) {
    return .{ .unbuffered_reader = reader };
}

pub fn bufferedReaderSize(comptime size: usize, reader: anytype) BufferedReader(size, @TypeOf(reader)) {
    return .{ .unbuffered_reader = reader };
}

test "OneByte" {
    const OneByteReadReader = struct {
        str: []const u8,
        curr: usize,

        const Error = error{NoError};
        const Self = @This();
        const Reader = io.Reader(*Self, Error, read);

        fn init(str: []const u8) Self {
            return Self{
                .str = str,
                .curr = 0,
            };
        }

        fn read(self: *Self, dest: []u8) Error!usize {
            if (self.str.len <= self.curr or dest.len == 0)
                return 0;

            dest[0] = self.str[self.curr];
            self.curr += 1;
            return 1;
        }

        fn reader(self: *Self) Reader {
            return .{ .context = self };
        }
    };

    const str = "This is a test";
    var one_byte_stream = OneByteReadReader.init(str);
    var buf_reader = bufferedReader(one_byte_stream.reader());
    const stream = buf_reader.reader();

    const res = try stream.readAllAlloc(testing.allocator, str.len + 1);
    defer testing.allocator.free(res);
    try testing.expectEqualSlices(u8, str, res);
}

fn smallBufferedReader(underlying_stream: anytype) BufferedReader(8, @TypeOf(underlying_stream)) {
    return .{ .unbuffered_reader = underlying_stream };
}
test "Block" {
    const BlockReader = struct {
        block: []const u8,
        reads_allowed: usize,
        curr_read: usize,

        const Error = error{NoError};
        const Self = @This();
        const Reader = io.Reader(*Self, Error, read);

        fn init(block: []const u8, reads_allowed: usize) Self {
            return Self{
                .block = block,
                .reads_allowed = reads_allowed,
                .curr_read = 0,
            };
        }

        fn read(self: *Self, dest: []u8) Error!usize {
            if (self.curr_read >= self.reads_allowed) return 0;
            @memcpy(dest[0..self.block.len], self.block);

            self.curr_read += 1;
            return self.block.len;
        }

        fn reader(self: *Self) Reader {
            return .{ .context = self };
        }
    };

    const block = "0123";

    // len out == block
    {
        var test_buf_reader: BufferedReader(4, BlockReader) = .{
            .unbuffered_reader = BlockReader.init(block, 2),
        };
        const reader = test_buf_reader.reader();
        var out_buf: [4]u8 = undefined;
        _ = try reader.readAll(&out_buf);
        try testing.expectEqualSlices(u8, &out_buf, block);
        _ = try reader.readAll(&out_buf);
        try testing.expectEqualSlices(u8, &out_buf, block);
        try testing.expectEqual(try reader.readAll(&out_buf), 0);
    }

    // len out < block
    {
        var test_buf_reader: BufferedReader(4, BlockReader) = .{
            .unbuffered_reader = BlockReader.init(block, 2),
        };
        const reader = test_buf_reader.reader();
        var out_buf: [3]u8 = undefined;
        _ = try reader.readAll(&out_buf);
        try testing.expectEqualSlices(u8, &out_buf, "012");
        _ = try reader.readAll(&out_buf);
        try testing.expectEqualSlices(u8, &out_buf, "301");
        const n = try reader.readAll(&out_buf);
        try testing.expectEqualSlices(u8, out_buf[0..n], "23");
        try testing.expectEqual(try reader.readAll(&out_buf), 0);
    }

    // len out > block
    {
        var test_buf_reader: BufferedReader(4, BlockReader) = .{
            .unbuffered_reader = BlockReader.init(block, 2),
        };
        const reader = test_buf_reader.reader();
        var out_buf: [5]u8 = undefined;
        _ = try reader.readAll(&out_buf);
        try testing.expectEqualSlices(u8, &out_buf, "01230");
        const n = try reader.readAll(&out_buf);
        try testing.expectEqualSlices(u8, out_buf[0..n], "123");
        try testing.expectEqual(try reader.readAll(&out_buf), 0);
    }

    // len out == 0
    {
        var test_buf_reader: BufferedReader(4, BlockReader) = .{
            .unbuffered_reader = BlockReader.init(block, 2),
        };
        const reader = test_buf_reader.reader();
        var out_buf: [0]u8 = undefined;
        _ = try reader.readAll(&out_buf);
        try testing.expectEqualSlices(u8, &out_buf, "");
    }

    // len bufreader buf > block
    {
        var test_buf_reader: BufferedReader(5, BlockReader) = .{
            .unbuffered_reader = BlockReader.init(block, 2),
        };
        const reader = test_buf_reader.reader();
        var out_buf: [4]u8 = undefined;
        _ = try reader.readAll(&out_buf);
        try testing.expectEqualSlices(u8, &out_buf, block);
        _ = try reader.readAll(&out_buf);
        try testing.expectEqualSlices(u8, &out_buf, block);
        try testing.expectEqual(try reader.readAll(&out_buf), 0);
    }
}
@ -1,43 +0,0 @@
const std = @import("../std.zig");
const io = std.io;
const testing = std.testing;

/// A Reader that counts how many bytes have been read from it.
pub fn CountingReader(comptime ReaderType: anytype) type {
    return struct {
        child_reader: ReaderType,
        bytes_read: u64 = 0,

        pub const Error = ReaderType.Error;
        pub const Reader = io.Reader(*@This(), Error, read);

        pub fn read(self: *@This(), buf: []u8) Error!usize {
            const amt = try self.child_reader.read(buf);
            self.bytes_read += amt;
            return amt;
        }

        pub fn reader(self: *@This()) Reader {
            return .{ .context = self };
        }
    };
}

pub fn countingReader(reader: anytype) CountingReader(@TypeOf(reader)) {
    return .{ .child_reader = reader };
}

test CountingReader {
    const bytes = "yay" ** 100;
    var fbs = io.fixedBufferStream(bytes);

    var counting_stream = countingReader(fbs.reader());
    const stream = counting_stream.reader();

    // read and discard all bytes
    while (stream.readByte()) |_| {} else |err| {
        try testing.expect(err == error.EndOfStream);
    }

    try testing.expect(counting_stream.bytes_read == bytes.len);
}
@ -1,35 +0,0 @@
const std = @import("../std.zig");

pub fn SeekableStream(
    comptime Context: type,
    comptime SeekErrorType: type,
    comptime GetSeekPosErrorType: type,
    comptime seekToFn: fn (context: Context, pos: u64) SeekErrorType!void,
    comptime seekByFn: fn (context: Context, pos: i64) SeekErrorType!void,
    comptime getPosFn: fn (context: Context) GetSeekPosErrorType!u64,
    comptime getEndPosFn: fn (context: Context) GetSeekPosErrorType!u64,
) type {
    return struct {
        context: Context,

        const Self = @This();
        pub const SeekError = SeekErrorType;
        pub const GetSeekPosError = GetSeekPosErrorType;

        pub fn seekTo(self: Self, pos: u64) SeekError!void {
            return seekToFn(self.context, pos);
        }

        pub fn seekBy(self: Self, amt: i64) SeekError!void {
            return seekByFn(self.context, amt);
        }

        pub fn getEndPos(self: Self) GetSeekPosError!u64 {
            return getEndPosFn(self.context);
        }

        pub fn getPos(self: Self) GetSeekPosError!u64 {
            return getPosFn(self.context);
        }
    };
}
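For reference, a small sketch of how this removed interface was obtained from a fixed buffer stream; the helper names are those of the old `fixedBufferStream` API, not part of this hunk:

    var fbs = std.io.fixedBufferStream("0123456789");
    const ss = fbs.seekableStream();
    // Move the cursor and query position and end position through the interface.
    try ss.seekTo(4);
    try std.testing.expectEqual(@as(u64, 4), try ss.getPos());
    try std.testing.expectEqual(@as(u64, 10), try ss.getEndPos());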
@ -1,127 +0,0 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const io = std.io;

/// Provides `io.Reader`, `io.Writer`, and `io.SeekableStream` for in-memory buffers as
/// well as files.
/// For memory sources, if the supplied byte buffer is const, then `io.Writer` is not available.
/// The error set of the stream functions is the error set of the corresponding file functions.
pub const StreamSource = union(enum) {
    // TODO: expose UEFI files to std.os in a way that allows this to be true
    const has_file = (builtin.os.tag != .freestanding and builtin.os.tag != .uefi);

    /// The stream access is redirected to this buffer.
    buffer: io.FixedBufferStream([]u8),

    /// The stream access is redirected to this buffer.
    /// Writing to the source will always yield `error.AccessDenied`.
    const_buffer: io.FixedBufferStream([]const u8),

    /// The stream access is redirected to this file.
    /// On freestanding, this must never be initialized!
    file: if (has_file) std.fs.File else void,

    pub const ReadError = io.FixedBufferStream([]u8).ReadError || (if (has_file) std.fs.File.ReadError else error{});
    pub const WriteError = error{AccessDenied} || io.FixedBufferStream([]u8).WriteError || (if (has_file) std.fs.File.WriteError else error{});
    pub const SeekError = io.FixedBufferStream([]u8).SeekError || (if (has_file) std.fs.File.SeekError else error{});
    pub const GetSeekPosError = io.FixedBufferStream([]u8).GetSeekPosError || (if (has_file) std.fs.File.GetSeekPosError else error{});

    pub const Reader = io.Reader(*StreamSource, ReadError, read);
    pub const Writer = io.Writer(*StreamSource, WriteError, write);
    pub const SeekableStream = io.SeekableStream(
        *StreamSource,
        SeekError,
        GetSeekPosError,
        seekTo,
        seekBy,
        getPos,
        getEndPos,
    );

    pub fn read(self: *StreamSource, dest: []u8) ReadError!usize {
        switch (self.*) {
            .buffer => |*x| return x.read(dest),
            .const_buffer => |*x| return x.read(dest),
            .file => |x| if (!has_file) unreachable else return x.read(dest),
        }
    }

    pub fn write(self: *StreamSource, bytes: []const u8) WriteError!usize {
        switch (self.*) {
            .buffer => |*x| return x.write(bytes),
            .const_buffer => return error.AccessDenied,
            .file => |x| if (!has_file) unreachable else return x.write(bytes),
        }
    }

    pub fn seekTo(self: *StreamSource, pos: u64) SeekError!void {
        switch (self.*) {
            .buffer => |*x| return x.seekTo(pos),
            .const_buffer => |*x| return x.seekTo(pos),
            .file => |x| if (!has_file) unreachable else return x.seekTo(pos),
        }
    }

    pub fn seekBy(self: *StreamSource, amt: i64) SeekError!void {
        switch (self.*) {
            .buffer => |*x| return x.seekBy(amt),
            .const_buffer => |*x| return x.seekBy(amt),
            .file => |x| if (!has_file) unreachable else return x.seekBy(amt),
        }
    }

    pub fn getEndPos(self: *StreamSource) GetSeekPosError!u64 {
        switch (self.*) {
            .buffer => |*x| return x.getEndPos(),
            .const_buffer => |*x| return x.getEndPos(),
            .file => |x| if (!has_file) unreachable else return x.getEndPos(),
        }
    }

    pub fn getPos(self: *StreamSource) GetSeekPosError!u64 {
        switch (self.*) {
            .buffer => |*x| return x.getPos(),
            .const_buffer => |*x| return x.getPos(),
            .file => |x| if (!has_file) unreachable else return x.getPos(),
        }
    }

    pub fn reader(self: *StreamSource) Reader {
        return .{ .context = self };
    }

    pub fn writer(self: *StreamSource) Writer {
        return .{ .context = self };
    }

    pub fn seekableStream(self: *StreamSource) SeekableStream {
        return .{ .context = self };
    }
};

test "refs" {
    std.testing.refAllDecls(StreamSource);
}

test "mutable buffer" {
    var buffer: [64]u8 = undefined;
    var source = StreamSource{ .buffer = std.io.fixedBufferStream(&buffer) };

    var writer = source.writer();

    try writer.writeAll("Hello, World!");

    try std.testing.expectEqualStrings("Hello, World!", source.buffer.getWritten());
}

test "const buffer" {
    const buffer: [64]u8 = "Hello, World!".* ++ ([1]u8{0xAA} ** 51);
    var source = StreamSource{ .const_buffer = std.io.fixedBufferStream(&buffer) };

    var reader = source.reader();

    var dst_buffer: [13]u8 = undefined;
    try reader.readNoEof(&dst_buffer);

    try std.testing.expectEqualStrings("Hello, World!", &dst_buffer);
}
@ -1356,25 +1356,9 @@ fn linuxLookupNameFromHosts(
    };
    defer file.close();

    var buffered_reader = std.io.bufferedReader(file.reader());
    const reader = buffered_reader.reader();
    // TODO: rework buffered reader so that we can use its buffer directly when searching for delimiters
    var line_buf: [512]u8 = undefined;
    var line_buf_writer: std.io.BufferedWriter = undefined;
    line_buf_writer.initFixed(&line_buf);
    while (true) {
        const line = if (reader.streamUntilDelimiter(&line_buf_writer, '\n', line_buf.len)) |_| l: {
            break :l line_buf_writer.getWritten();
        } else |err| switch (err) {
            error.EndOfStream => l: {
                if (line_buf_writer.getWritten().len == 0) break;
                // Skip to the delimiter in the reader, to fix parsing
                try reader.skipUntilDelimiterOrEof('\n');
                // Use the truncated line. A truncated comment or hostname will be handled correctly.
                break :l &line_buf;
            },
            else => |e| return e,
        };
    var br = file.reader().buffered(&line_buf);
    while (br.takeDelimiterConclusive('\n')) |line| {
        var split_it = mem.splitScalar(u8, line, '#');
        const no_comment_line = split_it.first();

@ -1406,7 +1390,7 @@ fn linuxLookupNameFromHosts(
                canon.items.len = 0;
                try canon.appendSlice(name_text);
            }
        }
    } else |err| return err;
}

pub fn isValidHostName(hostname: []const u8) bool {
@ -1543,7 +1527,7 @@ const ResolvConf = struct {
    }
};

/// Ignores lines longer than 512 bytes.
/// Returns `error.StreamTooLong` if a line is longer than 512 bytes.
/// TODO: https://github.com/ziglang/zig/issues/2765 and https://github.com/ziglang/zig/issues/2761
fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
    rc.* = ResolvConf{
@ -1564,30 +1548,14 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
    };
    defer file.close();

    var buf_reader = std.io.bufferedReader(file.reader());
    const stream = buf_reader.reader();
    // TODO: rework buffered reader so that we can use its buffer directly when searching for delimiters
    var line_buf: [512]u8 = undefined;
    var line_buf_writer: std.io.BufferedWriter = undefined;
    line_buf_writer.initFixed(&line_buf);
    while (true) {
        const line = if (stream.streamUntilDelimiter(&line_buf_writer, '\n', line_buf.len)) |_| l: {
            break :l line_buf_writer.getWritten();
        } else |err| switch (err) {
            error.EndOfStream => l: {
                if (line_buf_writer.getWritten().len == 0) break;
                // Skip to the delimiter in the reader, to fix parsing
                try stream.skipUntilDelimiterOrEof('\n');
                // Give an empty line to the while loop, which will be skipped.
                break :l line_buf[0..0];
            },
            else => |e| return e,
    var br = file.reader().buffered(&line_buf);
    while (br.takeDelimiterConclusive('\n')) |line_with_comment| {
        const line = line: {
            var split = mem.splitScalar(u8, line_with_comment, '#');
            break :line split.first();
        };
        const no_comment_line = no_comment_line: {
            var split = mem.splitScalar(u8, line, '#');
            break :no_comment_line split.first();
        };
        var line_it = mem.tokenizeAny(u8, no_comment_line, " \t");
        var line_it = mem.tokenizeAny(u8, line, " \t");

        const token = line_it.next() orelse continue;
        if (mem.eql(u8, token, "options")) {
@ -1615,7 +1583,7 @@ fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
                rc.search.items.len = 0;
                try rc.search.appendSlice(line_it.rest());
            }
        }
    } else |err| return err;

    if (rc.ns.items.len == 0) {
        return linuxLookupNameFromNumericUnspec(&rc.ns, "127.0.0.1", 53);