const builtin = @import("builtin");
const native_endian = builtin.target.cpu.arch.endian();

const std = @import("../std.zig");
const assert = std.debug.assert;
const testing = std.testing;
const BufferedWriter = std.io.BufferedWriter;
const Reader = std.io.Reader;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayListUnmanaged;

const BufferedReader = @This();

unbuffered_reader: Reader,
buffer: []u8,
/// In `buffer` before this are buffered bytes, after this is `undefined`.
end: usize,
/// Number of bytes which have been consumed from `buffer`.
seek: usize,

/// Constructs `br` such that it will read from `buffer` and then end.
///
/// Most methods do not require mutating `buffer`. Those that do are marked,
/// and if they are avoided then `buffer` can be safely used with `@constCast`.
pub fn initFixed(br: *BufferedReader, buffer: []u8) void {
    br.* = .{
        .unbuffered_reader = .ending,
        .buffer = buffer,
        .end = buffer.len,
        .seek = 0,
    };
}
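
// A minimal sketch of reading from const data: `initFixed` itself never
// mutates `buffer`, and this usage of `take`/`takeByte` only reads from it,
// so `@constCast` is safe here as described above.
test "initFixed with const data" {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("hi"));
    try testing.expectEqualStrings("hi", try br.take(2));
    try testing.expectError(error.EndOfStream, br.takeByte());
}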

pub fn bufferContents(br: *BufferedReader) []u8 {
    return br.buffer[br.seek..br.end];
}

pub fn bufferedLen(br: *const BufferedReader) usize {
    return br.end - br.seek;
}

/// Although `BufferedReader` can easily satisfy the `Reader` interface, it's
/// generally more practical to pass a `BufferedReader` instance itself around,
/// since it will result in fewer calls across vtable boundaries.
pub fn reader(br: *BufferedReader) Reader {
    return .{
        .context = br,
        .vtable = &.{
            .read = passthruRead,
            .readVec = passthruReadVec,
            .discard = passthruDiscard,
        },
    };
}
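
// A small sketch of the vtable adaptation: the same buffered data is served
// through the generic `Reader` interface, with `passthruReadVec` copying
// straight out of the buffer.
test reader {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("xyz"));
    const r = br.reader();
    var buf: [3]u8 = undefined;
    var bufs: [1][]u8 = .{&buf};
    try testing.expectEqual(3, try r.readVec(&bufs));
    try testing.expectEqualStrings("xyz", &buf);
}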

pub fn hashed(br: *BufferedReader, hasher: anytype) Reader.Hashed(@TypeOf(hasher)) {
    return .{ .in = br, .hasher = hasher };
}

/// Equivalent semantics to `std.io.Reader.VTable.readVec`.
pub fn readVec(br: *BufferedReader, data: []const []u8) Reader.Error!usize {
    return readVecLimit(br, data, .unlimited);
}

/// Equivalent semantics to `std.io.Reader.VTable.read`.
pub fn read(br: *BufferedReader, bw: *BufferedWriter, limit: Reader.Limit) Reader.RwError!usize {
    return passthruRead(br, bw, limit);
}

/// Equivalent semantics to `std.io.Reader.VTable.discard`.
pub fn discard(br: *BufferedReader, limit: Reader.Limit) Reader.Error!usize {
    return passthruDiscard(br, limit);
}

/// Reads until every buffer in `data` is filled completely, returning
/// `error.EndOfStream` if the stream ends first.
pub fn readVecAll(br: *BufferedReader, data: [][]u8) Reader.Error!void {
    var index: usize = 0;
    var truncate: usize = 0;
    while (index < data.len) {
        {
            const untruncated = data[index];
            data[index] = untruncated[truncate..];
            defer data[index] = untruncated;
            truncate += try br.readVec(data[index..]);
        }
        while (index < data.len and truncate >= data[index].len) {
            truncate -= data[index].len;
            index += 1;
        }
    }
}
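
// Sketch of `readVecAll`: every destination buffer is filled completely, in
// order, before the call returns.
test readVecAll {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abcdef"));
    var a: [2]u8 = undefined;
    var b: [4]u8 = undefined;
    var bufs: [2][]u8 = .{ &a, &b };
    try br.readVecAll(&bufs);
    try testing.expectEqualStrings("ab", &a);
    try testing.expectEqualStrings("cdef", &b);
}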

/// "Pump" data from the reader to the writer until `limit` is reached,
/// returning `error.EndOfStream` if the stream ends first.
pub fn readAll(br: *BufferedReader, bw: *BufferedWriter, limit: Reader.Limit) Reader.RwError!void {
    var remaining = limit;
    while (remaining.nonzero()) {
        const n = try br.read(bw, remaining);
        remaining = remaining.subtract(n).?;
    }
}

/// "Pump" data from the reader to the writer, handling `error.EndOfStream` as
/// a success case.
///
/// Returns total number of bytes written to `bw`.
pub fn readRemaining(br: *BufferedReader, bw: *BufferedWriter) Reader.RwRemainingError!usize {
    var offset: usize = 0;
    while (true) {
        offset += br.read(bw, .unlimited) catch |err| switch (err) {
            error.EndOfStream => return offset,
            else => |e| return e,
        };
    }
}

/// Equivalent to `readVec` but reads at most `limit` bytes.
pub fn readVecLimit(br: *BufferedReader, data: []const []u8, limit: Reader.Limit) Reader.Error!usize {
    assert(@intFromEnum(Reader.Limit.unlimited) == std.math.maxInt(usize));
    var remaining = @intFromEnum(limit);
    for (data, 0..) |buf, i| {
        const buffered = br.buffer[br.seek..br.end];
        const copy_len = @min(buffered.len, buf.len, remaining);
        @memcpy(buf[0..copy_len], buffered[0..copy_len]);
        br.seek += copy_len;
        remaining -= copy_len;
        if (remaining == 0) break;
        if (buf.len - copy_len == 0) continue;

        br.seek = 0;
        br.end = 0;
        var vecs: [8][]u8 = undefined; // Arbitrarily chosen value.
        const available_remaining_buf = buf[copy_len..];
        vecs[0] = available_remaining_buf[0..@min(available_remaining_buf.len, remaining)];
        const vec_start_remaining = remaining;
        remaining -= vecs[0].len;
        var vecs_i: usize = 1;
        var data_i: usize = i + 1;
        while (true) {
            if (vecs.len - vecs_i == 0) {
                const n = try br.unbuffered_reader.readVec(&vecs);
                return @intFromEnum(limit) - vec_start_remaining + n;
            }
            if (remaining == 0 or data.len - data_i == 0) {
                vecs[vecs_i] = br.buffer;
                vecs_i += 1;
                const n = try br.unbuffered_reader.readVec(vecs[0..vecs_i]);
                const cutoff = vec_start_remaining - remaining;
                if (n > cutoff) {
                    br.end = n - cutoff;
                    return @intFromEnum(limit) - remaining;
                } else {
                    return @intFromEnum(limit) - vec_start_remaining + n;
                }
            }
            if (data[data_i].len == 0) {
                data_i += 1;
                continue;
            }
            const data_elem = data[data_i];
            vecs[vecs_i] = data_elem[0..@min(data_elem.len, remaining)];
            remaining -= vecs[vecs_i].len;
            vecs_i += 1;
            data_i += 1;
        }
    }
    return @intFromEnum(limit) - remaining;
}
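
// Sketch of the limit behavior: at most `limit` bytes are transferred even
// when both the destination and the buffered data are larger.
test readVecLimit {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abcdef"));
    var buf: [6]u8 = undefined;
    var bufs: [1][]u8 = .{&buf};
    try testing.expectEqual(3, try br.readVecLimit(&bufs, .limited(3)));
    try testing.expectEqualStrings("abc", buf[0..3]);
    try testing.expectEqualStrings("def", try br.take(3));
}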

fn passthruRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Reader.Limit) Reader.RwError!usize {
    const br: *BufferedReader = @alignCast(@ptrCast(context));
    const buffer = limit.slice(br.buffer[br.seek..br.end]);
    if (buffer.len > 0) {
        const n = try bw.write(buffer);
        br.seek += n;
        return n;
    }
    return br.unbuffered_reader.read(bw, limit);
}

fn passthruDiscard(context: ?*anyopaque, limit: Reader.Limit) Reader.Error!usize {
    const br: *BufferedReader = @alignCast(@ptrCast(context));
    const buffered_len = br.end - br.seek;
    if (limit.toInt()) |n| {
        if (buffered_len >= n) {
            br.seek += n;
            return n;
        }
        br.seek = 0;
        br.end = 0;
        const additional = try br.unbuffered_reader.discard(.limited(n - buffered_len));
        return buffered_len + additional;
    }
    const n = try br.unbuffered_reader.discard(.unlimited);
    br.seek = 0;
    br.end = 0;
    return buffered_len + n;
}

fn passthruReadVec(context: ?*anyopaque, data: []const []u8) Reader.Error!usize {
    const br: *BufferedReader = @alignCast(@ptrCast(context));
    return readVecLimit(br, data, .unlimited);
}

/// Returns the next `n` bytes from `unbuffered_reader`, filling the buffer as
/// necessary.
///
/// Invalidates previously returned values from `peek`.
///
/// Asserts that the `BufferedReader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `take`
/// * `toss`
pub fn peek(br: *BufferedReader, n: usize) Reader.Error![]u8 {
    try br.fill(n);
    return br.buffer[br.seek..][0..n];
}

/// Returns all the next buffered bytes from `unbuffered_reader`, after filling
/// the buffer to ensure it contains at least `n` bytes.
///
/// Invalidates previously returned values from `peek` and `peekGreedy`.
///
/// Asserts that the `BufferedReader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `peek`
/// * `toss`
pub fn peekGreedy(br: *BufferedReader, n: usize) Reader.Error![]u8 {
    try br.fill(n);
    return br.buffer[br.seek..br.end];
}

/// Skips the next `n` bytes from the stream, advancing the seek position. This
/// is typically used after a successful `peek`, which guarantees that the
/// bytes are buffered.
///
/// Asserts that the number of bytes buffered is at least as many as `n`.
///
/// The "tossed" memory remains alive until a "peek" operation occurs.
///
/// See also:
/// * `peek`.
/// * `discard`.
pub fn toss(br: *BufferedReader, n: usize) void {
    br.seek += n;
    assert(br.seek <= br.end);
}

/// Equivalent to `peek` followed by `toss`.
///
/// The data returned is invalidated by the next call to `take`, `peek`,
/// `fill`, and functions with those prefixes.
pub fn take(br: *BufferedReader, n: usize) Reader.Error![]u8 {
    const result = try br.peek(n);
    br.toss(n);
    return result;
}

/// Returns the next `n` bytes from `unbuffered_reader` as an array, filling
/// the buffer as necessary and advancing the seek position `n` bytes.
///
/// Asserts that the `BufferedReader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `take`
pub fn takeArray(br: *BufferedReader, comptime n: usize) Reader.Error!*[n]u8 {
    return (try br.take(n))[0..n];
}

/// Returns the next `n` bytes from `unbuffered_reader` as an array, filling
/// the buffer as necessary, without advancing the seek position.
///
/// Asserts that the `BufferedReader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `peek`
/// * `takeArray`
pub fn peekArray(br: *BufferedReader, comptime n: usize) Reader.Error!*[n]u8 {
    return (try br.peek(n))[0..n];
}

/// Skips the next `n` bytes from the stream, advancing the seek position.
///
/// Unlike `toss` which is infallible, in this function `n` can be any amount.
///
/// Returns `error.EndOfStream` if fewer than `n` bytes could be discarded.
///
/// See also:
/// * `toss`
/// * `discardRemaining`
/// * `discardShort`
/// * `discard`
pub fn discardAll(br: *BufferedReader, n: usize) Reader.Error!void {
    if ((try br.discardShort(n)) != n) return error.EndOfStream;
}

pub fn discardAll64(br: *BufferedReader, n: u64) Reader.Error!void {
    var remaining: u64 = n;
    while (remaining > 0) {
        const limited = std.math.cast(usize, remaining) orelse std.math.maxInt(usize);
        try discardAll(br, limited);
        remaining -= limited;
    }
}

/// Skips the next `n` bytes from the stream, advancing the seek position.
///
/// Unlike `toss` which is infallible, in this function `n` can be any amount.
///
/// Returns the number of bytes discarded, which is less than `n` if and only
/// if the stream reached the end.
///
/// See also:
/// * `discardAll`
/// * `discardRemaining`
/// * `discard`
pub fn discardShort(br: *BufferedReader, n: usize) Reader.ShortError!usize {
    const proposed_seek = br.seek + n;
    if (proposed_seek <= br.end) {
        @branchHint(.likely);
        br.seek = proposed_seek;
        return n;
    }
    var remaining = n - (br.end - br.seek);
    br.end = 0;
    br.seek = 0;
    while (true) {
        const discard_len = br.unbuffered_reader.discard(.limited(remaining)) catch |err| switch (err) {
            error.EndOfStream => return n - remaining,
            error.ReadFailed => return error.ReadFailed,
        };
        remaining -= discard_len;
        if (remaining == 0) return n;
    }
}
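
// Sketch of the "short" contract: reaching the end of the stream is reported
// via the count rather than as an error (the underlying `ending` reader
// signals `error.EndOfStream` from `discard`, which is caught above).
test discardShort {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abc"));
    try testing.expectEqual(3, try br.discardShort(5));
    try testing.expectEqual(0, try br.discardShort(1));
}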

/// Reads the stream until the end, ignoring all the data.
/// Returns the number of bytes discarded.
pub fn discardRemaining(br: *BufferedReader) Reader.ShortError!usize {
    const buffered_len = br.end - br.seek;
    br.seek = 0;
    br.end = 0;
    return buffered_len + try br.unbuffered_reader.discardRemaining();
}

/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
/// the seek position.
///
/// Invalidates previously returned values from `peek`.
///
/// If the provided buffer cannot be filled completely, `error.EndOfStream` is
/// returned instead.
///
/// See also:
/// * `peek`
/// * `readSliceShort`
pub fn readSlice(br: *BufferedReader, buffer: []u8) Reader.Error!void {
    const n = try readSliceShort(br, buffer);
    if (n != buffer.len) return error.EndOfStream;
}

/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
/// the seek position.
///
/// Invalidates previously returned values from `peek`.
///
/// Returns the number of bytes read, which is less than `buffer.len` if and
/// only if the stream reached the end.
///
/// See also:
/// * `readSlice`
pub fn readSliceShort(br: *BufferedReader, buffer: []u8) Reader.ShortError!usize {
    const in_buffer = br.buffer[br.seek..br.end];
    const copy_len = @min(buffer.len, in_buffer.len);
    @memcpy(buffer[0..copy_len], in_buffer[0..copy_len]);
    if (buffer.len - copy_len == 0) {
        br.seek += copy_len;
        return buffer.len;
    }
    var i: usize = copy_len;
    br.end = 0;
    br.seek = 0;
    while (true) {
        const remaining = buffer[i..];
        const n = br.unbuffered_reader.readVec(&.{ remaining, br.buffer }) catch |err| switch (err) {
            error.EndOfStream => return i,
            error.ReadFailed => return error.ReadFailed,
        };
        if (n < remaining.len) {
            i += n;
            continue;
        }
        br.end = n - remaining.len;
        return buffer.len;
    }
}

/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
/// the seek position.
///
/// Invalidates previously returned values from `peek`.
///
/// If the provided buffer cannot be filled completely, `error.EndOfStream` is
/// returned instead.
///
/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
/// when `endian` is comptime-known and matches the host endianness.
///
/// See also:
/// * `readSlice`
/// * `readSliceEndianAlloc`
pub inline fn readSliceEndian(
    br: *BufferedReader,
    comptime Elem: type,
    buffer: []Elem,
    endian: std.builtin.Endian,
) Reader.Error!void {
    try readSlice(br, @ptrCast(buffer));
    if (native_endian != endian) for (buffer) |*elem| std.mem.byteSwapAllFields(Elem, elem);
}
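
// Sketch of `readSliceEndian` decoding two big-endian `u16` values; byte
// swapping happens only when `endian` differs from the host, so the result is
// the same on either kind of host.
test readSliceEndian {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("\x00\x01\x00\x02"));
    var values: [2]u16 = undefined;
    try br.readSliceEndian(u16, &values, .big);
    try testing.expectEqual(1, values[0]);
    try testing.expectEqual(2, values[1]);
}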

pub const ReadAllocError = Reader.Error || Allocator.Error;

/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
/// when `endian` is comptime-known and matches the host endianness.
pub inline fn readSliceEndianAlloc(
    br: *BufferedReader,
    allocator: Allocator,
    comptime Elem: type,
    len: usize,
    endian: std.builtin.Endian,
) ReadAllocError![]Elem {
    const dest = try allocator.alloc(Elem, len);
    errdefer allocator.free(dest);
    try readSlice(br, @ptrCast(dest));
    if (native_endian != endian) for (dest) |*elem| std.mem.byteSwapAllFields(Elem, elem);
    return dest;
}

pub fn readSliceAlloc(br: *BufferedReader, allocator: Allocator, len: usize) ReadAllocError![]u8 {
    const dest = try allocator.alloc(u8, len);
    errdefer allocator.free(dest);
    try readSlice(br, dest);
    return dest;
}
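
// Sketch: the returned slice is owned by the caller and must be freed.
test readSliceAlloc {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("hello"));
    const bytes = try br.readSliceAlloc(testing.allocator, 5);
    defer testing.allocator.free(bytes);
    try testing.expectEqualStrings("hello", bytes);
}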

/// Transfers all bytes from the current position to the end of the stream, up
/// to `limit`, returning them as a caller-owned allocated slice.
///
/// If `limit` is exceeded, returns `error.StreamTooLong`. In such case, the
/// stream is advanced an unspecified amount, and the consumed data is
/// unrecoverable. The other function listed below does not have this caveat.
///
/// Asserts `br` was initialized with at least one byte of storage capacity.
///
/// See also:
/// * `readRemainingArrayList`
pub fn readRemainingAlloc(br: *BufferedReader, gpa: Allocator, limit: Reader.Limit) Reader.LimitedAllocError![]u8 {
    var buffer: ArrayList(u8) = .empty;
    defer buffer.deinit(gpa);
    try readRemainingArrayList(br, gpa, null, &buffer, limit);
    return buffer.toOwnedSlice(gpa);
}

/// Transfers all bytes from the current position to the end of the stream, up
/// to `limit`, appending them to `list`.
///
/// If `limit` would be exceeded, `error.StreamTooLong` is returned instead. In
/// such case, the stream is in a well-defined state. The next byte that would
/// be read will be the first one to exceed `limit`, and all preceding bytes
/// have been appended to `list`.
///
/// Asserts `br` was initialized with at least one byte of storage capacity.
///
/// See also:
/// * `readRemainingAlloc`
pub fn readRemainingArrayList(
    br: *BufferedReader,
    gpa: Allocator,
    comptime alignment: ?std.mem.Alignment,
    list: *std.ArrayListAlignedUnmanaged(u8, alignment),
    limit: Reader.Limit,
) Reader.LimitedAllocError!void {
    const buffer = br.buffer;
    const buffered = buffer[br.seek..br.end];
    const copy_len = limit.minInt(buffered.len);
    try list.ensureUnusedCapacity(gpa, copy_len);
    @memcpy(list.unusedCapacitySlice()[0..copy_len], buffered[0..copy_len]);
    list.items.len += copy_len;
    br.seek += copy_len;
    if (copy_len == buffered.len) {
        br.seek = 0;
        br.end = 0;
    }
    var remaining = limit.subtract(copy_len).?;
    while (true) {
        try list.ensureUnusedCapacity(gpa, 1);
        const dest = remaining.slice(list.unusedCapacitySlice());
        // When `dest` can reach `limit`, the internal buffer is attached as a
        // second vector so that bytes beyond the limit remain buffered.
        const additional_buffer = if (@intFromEnum(remaining) == dest.len) buffer else &.{};
        const n = br.unbuffered_reader.readVec(&.{ dest, additional_buffer }) catch |err| switch (err) {
            error.EndOfStream => break,
            error.ReadFailed => return error.ReadFailed,
        };
        if (n >= dest.len and additional_buffer.len != 0) {
            br.end = n - dest.len;
            list.items.len += dest.len;
            if (n == dest.len) return;
            return error.StreamTooLong;
        }
        list.items.len += n;
        remaining = remaining.subtract(n).?;
    }
}
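
// Sketch of draining a fixed stream via `readRemainingAlloc`; with
// `.unlimited` the entire remainder is returned and `error.StreamTooLong` is
// impossible.
test readRemainingAlloc {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abc"));
    const bytes = try br.readRemainingAlloc(testing.allocator, .unlimited);
    defer testing.allocator.free(bytes);
    try testing.expectEqualStrings("abc", bytes);
}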

pub const DelimiterError = error{
    /// See the `Reader` implementation for detailed diagnostics.
    ReadFailed,
    /// For "inclusive" functions, stream ended before the delimiter was found.
    /// For "exclusive" functions, stream ended and there are no more bytes to
    /// return.
    EndOfStream,
    /// The delimiter was not found within a number of bytes matching the
    /// capacity of the `BufferedReader`.
    StreamTooLong,
};

/// Returns a slice of the next bytes of buffered data from the stream until
/// `sentinel` is found, advancing the seek position past the sentinel.
///
/// Returned slice has a sentinel.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `peekSentinel`
/// * `takeDelimiterExclusive`
/// * `takeDelimiterInclusive`
pub fn takeSentinel(br: *BufferedReader, comptime sentinel: u8) DelimiterError![:sentinel]u8 {
    const result = try br.peekSentinel(sentinel);
    br.toss(result.len + 1);
    return result;
}

pub fn peekSentinel(br: *BufferedReader, comptime sentinel: u8) DelimiterError![:sentinel]u8 {
    const result = try br.peekDelimiterInclusive(sentinel);
    return result[0 .. result.len - 1 :sentinel];
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position.
///
/// Returned slice includes the delimiter as the last byte.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeSentinel`
/// * `takeDelimiterExclusive`
/// * `peekDelimiterInclusive`
pub fn takeDelimiterInclusive(br: *BufferedReader, delimiter: u8) DelimiterError![]u8 {
    const result = try br.peekDelimiterInclusive(delimiter);
    br.toss(result.len);
    return result;
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, without advancing the seek position.
///
/// Returned slice includes the delimiter as the last byte.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `peekSentinel`
/// * `peekDelimiterExclusive`
/// * `takeDelimiterInclusive`
pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) DelimiterError![]u8 {
    const buffer = br.buffer[0..br.end];
    const seek = br.seek;
    if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| {
        @branchHint(.likely);
        return buffer[seek .. end + 1];
    }
    if (seek > 0) {
        const remainder = buffer[seek..];
        std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
        br.end = remainder.len;
        br.seek = 0;
    }
    while (br.end < br.buffer.len) {
        const n = try br.unbuffered_reader.readVec(&.{br.buffer[br.end..]});
        const prev_end = br.end;
        br.end = prev_end + n;
        if (std.mem.indexOfScalarPos(u8, br.buffer[0..br.end], prev_end, delimiter)) |end| {
            return br.buffer[0 .. end + 1];
        }
    }
    return error.StreamTooLong;
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
/// case `error.EndOfStream` is returned instead.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of this `BufferedReader`, `error.StreamTooLong` is returned. In
/// such case, the stream state is unmodified as if this function was never
/// called.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeDelimiterInclusive`
/// * `peekDelimiterExclusive`
pub fn takeDelimiterExclusive(br: *BufferedReader, delimiter: u8) DelimiterError![]u8 {
    const result = br.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
        error.EndOfStream => {
            if (br.end == 0) return error.EndOfStream;
            br.toss(br.end);
            return br.buffer[0..br.end];
        },
        else => |e| return e,
    };
    br.toss(result.len);
    return result[0 .. result.len - 1];
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, without advancing the seek position.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
/// case `error.EndOfStream` is returned instead.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of this `BufferedReader`, `error.StreamTooLong` is returned. In
/// such case, the stream state is unmodified as if this function was never
/// called.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `peekDelimiterInclusive`
/// * `takeDelimiterExclusive`
pub fn peekDelimiterExclusive(br: *BufferedReader, delimiter: u8) DelimiterError![]u8 {
    const result = br.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
        error.EndOfStream => {
            if (br.end == 0) return error.EndOfStream;
            return br.buffer[0..br.end];
        },
        else => |e| return e,
    };
    return result[0 .. result.len - 1];
}

/// Appends to `bw` contents by reading from the stream until `delimiter` is
/// found. Does not write the delimiter itself.
///
/// Returns the number of bytes streamed.
pub fn readDelimiter(br: *BufferedReader, bw: *BufferedWriter, delimiter: u8) Reader.RwError!usize {
    const amount, const to = try br.readAny(bw, delimiter, .unlimited);
    return switch (to) {
        .delimiter => amount,
        .limit => unreachable,
        .end => error.EndOfStream,
    };
}

/// Appends to `bw` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
/// Succeeds if the stream ends before the delimiter is found.
///
/// Returns the number of bytes streamed. The end is not signaled to the writer.
pub fn readDelimiterEnding(
    br: *BufferedReader,
    bw: *BufferedWriter,
    delimiter: u8,
) Reader.RwRemainingError!usize {
    const amount, const to = try br.readAny(bw, delimiter, .unlimited);
    return switch (to) {
        .delimiter, .end => amount,
        .limit => unreachable,
    };
}

pub const StreamDelimiterLimitedError = Reader.RwRemainingError || error{
    /// Stream ended before the delimiter was found.
    EndOfStream,
    /// The delimiter was not found within the limit.
    StreamTooLong,
};

/// Appends to `bw` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
/// Returns the number of bytes streamed.
pub fn readDelimiterLimit(
    br: *BufferedReader,
    bw: *BufferedWriter,
    delimiter: u8,
    limit: Reader.Limit,
) StreamDelimiterLimitedError!usize {
    const amount, const to = try br.readAny(bw, delimiter, limit);
    return switch (to) {
        .delimiter => amount,
        .limit => error.StreamTooLong,
        .end => error.EndOfStream,
    };
}

fn readAny(
    br: *BufferedReader,
    bw: *BufferedWriter,
    delimiter: ?u8,
    limit: Reader.Limit,
) Reader.RwRemainingError!struct { usize, enum { delimiter, limit, end } } {
    var amount: usize = 0;
    var remaining = limit;
    while (remaining.nonzero()) {
        const available = remaining.slice(br.peekGreedy(1) catch |err| switch (err) {
            error.ReadFailed => |e| return e,
            error.EndOfStream => return .{ amount, .end },
        });
        if (delimiter) |d| if (std.mem.indexOfScalar(u8, available, d)) |delimiter_index| {
            try bw.writeAll(available[0..delimiter_index]);
            br.toss(delimiter_index + 1);
            return .{ amount + delimiter_index, .delimiter };
        };
        try bw.writeAll(available);
        br.toss(available.len);
        amount += available.len;
        remaining = remaining.subtract(available.len).?;
    }
    return .{ amount, .limit };
}

/// Reads from the stream until specified byte is found, discarding all data,
/// including the delimiter.
///
/// If end of stream is found, this function succeeds.
pub fn discardDelimiterInclusive(br: *BufferedReader, delimiter: u8) Reader.Error!void {
    _ = br;
    _ = delimiter;
    @panic("TODO");
}

/// Reads from the stream until specified byte is found, discarding all data,
/// excluding the delimiter.
///
/// Succeeds if the stream ends before the delimiter is found.
pub fn discardDelimiterExclusive(br: *BufferedReader, delimiter: u8) Reader.ShortError!void {
    _ = br;
    _ = delimiter;
    @panic("TODO");
}

/// Fills the buffer such that it contains at least `n` bytes, without
/// advancing the seek position.
///
/// Returns `error.EndOfStream` if and only if there are fewer than `n` bytes
/// remaining.
///
/// Asserts buffer capacity is at least `n`.
pub fn fill(br: *BufferedReader, n: usize) Reader.Error!void {
    assert(n <= br.buffer.len);
    if (br.seek + n <= br.end) {
        @branchHint(.likely);
        return;
    }
    rebaseCapacity(br, n);
    while (br.end < br.seek + n) {
        br.end += try br.unbuffered_reader.readVec(&.{br.buffer[br.end..]});
    }
}

/// Fills the buffer with at least one more byte of data, without advancing the
/// seek position, doing exactly one underlying read.
///
/// Asserts buffer capacity is at least 1.
pub fn fillMore(br: *BufferedReader) Reader.Error!void {
    rebaseCapacity(br, 1);
    br.end += try br.unbuffered_reader.readVec(&.{br.buffer[br.end..]});
}

/// Returns the next byte from the stream or returns `error.EndOfStream`.
///
/// Does not advance the seek position.
///
/// Asserts the buffer capacity is nonzero.
pub fn peekByte(br: *BufferedReader) Reader.Error!u8 {
    const buffer = br.buffer[0..br.end];
    const seek = br.seek;
    if (seek >= buffer.len) {
        @branchHint(.unlikely);
        // `fill` may rebase the buffer, so the local copies of `buffer` and
        // `seek` cannot be reused after it returns.
        try fill(br, 1);
        return br.buffer[br.seek];
    }
    return buffer[seek];
}

/// Reads 1 byte from the stream or returns `error.EndOfStream`.
///
/// Asserts the buffer capacity is nonzero.
pub fn takeByte(br: *BufferedReader) Reader.Error!u8 {
    const result = try peekByte(br);
    br.seek += 1;
    return result;
}

/// Same as `takeByte` except the returned byte is signed.
pub fn takeByteSigned(br: *BufferedReader) Reader.Error!i8 {
    return @bitCast(try br.takeByte());
}

/// Asserts the buffer was initialized with a capacity at least `@bitSizeOf(T) / 8`.
pub inline fn takeInt(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) Reader.Error!T {
    const n = @divExact(@typeInfo(T).int.bits, 8);
    return std.mem.readInt(T, try br.takeArray(n), endian);
}

/// Asserts the buffer was initialized with a capacity at least `n`.
pub fn takeVarInt(br: *BufferedReader, comptime Int: type, endian: std.builtin.Endian, n: usize) Reader.Error!Int {
    assert(n <= @sizeOf(Int));
    return std.mem.readVarInt(Int, try br.take(n), endian);
}

/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
///
/// Advances the seek position.
///
/// See also:
/// * `peekStruct`
pub fn takeStruct(br: *BufferedReader, comptime T: type) Reader.Error!*align(1) T {
    // Only extern and packed structs have defined in-memory layout.
    comptime assert(@typeInfo(T).@"struct".layout != .auto);
    return @ptrCast(try br.takeArray(@sizeOf(T)));
}

/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
///
/// Does not advance the seek position.
///
/// See also:
/// * `takeStruct`
pub fn peekStruct(br: *BufferedReader, comptime T: type) Reader.Error!*align(1) T {
    // Only extern and packed structs have defined in-memory layout.
    comptime assert(@typeInfo(T).@"struct".layout != .auto);
    return @ptrCast(try br.peekArray(@sizeOf(T)));
}

/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
///
/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
/// when `endian` is comptime-known and matches the host endianness.
pub inline fn takeStructEndian(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) Reader.Error!T {
    var res = (try br.takeStruct(T)).*;
    if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
    return res;
}

/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
///
/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
/// when `endian` is comptime-known and matches the host endianness.
pub inline fn peekStructEndian(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) Reader.Error!T {
    var res = (try br.peekStruct(T)).*;
    if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
    return res;
}

pub const TakeEnumError = Reader.Error || error{InvalidEnumTag};

/// Reads an integer with the same size as the given enum's tag type. If the
/// integer matches an enum tag, casts the integer to the enum tag and returns
/// it. Otherwise, returns `error.InvalidEnumTag`.
///
/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`.
pub fn takeEnum(br: *BufferedReader, comptime Enum: type, endian: std.builtin.Endian) TakeEnumError!Enum {
    const Tag = @typeInfo(Enum).@"enum".tag_type;
    const int = try br.takeInt(Tag, endian);
    return std.meta.intToEnum(Enum, int);
}

/// Reads an integer with the same size as the given nonexhaustive enum's tag type.
///
/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`.
pub fn takeEnumNonexhaustive(br: *BufferedReader, comptime Enum: type, endian: std.builtin.Endian) Reader.Error!Enum {
    const info = @typeInfo(Enum).@"enum";
    comptime assert(!info.is_exhaustive);
    comptime assert(@bitSizeOf(info.tag_type) == @sizeOf(info.tag_type) * 8);
    return takeEnum(br, Enum, endian) catch |err| switch (err) {
        error.InvalidEnumTag => unreachable,
        else => |e| return e,
    };
}

pub const TakeLeb128Error = Reader.Error || error{Overflow};

/// Read a single LEB128 value as type `Result`, or `error.Overflow` if the
/// value cannot fit.
pub fn takeLeb128(br: *BufferedReader, comptime Result: type) TakeLeb128Error!Result {
    const result_info = @typeInfo(Result).int;
    return std.math.cast(Result, try br.takeMultipleOf7Leb128(@Type(.{ .int = .{
        .signedness = result_info.signedness,
        .bits = std.mem.alignForwardAnyAlign(u16, result_info.bits, 7),
    } }))) orelse error.Overflow;
}

pub fn expandTotalCapacity(br: *BufferedReader, allocator: Allocator, n: usize) Allocator.Error!void {
    if (n <= br.buffer.len) return;
    if (br.seek > 0) rebase(br);
    var list: ArrayList(u8) = .{
        .items = br.buffer[0..br.end],
        .capacity = br.buffer.len,
    };
    defer br.buffer = list.allocatedSlice();
    try list.ensureTotalCapacity(allocator, n);
}

pub const FillAllocError = Reader.Error || Allocator.Error;

pub fn fillAlloc(br: *BufferedReader, allocator: Allocator, n: usize) FillAllocError!void {
    try expandTotalCapacity(br, allocator, n);
    return fill(br, n);
}

/// Returns a slice into the unused capacity of `buffer` with at least
/// `min_len` bytes, extending `buffer` by resizing it with `allocator` as
/// necessary.
///
/// After calling this function, typically the caller will follow up with a
/// call to `advanceBufferEnd` to report the actual number of bytes buffered.
pub fn writableSliceGreedyAlloc(br: *BufferedReader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 {
    {
        const unused = br.buffer[br.end..];
        if (unused.len >= min_len) return unused;
    }
    if (br.seek > 0) rebase(br);
    {
        var list: ArrayList(u8) = .{
            .items = br.buffer[0..br.end],
            .capacity = br.buffer.len,
        };
        defer br.buffer = list.allocatedSlice();
        try list.ensureUnusedCapacity(allocator, min_len);
    }
    const unused = br.buffer[br.end..];
    assert(unused.len >= min_len);
    return unused;
}

/// After writing directly into the unused capacity of `buffer`, this function
/// updates `end` so that users of `BufferedReader` can receive the data.
pub fn advanceBufferEnd(br: *BufferedReader, n: usize) void {
    assert(n <= br.buffer.len - br.end);
    br.end += n;
}
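
// Sketch of the direct-fill workflow: reserve writable capacity, write into
// it from outside, then publish the bytes with `advanceBufferEnd`. Starts
// from an empty, unallocated buffer; the final buffer is one allocation from
// `testing.allocator` and is freed as such.
test writableSliceGreedyAlloc {
    var br: BufferedReader = .{
        .unbuffered_reader = .ending,
        .buffer = &.{},
        .end = 0,
        .seek = 0,
    };
    defer testing.allocator.free(br.buffer);
    const writable = try br.writableSliceGreedyAlloc(testing.allocator, 3);
    @memcpy(writable[0..3], "abc");
    br.advanceBufferEnd(3);
    try testing.expectEqualStrings("abc", try br.take(3));
}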

fn takeMultipleOf7Leb128(br: *BufferedReader, comptime Result: type) TakeLeb128Error!Result {
    const result_info = @typeInfo(Result).int;
    comptime assert(result_info.bits % 7 == 0);
    var remaining_bits: std.math.Log2IntCeil(Result) = result_info.bits;
    const UnsignedResult = @Type(.{ .int = .{
        .signedness = .unsigned,
        .bits = result_info.bits,
    } });
    var result: UnsignedResult = 0;
    var fits = true;
    while (true) {
        const buffer: []const packed struct(u8) { bits: u7, more: bool } = @ptrCast(try br.peekGreedy(1));
        for (buffer, 1..) |byte, len| {
            if (remaining_bits > 0) {
                result = @shlExact(@as(UnsignedResult, byte.bits), result_info.bits - 7) |
                    if (result_info.bits > 7) @shrExact(result, 7) else 0;
                remaining_bits -= 7;
            } else if (fits) fits = switch (result_info.signedness) {
                .signed => @as(i7, @bitCast(byte.bits)) ==
                    @as(i7, @truncate(@as(Result, @bitCast(result)) >> (result_info.bits - 1))),
                .unsigned => byte.bits == 0,
            };
            if (byte.more) continue;
            br.toss(len);
            return if (fits) @as(Result, @bitCast(result)) >> remaining_bits else error.Overflow;
        }
        br.toss(buffer.len);
    }
}

/// Left-aligns data such that `br.seek` becomes zero.
///
/// Mutates `buffer` unless the buffered region is empty.
pub fn rebase(br: *BufferedReader) void {
    const data = br.buffer[br.seek..br.end];
    const dest = br.buffer[0..data.len];
    std.mem.copyForwards(u8, dest, data);
    br.seek = 0;
    br.end = data.len;
}
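
// `rebase` writes to `buffer`, so this sketch uses mutable storage rather
// than `@constCast` on a literal.
test rebase {
    var storage: [4]u8 = "abcd".*;
    var br: BufferedReader = undefined;
    br.initFixed(&storage);
    br.toss(2);
    br.rebase();
    try testing.expectEqual(0, br.seek);
    try testing.expectEqualStrings("cd", br.bufferContents());
}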

/// Ensures `capacity` more data can be buffered without rebasing, by rebasing
/// if necessary.
///
/// Asserts `capacity` is within the buffer capacity.
pub fn rebaseCapacity(br: *BufferedReader, capacity: usize) void {
    if (br.end > br.buffer.len - capacity) rebase(br);
}

/// Advances the stream and decreases the size of the storage buffer by `n`,
/// returning the range of bytes no longer accessible by `br`.
///
/// This action can be undone by `restitute`.
///
/// Asserts there are at least `n` buffered bytes already.
///
/// Asserts that `br.seek` is zero, i.e. the buffer is in a rebased state.
pub fn steal(br: *BufferedReader, n: usize) []u8 {
    assert(br.seek == 0);
    assert(n <= br.end);
    const stolen = br.buffer[0..n];
    br.buffer = br.buffer[n..];
    br.end -= n;
    return stolen;
}

/// Expands the storage buffer, undoing the effects of `steal`.
///
/// Assumes that `n` does not exceed the total number of stolen bytes.
pub fn restitute(br: *BufferedReader, n: usize) void {
    br.buffer = (br.buffer.ptr - n)[0 .. br.buffer.len + n];
    br.end += n;
    br.seek += n;
}
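
// Sketch of the `steal`/`restitute` pair. Restituted bytes land before the
// seek position, as if already consumed.
test "steal and restitute" {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abcdef"));
    const stolen = br.steal(2);
    try testing.expectEqualStrings("ab", stolen);
    try testing.expectEqualStrings("cd", try br.peek(2));
    br.restitute(2);
    try testing.expectEqualStrings("ab", br.buffer[0..2]);
    try testing.expectEqualStrings("cd", try br.peek(2));
}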

test initFixed {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("a\x02"));
    try testing.expect((try br.takeByte()) == 'a');
    try testing.expect((try br.takeEnum(enum(u8) {
        a = 0,
        b = 99,
        c = 2,
        d = 3,
    }, builtin.cpu.arch.endian())) == .c);
    try testing.expectError(error.EndOfStream, br.takeByte());
}

test peek {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abc"));
    try testing.expectEqualStrings("ab", try br.peek(2));
    // Peeking does not advance the seek position.
    try testing.expectEqualStrings("ab", try br.peek(2));
}

test peekGreedy {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abc"));
    // Everything in a fixed stream is already buffered, so greedy peeking
    // returns the full remainder.
    try testing.expectEqualStrings("abc", try br.peekGreedy(1));
    br.toss(1);
    try testing.expectEqualStrings("bc", try br.peekGreedy(1));
}

test toss {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abc"));
    br.toss(1);
    try testing.expectEqualStrings("bc", try br.take(2));
}

test take {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abc"));
    try testing.expectEqualStrings("ab", try br.take(2));
    try testing.expectEqualStrings("c", try br.take(1));
    try testing.expectError(error.EndOfStream, br.take(1));
}

test takeArray {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abcd"));
    try testing.expectEqualStrings("ab", try br.takeArray(2));
    try testing.expectEqualStrings("cd", try br.takeArray(2));
}

test peekArray {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abcd"));
    try testing.expectEqualStrings("ab", try br.peekArray(2));
    // The seek position is unchanged.
    try testing.expectEqualStrings("ab", try br.peekArray(2));
}

test discardAll {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("foobar"));
    try br.discardAll(3);
    try testing.expectEqualStrings("bar", try br.take(3));
    try br.discardAll(0);
    try testing.expectError(error.EndOfStream, br.discardAll(1));
}

test discardRemaining {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abcdef"));
    br.toss(2);
    // Only the unconsumed bytes count; the fixed stream ends afterwards.
    try testing.expectEqual(4, try br.discardRemaining());
}

test read {
    return error.Unimplemented;
}

test takeSentinel {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("ab\x00cd"));
    try testing.expectEqualStrings("ab", try br.takeSentinel(0));
    try testing.expectEqualStrings("cd", try br.take(2));
}

test peekSentinel {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("hi\x00"));
    try testing.expectEqualStrings("hi", try br.peekSentinel(0));
    // The seek position is unchanged.
    try testing.expectEqualStrings("hi", try br.peekSentinel(0));
}

test takeDelimiterInclusive {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("ab\ncd"));
    try testing.expectEqualStrings("ab\n", try br.takeDelimiterInclusive('\n'));
    try testing.expectEqualStrings("cd", try br.take(2));
}

test peekDelimiterInclusive {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("ab\ncd"));
    try testing.expectEqualStrings("ab\n", try br.peekDelimiterInclusive('\n'));
    // The seek position is unchanged.
    try testing.expectEqualStrings("ab\n", try br.peekDelimiterInclusive('\n'));
}

test takeDelimiterExclusive {
    // `takeDelimiterExclusive` may rebase the buffer, so mutable storage is
    // required here rather than `@constCast` on a literal.
    var storage: [5]u8 = "ab\ncd".*;
    var br: BufferedReader = undefined;
    br.initFixed(&storage);
    try testing.expectEqualStrings("ab", try br.takeDelimiterExclusive('\n'));
    // End of stream is treated as a delimiter.
    try testing.expectEqualStrings("cd", try br.takeDelimiterExclusive('\n'));
    try testing.expectError(error.EndOfStream, br.takeDelimiterExclusive('\n'));
}

test peekDelimiterExclusive {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("ab\ncd"));
    try testing.expectEqualStrings("ab", try br.peekDelimiterExclusive('\n'));
    // The seek position is unchanged.
    try testing.expectEqualStrings("ab", try br.peekDelimiterExclusive('\n'));
}

test readDelimiter {
    return error.Unimplemented;
}

test readDelimiterEnding {
    return error.Unimplemented;
}

test readDelimiterLimit {
    return error.Unimplemented;
}

test discardDelimiterExclusive {
    return error.Unimplemented;
}

test discardDelimiterInclusive {
    return error.Unimplemented;
}

test fill {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abc"));
    try br.fill(3);
    try testing.expectEqualStrings("abc", br.bufferContents());
    br.toss(3);
    try testing.expectError(error.EndOfStream, br.fill(1));
}

test takeByte {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("ab"));
    try testing.expectEqual('a', try br.takeByte());
    try testing.expectEqual('b', try br.takeByte());
    try testing.expectError(error.EndOfStream, br.takeByte());
}

test takeByteSigned {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("\xff\x7f"));
    try testing.expectEqual(-1, try br.takeByteSigned());
    try testing.expectEqual(127, try br.takeByteSigned());
}

test takeInt {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("\x12\x34\x12\x34"));
    try testing.expectEqual(0x3412, try br.takeInt(u16, .little));
    try testing.expectEqual(0x1234, try br.takeInt(u16, .big));
}

test takeVarInt {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("\x01\x02\x03"));
    try testing.expectEqual(0x010203, try br.takeVarInt(u32, .big, 3));
}

test takeStruct {
    const Header = extern struct { a: u8, b: u8 };
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("\x01\x02"));
    const header = try br.takeStruct(Header);
    try testing.expectEqual(1, header.a);
    try testing.expectEqual(2, header.b);
}

test peekStruct {
    const Header = extern struct { a: u8, b: u8 };
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("\x03\x04"));
    const header = try br.peekStruct(Header);
    try testing.expectEqual(3, header.a);
    try testing.expectEqual(4, header.b);
    // The seek position is unchanged.
    try testing.expectEqual(3, (try br.peekStruct(Header)).a);
}

test takeStructEndian {
    const Header = extern struct { value: u16 };
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("\x01\x02\x01\x02"));
    try testing.expectEqual(0x0201, (try br.takeStructEndian(Header, .little)).value);
    try testing.expectEqual(0x0102, (try br.takeStructEndian(Header, .big)).value);
}

test peekStructEndian {
    const Header = extern struct { value: u16 };
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("\x01\x02"));
    try testing.expectEqual(0x0102, (try br.peekStructEndian(Header, .big)).value);
    // The seek position is unchanged.
    try testing.expectEqual(0x0102, (try br.peekStructEndian(Header, .big)).value);
}

test takeEnum {
    const E = enum(u16) { a = 1, b = 2 };
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("\x02\x00\x07\x00"));
    try testing.expectEqual(E.b, try br.takeEnum(E, .little));
    try testing.expectError(error.InvalidEnumTag, br.takeEnum(E, .little));
}

test takeLeb128 {
    var br: BufferedReader = undefined;
    // 624485 encoded as unsigned LEB128 (the classic example).
    br.initFixed(@constCast("\xe5\x8e\x26"));
    try testing.expectEqual(624485, try br.takeLeb128(u32));
    try testing.expectError(error.EndOfStream, br.takeLeb128(u32));
}

test readSliceShort {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abc"));
    var buf: [5]u8 = undefined;
    try testing.expectEqual(3, try br.readSliceShort(&buf));
    try testing.expectEqualStrings("abc", buf[0..3]);
    try testing.expectEqual(0, try br.readSliceShort(&buf));
}

test readVec {
    var br: BufferedReader = undefined;
    br.initFixed(@constCast("abcd"));
    var buf: [3]u8 = undefined;
    var bufs: [1][]u8 = .{&buf};
    try testing.expectEqual(3, try br.readVec(&bufs));
    try testing.expectEqualStrings("abc", &buf);
}

test "expected error.EndOfStream" {
    // Unit test inspired by https://github.com/ziglang/zig/issues/17733
    // `take` and friends assert nonzero buffer capacity, so the reader gets a
    // backing buffer while the stream itself is empty.
    var storage: [8]u8 = undefined;
    var br: BufferedReader = .{
        .unbuffered_reader = .ending,
        .buffer = &storage,
        .end = 0,
        .seek = 0,
    };
    try testing.expectError(error.EndOfStream, br.takeEnum(enum(u8) { a, b }, .little));
    try testing.expectError(error.EndOfStream, br.takeArray(3));
}