std: finish renaming RwError to StreamError

Andrew Kelley 2025-05-28 15:44:42 -07:00
parent 3ef75356f6
commit 2f5574ac08
13 changed files with 27 additions and 61 deletions
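In short, this finishes the error-set rename on std.io.Reader started earlier: remaining references to RwError become StreamError, DiscardError becomes Error, and RwRemainingError becomes StreamRemainingError, with every stream implementation and caller touched by this commit updated to match. As a quick before/after reference (lines taken from the hunks below):

fn read(context: ?*anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Limit) std.io.Reader.RwError!usize     // before
fn read(context: ?*anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Limit) std.io.Reader.StreamError!usize // after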

@@ -769,7 +769,7 @@ fn read(
context: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Limit,
-) std.io.Reader.RwError!usize {
+) std.io.Reader.StreamError!usize {
const c: *Compress = @ptrCast(@alignCast(context));
switch (c.state) {
.header => |i| {

@@ -143,7 +143,7 @@ pub fn read(
context: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Limit,
-) std.io.Reader.RwError!usize {
+) std.io.Reader.StreamError!usize {
const d: *Decompress = @alignCast(@ptrCast(context));
return readInner(d, bw, limit) catch |err| switch (err) {
error.EndOfStream => return error.EndOfStream,

@@ -2,7 +2,7 @@ const std = @import("../std.zig");
const Allocator = std.mem.Allocator;
const lzma = std.compress.lzma;
-pub fn decompress(gpa: Allocator, reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) std.io.Reader.RwError!void {
+pub fn decompress(gpa: Allocator, reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) std.io.Reader.StreamError!void {
var decoder = try Decode.init(gpa);
defer decoder.deinit(gpa);
return decoder.decompress(gpa, reader, writer);

@@ -78,7 +78,7 @@ pub fn reader(self: *Decompress) Reader {
};
}
-fn read(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) Reader.RwError!usize {
+fn read(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) Reader.StreamError!usize {
const d: *Decompress = @ptrCast(@alignCast(context));
const in = d.input;

@@ -1043,7 +1043,7 @@ pub fn eof(c: Client) bool {
return c.received_close_notify;
}
-fn read(context: ?*anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Limit) Reader.RwError!usize {
+fn read(context: ?*anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Limit) Reader.StreamError!usize {
const c: *Client = @ptrCast(@alignCast(context));
if (c.eof()) return error.EndOfStream;
const input = c.input;

@@ -508,7 +508,7 @@ pub const Reader = struct {
ctx: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Limit,
-) std.io.Reader.RwError!usize {
+) std.io.Reader.StreamError!usize {
const reader: *Reader = @alignCast(@ptrCast(ctx));
const remaining_content_length = &reader.state.body_remaining_content_length;
const remaining = remaining_content_length.*;
@@ -551,7 +551,7 @@
ctx: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Limit,
-) std.io.Reader.RwError!usize {
+) std.io.Reader.StreamError!usize {
const reader: *Reader = @alignCast(@ptrCast(ctx));
const chunk_len_ptr = switch (reader.state) {
.ready => return error.EndOfStream,
@@ -577,7 +577,7 @@
bw: *std.io.BufferedWriter,
limit: std.io.Limit,
chunk_len_ptr: *RemainingChunkLen,
-) (BodyError || std.io.Reader.RwError)!usize {
+) (BodyError || std.io.Reader.StreamError)!usize {
const in = reader.in;
len: switch (chunk_len_ptr.*) {
.head => {

@@ -104,7 +104,7 @@ pub fn readAll(br: *BufferedReader, bw: *BufferedWriter, limit: Limit) Reader.St
/// a success case.
///
/// Returns total number of bytes written to `bw`.
-pub fn readRemaining(br: *BufferedReader, bw: *BufferedWriter) Reader.RwRemainingError!usize {
+pub fn readRemaining(br: *BufferedReader, bw: *BufferedWriter) Reader.StreamRemainingError!usize {
var offset: usize = 0;
while (true) {
offset += br.read(bw, .unlimited) catch |err| switch (err) {
@@ -720,7 +720,7 @@ pub fn readDelimiterEnding(
br: *BufferedReader,
bw: *BufferedWriter,
delimiter: u8,
-) Reader.RwRemainingError!usize {
+) Reader.StreamRemainingError!usize {
const amount, const to = try br.readAny(bw, delimiter, .unlimited);
return switch (to) {
.delimiter, .end => amount,
@@ -728,7 +728,7 @@ pub fn readDelimiterEnding(
};
}
-pub const StreamDelimiterLimitedError = Reader.RwRemainingError || error{
+pub const StreamDelimiterLimitedError = Reader.StreamRemainingError || error{
/// Stream ended before the delimiter was found.
EndOfStream,
/// The delimiter was not found within the limit.
@@ -758,7 +758,7 @@ fn readAny(
bw: *BufferedWriter,
delimiter: ?u8,
limit: Limit,
-) Reader.RwRemainingError!struct { usize, enum { delimiter, limit, end } } {
+) Reader.StreamRemainingError!struct { usize, enum { delimiter, limit, end } } {
var amount: usize = 0;
var remaining = limit;
while (remaining.nonzero()) {
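For BufferedReader callers, only the error-set name changes. A minimal sketch under that assumption (drainRemaining is a hypothetical helper name, not part of std):

fn drainRemaining(br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) std.io.Reader.StreamRemainingError!usize {
    // readRemaining now returns StreamRemainingError (formerly RwRemainingError);
    // a wrapper like this simply re-exports the renamed set to its own callers.
    return br.readRemaining(bw);
}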

@@ -46,7 +46,7 @@ pub const VTable = struct {
/// provided which is based on calling `read`, borrowing
/// `BufferedReader.buffer` to construct a temporary `BufferedWriter` and
/// ignoring the written data.
-discard: *const fn (context: ?*anyopaque, limit: Limit) DiscardError!usize = null,
+discard: *const fn (context: ?*anyopaque, limit: Limit) Error!usize = null,
};
pub const StreamError = error{
@@ -59,13 +59,13 @@ pub const StreamError = error{
EndOfStream,
};
-pub const DiscardError = error{
+pub const Error = error{
/// See the `Reader` implementation for detailed diagnostics.
ReadFailed,
EndOfStream,
};
-pub const RwRemainingError = error{
+pub const StreamRemainingError = error{
/// See the `Reader` implementation for detailed diagnostics.
ReadFailed,
/// See the `Writer` implementation for detailed diagnostics.
@@ -85,14 +85,14 @@ pub fn read(r: Reader, bw: *BufferedWriter, limit: Limit) StreamError!usize {
return n;
}
-pub fn discard(r: Reader, limit: Limit) DiscardError!usize {
+pub fn discard(r: Reader, limit: Limit) Error!usize {
const n = try r.vtable.discard(r.context, limit);
assert(n <= @intFromEnum(limit));
return n;
}
/// Returns total number of bytes written to `bw`.
-pub fn readRemaining(r: Reader, bw: *BufferedWriter) RwRemainingError!usize {
+pub fn readRemaining(r: Reader, bw: *BufferedWriter) StreamRemainingError!usize {
const readFn = r.vtable.read;
var offset: usize = 0;
while (true) {
@@ -212,7 +212,7 @@ fn endingRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) StreamErr
return error.EndOfStream;
}
-fn endingDiscard(context: ?*anyopaque, limit: Limit) DiscardError!usize {
+fn endingDiscard(context: ?*anyopaque, limit: Limit) Error!usize {
_ = context;
_ = limit;
return error.EndOfStream;
@@ -225,7 +225,7 @@ fn failingRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) StreamEr
return error.ReadFailed;
}
-fn failingDiscard(context: ?*anyopaque, limit: Limit) DiscardError!usize {
+fn failingDiscard(context: ?*anyopaque, limit: Limit) Error!usize {
_ = context;
_ = limit;
return error.ReadFailed;
@@ -297,7 +297,7 @@ pub fn Hashed(comptime Hasher: type) type {
return n;
}
-fn discard(context: ?*anyopaque, limit: Limit) DiscardError!usize {
+fn discard(context: ?*anyopaque, limit: Limit) Error!usize {
const this: *@This() = @alignCast(@ptrCast(context));
var bw = this.hasher.writable(&.{});
const n = this.in.read(&bw, limit) catch |err| switch (err) {
@@ -307,7 +307,7 @@ pub fn Hashed(comptime Hasher: type) type {
return n;
}
-fn readVec(context: ?*anyopaque, data: []const []u8) DiscardError!usize {
+fn readVec(context: ?*anyopaque, data: []const []u8) Error!usize {
const this: *@This() = @alignCast(@ptrCast(context));
const n = try this.in.readVec(data);
var remaining: usize = n;
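To see the renamed pieces of Reader.zig together: a reader implementation written after this commit provides vtable callbacks that return StreamError and Error instead of RwError and DiscardError. The following is a rough sketch only, modeled on the endingRead/endingDiscard helpers above and on the vtable-literal shape of the (deleted) LinearFifo adapter below; treat the exact VTable fields and their optionality as assumptions rather than verbatim std API.

const std = @import("std");

// A reader that always reports end of stream, using the post-rename error sets.
const EmptyReader = struct {
    pub fn reader(self: *EmptyReader) std.io.Reader {
        return .{
            .context = self,
            .vtable = &.{
                .read = &read,
                .readVec = &readVec,
                .discard = &discard,
            },
        };
    }

    fn read(context: ?*anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Limit) std.io.Reader.StreamError!usize {
        _ = context;
        _ = bw;
        _ = limit;
        return error.EndOfStream; // was RwError before this commit
    }

    fn readVec(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
        _ = context;
        _ = data;
        return error.EndOfStream; // was DiscardError before this commit
    }

    fn discard(context: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize {
        _ = context;
        _ = limit;
        return error.EndOfStream; // was DiscardError before this commit
    }
};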

@@ -19,7 +19,7 @@ pub fn reader(l: *Limited) Reader {
};
}
-fn passthruRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) Reader.RwError!usize {
+fn passthruRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) Reader.StreamError!usize {
const l: *Limited = @alignCast(@ptrCast(context));
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited_reader.read(bw, combined_limit);

@@ -358,7 +358,7 @@ pub const Iterator = struct {
};
}
-fn read(context: ?*anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Limit) std.io.Reader.RwError!usize {
+fn read(context: ?*anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Limit) std.io.Reader.StreamError!usize {
const file: *File = @ptrCast(@alignCast(context));
if (file.unread_bytes.* == 0) return error.EndOfStream;
const n = try file.parent_reader.read(bw, limit.min(.limited(file.unread_bytes.*)));
@@ -381,7 +381,7 @@ pub const Iterator = struct {
return n;
}
-pub fn readRemaining(file: *File, out: *std.io.BufferedWriter) std.io.Reader.RwRemainingError!void {
+pub fn readRemaining(file: *File, out: *std.io.BufferedWriter) std.io.Reader.StreamRemainingError!void {
return file.reader().readRemaining(out);
}
};

@@ -69,7 +69,7 @@ pub fn writeFileStream(
size: usize,
reader: *std.io.BufferedReader,
options: Options,
-) std.io.Reader.RwError!void {
+) std.io.Reader.StreamError!void {
try w.writeHeader(.regular, sub_path, "", @intCast(size), options);
try reader.readAll(w.underlying_writer, .limited(size));
try w.writePadding(size);

@@ -202,7 +202,7 @@ pub const Decompress = union {
context: ?*anyopaque,
writer: *std.io.BufferedWriter,
limit: std.io.Limit,
-) std.io.Reader.RwError!usize {
+) std.io.Reader.StreamError!usize {
const d: *Decompress = @ptrCast(@alignCast(context));
return d.store.read(writer, limit);
}
@@ -211,7 +211,7 @@ pub const Decompress = union {
context: ?*anyopaque,
writer: *std.io.BufferedWriter,
limit: std.io.Limit,
-) std.io.Reader.RwError!usize {
+) std.io.Reader.StreamError!usize {
const d: *Decompress = @ptrCast(@alignCast(context));
return std.compress.flate.Decompress.read(&d.inflate, writer, limit);
}

@@ -165,40 +165,6 @@ pub fn LinearFifo(comptime T: type) type {
return self.read(dest);
}
-pub fn reader(self: *Self) std.io.Reader {
-return .{
-.context = self,
-.vtable = &.{
-.read = &readerRead,
-.readVec = &readerReadVec,
-.discard = &readerDiscard,
-},
-};
-}
-fn readerRead(
-ctx: ?*anyopaque,
-bw: *std.io.BufferedWriter,
-limit: std.io.Limit,
-) std.io.Reader.RwError!usize {
-const fifo: *Self = @alignCast(@ptrCast(ctx));
-_ = fifo;
-_ = bw;
-_ = limit;
-@panic("TODO");
-}
-fn readerReadVec(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
-const fifo: *Self = @alignCast(@ptrCast(ctx));
-_ = fifo;
-_ = data;
-@panic("TODO");
-}
-fn readerDiscard(ctx: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize {
-const fifo: *Self = @alignCast(@ptrCast(ctx));
-_ = fifo;
-_ = limit;
-@panic("TODO");
-}
/// Returns number of items available in fifo
pub fn writableLength(self: Self) usize {
return self.buf.len - self.count;