std: upgrade more API to use File.Reader
This commit is contained in:
parent 2f5574ac08
commit ba684a18ca
@@ -915,13 +915,24 @@ pub const Reader = struct {
     pos: u64 = 0,
     size: ?u64 = null,
     size_err: ?GetEndPosError = null,
-    seek_err: ?SeekError = null,
+    seek_err: ?Reader.SeekError = null,
+
+    pub const SeekError = File.SeekError || error{
+        /// Seeking fell back to reading, and reached the end before the requested seek position.
+        /// `pos` remains at the end of the file.
+        EndOfStream,
+        /// Seeking fell back to reading, which failed.
+        ReadFailed,
+    };
+
     pub const Mode = enum {
         streaming,
         positional,
+        /// Avoid syscalls other than `read` and `readv`.
         streaming_reading,
+        /// Avoid syscalls other than `pread` and `preadv`.
        positional_reading,
         /// Indicates reading cannot continue because of a seek failure.
         failure,

         pub fn toStreaming(m: @This()) @This() {
@@ -931,6 +942,14 @@ pub const Reader = struct {
                 .failure => .failure,
             };
         }

+        pub fn toReading(m: @This()) @This() {
+            return switch (m) {
+                .positional, .positional_reading => .positional_reading,
+                .streaming, .streaming_reading => .streaming_reading,
+                .failure => .failure,
+            };
+        }
     };

     pub fn interface(r: *Reader) std.io.Reader {
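
The mode machine only ever degrades: `toStreaming` gives up positional access after an `error.Unseekable`, and the new `toReading` gives up vectored fast paths after an `error.Unimplemented`; nothing ever upgrades back. A self-contained sketch of the transition table (the enum mirrors the hunk above; the test is illustrative, not part of the commit):

```zig
const std = @import("std");

const Mode = enum {
    streaming,
    positional,
    streaming_reading,
    positional_reading,
    failure,

    fn toReading(m: Mode) Mode {
        return switch (m) {
            .positional, .positional_reading => .positional_reading,
            .streaming, .streaming_reading => .streaming_reading,
            .failure => .failure,
        };
    }
};

test "downgrades are idempotent and failure is absorbing" {
    try std.testing.expectEqual(Mode.positional_reading, Mode.positional.toReading());
    try std.testing.expectEqual(Mode.positional_reading, Mode.positional_reading.toReading());
    try std.testing.expectEqual(Mode.failure, Mode.failure.toReading());
}
```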
@@ -960,7 +979,7 @@ pub const Reader = struct {
         };
     }

-    pub fn seekBy(r: *Reader, offset: i64) SeekError!void {
+    pub fn seekBy(r: *Reader, offset: i64) Reader.SeekError!void {
         switch (r.mode) {
             .positional, .positional_reading => {
                 // TODO: make += operator allow any integer types
@@ -977,19 +996,21 @@ pub const Reader = struct {
                         break :e err;
                     }
                 };
-                if (offset < 0) return seek_err;
-                var remaining = offset;
+                var remaining = std.math.cast(u64, offset) orelse return seek_err;
                 while (remaining > 0) {
-                    const n = discard(r, .limited(remaining)) catch |err| switch (err) {};
+                    const n = discard(r, .limited(remaining)) catch |err| {
+                        r.seek_err = err;
+                        return err;
+                    };
                     r.pos += n;
                     remaining -= n;
                 }
             },
-            .failure => return error.Unseekable,
+            .failure => return r.seek_err.?,
         }
     }

-    pub fn seekTo(r: *Reader, offset: u64) SeekError!void {
+    pub fn seekTo(r: *Reader, offset: u64) Reader.SeekError!void {
         switch (r.mode) {
             .positional, .positional_reading => {
                 r.pos = offset;
@@ -1001,7 +1022,9 @@ pub const Reader = struct {
                     r.seek_err = err;
                     return err;
                 };
+                r.pos = offset;
             },
+            .failure => return r.seek_err.?,
         }
     }
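
Worth noting how the streaming fallback works: a backward seek on an unseekable stream is unrecoverable (the `std.math.cast` to `u64` fails and the stored `seek_err` is returned), while a forward seek is emulated by discarding bytes. A reduced sketch of that discard loop against any reader with a `read` method (illustrative, not the std implementation):

```zig
const std = @import("std");

/// Emulate `seekBy` on a non-seekable stream: backward seeks are refused,
/// forward seeks discard bytes until the target offset is reached.
fn seekByDiscarding(reader: anytype, offset: i64) !void {
    // A negative offset cannot be cast to u64; there is no way to go back.
    var remaining = std.math.cast(u64, offset) orelse return error.Unseekable;
    var trash: [256]u8 = undefined;
    while (remaining > 0) {
        const want: usize = @intCast(@min(remaining, trash.len));
        const n = try reader.read(trash[0..want]);
        if (n == 0) return error.EndOfStream; // fell off the end first
        remaining -= n;
    }
}
```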
@@ -1015,33 +1038,29 @@ pub const Reader = struct {
         limit: std.io.Limit,
     ) std.io.Reader.StreamError!usize {
         const r: *Reader = @ptrCast(@alignCast(context));
-        return bw.writeFile(r, limit, &.{}, 0) catch |write_file_error| switch (write_file_error) {
-            error.ReadFailed => return error.ReadFailed,
-            error.WriteFailed => return error.WriteFailed,
-            error.Unimplemented => switch (r.mode) {
-                .positional => {
-                    r.mode = .positional_reading;
-                    return 0;
-                },
-                .streaming => {
-                    r.mode = .streaming_reading;
-                    return 0;
-                },
-                .positional_reading => {
-                    const dest = limit.slice(try bw.writableSliceGreedy(1));
-                    const n = try readPositional(r, dest);
-                    bw.advance(n);
-                    return n;
-                },
-                .streaming_reading => {
-                    const dest = limit.slice(try bw.writableSliceGreedy(1));
-                    const n = try readStreaming(r, dest);
-                    bw.advance(n);
-                    return n;
-                },
-                .failure => return error.ReadFailed,
-            },
-        };
+        switch (r.mode) {
+            .positional, .streaming => return bw.writeFile(r, limit, &.{}, 0) catch |write_err| switch (write_err) {
+                error.ReadFailed => return error.ReadFailed,
+                error.WriteFailed => return error.WriteFailed,
+                error.Unimplemented => {
+                    r.mode = r.mode.toReading();
+                    return 0;
+                },
+            },
+            .positional_reading => {
+                const dest = limit.slice(try bw.writableSliceGreedy(1));
+                const n = try readPositional(r, dest);
+                bw.advance(n);
+                return n;
+            },
+            .streaming_reading => {
+                const dest = limit.slice(try bw.writableSliceGreedy(1));
+                const n = try readStreaming(r, dest);
+                bw.advance(n);
+                return n;
+            },
+            .failure => return error.ReadFailed,
+        }
     }

     fn discard(context: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize {
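
The retry protocol here deserves a note: a vtable `writeFile` that returns `error.Unimplemented` must not have consumed anything, so `stream` records the downgrade via `toReading` and returns 0; the caller treats 0 as "call again", and the second call lands in one of the `*_reading` branches. A toy model of that handshake (all names hypothetical):

```zig
const std = @import("std");

const Mode = enum { fast, reading };

/// Stand-in for a sink whose sendfile-style fast path may be unavailable.
fn writeFileFast(fast_ok: bool) error{Unimplemented}!usize {
    if (!fast_ok) return error.Unimplemented;
    return 42; // pretend the kernel moved 42 bytes
}

/// One `stream` step: on Unimplemented, downgrade and report 0 so the
/// caller loops; 0 here means "retry", not end of stream.
fn streamOnce(fast_ok: bool, mode: *Mode) usize {
    return switch (mode.*) {
        .fast => writeFileFast(fast_ok) catch blk: {
            mode.* = .reading;
            break :blk 0;
        },
        .reading => 7, // stand-in for a plain read
    };
}

test "unimplemented fast path degrades to reading" {
    var mode: Mode = .fast;
    try std.testing.expectEqual(@as(usize, 0), streamOnce(false, &mode));
    try std.testing.expectEqual(Mode.reading, mode);
    try std.testing.expectEqual(@as(usize, 7), streamOnce(false, &mode));
}
```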
@@ -1077,7 +1096,10 @@ pub const Reader = struct {
                 r.err = err;
                 return error.ReadFailed;
             };
-            if (n == 0) return error.EndOfStream;
+            if (n == 0) {
+                r.size = pos;
+                return error.EndOfStream;
+            }
             r.pos = pos + n;
             return n;
         }
@@ -1093,7 +1115,10 @@ pub const Reader = struct {
             r.err = err;
             return error.ReadFailed;
         };
-        if (n == 0) return error.EndOfStream;
+        if (n == 0) {
+            r.size = pos;
+            return error.EndOfStream;
+        }
         r.pos = pos + n;
         return n;
     }
@@ -1113,6 +1138,7 @@ pub const Reader = struct {
                 r.pos = pos + n;
                 return n;
             },
+            .failure => return error.ReadFailed,
         }
     }

@@ -1120,7 +1146,7 @@ pub const Reader = struct {
         const n = r.file.pread(dest, r.pos) catch |err| switch (err) {
             error.Unseekable => {
                 r.mode = r.mode.toStreaming();
-                if (r.pos != 0) r.seekBy(r.pos) catch {
+                if (r.pos != 0) r.seekBy(@intCast(r.pos)) catch {
                     r.mode = .failure;
                     return error.ReadFailed;
                 };
@@ -1131,7 +1157,10 @@ pub const Reader = struct {
                 return error.ReadFailed;
             },
         };
-        if (n == 0) return error.EndOfStream;
+        if (n == 0) {
+            r.size = r.pos;
+            return error.EndOfStream;
+        }
         r.pos += n;
         return n;
     }
@@ -1141,7 +1170,10 @@ pub const Reader = struct {
             r.err = err;
             return error.ReadFailed;
         };
-        if (n == 0) return error.EndOfStream;
+        if (n == 0) {
+            r.size = r.pos;
+            return error.EndOfStream;
+        }
         r.pos += n;
         return n;
     }
@@ -1167,6 +1199,10 @@ pub const Writer = struct {

     pub const SendfileError = error{
+        UnsupportedOperation,
+        SystemResources,
+        InputOutput,
+        BrokenPipe,
         WouldBlock,
         Unexpected,
     };

@@ -855,13 +855,7 @@ pub const BodyWriter = struct {
     http_protocol_output: *std.io.BufferedWriter,
     state: State,
     elide: bool,
-    err: Error!void = {},

-    pub const Error = error{
-        /// Attempted to write a file to the stream, an expensive operation
-        /// that should be avoided when `elide` is true.
-        UnableToElideBody,
-    };
     pub const WriteError = std.io.Writer.Error;

     /// How many zeroes to reserve for hex-encoded chunk length.
@@ -1043,69 +1037,83 @@ pub const BodyWriter = struct {
     }

     fn elideWriteFile(
-        w: *BodyWriter,
-        offset: std.io.Writer.Offset,
-        limit: std.io.Writer.Limit,
+        file_reader: *std.fs.File.Reader,
+        limit: std.io.Limit,
         headers_and_trailers: []const []const u8,
-    ) WriteError!usize {
-        if (offset != .none) {
-            if (countWriteFile(limit, headers_and_trailers)) |n| {
-                return n;
-            }
-        }
-        w.err = error.UnableToElideBody;
-        return error.WriteFailed;
+        headers_len: usize,
+    ) error{ReadFailed}!usize {
+        var source = file_reader.readable(&.{});
+        var n = source.discard(limit) catch |err| switch (err) {
+            error.ReadFailed => return error.ReadFailed,
+            error.EndOfStream => {
+                var n: usize = 0;
+                for (headers_and_trailers) |bytes| n += bytes.len;
+                return n;
+            },
+        };
+        if (file_reader.size) |size| {
+            if (size - file_reader.pos == 0) {
+                // End of file reached.
+                for (headers_and_trailers) |bytes| n += bytes.len;
+                return n;
+            }
+        }
+        for (headers_and_trailers[0..headers_len]) |bytes| n += bytes.len;
+        return n;
     }

     /// Returns `null` if size cannot be computed without making any syscalls.
-    fn countWriteFile(limit: std.io.Writer.Limit, headers_and_trailers: []const []const u8) ?usize {
-        var total: usize = limit.toInt() orelse return null;
-        for (headers_and_trailers) |buf| total += buf.len;
-        return total;
+    fn countWriteFile(
+        file_reader: *std.fs.File.Reader,
+        limit: std.io.Limit,
+        headers_and_trailers: []const []const u8,
+    ) ?usize {
+        var total: u64 = @min(@intFromEnum(limit), file_reader.getSize() orelse return null);
+        for (headers_and_trailers) |bytes| total += bytes.len;
+        return std.math.lossyCast(usize, total);
     }

     fn noneWriteFile(
         context: ?*anyopaque,
-        file: std.fs.File,
-        offset: std.io.Writer.Offset,
-        limit: std.io.Writer.Limit,
+        file_reader: *std.fs.File.Reader,
+        limit: std.io.Limit,
         headers_and_trailers: []const []const u8,
         headers_len: usize,
     ) std.io.Writer.FileError!usize {
         if (limit == .nothing) return noneWriteSplat(context, headers_and_trailers, 1);
         const w: *BodyWriter = @alignCast(@ptrCast(context));
-        if (w.elide) return elideWriteFile(w, offset, limit, headers_and_trailers);
-        return w.http_protocol_output.writeFile(file, offset, limit, headers_and_trailers, headers_len);
+        if (w.elide) return elideWriteFile(file_reader, limit, headers_and_trailers, headers_len);
+        return w.http_protocol_output.writeFile(file_reader, limit, headers_and_trailers, headers_len);
     }

     fn contentLengthWriteFile(
         context: ?*anyopaque,
-        file: std.fs.File,
-        offset: std.io.Writer.Offset,
-        limit: std.io.Writer.Limit,
+        file_reader: *std.fs.File.Reader,
+        limit: std.io.Limit,
         headers_and_trailers: []const []const u8,
         headers_len: usize,
     ) std.io.Writer.FileError!usize {
         if (limit == .nothing) return contentLengthWriteSplat(context, headers_and_trailers, 1);
         const w: *BodyWriter = @alignCast(@ptrCast(context));
-        if (w.elide) return elideWriteFile(w, offset, limit, headers_and_trailers);
-        const n = try w.http_protocol_output.writeFile(file, offset, limit, headers_and_trailers, headers_len);
+        if (w.elide) return elideWriteFile(file_reader, limit, headers_and_trailers, headers_len);
+        const n = try w.http_protocol_output.writeFile(file_reader, limit, headers_and_trailers, headers_len);
         w.state.content_length -= n;
         return n;
     }

     fn chunkedWriteFile(
         context: ?*anyopaque,
-        file: std.fs.File,
-        offset: std.io.Writer.Offset,
-        limit: std.io.Writer.Limit,
+        file_reader: *std.fs.File.Reader,
+        limit: std.io.Limit,
         headers_and_trailers: []const []const u8,
         headers_len: usize,
     ) std.io.Writer.FileError!usize {
-        if (limit == .nothing) return chunkedWriteSplat(context, headers_and_trailers, 1);
         const w: *BodyWriter = @alignCast(@ptrCast(context));
-        if (w.elide) return elideWriteFile(w, offset, limit, headers_and_trailers);
-        const data_len = countWriteFile(limit, headers_and_trailers) orelse @panic("TODO");
+        if (w.elide) return elideWriteFile(file_reader, limit, headers_and_trailers, headers_len);
+        if (limit == .nothing) return chunkedWriteSplat(context, headers_and_trailers, 1);
+        const data_len = countWriteFile(file_reader, limit, headers_and_trailers) orelse {
+            // If the file size is unknown, we cannot lower to a `writeFile` since we would
+            // have to flush the chunk header before knowing the chunk length.
+            return error.Unimplemented;
+        };
         const bw = w.http_protocol_output;
         const chunked = &w.state.chunked;
         state: switch (chunked.*) {
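
With elision, "written" is pure accounting: the file span is discarded from the reader and the header/trailer byte counts are added in, so callers see the same totals whether or not anything reached the socket. The size computation mirrors `countWriteFile`: it can only avoid syscalls when the file size is already known. A sketch under that assumption (plain integers standing in for `std.io.Limit`; names are illustrative):

```zig
const std = @import("std");

/// Bytes writeFile would report without a syscall: the limited file span
/// plus every header and trailer vector, or null if the size is unknown.
fn countTotal(
    cached_size: ?u64,
    pos: u64,
    limit: u64,
    headers_and_trailers: []const []const u8,
) ?usize {
    const size = cached_size orelse return null;
    var total: u64 = @min(limit, size - pos);
    for (headers_and_trailers) |bytes| total += bytes.len;
    return std.math.lossyCast(usize, total);
}

test "count includes vectors and caps the file span" {
    const vecs = [_][]const u8{ "HDR", "TRAILER" };
    try std.testing.expectEqual(@as(?usize, 110), countTotal(1000, 0, 100, &vecs));
    try std.testing.expectEqual(@as(?usize, null), countTotal(null, 0, 100, &vecs));
}
```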
@@ -1114,7 +1122,7 @@ pub const BodyWriter = struct {
                 const buffered_len = bw.end - off - chunk_header_template.len;
                 const chunk_len = data_len + buffered_len;
                 writeHex(bw.buffer[off..][0..chunk_len_digits], chunk_len);
-                const n = try bw.writeFile(file, offset, limit, headers_and_trailers, headers_len);
+                const n = try bw.writeFile(file_reader, limit, headers_and_trailers, headers_len);
                 chunked.* = .{ .chunk_len = data_len + 2 - n };
                 return n;
             },
@@ -1138,7 +1146,7 @@ pub const BodyWriter = struct {
             },
             else => {
                 const new_limit = limit.min(.limited(chunk_len - 2));
-                const n = try bw.writeFile(file, offset, new_limit, headers_and_trailers, headers_len);
+                const n = try bw.writeFile(file_reader, new_limit, headers_and_trailers, headers_len);
                 chunked.chunk_len = chunk_len - n;
                 return n;
             },
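
The reason chunked encoding insists on a known `data_len` up front: the chunk-size line is emitted before the bytes it describes. With 8 reserved digits, 300 buffered bytes plus a 4096-byte file span produce the header `0000112c\r\n` (0x112c = 4396). A sketch of the fixed-width hex fill (`writeHexFixed` is a hypothetical stand-in for the `writeHex` used above):

```zig
const std = @import("std");

/// Fill `buf` with `value` in zero-padded lowercase hex, as a chunk
/// header with a fixed number of reserved digits requires.
fn writeHexFixed(buf: []u8, value: usize) void {
    var v = value;
    var i = buf.len;
    while (i > 0) {
        i -= 1;
        buf[i] = "0123456789abcdef"[v & 0xf];
        v >>= 4;
    }
    std.debug.assert(v == 0); // value must fit the reserved digits
}

test "chunk length is known before the data is sent" {
    var digits: [8]u8 = undefined;
    writeHexFixed(&digits, 300 + 4096); // buffered bytes + file bytes
    try std.testing.expectEqualStrings("0000112c", &digits);
}
```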
@@ -161,7 +161,7 @@ fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) std.
 fn writeFile(
     context: ?*anyopaque,
     file_reader: *std.fs.File.Reader,
-    limit: std.io.Writer.Limit,
+    limit: std.io.Limit,
     headers_and_trailers_full: []const []const u8,
     headers_len_full: usize,
 ) std.io.Writer.FileError!usize {
@@ -185,8 +185,11 @@ fn writeFile(
     list.ensureTotalCapacity(gpa, new_capacity) catch return error.WriteFailed;
     for (headers_and_trailers[0..headers_len]) |bytes| list.appendSliceAssumeCapacity(bytes);
     const dest = limit.slice(list.items.ptr[list.items.len..list.capacity]);
-    const n = try file_reader.read(dest);
-    const is_end = if (file_reader.getSize()) |size| n >= size - pos else n == 0;
+    const n = file_reader.read(dest) catch |err| switch (err) {
+        error.ReadFailed => return error.ReadFailed,
+        error.EndOfStream => 0,
+    };
+    const is_end = if (file_reader.getSize()) |size| n >= size - pos else |_| n == 0;
     if (is_end) {
         new_capacity = list.capacity;
         for (trailers) |bytes| new_capacity += bytes.len;
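
The `else |_|` in the `is_end` line is easy to misread as noise, but it is required syntax: `getSize()` returns an error union, so branching on it with `if` needs an error capture (discarded here) in the else arm. Minimal illustration of the form (names hypothetical):

```zig
const std = @import("std");

fn getSize(known: bool) error{Unsized}!u64 {
    return if (known) 1024 else error.Unsized;
}

/// `if` over an error union: payload capture on success, error capture
/// (ignored via `|_|`) on failure.
fn isEnd(known: bool, n: usize, pos: u64) bool {
    return if (getSize(known)) |size| n >= size - pos else |_| n == 0;
}

test "end detection with and without a known size" {
    try std.testing.expect(isEnd(true, 24, 1000)); // 24 >= 1024 - 1000
    try std.testing.expect(isEnd(false, 0, 0)); // zero-length read = end
}
```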
@@ -6,6 +6,7 @@ const Writer = std.io.Writer;
 const Allocator = std.mem.Allocator;
 const testing = std.testing;
 const Limit = std.io.Limit;
+const File = std.fs.File;

 /// Underlying stream to send bytes to.
 ///
@@ -543,36 +544,31 @@ pub fn writeSliceSwap(bw: *BufferedWriter, Elem: type, slice: []const Elem) Writ
 /// `error.Unimplemented` in the error set.
 pub fn writeFile(
     bw: *BufferedWriter,
-    file: std.fs.File,
-    offset: Writer.Offset,
+    file_reader: *File.Reader,
     limit: Limit,
     headers_and_trailers: []const []const u8,
     headers_len: usize,
 ) Writer.FileError!usize {
-    return passthruWriteFile(bw, file, offset, limit, headers_and_trailers, headers_len);
+    return passthruWriteFile(bw, file_reader, limit, headers_and_trailers, headers_len);
 }

-pub const WriteFileReadingError = std.fs.File.PReadError || Writer.Error;
-
 /// Returning zero bytes means end of stream.
 ///
 /// Asserts nonzero buffer capacity.
 pub fn writeFileReading(
     bw: *BufferedWriter,
-    file: std.fs.File,
-    offset: Writer.Offset,
+    file_reader: *File.Reader,
     limit: Limit,
-) WriteFileReadingError!usize {
+) Writer.ReadingFileError!usize {
     const dest = limit.slice(try bw.writableSliceGreedy(1));
-    const n = if (offset.toInt()) |pos| try file.pread(dest, pos) else try file.read(dest);
+    const n = try file_reader.read(dest);
     bw.advance(n);
     return n;
 }

 fn passthruWriteFile(
     context: ?*anyopaque,
-    file: std.fs.File,
-    offset: Writer.Offset,
+    file_reader: *File.Reader,
     limit: Limit,
     headers_and_trailers: []const []const u8,
     headers_len: usize,
@@ -581,7 +577,7 @@ fn passthruWriteFile(
     const buffer = bw.buffer;
     if (buffer.len == 0) return track(
         &bw.count,
-        try bw.unbuffered_writer.writeFile(file, offset, limit, headers_and_trailers, headers_len),
+        try bw.unbuffered_writer.writeFile(file_reader, limit, headers_and_trailers, headers_len),
     );
     const start_end = bw.end;
     const headers = headers_and_trailers[0..headers_len];
@@ -608,7 +604,7 @@ fn passthruWriteFile(
     @memcpy(remaining_buffers_for_trailers[0..send_trailers_len], trailers[0..send_trailers_len]);
     const send_headers_len = 1 + buffers_len;
     const send_buffers = buffers[0 .. send_headers_len + send_trailers_len];
-    const n = try bw.unbuffered_writer.writeFile(file, offset, limit, send_buffers, send_headers_len);
+    const n = try bw.unbuffered_writer.writeFile(file_reader, limit, send_buffers, send_headers_len);
     if (n < end) {
         @branchHint(.unlikely);
         const remainder = buffer[n..end];
@@ -638,7 +634,7 @@ fn passthruWriteFile(
     @memcpy(remaining_buffers[0..send_trailers_len], trailers[0..send_trailers_len]);
     const send_headers_len = @intFromBool(end != 0);
     const send_buffers = buffers[1 - send_headers_len .. 1 + send_trailers_len];
-    const n = try bw.unbuffered_writer.writeFile(file, offset, limit, send_buffers, send_headers_len);
+    const n = try bw.unbuffered_writer.writeFile(file_reader, limit, send_buffers, send_headers_len);
     if (n < end) {
         @branchHint(.unlikely);
         const remainder = buffer[n..end];
@@ -651,9 +647,6 @@ fn passthruWriteFile(
 }

 pub const WriteFileOptions = struct {
-    offset: Writer.Offset = .none,
-    /// If the size of the source file is known, it is likely that passing the
-    /// size here will save one syscall.
     limit: Limit = .unlimited,
     /// Headers and trailers must be passed together so that in case `len` is
     /// zero, they can be forwarded directly to `Writer.VTable.writeSplat`.
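
Since `offset` is gone from the options, the read position now travels with the `File.Reader` itself: callers position the reader, then pass only a limit plus the header/trailer vectors. A hypothetical call shape (field names from the struct above; `fr`, `bw`, and the seek call are assumptions, not verbatim std API):

```zig
// Position lives on the reader now, not in the options:
// try fr.seekTo(body_start);
// try bw.writeFileAll(fr, .{
//     .limit = .limited(body_len), // caps file bytes; vectors don't count
//     .headers_and_trailers = &.{ status_line, trailer },
//     .headers_len = 1, // the first vector is a header, the rest trailers
// });
```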
@@ -666,77 +659,51 @@ pub const WriteFileOptions = struct {
     headers_len: usize = 0,
 };

-pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOptions) WriteFileReadingError!void {
+pub fn writeFileAll(
+    bw: *BufferedWriter,
+    file_reader: *std.fs.File.Reader,
+    options: WriteFileOptions,
+) Writer.FileError!void {
     const headers_and_trailers = options.headers_and_trailers;
     const headers = headers_and_trailers[0..options.headers_len];
-    switch (options.limit) {
-        .nothing => return bw.writeVecAll(headers_and_trailers),
-        .unlimited => {
-            // When reading the whole file, we cannot include the trailers in the
-            // call that reads from the file handle, because we have no way to
-            // determine whether a partial write is past the end of the file or
-            // not.
-            var i: usize = 0;
-            var offset = options.offset;
-            while (true) {
-                var n = bw.writeFile(file, offset, .unlimited, headers[i..], headers.len - i) catch |err| switch (err) {
-                    error.Unimplemented => {
-                        try bw.writeVecAll(headers[i..]);
-                        try bw.writeFileReadingAll(file, offset, .unlimited);
-                        try bw.writeVecAll(headers_and_trailers[headers.len..]);
-                        return;
-                    },
-                    else => |e| return e,
-                };
-                while (i < headers.len and n >= headers[i].len) {
-                    n -= headers[i].len;
-                    i += 1;
-                }
-                if (i < headers.len) {
-                    headers[i] = headers[i][n..];
-                    continue;
-                }
-                if (n == 0) break;
-                offset = offset.advance(n);
-            }
-        },
-        else => {
-            var len = options.limit.toInt().?;
-            var i: usize = 0;
-            var offset = options.offset;
-            while (true) {
-                var n = bw.writeFile(file, offset, .limited(len), headers_and_trailers[i..], headers.len - i) catch |err| switch (err) {
-                    error.Unimplemented => {
-                        try bw.writeVecAll(headers[i..]);
-                        try bw.writeFileReadingAll(file, offset, .limited(len));
-                        try bw.writeVecAll(headers_and_trailers[headers.len..]);
-                        return;
-                    },
-                    else => |e| return e,
-                };
-                while (i < headers.len and n >= headers[i].len) {
-                    n -= headers[i].len;
-                    i += 1;
-                }
-                if (i < headers.len) {
-                    headers[i] = headers[i][n..];
-                    continue;
-                }
-                if (n >= len) {
-                    n -= len;
-                    if (i >= headers_and_trailers.len) return;
-                    while (n >= headers_and_trailers[i].len) {
-                        n -= headers_and_trailers[i].len;
-                        i += 1;
-                        if (i >= headers_and_trailers.len) return;
-                    }
-                    headers_and_trailers[i] = headers_and_trailers[i][n..];
-                    return bw.writeVecAll(headers_and_trailers[i..]);
-                }
-                offset = offset.advance(n);
-                len -= n;
-            }
-        },
-    }
+    var remaining = options.limit;
+    var i: usize = 0;
+    while (true) {
+        const before_pos = file_reader.pos;
+        var n = bw.writeFile(file_reader, remaining, headers_and_trailers[i..], headers.len - i) catch |err| switch (err) {
+            error.ReadFailed => return error.ReadFailed,
+            error.WriteFailed => return error.WriteFailed,
+            error.Unimplemented => {
+                file_reader.mode = file_reader.mode.toReading();
+                try bw.writeVecAll(headers[i..]);
+                try bw.writeFileReadingAll(file_reader, remaining);
+                try bw.writeVecAll(headers_and_trailers[headers.len..]);
+                return;
+            },
+        };
+        while (i < headers.len and n >= headers[i].len) {
+            n -= headers[i].len;
+            i += 1;
+        }
+        if (i < headers.len) {
+            headers[i] = headers[i][n..];
+            continue;
+        }
+        const file_bytes_consumed = file_reader.pos - before_pos;
+        remaining = remaining.subtract(file_bytes_consumed).?;
+        const size = file_reader.size orelse continue; // End of file not yet reached.
+        if (file_reader.pos < size) continue; // End of file not yet reached.
+        n -= file_bytes_consumed; // Trailers reached.
+        while (i < headers_and_trailers.len and n >= headers_and_trailers[i].len) {
+            n -= headers_and_trailers[i].len;
+            i += 1;
+        }
+        if (i < headers_and_trailers.len) {
+            headers_and_trailers[i] = headers_and_trailers[i][n..];
+            try bw.writeVecAll(headers_and_trailers[i..]);
+            return;
+        }
+        return;
+    }
 }
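
All the index juggling in `writeFileAll` is one problem: a single returned `n` must be attributed across header vectors, then file bytes, then trailer vectors, resuming mid-vector on short writes. The header-consumption step, extracted as a standalone helper (illustrative, not the std code):

```zig
const std = @import("std");

/// Consume `n` bytes from the front of `vecs`: whole vectors advance the
/// index, a partial vector is shortened in place, mirroring the
/// `while (i < headers.len and n >= headers[i].len)` loop above.
fn consumeVecs(vecs: [][]const u8, n: usize) usize {
    var left = n;
    var i: usize = 0;
    while (i < vecs.len and left >= vecs[i].len) {
        left -= vecs[i].len;
        i += 1;
    }
    if (i < vecs.len) vecs[i] = vecs[i][left..];
    return i; // index of the first vector not fully written
}

test "short write lands mid-vector" {
    var vecs = [_][]const u8{ "GET ", "/index.html" };
    const i = consumeVecs(&vecs, 6); // "GET " + "/i"
    try std.testing.expectEqual(@as(usize, 1), i);
    try std.testing.expectEqualStrings("ndex.html", vecs[1]);
}
```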
@@ -748,28 +715,13 @@ pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOp
 /// Asserts nonzero buffer capacity.
 pub fn writeFileReadingAll(
     bw: *BufferedWriter,
-    file: std.fs.File,
-    offset: Writer.Offset,
+    file_reader: *File.Reader,
     limit: Limit,
-) WriteFileReadingError!void {
-    if (offset.toInt()) |start_pos| {
-        var remaining = limit;
-        var pos = start_pos;
-        while (remaining.nonzero()) {
-            const dest = remaining.slice(try bw.writableSliceGreedy(1));
-            const n = try file.pread(dest, pos);
-            if (n == 0) return;
-            bw.advance(n);
-            pos += n;
-            remaining = remaining.subtract(n).?;
-        }
-    }
+) Writer.ReadingFileError!void {
     var remaining = limit;
     while (remaining.nonzero()) {
-        const dest = remaining.slice(try bw.writableSliceGreedy(1));
-        const n = try file.read(dest);
+        const n = try writeFileReading(bw, file_reader, remaining);
         if (n == 0) return;
-        bw.advance(n);
         remaining = remaining.subtract(n).?;
     }
 }
@@ -46,7 +46,7 @@ pub const VTable = struct {
     /// provided which is based on calling `read`, borrowing
     /// `BufferedReader.buffer` to construct a temporary `BufferedWriter` and
     /// ignoring the written data.
-    discard: *const fn (context: ?*anyopaque, limit: Limit) Error!usize = null,
+    discard: ?*const fn (context: ?*anyopaque, limit: Limit) Error!usize = null,
 };

 pub const StreamError = error{
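
This one-character change also fixes a latent type error: a non-optional function pointer cannot default to `null`, so `= null` only compiles once the field is `?*const fn`. Dispatch then becomes an `orelse` to the documented read-based fallback. A self-contained model (simplified signature, names hypothetical):

```zig
const std = @import("std");

const VTable = struct {
    // `?*const fn` permits the `= null` default; `*const fn` would not.
    discard: ?*const fn (limit: usize) usize = null,
};

fn fallbackDiscard(limit: usize) usize {
    return limit; // stand-in: read into a scratch buffer, drop the bytes
}

fn dispatchDiscard(vt: VTable, limit: usize) usize {
    const f = vt.discard orelse &fallbackDiscard;
    return f(limit);
}

test "null vtable entry falls back to the default" {
    try std.testing.expectEqual(@as(usize, 16), dispatchDiscard(.{}, 16));
}
```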
@@ -21,8 +21,8 @@ pub const VTable = struct {
     /// of stream via `error.WriteFailed`.
     writeSplat: *const fn (ctx: ?*anyopaque, data: []const []const u8, splat: usize) Error!usize,

-    /// Writes contents from an open file. `headers` are written first, then `len`
-    /// bytes of `file` starting from `offset`, then `trailers`.
+    /// Writes contents from an open file. `headers` are written first, then
+    /// `limit` bytes of `file` starting from `offset`, then `trailers`.
     ///
     /// Number of bytes actually written is returned, which may lie within
     /// headers, the file, trailers, or anywhere in between.
@@ -31,9 +31,8 @@ pub const VTable = struct {
     /// end-of-stream. A subsequent call may return nonzero, or may signal end
     /// of stream via `error.WriteFailed`.
     ///
-    /// If `error.Unimplemented` is returned, the caller should do its own
-    /// reads from the file. The callee indicates it cannot offer a more
-    /// efficient implementation.
+    /// `error.Unimplemented` indicates the callee cannot offer a more
+    /// efficient implementation than the caller performing its own reads.
     writeFile: *const fn (
         ctx: ?*anyopaque,
         file_reader: *File.Reader,
@@ -43,7 +42,7 @@ pub const VTable = struct {
         /// `headers_and_trailers` do not count towards this limit.
         limit: Limit,
         /// Headers and trailers must be passed together so that in case `len` is
-        /// zero, they can be forwarded directly to `VTable.writeVec`.
+        /// zero, they can be forwarded directly as one contiguous slice of memory.
         headers_and_trailers: []const []const u8,
         headers_len: usize,
     ) FileError!usize,
@@ -54,6 +53,13 @@ pub const Error = error{
     WriteFailed,
 };

+pub const ReadingFileError = error{
+    /// Detailed diagnostics are found on the `File.Reader` struct.
+    ReadFailed,
+    /// See the `Writer` implementation for detailed diagnostics.
+    WriteFailed,
+};
+
 pub const FileError = error{
     /// Detailed diagnostics are found on the `File.Reader` struct.
     ReadFailed,
@@ -247,40 +247,6 @@ pub fn LinearFifo(comptime T: type) type {
             return bytes.len;
         }

-        pub fn writer(fifo: *Self) std.io.Writer {
-            return .{
-                .context = fifo,
-                .vtable = &.{
-                    .writeSplat = writerWriteSplat,
-                    .writeFile = writerWriteFile,
-                },
-            };
-        }
-        fn writerWriteSplat(ctx: ?*anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
-            const fifo: *Self = @alignCast(@ptrCast(ctx));
-            _ = fifo;
-            _ = data;
-            _ = splat;
-            @panic("TODO");
-        }
-        fn writerWriteFile(
-            ctx: ?*anyopaque,
-            file: std.fs.File,
-            offset: std.io.Writer.Offset,
-            limit: std.io.Writer.Limit,
-            headers_and_trailers: []const []const u8,
-            headers_len: usize,
-        ) std.io.Writer.Error!usize {
-            const fifo: *Self = @alignCast(@ptrCast(ctx));
-            _ = fifo;
-            _ = file;
-            _ = offset;
-            _ = limit;
-            _ = headers_and_trailers;
-            _ = headers_len;
-            @panic("TODO");
-        }

         /// Make `count` items available before the current read location
         fn rewind(self: *Self, count: usize) void {
             assert(self.writableLength() >= count);