mirror of https://github.com/ziglang/zig.git

std.fs.File: update for new writeFile API

parent 7b417c6caf
commit 3650cd3e8e
@@ -663,7 +663,7 @@ pub const Manifest = struct {
         const gpa = self.cache.gpa;
         const input_file_count = self.files.entries.len;
         var manifest_reader = self.manifest_file.?.reader(); // Reads positionally from zero.
-        const limit: std.io.Reader.Limit = .limited(manifest_file_size_max);
+        const limit: std.io.Limit = .limited(manifest_file_size_max);
         const file_contents = manifest_reader.interface().readRemainingAlloc(gpa, limit) catch |err| switch (err) {
             error.OutOfMemory => return error.OutOfMemory,
             error.StreamTooLong => return error.OutOfMemory,
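Note: the only functional change in this hunk, and in most hunks below, is the rename of `std.io.Reader.Limit` to the top-level `std.io.Limit`. As a rough mental model of the type, here is a minimal sketch reconstructed from the call sites visible in this diff (`.limited`, `.unlimited`, `min`, `minInt`, `slice`, `subtract`); the real std implementation may differ in representation and member set:

    // Sketch of a Limit-like type, reconstructed from usage in this diff.
    const std = @import("std");

    pub const Limit = enum(usize) {
        unlimited = std.math.maxInt(usize),
        _,

        pub fn limited(n: usize) Limit {
            return @enumFromInt(n);
        }

        /// The smaller of two limits.
        pub fn min(a: Limit, b: Limit) Limit {
            return @enumFromInt(@min(@intFromEnum(a), @intFromEnum(b)));
        }

        /// Clamp an integer length to the limit.
        pub fn minInt(l: Limit, n: usize) usize {
            return @min(@intFromEnum(l), n);
        }

        /// Truncate a buffer so it never exceeds the limit.
        pub fn slice(l: Limit, s: []u8) []u8 {
            return s[0..l.minInt(s.len)];
        }

        /// Consume n bytes of budget; null means the budget is exhausted.
        pub fn subtract(l: Limit, n: usize) ?Limit {
            if (l == .unlimited) return .unlimited;
            if (@intFromEnum(l) < n) return null;
            return @enumFromInt(@intFromEnum(l) - n);
        }
    };

    test Limit {
        const l: Limit = .limited(5);
        try std.testing.expectEqual(@as(usize, 3), l.minInt(3));
        try std.testing.expectEqual(@as(usize, 5), l.minInt(99));
        try std.testing.expect(l.subtract(6) == null);
    }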
@@ -768,7 +768,7 @@ fn byFreq(context: void, a: LiteralNode, b: LiteralNode) bool {
 fn read(
     context: ?*anyopaque,
     bw: *std.io.BufferedWriter,
-    limit: std.io.Reader.Limit,
+    limit: std.io.Limit,
 ) std.io.Reader.RwError!usize {
     const c: *Compress = @ptrCast(@alignCast(context));
     switch (c.state) {
@@ -142,7 +142,7 @@ fn decodeSymbol(self: *Decompress, decoder: anytype) !Symbol {
 pub fn read(
     context: ?*anyopaque,
     bw: *std.io.BufferedWriter,
-    limit: std.io.Reader.Limit,
+    limit: std.io.Limit,
 ) std.io.Reader.RwError!usize {
     const d: *Decompress = @alignCast(@ptrCast(context));
     return readInner(d, bw, limit) catch |err| switch (err) {
@@ -160,7 +160,7 @@ pub fn read(
 fn readInner(
     d: *Decompress,
     bw: *std.io.BufferedWriter,
-    limit: std.io.Reader.Limit,
+    limit: std.io.Limit,
 ) (Error || error{ WriteFailed, EndOfStream })!usize {
     const in = d.input;
     sw: switch (d.state) {
@@ -351,7 +351,7 @@ fn readVec(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
     @panic("TODO remove readVec primitive");
 }
 
-fn discard(context: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
+fn discard(context: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize {
     _ = context;
     _ = limit;
     // Problem here is we still need access to the output ring buffer.
@@ -2,6 +2,7 @@ const Decompress = @This();
 const std = @import("std");
 const assert = std.debug.assert;
 const Reader = std.io.Reader;
+const Limit = std.io.Limit;
 const BufferedWriter = std.io.BufferedWriter;
 const BufferedReader = std.io.BufferedReader;
 const zstd = @import("../zstd.zig");
@@ -77,7 +78,7 @@ pub fn reader(self: *Decompress) Reader {
     };
 }
 
-fn read(context: ?*anyopaque, bw: *BufferedWriter, limit: Reader.Limit) Reader.RwError!usize {
+fn read(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) Reader.RwError!usize {
     const d: *Decompress = @ptrCast(@alignCast(context));
     const in = d.input;
 
@@ -139,7 +140,7 @@ fn initFrame(d: *Decompress, window_size_max: usize, magic: Frame.Magic) !void {
     }
 }
 
-fn readInFrame(d: *Decompress, bw: *BufferedWriter, limit: Reader.Limit, state: *State.InFrame) !usize {
+fn readInFrame(d: *Decompress, bw: *BufferedWriter, limit: Limit, state: *State.InFrame) !usize {
     const in = d.input;
 
     const header_bytes = try in.takeArray(3);
@@ -166,7 +167,7 @@ fn readInFrame(d: *Decompress, bw: *BufferedWriter, limit: Reader.Limit, state:
     var literals_buffer: [zstd.block_size_max]u8 = undefined;
     var sequence_buffer: [zstd.block_size_max]u8 = undefined;
     var decode: Frame.Zstandard.Decode = .init(&literal_fse_buffer, &match_fse_buffer, &offset_fse_buffer);
-    var remaining: Reader.Limit = .limited(block_size);
+    var remaining: Limit = .limited(block_size);
     const literals = try LiteralsSection.decode(in, &remaining, &literals_buffer);
     const sequences_header = try SequencesSection.Header.decode(in, &remaining);
 
@@ -446,7 +447,7 @@ pub const Frame = struct {
         pub fn prepare(
             self: *Decode,
             in: *BufferedReader,
-            remaining: *Reader.Limit,
+            remaining: *Limit,
             literals: LiteralsSection,
             sequences_header: SequencesSection.Header,
         ) PrepareError!void {
@@ -536,7 +537,7 @@ pub const Frame = struct {
         fn updateFseTable(
             self: *Decode,
             in: *BufferedReader,
-            remaining: *Reader.Limit,
+            remaining: *Limit,
             comptime choice: DataType,
             mode: SequencesSection.Header.Mode,
         ) !void {
@@ -857,7 +858,7 @@ pub const LiteralsSection = struct {
         compressed_size: ?u18,
 
         /// Decode a literals section header.
-        pub fn decode(in: *BufferedReader, remaining: *Reader.Limit) !Header {
+        pub fn decode(in: *BufferedReader, remaining: *Limit) !Header {
             remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
             const byte0 = try in.takeByte();
             const block_type: BlockType = @enumFromInt(byte0 & 0b11);
@@ -964,7 +965,7 @@ pub const LiteralsSection = struct {
             MissingStartBit,
         };
 
-        pub fn decode(in: *BufferedReader, remaining: *Reader.Limit) HuffmanTree.DecodeError!HuffmanTree {
+        pub fn decode(in: *BufferedReader, remaining: *Limit) HuffmanTree.DecodeError!HuffmanTree {
             remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
             const header = try in.takeByte();
             if (header < 128) {
@@ -976,7 +977,7 @@ pub const LiteralsSection = struct {
 
         fn decodeDirect(
             in: *BufferedReader,
-            remaining: *Reader.Limit,
+            remaining: *Limit,
             encoded_symbol_count: usize,
         ) HuffmanTree.DecodeError!HuffmanTree {
             var weights: [256]u4 = undefined;
@@ -993,7 +994,7 @@ pub const LiteralsSection = struct {
 
         fn decodeFse(
             in: *BufferedReader,
-            remaining: *Reader.Limit,
+            remaining: *Limit,
             compressed_size: usize,
         ) HuffmanTree.DecodeError!HuffmanTree {
             var weights: [256]u4 = undefined;
@@ -1161,7 +1162,7 @@ pub const LiteralsSection = struct {
         MissingStartBit,
     };
 
-    pub fn decode(in: *BufferedReader, remaining: *Reader.Limit, buffer: []u8) DecodeError!LiteralsSection {
+    pub fn decode(in: *BufferedReader, remaining: *Limit, buffer: []u8) DecodeError!LiteralsSection {
        const header = try Header.decode(in, remaining);
        switch (header.block_type) {
            .raw => {
@@ -1232,7 +1233,7 @@ pub const SequencesSection = struct {
         ReadFailed,
     };
 
-    pub fn decode(in: *BufferedReader, remaining: *Reader.Limit) DecodeError!Header {
+    pub fn decode(in: *BufferedReader, remaining: *Limit) DecodeError!Header {
         var sequence_count: u24 = undefined;
 
         remaining.* = remaining.subtract(1) orelse return error.EndOfStream;
@@ -1043,7 +1043,7 @@ pub fn eof(c: Client) bool {
     return c.received_close_notify;
 }
 
-fn read(context: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Reader.Limit) Reader.RwError!usize {
+fn read(context: ?*anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Limit) Reader.RwError!usize {
     const c: *Client = @ptrCast(@alignCast(context));
     if (c.eof()) return error.EndOfStream;
     const input = c.input;
@@ -247,7 +247,7 @@ pub fn LinearFifo(
         fn readerRead(
             ctx: ?*anyopaque,
             bw: *std.io.BufferedWriter,
-            limit: std.io.Reader.Limit,
+            limit: std.io.Limit,
         ) std.io.Reader.RwError!usize {
             const fifo: *Self = @alignCast(@ptrCast(ctx));
             _ = fifo;
@@ -261,7 +261,7 @@ pub fn LinearFifo(
             _ = data;
             @panic("TODO");
         }
-        fn readerDiscard(ctx: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
+        fn readerDiscard(ctx: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize {
            const fifo: *Self = @alignCast(@ptrCast(ctx));
            _ = fifo;
            _ = limit;
@@ -1963,7 +1963,7 @@ pub fn readFileAlloc(
     /// * The array list's length is increased by exactly one byte past `limit`.
     /// * The file seek position is advanced by exactly one byte past `limit`.
     /// * `error.StreamTooLong` is returned.
-    limit: std.io.Reader.Limit,
+    limit: std.io.Limit,
 ) ReadFileAllocError![]u8 {
     return dir.readFileAllocOptions(file_path, gpa, limit, null, .of(u8), null);
 }
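A typical call site under the new spelling (a hypothetical usage sketch; "example.txt" and the size are invented, and `dir` and `gpa` are assumed to be in scope):

    // Read at most 16 MiB; past that, error.StreamTooLong per the doc comment.
    const contents = try dir.readFileAlloc("example.txt", gpa, .limited(16 * 1024 * 1024));
    defer gpa.free(contents);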
@@ -1982,7 +1982,7 @@ pub fn readFileAllocOptions(
     /// * The array list's length is increased by exactly one byte past `limit`.
     /// * The file seek position is advanced by exactly one byte past `limit`.
     /// * `error.StreamTooLong` is returned.
-    limit: std.io.Reader.Limit,
+    limit: std.io.Limit,
     /// If specified, the initial buffer size is calculated using this value,
     /// otherwise the effective file size is used instead.
     size_hint: ?usize,
@@ -2020,7 +2020,7 @@ pub fn readFileIntoArrayList(
     /// On other platforms, an opaque sequence of bytes with no particular encoding.
     file_path: []const u8,
     gpa: Allocator,
-    limit: std.io.Reader.Limit,
+    limit: std.io.Limit,
     /// If specified, the initial buffer size is calculated using this value,
     /// otherwise the effective file size is used instead.
     size_hint: ?usize,
@@ -922,12 +922,13 @@ pub const Reader = struct {
         positional,
         streaming_reading,
         positional_reading,
+        failure,
 
         pub fn toStreaming(m: @This()) @This() {
             return switch (m) {
-                .positional => .streaming,
-                .positional_reading => .streaming_reading,
-                else => unreachable,
+                .positional, .streaming => .streaming,
+                .positional_reading, .streaming_reading => .streaming_reading,
+                .failure => .failure,
             };
         }
     };
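The `Mode` enum gains a sticky `failure` state here, and `toStreaming` becomes total instead of hitting `unreachable`. A self-contained restatement of that transition table as a test (illustrative only; the enum ordering is guessed, not copied from std):

    const std = @import("std");

    const Mode = enum {
        positional,
        streaming,
        positional_reading,
        streaming_reading,
        failure,

        fn toStreaming(m: Mode) Mode {
            return switch (m) {
                .positional, .streaming => .streaming,
                .positional_reading, .streaming_reading => .streaming_reading,
                .failure => .failure, // failure is sticky; no transition escapes it
            };
        }
    };

    test "toStreaming is total and idempotent" {
        try std.testing.expectEqual(Mode.streaming, Mode.positional.toStreaming());
        try std.testing.expectEqual(Mode.streaming, Mode.streaming.toStreaming());
        try std.testing.expectEqual(Mode.streaming_reading, Mode.positional_reading.toStreaming());
        try std.testing.expectEqual(Mode.failure, Mode.failure.toStreaming());
    }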
@@ -936,8 +937,7 @@ pub const Reader = struct {
         return .{
             .context = r,
             .vtable = &.{
-                .read = Reader.read,
-                .readVec = Reader.readVec,
+                .read = Reader.stream,
                 .discard = Reader.discard,
             },
         };
@@ -963,12 +963,14 @@ pub const Reader = struct {
     pub fn seekBy(r: *Reader, offset: i64) SeekError!void {
         switch (r.mode) {
             .positional, .positional_reading => {
-                r.pos += offset;
+                // TODO: make += operator allow any integer types
+                r.pos = @intCast(@as(i64, @intCast(r.pos)) + offset);
             },
             .streaming, .streaming_reading => {
                 const seek_err = r.seek_err orelse e: {
                     if (posix.lseek_CUR(r.file.handle, offset)) |_| {
-                        r.pos += offset;
+                        // TODO: make += operator allow any integer types
+                        r.pos = @intCast(@as(i64, @intCast(r.pos)) + offset);
                         return;
                     } else |err| {
                         r.seek_err = err;
@@ -983,6 +985,7 @@ pub const Reader = struct {
                     remaining -= n;
                 }
             },
+            .failure => return error.Unseekable,
         }
     }
 
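The replaced `r.pos += offset;` cannot work once `pos` is `u64` and `offset` is `i64`; Zig has no mixed-sign `+=`, hence the round-trip through `i64`. The cast dance in isolation (`@intCast` asserts the result fits, i.e. seeking before byte 0 is illegal):

    const std = @import("std");

    fn advance(pos: u64, offset: i64) u64 {
        return @intCast(@as(i64, @intCast(pos)) + offset);
    }

    test advance {
        try std.testing.expectEqual(@as(u64, 7), advance(10, -3));
        try std.testing.expectEqual(@as(u64, 13), advance(10, 3));
    }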
@@ -1006,188 +1009,42 @@ pub const Reader = struct {
     /// vectors through the underlying read calls as possible.
     const max_buffers_len = 16;
 
-    fn read(
+    fn stream(
         context: ?*anyopaque,
         bw: *BufferedWriter,
-        limit: std.io.Reader.Limit,
-    ) std.io.Reader.RwError!usize {
+        limit: std.io.Limit,
+    ) std.io.Reader.StreamError!usize {
         const r: *Reader = @ptrCast(@alignCast(context));
-        const file = r.file;
-        const pos = r.pos;
-        switch (r.mode) {
-            .positional => {
-                const size = r.size orelse {
-                    if (file.getEndPos()) |size| {
-                        r.size = size;
-                    } else |err| {
-                        r.size_err = err;
-                        r.mode = .streaming;
-                    }
-                    return 0;
-                };
-                const new_limit = limit.min(.limited(size - pos));
-                const n = bw.writeFile(file, .init(pos), new_limit, &.{}, 0) catch |err| switch (err) {
-                    error.WriteFailed => return error.WriteFailed,
-                    error.Unseekable => {
-                        r.mode = .streaming;
-                        if (pos != 0) @panic("TODO need to seek here");
-                        return 0;
-                    },
-                    error.Unimplemented => {
-                        r.mode = .positional_reading;
-                        return 0;
-                    },
-                    else => |e| {
-                        r.err = e;
-                        return error.ReadFailed;
-                    },
-                };
-                r.pos = pos + n;
-                return n;
-            },
-            .streaming => {
-                const n = bw.writeFile(file, .none, limit, &.{}, 0) catch |err| switch (err) {
-                    error.WriteFailed => return error.WriteFailed,
-                    error.Unseekable => unreachable, // Passing `Offset.none`.
-                    error.Unimplemented => {
-                        r.mode = .streaming_reading;
-                        return 0;
-                    },
-                    else => |e| {
-                        r.err = e;
-                        return error.ReadFailed;
-                    },
-                };
-                r.pos = pos + n;
-                return n;
-            },
-            .positional_reading => {
-                const dest = limit.slice(try bw.writableSliceGreedy(1));
-                const n = file.pread(dest, pos) catch |err| switch (err) {
-                    error.Unseekable => {
-                        r.mode = .streaming_reading;
-                        if (pos != 0) @panic("TODO need to seek here");
-                        return 0;
-                    },
-                    else => |e| {
-                        r.err = e;
-                        return error.ReadFailed;
-                    },
-                };
-                if (n == 0) return error.EndOfStream;
-                r.pos = pos + n;
-                bw.advance(n);
-                return n;
-            },
-            .streaming_reading => {
-                const dest = limit.slice(try bw.writableSliceGreedy(1));
-                const n = file.read(dest) catch |err| {
-                    r.err = err;
-                    return error.ReadFailed;
-                };
-                if (n == 0) return error.EndOfStream;
-                r.pos = pos + n;
-                bw.advance(n);
-                return n;
-            },
-        }
+        return bw.writeFile(r, limit, &.{}, 0) catch |write_file_error| switch (write_file_error) {
+            error.ReadFailed => return error.ReadFailed,
+            error.WriteFailed => return error.WriteFailed,
+            error.Unimplemented => switch (r.mode) {
+                .positional => {
+                    r.mode = .positional_reading;
+                    return 0;
+                },
+                .streaming => {
+                    r.mode = .streaming_reading;
+                    return 0;
+                },
+                .positional_reading => {
+                    const dest = limit.slice(try bw.writableSliceGreedy(1));
+                    const n = try readPositional(r, dest);
+                    bw.advance(n);
+                    return n;
+                },
+                .streaming_reading => {
+                    const dest = limit.slice(try bw.writableSliceGreedy(1));
+                    const n = try readStreaming(r, dest);
+                    bw.advance(n);
+                    return n;
+                },
+                .failure => return error.ReadFailed,
+            },
+        };
     }
 
-    fn readVec(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
-        const r: *Reader = @ptrCast(@alignCast(context));
-        const handle = r.file.handle;
-        const pos = r.pos;
-
-        switch (r.mode) {
-            .positional, .positional_reading => {
-                if (is_windows) {
-                    // Unfortunately, `ReadFileScatter` cannot be used since it requires
-                    // page alignment, so we are stuck using only the first slice.
-                    // Avoid empty slices to prevent false positive end detections.
-                    var i: usize = 0;
-                    while (true) : (i += 1) {
-                        if (i >= data.len) return .{};
-                        if (data[i].len > 0) break;
-                    }
-                    const n = windows.ReadFile(handle, data[i], pos) catch |err| {
-                        r.err = err;
-                        return error.ReadFailed;
-                    };
-                    if (n == 0) return error.EndOfFile;
-                    r.pos = pos + n;
-                    return n;
-                }
-
-                var iovecs: [max_buffers_len]std.posix.iovec = undefined;
-                var iovecs_i: usize = 0;
-                for (data) |d| {
-                    // Since the OS checks pointer address before length, we must omit
-                    // length-zero vectors.
-                    if (d.len == 0) continue;
-                    iovecs[iovecs_i] = .{ .base = d.ptr, .len = d.len };
-                    iovecs_i += 1;
-                    if (iovecs_i >= iovecs.len) break;
-                }
-                const send_vecs = iovecs[0..iovecs_i];
-                if (send_vecs.len == 0) return 0; // Prevent false positive end detection on empty `data`.
-                const n = posix.preadv(handle, send_vecs, pos) catch |err| switch (err) {
-                    error.Unseekable => {
-                        r.mode = r.mode.toStreaming();
-                        assert(pos == 0);
-                        return 0;
-                    },
-                    else => |e| {
-                        r.err = e;
-                        return error.ReadFailed;
-                    },
-                };
-                if (n == 0) return error.EndOfStream;
-                r.pos = pos + n;
-                return n;
-            },
-            .streaming, .streaming_reading => {
-                if (is_windows) {
-                    // Unfortunately, `ReadFileScatter` cannot be used since it requires
-                    // page alignment, so we are stuck using only the first slice.
-                    // Avoid empty slices to prevent false positive end detections.
-                    var i: usize = 0;
-                    while (true) : (i += 1) {
-                        if (i >= data.len) return .{};
-                        if (data[i].len > 0) break;
-                    }
-                    const n = windows.ReadFile(handle, data[i], null) catch |err| {
-                        r.err = err;
-                        return error.ReadFailed;
-                    };
-                    if (n == 0) return error.EndOfFile;
-                    r.pos = pos + n;
-                    return n;
-                }
-
-                var iovecs: [max_buffers_len]std.posix.iovec = undefined;
-                var iovecs_i: usize = 0;
-                for (data) |d| {
-                    // Since the OS checks pointer address before length, we must omit
-                    // length-zero vectors.
-                    if (d.len == 0) continue;
-                    iovecs[iovecs_i] = .{ .base = d.ptr, .len = d.len };
-                    iovecs_i += 1;
-                    if (iovecs_i >= iovecs.len) break;
-                }
-                const send_vecs = iovecs[0..iovecs_i];
-                if (send_vecs.len == 0) return 0; // Prevent false positive end detection on empty `data`.
-                const n = posix.readv(handle, send_vecs) catch |err| {
-                    r.err = err;
-                    return error.ReadFailed;
-                };
-                if (n == 0) return error.EndOfStream;
-                r.pos = pos + n;
-                return n;
-            },
-        }
-    }
-
-    fn discard(context: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
+    fn discard(context: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize {
         const r: *Reader = @ptrCast(@alignCast(context));
         const file = r.file;
         const pos = r.pos;
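The new `stream` implementation delegates to `bw.writeFile` and, when the backend reports `error.Unimplemented`, downgrades `r.mode` and returns 0 so the caller retries with the slower strategy. A self-contained mock of that retry protocol (all names invented for illustration; not the std implementation):

    const std = @import("std");

    const MockReader = struct {
        fast_path_available: bool = false,
        reading: bool = false,

        fn read(r: *MockReader, dest: []u8) !usize {
            if (!r.reading and !r.fast_path_available) {
                // Analogous to .positional -> .positional_reading above.
                r.reading = true;
                return 0; // caller should retry, not treat this as EOF
            }
            dest[0] = 'z';
            return 1;
        }
    };

    test "zero means retry, not end of stream" {
        var r: MockReader = .{};
        var buf: [1]u8 = undefined;
        try std.testing.expectEqual(@as(usize, 0), try r.read(&buf));
        try std.testing.expectEqual(@as(usize, 1), try r.read(&buf));
    }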
@@ -1258,6 +1115,44 @@ pub const Reader = struct {
             },
         }
     }
 
+    pub fn readPositional(r: *Reader, dest: []u8) std.io.Reader.Error!usize {
+        const n = r.file.pread(dest, r.pos) catch |err| switch (err) {
+            error.Unseekable => {
+                r.mode = r.mode.toStreaming();
+                if (r.pos != 0) r.seekBy(r.pos) catch {
+                    r.mode = .failure;
+                    return error.ReadFailed;
+                };
+                return 0;
+            },
+            else => |e| {
+                r.err = e;
+                return error.ReadFailed;
+            },
+        };
+        if (n == 0) return error.EndOfStream;
+        r.pos += n;
+        return n;
+    }
+
+    pub fn readStreaming(r: *Reader, dest: []u8) std.io.Reader.Error!usize {
+        const n = r.file.read(dest) catch |err| {
+            r.err = err;
+            return error.ReadFailed;
+        };
+        if (n == 0) return error.EndOfStream;
+        r.pos += n;
+        return n;
+    }
+
+    pub fn read(r: *Reader, dest: []u8) std.io.Reader.Error!usize {
+        switch (r.mode) {
+            .positional, .positional_reading => return readPositional(r, dest),
+            .streaming, .streaming_reading => return readStreaming(r, dest),
+            .failure => return error.ReadFailed,
+        }
+    }
 };
 
 pub const Writer = struct {
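The two primitives added above differ in cursor handling: `pread` reads at an explicit offset and leaves the OS file cursor alone (so the wrapper tracks `r.pos` itself), while plain `read` advances the OS cursor; when `pread` reports `error.Unseekable` mid-stream, the wrapper falls back to streaming and seeks forward to resync. A minimal sketch of that distinction (assumes the options form of Dir.writeFile is available):

    const std = @import("std");

    test "pread does not move the cursor; read does" {
        var tmp = std.testing.tmpDir(.{});
        defer tmp.cleanup();
        try tmp.dir.writeFile(.{ .sub_path = "f", .data = "hello" });
        const f = try tmp.dir.openFile("f", .{});
        defer f.close();

        var buf: [5]u8 = undefined;
        _ = try f.pread(buf[0..2], 3); // reads "lo"; cursor stays at 0
        const n = try f.read(&buf); // still reads from the start
        try std.testing.expectEqualStrings("hello", buf[0..n]);
    }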
@@ -1266,7 +1161,6 @@ pub const Writer = struct {
     mode: Writer.Mode = .positional,
     pos: u64 = 0,
     sendfile_err: ?SendfileError = null,
-    read_err: ?ReadError = null,
     seek_err: ?SeekError = null,
 
     pub const Mode = Reader.Mode;
@@ -1359,15 +1253,14 @@ pub const Writer = struct {
 
     pub fn writeFile(
         context: ?*anyopaque,
-        in_file: std.fs.File,
-        in_offset: std.io.Writer.Offset,
-        in_limit: std.io.Limit,
+        file_reader: *Reader,
+        limit: std.io.Limit,
         headers_and_trailers: []const []const u8,
         headers_len: usize,
     ) std.io.Writer.FileError!usize {
         const w: *Writer = @ptrCast(@alignCast(context));
         const out_fd = w.file.handle;
-        const in_fd = in_file.handle;
+        const in_fd = file_reader.file.handle;
         // TODO try using copy_file_range on Linux
         // TODO try using copy_file_range on FreeBSD
         // TODO try using sendfile on macOS
@@ -1379,23 +1272,41 @@ pub const Writer = struct {
         // support a streaming read from in_file.
         if (headers_len > 0) return writeSplat(context, headers_and_trailers[0..headers_len], 1);
         const max_count = 0x7ffff000; // Avoid EINVAL.
-        const smaller_len = in_limit.minInt(max_count);
         var off: std.os.linux.off_t = undefined;
-        const off_ptr: ?*std.os.linux.off_t = if (in_offset.toInt()) |offset| b: {
-            off = std.math.cast(std.os.linux.off_t, offset) orelse
-                return writeSplat(context, headers_and_trailers, 1);
-            break :b &off;
-        } else null;
-        const n = std.os.linux.wrapped.sendfile(out_fd, in_fd, off_ptr, smaller_len) catch |err| switch (err) {
-            // Errors that imply sendfile should be avoided on the next write.
-            error.UnsupportedOperation,
-            error.Unexpected,
-            => |e| {
-                w.sendfile_err = e;
-                break :sf;
-            },
-            else => |e| return e,
-        };
+        const off_ptr: ?*std.os.linux.off_t, const count: usize = switch (file_reader.mode) {
+            .positional => o: {
+                const size = file_reader.size orelse {
+                    if (file_reader.file.getEndPos()) |size| {
+                        file_reader.size = size;
+                    } else |err| {
+                        file_reader.size_err = err;
+                        file_reader.mode = .streaming;
+                    }
+                    return 0;
+                };
+                off = std.math.cast(std.os.linux.off_t, file_reader.pos) orelse
+                    return writeSplat(context, headers_and_trailers, 1);
+                break :o .{ &off, @min(@intFromEnum(limit), size - file_reader.pos, max_count) };
+            },
+            .streaming => .{ null, limit.minInt(max_count) },
+            .streaming_reading, .positional_reading => break :sf,
+            .failure => return error.ReadFailed,
+        };
+        const n = std.os.linux.wrapped.sendfile(out_fd, in_fd, off_ptr, count) catch |err| switch (err) {
+            error.Unseekable => {
+                file_reader.mode = file_reader.mode.toStreaming();
+                if (file_reader.pos != 0) file_reader.seekBy(@intCast(file_reader.pos)) catch {
+                    file_reader.mode = .failure;
+                    return error.ReadFailed;
+                };
+                return 0;
+            },
+            else => |e| {
+                w.sendfile_err = e;
+                return 0;
+            },
+        };
+        file_reader.pos += n;
         w.pos += n;
         return n;
     }
 
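The `off_ptr` selects between the two sendfile(2) offset modes: with a non-null pointer the kernel reads at `*off` and advances `*off` without touching the input fd's cursor (positional), while null uses and advances the fd cursor (streaming). A minimal sketch of both modes using the raw syscall wrapper (error handling omitted; the exact wrapper signature is an assumption here):

    const std = @import("std");
    const linux = std.os.linux;

    fn copyPositional(out_fd: i32, in_fd: i32, start: u64, len: usize) usize {
        var off: linux.off_t = @intCast(start);
        // Matches the max_count clamp above ("Avoid EINVAL").
        return linux.sendfile(out_fd, in_fd, &off, @min(len, 0x7ffff000));
    }

    fn copyStreaming(out_fd: i32, in_fd: i32, len: usize) usize {
        return linux.sendfile(out_fd, in_fd, null, @min(len, 0x7ffff000));
    }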
@@ -1631,7 +1631,7 @@ pub const FetchOptions = struct {
         list: *std.ArrayListUnmanaged(u8),
         /// If null then only the existing capacity will be used.
         allocator: ?Allocator = null,
-        append_limit: std.io.Reader.Limit = .unlimited,
+        append_limit: std.io.Limit = .unlimited,
     };
 };
 
@@ -160,13 +160,12 @@ fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) std.
 
 fn writeFile(
     context: ?*anyopaque,
-    file: std.fs.File,
-    offset: std.io.Writer.Offset,
+    file_reader: *std.fs.File.Reader,
     limit: std.io.Writer.Limit,
     headers_and_trailers_full: []const []const u8,
     headers_len_full: usize,
 ) std.io.Writer.FileError!usize {
-    if (std.fs.File.Handle == void) unreachable;
+    if (std.fs.File.Handle == void) return error.Unimplemented;
     const aw: *AllocatingWriter = @alignCast(@ptrCast(context));
     const gpa = aw.allocator;
     var list = aw.toArrayList();
@@ -178,35 +177,24 @@ fn writeFile(
         break :b .{ headers_and_trailers_full[1..], headers_len_full - 1 };
     } else .{ headers_and_trailers_full, headers_len_full };
     const trailers = headers_and_trailers[headers_len..];
-    const pos = offset.toInt() orelse @panic("TODO treat file as stream");
-    const limit_int = limit.toInt() orelse {
-        var new_capacity: usize = list.capacity + std.atomic.cache_line;
-        for (headers_and_trailers) |bytes| new_capacity += bytes.len;
-        list.ensureTotalCapacity(gpa, new_capacity) catch return error.WriteFailed;
-        for (headers_and_trailers[0..headers_len]) |bytes| list.appendSliceAssumeCapacity(bytes);
-        const dest = list.items.ptr[list.items.len..list.capacity];
-        const n = try file.pread(dest, pos);
-        if (n == 0) {
-            new_capacity = list.capacity;
-            for (trailers) |bytes| new_capacity += bytes.len;
-            list.ensureTotalCapacity(gpa, new_capacity) catch return error.WriteFailed;
-            for (trailers) |bytes| list.appendSliceAssumeCapacity(bytes);
-            return list.items.len - start_len;
-        }
-        list.items.len += n;
-        return list.items.len - start_len;
-    };
-    var new_capacity: usize = list.capacity + limit_int;
+    const pos = file_reader.pos;
+
+    const additional = if (file_reader.getSize()) |size| size - pos else |_| std.atomic.cache_line;
+    var new_capacity: usize = list.capacity + limit.minInt(additional);
     for (headers_and_trailers) |bytes| new_capacity += bytes.len;
     list.ensureTotalCapacity(gpa, new_capacity) catch return error.WriteFailed;
     for (headers_and_trailers[0..headers_len]) |bytes| list.appendSliceAssumeCapacity(bytes);
-    const dest = list.items.ptr[list.items.len..][0..limit_int];
-    const n = try file.pread(dest, pos);
-    list.items.len += n;
-    if (n < dest.len) {
-        return list.items.len - start_len;
+    const dest = limit.slice(list.items.ptr[list.items.len..list.capacity]);
+    const n = try file_reader.read(dest);
+    const is_end = if (file_reader.getSize()) |size| n >= size - pos else n == 0;
+    if (is_end) {
         new_capacity = list.capacity;
         for (trailers) |bytes| new_capacity += bytes.len;
         list.ensureTotalCapacity(gpa, new_capacity) catch return error.WriteFailed;
         for (trailers) |bytes| list.appendSliceAssumeCapacity(bytes);
+    } else {
+        list.items.len += n;
     }
-    for (trailers) |bytes| list.appendSliceAssumeCapacity(bytes);
     return list.items.len - start_len;
 }
 
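The pattern above reads directly into the unused capacity region of the array list and then commits the bytes by bumping `items.len`, avoiding a bounce buffer. Distilled into a self-contained test (a sketch, not the std code):

    const std = @import("std");

    test "read into spare ArrayList capacity" {
        const gpa = std.testing.allocator;
        var list: std.ArrayListUnmanaged(u8) = .empty;
        defer list.deinit(gpa);

        try list.ensureTotalCapacity(gpa, 64);
        const dest = list.items.ptr[list.items.len..list.capacity];

        // Stand-in for file_reader.read(dest):
        const src = "hello";
        @memcpy(dest[0..src.len], src);

        list.items.len += src.len; // commit the freshly read bytes
        try std.testing.expectEqualStrings("hello", list.items);
    }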
@@ -128,7 +128,7 @@ pub const LimitedAllocError = Allocator.Error || ShortError || error{StreamTooLong};
 /// See also:
 /// * `readRemainingArrayList`
 /// * `BufferedReader.readRemainingArrayList`
-pub fn readRemainingAlloc(r: Reader, gpa: Allocator, limit: Reader.Limit) LimitedAllocError![]u8 {
+pub fn readRemainingAlloc(r: Reader, gpa: Allocator, limit: Limit) LimitedAllocError![]u8 {
     var buffer: ArrayList(u8) = .empty;
     defer buffer.deinit(gpa);
     try readRemainingArrayList(r, gpa, null, &buffer, limit, 1);
@@ -3,9 +3,10 @@ const Limited = @This();
 const std = @import("../../std.zig");
 const Reader = std.io.Reader;
 const BufferedWriter = std.io.BufferedWriter;
+const Limit = std.io.Limit;
 
 unlimited_reader: Reader,
-remaining: Reader.Limit,
+remaining: Limit,
 
 pub fn reader(l: *Limited) Reader {
     return .{
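For orientation, the limited reader composes budgets by clamping the caller's limit against the remaining budget on every read, then charging what was actually read. A sketch of the full pattern using the types imported above (the `subtract` charge is inferred from this file's other uses; the hunks below cut off before it):

    fn passthruReadSketch(l: *Limited, bw: *BufferedWriter, limit: Limit) !usize {
        const combined_limit = limit.min(l.remaining); // never exceed either budget
        const n = try l.unlimited_reader.read(bw, combined_limit);
        l.remaining = l.remaining.subtract(n).?; // n <= remaining by construction
        return n;
    }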
@@ -18,7 +19,7 @@ pub fn reader(l: *Limited) Reader {
     };
 }
 
-fn passthruRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Reader.Limit) Reader.RwError!usize {
+fn passthruRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) Reader.RwError!usize {
     const l: *Limited = @alignCast(@ptrCast(context));
     const combined_limit = limit.min(l.remaining);
     const n = try l.unlimited_reader.read(bw, combined_limit);
@@ -26,7 +27,7 @@ fn passthruRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Reader.Limit)
     return n;
 }
 
-fn passthruDiscard(context: ?*anyopaque, limit: Reader.Limit) Reader.Error!usize {
+fn passthruDiscard(context: ?*anyopaque, limit: Limit) Reader.Error!usize {
     const l: *Limited = @alignCast(@ptrCast(context));
     const combined_limit = limit.min(l.remaining);
     const n = try l.unlimited_reader.discard(combined_limit);
 
@@ -1916,7 +1916,7 @@ pub const Stream = struct {
     fn read(
         context: ?*anyopaque,
         bw: *std.io.BufferedWriter,
-        limit: std.io.Reader.Limit,
+        limit: std.io.Limit,
     ) std.io.Reader.Error!usize {
         const buf = limit.slice(try bw.writableSliceGreedy(1));
         const n = try readVec(context, &.{buf});
@@ -1958,7 +1958,7 @@ pub const Stream = struct {
         return .{ .len = n, .end = n == 0 };
     }
 
-    fn discard(context: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
+    fn discard(context: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize {
         _ = context;
         _ = limit;
         @panic("TODO");
@@ -358,7 +358,7 @@ pub const Iterator = struct {
         };
     }
 
-    fn read(context: ?*anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Reader.Limit) std.io.Reader.RwError!usize {
+    fn read(context: ?*anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Limit) std.io.Reader.RwError!usize {
         const file: *File = @ptrCast(@alignCast(context));
         if (file.unread_bytes.* == 0) return error.EndOfStream;
         const n = try file.parent_reader.read(bw, limit.min(.limited(file.unread_bytes.*)));
@@ -374,7 +374,7 @@ pub const Iterator = struct {
         return n;
     }
 
-    fn discard(context: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
+    fn discard(context: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize {
         const file: *File = @ptrCast(@alignCast(context));
         const n = limit.minInt(file.unread_bytes.*);
         file.unread_bytes.* -= n;
 
@@ -175,11 +175,7 @@ pub const Decompress = union {
         return .{
             .unbuffered_reader = .{
                 .context = d,
-                .vtable = &.{
-                    .read = readStore,
-                    .readVec = readVecUnimplemented,
-                    .discard = discardUnimplemented,
-                },
+                .vtable = &.{ .read = readStore },
             },
             .buffer = buffer,
             .end = 0,
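The stub entries can disappear here, which suggests (an assumption on my part, not stated in the diff) that the Reader vtable now treats `readVec` and `discard` as optional members with library-provided defaults. Sketch of the presumed mechanism; the real std.io.Reader vtable will differ in signatures:

    // Hypothetical vtable shape: only `.read` is mandatory, so a literal
    // like `&.{ .read = readStore }` fills the rest with fallbacks.
    const VTable = struct {
        read: *const fn (ctx: ?*anyopaque) anyerror!usize,
        // Defaulted members; null means "derive from read".
        readVec: ?*const fn (ctx: ?*anyopaque) anyerror!usize = null,
        discard: ?*const fn (ctx: ?*anyopaque) anyerror!usize = null,
    };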
@@ -191,11 +187,7 @@ pub const Decompress = union {
         return .{
             .unbuffered_reader = .{
                 .context = d,
-                .vtable = &.{
-                    .read = readDeflate,
-                    .readVec = readVecUnimplemented,
-                    .discard = discardUnimplemented,
-                },
+                .vtable = &.{ .read = readDeflate },
             },
             .buffer = buffer,
             .end = 0,
@@ -223,18 +215,6 @@ pub const Decompress = union {
         const d: *Decompress = @ptrCast(@alignCast(context));
         return std.compress.flate.Decompress.read(&d.inflate, writer, limit);
     }
-
-    fn readVecUnimplemented(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
-        _ = context;
-        _ = data;
-        @panic("TODO remove readVec primitive");
-    }
-
-    fn discardUnimplemented(context: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
-        _ = context;
-        _ = limit;
-        @panic("TODO allow discard to be null");
-    }
 };
 
 fn isBadFilename(filename: []const u8) bool {