std: update AtomicFile to new API

This commit is contained in:
Andrew Kelley 2025-05-28 19:07:53 -07:00
parent ba684a18ca
commit 2ed47f1ed8
7 changed files with 36 additions and 160 deletions

View File

@ -345,19 +345,6 @@ fn readInner(
}
}
/// Vectored-read vtable entry for `std.io.Reader`; never implemented.
/// Unconditionally panics — the primitive was slated for removal from the
/// Reader vtable (this commit drops it from `reader()` entirely).
fn readVec(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
    _ = context;
    _ = data;
    @panic("TODO remove readVec primitive");
}
/// Discard vtable entry for `std.io.Reader`; never implemented.
/// Unconditionally panics: discarding decompressed output would still need
/// access to the output ring buffer (see inline comment), so the plan was
/// to make the vtable's `discard` entry optional instead.
fn discard(context: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize {
    _ = context;
    _ = limit;
    // Problem here is we still need access to the output ring buffer.
    @panic("TODO allow discard to be null");
}
/// Write match (back-reference to the same data slice) starting at `distance`
/// back from current write position, and `length` of bytes.
fn writeMatch(bw: *std.io.BufferedWriter, length: u16, distance: u16) !void {
@ -370,11 +357,7 @@ fn writeMatch(bw: *std.io.BufferedWriter, length: u16, distance: u16) !void {
pub fn reader(self: *Decompress) std.io.Reader {
return .{
.context = self,
.vtable = &.{
.read = read,
.readVec = readVec,
.discard = discard,
},
.vtable = &.{ .read = read },
};
}

View File

@ -1,4 +1,12 @@
file: File,
const AtomicFile = @This();
const std = @import("../std.zig");
const File = std.fs.File;
const Dir = std.fs.Dir;
const fs = std.fs;
const assert = std.debug.assert;
const posix = std.posix;
file_writer: File.Writer,
// TODO either replace this with rand_buf or use []u16 on Windows
tmp_path_buf: [tmp_path_len:0]u8,
dest_basename: []const u8,
@ -35,8 +43,8 @@ pub fn init(
else => |e| return e,
};
return AtomicFile{
.file = file,
return .{
.file_writer = file.writer(),
.tmp_path_buf = tmp_path_buf,
.dest_basename = dest_basename,
.file_open = true,
@ -50,7 +58,7 @@ pub fn init(
/// Always call deinit, even after a successful finish().
pub fn deinit(self: *AtomicFile) void {
if (self.file_open) {
self.file.close();
self.file_writer.file.close();
self.file_open = false;
}
if (self.file_exists) {
@ -72,17 +80,9 @@ pub const FinishError = posix.RenameError;
pub fn finish(self: *AtomicFile) FinishError!void {
assert(self.file_exists);
if (self.file_open) {
self.file.close();
self.file_writer.file.close();
self.file_open = false;
}
try posix.renameat(self.dir.fd, self.tmp_path_buf[0..], self.dir.fd, self.dest_basename);
self.file_exists = false;
}
const AtomicFile = @This();
const std = @import("../std.zig");
const File = std.fs.File;
const Dir = std.fs.Dir;
const fs = std.fs;
const assert = std.debug.assert;
const posix = std.posix;

View File

@ -2612,11 +2612,18 @@ pub fn updateFile(
var atomic_file = try dest_dir.atomicFile(dest_path, .{ .mode = actual_mode });
defer atomic_file.deinit();
try atomic_file.file.writeFileAll(src_file, .{
.offset = .zero,
.limit = .limited(src_stat.size),
});
try atomic_file.file.updateTimes(src_stat.atime, src_stat.mtime);
var src_reader: File.Reader = .{
.file = src_file,
.size = src_stat.size,
};
var buffer: [2000]u8 = undefined;
var dest_writer = atomic_file.file_writer.writable(&buffer);
dest_writer.writeFileAll(&src_reader, .{}) catch |err| switch (err) {
error.ReadFailed => return src_reader.err.?,
error.WriteFailed => return atomic_file.file_writer.err.?,
};
try atomic_file.file_writer.file.updateTimes(src_stat.atime, src_stat.mtime);
try atomic_file.finish();
return .stale;
}

View File

@ -887,18 +887,6 @@ pub fn pwritev(self: File, iovecs: []posix.iovec_const, offset: u64) PWriteError
return posix.pwritev(self.handle, iovecs, offset);
}
/// Errors from copying another file into `self`: read side (`PReadError`)
/// or write side (`WriteError`).
pub const WriteFileError = PReadError || WriteError;

/// Copies the contents of `in_file` into `self` through a 2000-byte stack
/// buffer, honoring `options` (see `BufferedWriter.WriteFileOptions`).
/// On `error.WriteFailed` the concrete cause is recovered from
/// `file_writer.err` and returned; all other errors propagate unchanged.
/// NOTE(review): `file_writer.err.?` assumes the writer always records its
/// error before surfacing `WriteFailed` — confirm against `File.Writer`.
pub fn writeFileAll(self: File, in_file: File, options: BufferedWriter.WriteFileOptions) WriteFileError!void {
    var file_writer = self.writer();
    var buffer: [2000]u8 = undefined;
    var bw = file_writer.interface().buffered(&buffer);
    bw.writeFileAll(in_file, options) catch |err| switch (err) {
        error.WriteFailed => return file_writer.err.?,
        else => |e| return e,
    };
}
/// Memoizes key information about a file handle such as:
/// * The size from calling stat, or the error that occurred therein.
/// * The current seek position.

View File

@ -439,9 +439,8 @@ pub const Reader = struct {
return .{
.context = reader,
.vtable = &.{
.read = &chunkedRead,
.readVec = &chunkedReadVec,
.discard = &chunkedDiscard,
.read = chunkedRead,
.discard = chunkedDiscard,
},
};
},
@ -451,9 +450,8 @@ pub const Reader = struct {
return .{
.context = reader,
.vtable = &.{
.read = &contentLengthRead,
.readVec = &contentLengthReadVec,
.discard = &contentLengthDiscard,
.read = contentLengthRead,
.discard = contentLengthDiscard,
},
};
} else {
@ -521,19 +519,6 @@ pub const Reader = struct {
return n;
}
/// Vectored read for a Content-Length-delimited HTTP body.
/// Reads at most the remaining body length from the underlying transport
/// and decrements the remaining count by the bytes actually read.
/// Once the count reaches zero the connection state returns to `.ready`
/// and `error.EndOfStream` signals end of body.
fn contentLengthReadVec(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
    const reader: *Reader = @alignCast(@ptrCast(context));
    const remaining_content_length = &reader.state.body_remaining_content_length;
    const remaining = remaining_content_length.*;
    if (remaining == 0) {
        // Body fully consumed; connection may be reused for the next request.
        reader.state = .ready;
        return error.EndOfStream;
    }
    // Cap the transport read so we never consume bytes past this body.
    const n = try reader.in.readVecLimit(data, .limited(remaining));
    remaining_content_length.* = remaining - n;
    return n;
}
fn contentLengthDiscard(ctx: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize {
const reader: *Reader = @alignCast(@ptrCast(ctx));
const remaining_content_length = &reader.state.body_remaining_content_length;
@ -621,96 +606,6 @@ pub const Reader = struct {
}
}
/// Vectored read for a chunked-transfer-encoded HTTP body.
/// Thin error-translating wrapper around `chunkedReadVecEndless`:
/// chunk-framing failures — including transport EOF in the middle of a
/// chunk, reported as `HttpChunkTruncated` — are recorded in
/// `reader.body_err` and converted to the generic `error.ReadFailed`
/// required by the `std.io.Reader` interface.
fn chunkedReadVec(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
    const reader: *Reader = @alignCast(@ptrCast(ctx));
    const chunk_len_ptr = switch (reader.state) {
        // Body already fully consumed on an earlier call.
        .ready => return error.EndOfStream,
        .body_remaining_chunk_len => |*x| x,
        else => unreachable,
    };
    return chunkedReadVecEndless(reader, data, chunk_len_ptr) catch |err| switch (err) {
        error.ReadFailed => return error.ReadFailed,
        error.EndOfStream => {
            // EOF inside a chunk means the peer cut the body short.
            reader.body_err = error.HttpChunkTruncated;
            return error.ReadFailed;
        },
        else => |e| {
            reader.body_err = e;
            return error.ReadFailed;
        },
    };
}
/// Core chunked-body decoder driving a vectored read.
/// Walks `data` slice by slice, interleaving three kinds of work keyed off
/// `chunk_len_ptr`: parsing a chunk-size header (`.head`), consuming the
/// chunk terminator bytes (`.rn` = "\r\n" pending, `.n` = '\n' pending),
/// and copying payload. Progress is persisted into `chunk_len_ptr` so the
/// next call resumes mid-chunk. Returns the number of payload bytes copied
/// into `data`.
/// `already_requested_more` permits at most one additional `fillMore` on
/// the transport after the buffer has been drained mid-payload, so the
/// function returns a short read instead of blocking repeatedly.
fn chunkedReadVecEndless(
    reader: *Reader,
    data: []const []u8,
    chunk_len_ptr: *RemainingChunkLen,
) (BodyError || std.io.Reader.Error)!usize {
    const in = reader.in;
    var already_requested_more = false;
    var amt_read: usize = 0;
    data: for (data) |d| {
        var d_i: usize = 0;
        len: switch (chunk_len_ptr.*) {
            .head => {
                // Feed buffered bytes to the chunk-header parser, refilling
                // until it reaches the payload (`.data`) or rejects input.
                var cp: ChunkParser = .init;
                while (true) {
                    const i = cp.feed(in.bufferContents());
                    switch (cp.state) {
                        .invalid => return error.HttpChunkInvalid,
                        .data => {
                            in.toss(i);
                            break;
                        },
                        else => {
                            in.toss(i);
                            already_requested_more = true;
                            try in.fillMore();
                            continue;
                        },
                    }
                }
                // A zero-length chunk terminates the body; trailers follow.
                if (cp.chunk_len == 0) return parseTrailers(reader, amt_read);
                // +2 folds the chunk's trailing "\r\n" into the remaining count.
                continue :len .init(cp.chunk_len + 2);
            },
            .n => {
                // One terminator byte ('\n') left to consume for this chunk.
                if (in.bufferContents().len < 1) already_requested_more = true;
                if ((try in.takeByte()) != '\n') return error.HttpChunkInvalid;
                continue :len .head;
            },
            .rn => {
                // Both terminator bytes ("\r\n") left to consume.
                if (in.bufferContents().len < 2) already_requested_more = true;
                const rn = try in.takeArray(2);
                if (rn[0] != '\r' or rn[1] != '\n') return error.HttpChunkInvalid;
                continue :len .head;
            },
            else => |remaining_chunk_len| {
                // Payload copy: bounded by buffered bytes, space left in `d`,
                // and payload remaining in this chunk (count includes the
                // 2-byte terminator, hence the `- 2`).
                const available_buffer = in.bufferContents();
                const copy_len = @min(available_buffer.len, d.len - d_i, remaining_chunk_len.int() - 2);
                @memcpy(d[d_i..][0..copy_len], available_buffer[0..copy_len]);
                d_i += copy_len;
                amt_read += copy_len;
                in.toss(copy_len);
                const next_chunk_len: RemainingChunkLen = .init(remaining_chunk_len.int() - copy_len);
                if (d.len - d_i == 0) {
                    // This output slice is full; persist progress and advance
                    // to the next slice of `data`.
                    chunk_len_ptr.* = next_chunk_len;
                    continue :data;
                }
                if (available_buffer.len - copy_len == 0) {
                    // Transport buffer drained mid-payload: refill at most
                    // once per call, otherwise return the short read.
                    if (already_requested_more) {
                        chunk_len_ptr.* = next_chunk_len;
                        return amt_read;
                    }
                    already_requested_more = true;
                    try in.fillMore();
                }
                continue :len next_chunk_len;
            },
        }
    }
    return amt_read;
}
fn chunkedDiscard(ctx: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize {
const reader: *Reader = @alignCast(@ptrCast(ctx));
const chunk_len_ptr = switch (reader.state) {

View File

@ -561,7 +561,10 @@ pub fn writeFileReading(
limit: Limit,
) Writer.ReadingFileError!usize {
const dest = limit.slice(try bw.writableSliceGreedy(1));
const n = try file_reader.read(dest);
const n = file_reader.read(dest) catch |err| switch (err) {
error.EndOfStream => 0,
error.ReadFailed => return error.ReadFailed,
};
bw.advance(n);
return n;
}
@ -663,7 +666,7 @@ pub fn writeFileAll(
bw: *BufferedWriter,
file_reader: *std.fs.File.Reader,
options: WriteFileOptions,
) Writer.FileError!void {
) Writer.ReadingFileError!void {
const headers_and_trailers = options.headers_and_trailers;
const headers = headers_and_trailers[0..options.headers_len];
var remaining = options.limit;

View File

@ -153,7 +153,7 @@ pub fn discardingWriteFile(
const seek_amt = limit.minInt(remaining);
// Error is observable on `file_reader` instance, and is safe to ignore
// depending on the caller's needs. Caller can make that decision.
file_reader.seekForward(seek_amt) catch {};
file_reader.seekBy(@intCast(seek_amt)) catch {};
var n: usize = seek_amt;
for (headers_and_trailers[0..headers_len]) |bytes| n += bytes.len;
if (seek_amt == remaining) {