readable -> reader / writable -> writer

Andrew Kelley 2025-06-22 00:58:32 -07:00
parent a249cc1c7e
commit 52b3275eb2
10 changed files with 53 additions and 80 deletions
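In short, the constructor methods are renamed to match the interface they return: readable(buffer) becomes reader(buffer) and writable(buffer) becomes writer(buffer), and, as the first hunks show, the old unbuffered reader() plus buffered readable(buffer) pair collapses into a single reader(buffer) whose std.io.Reader holds the caller-supplied buffer directly. A minimal before/after sketch of a call site, distilled from the hunks below; the wrapper function and the `stream` parameter are illustrative only, not part of the commit:

    // `stream` stands for any stream type touched by this commit
    // (flate.Compress, zlib.Decompressor, File.Writer, a hasher, ...).
    fn example(stream: anytype) void {
        var buffer: [4096]u8 = undefined;

        // Before this commit:
        //   var r = stream.readable(&buffer); // buffered Reader over an unbuffered one
        //   var w = stream.writable(&buffer);

        // After this commit: same buffer, one constructor, named after the
        // std.io.Reader / std.io.Writer interface it returns.
        var r = stream.reader(&buffer);
        _ = &r;
    }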

View File

@@ -70,12 +70,10 @@ hasher: Container.Hasher,
prev_match: ?Token = null,
prev_literal: ?u8 = null,
pub fn readable(c: *Compress, buffer: []u8) std.io.Reader {
pub fn reader(c: *Compress, buffer: []u8) std.io.Reader {
return .{
.unbuffered_reader = .{
.context = c,
.vtable = .{ .read = read },
},
.context = c,
.vtable = .{ .read = read },
.buffer = buffer,
};
}
@@ -337,12 +335,6 @@ pub const Huffman = SimpleCompressor(.huffman, .raw);
/// store blocks. That adds 9 bytes of header for each block. Max stored block
/// size is 64K. A block is emitted when flush or finish is called.
pub const store = struct {
pub fn compress(comptime container: Container, reader: anytype, writer: anytype) !void {
var c = try store.compressor(container, writer);
try c.compress(reader);
try c.finish();
}
pub fn Compressor(comptime container: Container, comptime WriterType: type) type {
return SimpleCompressor(.store, container, WriterType);
}
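Note that the second hunk above also deletes the one-shot store.compress helper entirely, with no renamed replacement in this hunk. Its body was just the three calls that remain in the API, so a caller can inline it; here is that deleted body again as a usage sketch, assuming store.compressor, compress, and finish keep the signatures visible above (not re-verified against the rest of the branch):

    // Reproduces the removed store.compress convenience wrapper.
    fn storeCompress(comptime container: Container, reader: anytype, writer: anytype) !void {
        var c = try store.compressor(container, writer); // SimpleCompressor in .store mode
        try c.compress(reader);                          // pack the input into stored blocks
        try c.finish();                                  // emit the final (possibly partial) block
    }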

View File

@@ -355,17 +355,14 @@ fn writeMatch(bw: *Writer, length: u16, distance: u16) !void {
@panic("TODO");
}
pub fn reader(self: *Decompress) std.io.Reader {
pub fn reader(self: *Decompress, buffer: []u8) std.io.Reader {
return .{
.context = self,
.vtable = &.{ .read = read },
.buffer = buffer,
};
}
pub fn readable(self: *Decompress, buffer: []u8) std.io.Reader {
return reader(self).buffered(buffer);
}
fn takeBits(d: *Decompress, comptime T: type) !T {
_ = d;
@panic("TODO");

View File

@@ -413,7 +413,7 @@ test "splat" {
};
const stream_result = r: {
var sha1: Sha1 = .init(.{});
var bw = sha1.writable(&.{});
var bw = sha1.writer(&.{});
try bw.writeSplatAll(&vecs, splat_len);
try std.testing.expectEqual(vecs[0].len + vecs[1].len + vecs[2].len * splat_len, sha1.total_len);
break :r sha1.finalResult();

View File

@@ -594,8 +594,7 @@ pub const ProgramHeaderIterator = struct {
var phdr: Elf64_Phdr = undefined;
const offset = it.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * it.index;
try it.file_reader.seekTo(offset);
var br = it.file_reader.readable(&.{});
try br.readSlice(@ptrCast(&phdr));
try it.file_reader.interface.readSlice(@ptrCast(&phdr));
if (it.elf_header.endian != native_endian)
mem.byteSwapAllFields(Elf64_Phdr, &phdr);
return phdr;
@@ -604,8 +603,7 @@ pub const ProgramHeaderIterator = struct {
var phdr: Elf32_Phdr = undefined;
const offset = it.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * it.index;
try it.file_reader.seekTo(offset);
var br = it.file_reader.readable(&.{});
try br.readSlice(@ptrCast(&phdr));
try it.file_reader.interface.readSlice(@ptrCast(&phdr));
if (it.elf_header.endian != native_endian)
mem.byteSwapAllFields(Elf32_Phdr, &phdr);
return .{
@@ -634,8 +632,7 @@ pub const SectionHeaderIterator = struct {
var shdr: Elf64_Shdr = undefined;
const offset = it.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * it.index;
try it.file_reader.seekTo(offset);
var br = it.file_reader.readable(&.{});
try br.readSlice(@ptrCast(&shdr));
try it.file_reader.interface.readSlice(@ptrCast(&shdr));
if (it.elf_header.endian != native_endian)
mem.byteSwapAllFields(Elf64_Shdr, &shdr);
return shdr;
@@ -644,8 +641,7 @@ pub const SectionHeaderIterator = struct {
var shdr: Elf32_Shdr = undefined;
const offset = it.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * it.index;
try it.file_reader.seekTo(offset);
var br = it.file_reader.readable(&.{});
try br.readSlice(@ptrCast(&shdr));
try it.file_reader.interface.readSlice(@ptrCast(&shdr));
if (it.elf_header.endian != native_endian)
mem.byteSwapAllFields(Elf32_Shdr, &shdr);
return .{

View File

@@ -2617,7 +2617,7 @@ pub fn updateFile(
.size = src_stat.size,
};
var buffer: [2000]u8 = undefined;
var dest_writer = atomic_file.file_writer.writable(&buffer);
var dest_writer = atomic_file.file_writer.writer(&buffer);
dest_writer.writeFileAll(&src_reader, .{}) catch |err| switch (err) {
error.ReadFailed => return src_reader.err.?,

View File

@@ -1448,7 +1448,7 @@ pub fn Hashed(comptime Hasher: type) type {
fn discard(r: *Reader, limit: Limit) Error!usize {
const this: *@This() = @alignCast(@fieldParentPtr("interface", r));
var w = this.hasher.writable(&.{});
var w = this.hasher.writer(&.{});
const n = this.in.read(&w, limit) catch |err| switch (err) {
error.WriteFailed => unreachable,
else => |e| return e,

View File

@@ -571,7 +571,7 @@ pub const Iterator = struct {
};
defer out_file.close();
var file_writer = out_file.writer();
var file_bw = file_writer.writable(&.{});
var file_bw = file_writer.writer(&.{});
const local_data_file_offset: u64 =
@as(u64, self.file_offset) +
@as(u64, @sizeOf(LocalFileHeader)) +
@@ -585,7 +585,7 @@ pub const Iterator = struct {
var decompress_br = decompress.readable(&limited_br, self.compression_method, &decompress_read_buffer);
const start_out = file_bw.count;
var hash_writer = file_bw.hashed(std.hash.Crc32.init());
var hash_bw = hash_writer.writable(&.{});
var hash_bw = hash_writer.writer(&.{});
decompress_br.readAll(&hash_bw, .limited(self.uncompressed_size)) catch |err| switch (err) {
error.ReadFailed => return stream.err.?,
error.WriteFailed => return file_writer.err.?,

View File

@@ -34,10 +34,9 @@ fn expectFiles(
std.mem.replaceScalar(u8, normalized_sub_path, '\\', '/');
var file = try dir.openFile(normalized_sub_path, .{});
defer file.close();
var file_reader = file.reader();
var file_br = file_reader.readable(&.{});
var content_buf: [4096]u8 = undefined;
const n = try file_br.readSliceShort(&content_buf);
var file_reader = file.reader(&content_buf);
const n = try file_reader.interface.readSliceShort(&content_buf);
try testing.expectEqualStrings(test_file.content, content_buf[0..n]);
}
}
@@ -65,7 +64,7 @@ fn makeZipWithStore(
store: []FileStore,
) !void {
var buffer: [200]u8 = undefined;
var bw = file_writer.writable(&buffer);
var bw = file_writer.writer(&buffer);
try writeZip(&bw, files, store, options);
}
@@ -201,7 +200,7 @@ const Zipper = struct {
const offset = writer.count;
var br: std.io.Reader = .fixed(opt.content);
var compress: std.compress.flate.Compress = .init(&br, .{});
var compress_br = compress.readable(&.{});
var compress_br = compress.reader(&.{});
const n = try compress_br.readRemaining(writer);
assert(br.seek == opt.content.len);
try testing.expectEqual(n, writer.count - offset);
@@ -431,7 +430,7 @@ test "bad zip files" {
{
const tmp_file = tmp.createFile();
defer tmp_file.close();
var file_writer = tmp_file.writable(&buffer);
var file_writer = tmp_file.writer(&buffer);
try makeZip(&file_writer, &.{}, .{ .end = .{ .sig = [_]u8{ 1, 2, 3, 4 } } });
var file_reader = file_writer.moveToReader();
try testing.expectError(error.ZipNoEndRecord, zip.extract(tmp.dir, &file_reader, .{}));
@@ -439,7 +438,7 @@ test "bad zip files" {
{
const tmp_file = tmp.createFile();
defer tmp_file.close();
var file_writer = tmp_file.writable(&buffer);
var file_writer = tmp_file.writer(&buffer);
try makeZip(&file_writer, &.{}, .{ .end = .{ .comment_len = 1 } });
var file_reader = file_writer.moveToReader();
try testing.expectError(error.ZipNoEndRecord, zip.extract(tmp.dir, &file_reader, .{}));
@@ -447,7 +446,7 @@ test "bad zip files" {
{
const tmp_file = tmp.createFile();
defer tmp_file.close();
var file_writer = tmp_file.writable(&buffer);
var file_writer = tmp_file.writer(&buffer);
try makeZip(&file_writer, &.{}, .{ .end = .{ .comment = "a", .comment_len = 0 } });
var file_reader = file_writer.moveToReader();
try testing.expectError(error.ZipNoEndRecord, zip.extract(tmp.dir, &file_reader, .{}));
@@ -455,7 +454,7 @@ test "bad zip files" {
{
const tmp_file = tmp.createFile();
defer tmp_file.close();
var file_writer = tmp_file.writable(&buffer);
var file_writer = tmp_file.writer(&buffer);
try makeZip(&file_writer, &.{}, .{ .end = .{ .disk_number = 1 } });
var file_reader = file_writer.moveToReader();
try testing.expectError(error.ZipMultiDiskUnsupported, zip.extract(tmp.dir, &file_reader, .{}));
@@ -463,7 +462,7 @@ test "bad zip files" {
{
const tmp_file = tmp.createFile();
defer tmp_file.close();
var file_writer = tmp_file.writable(&buffer);
var file_writer = tmp_file.writer(&buffer);
try makeZip(&file_writer, &.{}, .{ .end = .{ .central_directory_disk_number = 1 } });
var file_reader = file_writer.moveToReader();
try testing.expectError(error.ZipMultiDiskUnsupported, zip.extract(tmp.dir, &file_reader, .{}));
@@ -471,7 +470,7 @@ test "bad zip files" {
{
const tmp_file = tmp.createFile();
defer tmp_file.close();
var file_writer = tmp_file.writable(&buffer);
var file_writer = tmp_file.writer(&buffer);
try makeZip(&file_writer, &.{}, .{ .end = .{ .record_count_disk = 1 } });
var file_reader = file_writer.moveToReader();
try testing.expectError(error.ZipDiskRecordCountTooLarge, zip.extract(tmp.dir, &file_reader, .{}));
@@ -479,7 +478,7 @@ test "bad zip files" {
{
const tmp_file = tmp.createFile();
defer tmp_file.close();
var file_writer = tmp_file.writable(&buffer);
var file_writer = tmp_file.writer(&buffer);
try makeZip(&file_writer, &.{}, .{ .end = .{ .central_directory_size = 1 } });
var file_reader = file_writer.moveToReader();
try testing.expectError(error.ZipCdOversized, zip.extract(tmp.dir, &file_reader, .{}));
@@ -487,7 +486,7 @@ test "bad zip files" {
{
const tmp_file = tmp.createFile();
defer tmp_file.close();
var file_writer = tmp_file.writable(&buffer);
var file_writer = tmp_file.writer(&buffer);
try makeZip(&file_writer, &file_a, .{ .end = .{ .central_directory_size = 0 } });
var file_reader = file_writer.moveToReader();
try testing.expectError(error.ZipCdUndersized, zip.extract(tmp.dir, &file_reader, .{}));
@@ -495,7 +494,7 @@ test "bad zip files" {
{
const tmp_file = tmp.createFile();
defer tmp_file.close();
var file_writer = tmp_file.writable(&buffer);
var file_writer = tmp_file.writer(&buffer);
try makeZip(&file_writer, &file_a, .{ .end = .{ .central_directory_offset = 0 } });
var file_reader = file_writer.moveToReader();
try testing.expectError(error.ZipBadCdOffset, zip.extract(tmp.dir, &file_reader, .{}));
@@ -503,7 +502,7 @@ test "bad zip files" {
{
const tmp_file = tmp.createFile();
defer tmp_file.close();
var file_writer = tmp_file.writable(&buffer);
var file_writer = tmp_file.writer(&buffer);
try makeZip(&file_writer, &file_a, .{
.end = .{
.zip64 = .{ .locator_sig = [_]u8{ 1, 2, 3, 4 } },

View File

@@ -68,7 +68,7 @@ pub const Oid = union(Format) {
pub fn writer(hasher: *Hasher, buffer: []u8) Writer {
return switch (hasher.*) {
inline else => |*inner| inner.writable(buffer),
inline else => |*inner| inner.writer(buffer),
};
}
};
@@ -383,9 +383,7 @@ const Odb = struct {
.index_file = index_file,
.allocator = allocator,
};
var buffer: [1032]u8 = undefined;
var index_file_br = index_file.readable(&buffer);
try odb.index_header.read(&index_file_br);
try odb.index_header.read(&index_file.interface);
}
fn deinit(odb: *Odb) void {
@@ -395,22 +393,20 @@ const Odb = struct {
/// Reads the object at the current position in the database.
fn readObject(odb: *Odb) !Object {
var pack_read_buffer: [64]u8 = undefined;
var base_offset = odb.pack_file.pos;
var pack_br = odb.pack_file.readable(&pack_read_buffer);
const pack_br = &odb.pack_file.interface;
var base_header: EntryHeader = undefined;
var delta_offsets: std.ArrayListUnmanaged(u64) = .empty;
defer delta_offsets.deinit(odb.allocator);
const base_object = while (true) {
if (odb.cache.get(base_offset)) |base_object| break base_object;
base_header = try EntryHeader.read(odb.format, &pack_br);
base_header = try EntryHeader.read(odb.format, pack_br);
switch (base_header) {
.ofs_delta => |ofs_delta| {
try delta_offsets.append(odb.allocator, base_offset);
base_offset = std.math.sub(u64, base_offset, ofs_delta.offset) catch return error.InvalidFormat;
try odb.pack_file.seekTo(base_offset);
pack_br = odb.pack_file.readable(&pack_read_buffer);
},
.ref_delta => |ref_delta| {
try delta_offsets.append(odb.allocator, base_offset);
@@ -418,7 +414,7 @@ const Odb = struct {
base_offset = odb.pack_file.pos - pack_br.bufferedLen();
},
else => {
const base_data = try readObjectRaw(odb.allocator, &pack_br, base_header.uncompressedLength());
const base_data = try readObjectRaw(odb.allocator, pack_br, base_header.uncompressedLength());
errdefer odb.allocator.free(base_data);
const base_object: Object = .{ .type = base_header.objectType(), .data = base_data };
try odb.cache.put(odb.allocator, base_offset, base_object);
@@ -1294,10 +1290,10 @@ pub fn indexPack(
}
@memset(fan_out_table[fan_out_index..], count);
var index_writer_bw = index_writer.writable(&.{});
var index_writer_bw = index_writer.writer(&.{});
var index_hashed_writer = index_writer_bw.hashed(Oid.Hasher.init(format));
var write_buffer: [256]u8 = undefined;
var writer = index_hashed_writer.writable(&write_buffer);
var writer = index_hashed_writer.writer(&write_buffer);
try writer.writeAll(IndexHeader.signature);
try writer.writeInt(u32, IndexHeader.supported_version, .big);
for (fan_out_table) |fan_out_entry| {
@@ -1345,27 +1341,25 @@ fn indexPackFirstPass(
index_entries: *std.AutoHashMapUnmanaged(Oid, IndexEntry),
pending_deltas: *std.ArrayListUnmanaged(IndexEntry),
) !Oid {
var pack_br = pack.readable(&.{});
var pack_hashed_reader = pack_br.hashed(Oid.Hasher.init(format));
var pack_buffer: [2048]u8 = undefined; // Reasonably large buffer for file system.
var pack_hashed_br = pack_hashed_reader.readable(&pack_buffer);
var pack_hashed = pack.interface.hashed(Oid.Hasher.init(format), &pack_buffer);
const pack_header = try PackHeader.read(&pack_hashed_br);
const pack_header = try PackHeader.read(&pack_hashed.interface);
for (0..pack_header.total_objects) |_| {
const entry_offset = pack.pos - pack_hashed_br.bufferContents().len;
var entry_crc32_reader = pack_hashed_br.hashed(std.hash.Crc32.init());
const entry_offset = pack.pos - pack_hashed.interface.bufferContents().len;
var entry_buffer: [64]u8 = undefined; // Buffer only needed for loading EntryHeader.
var entry_crc32_br = entry_crc32_reader.readable(&entry_buffer);
var entry_crc32_reader = pack_hashed.interface.hashed(std.hash.Crc32.init(), &entry_buffer);
const entry_crc32_br = &entry_crc32_reader.interface;
const entry_header = try EntryHeader.read(format, &entry_crc32_br);
var entry_decompress_stream: zlib.Decompressor = .init(&entry_crc32_br);
// Decompress uses large output buffer; no input buffer needed.
var entry_decompress_br = entry_decompress_stream.readable(&.{});
var entry_decompress_br = entry_decompress_stream.reader(&.{});
switch (entry_header) {
.commit, .tree, .blob, .tag => |object| {
var oid_hasher = Oid.Hasher.init(format);
var oid_hasher_buffer: [zlib.max_window_len]u8 = undefined;
var oid_hasher_bw = oid_hasher.writable(&oid_hasher_buffer);
var oid_hasher_bw = oid_hasher.writer(&oid_hasher_buffer);
// The object header is not included in the pack data but is
// part of the object's ID.
try oid_hasher_bw.print("{s} {d}\x00", .{ @tagName(entry_header), object.uncompressed_length });
@@ -1389,8 +1383,8 @@ fn indexPackFirstPass(
}
}
const pack_checksum = pack_hashed_reader.hasher.finalResult();
const recorded_checksum = try Oid.readBytes(format, &pack_br);
const pack_checksum = pack_hashed.hasher.finalResult();
const recorded_checksum = try Oid.readBytes(format, &pack.interface);
if (!mem.eql(u8, pack_checksum.slice(), recorded_checksum.slice())) {
return error.CorruptedPack;
}
@@ -1417,9 +1411,7 @@ fn indexPackHashDelta(
if (cache.get(base_offset)) |base_object| break base_object;
try pack.seekTo(base_offset);
var pack_read_buffer: [64]u8 = undefined;
var pack_br = pack.readable(&pack_read_buffer);
base_header = try EntryHeader.read(format, &pack_br);
base_header = try EntryHeader.read(format, &pack.interface);
switch (base_header) {
.ofs_delta => |ofs_delta| {
try delta_offsets.append(allocator, base_offset);
@@ -1430,7 +1422,7 @@ fn indexPackHashDelta(
base_offset = (index_entries.get(ref_delta.base_object) orelse return null).offset;
},
else => {
const base_data = try readObjectRaw(allocator, &pack_br, base_header.uncompressedLength());
const base_data = try readObjectRaw(allocator, &pack.interface, base_header.uncompressedLength());
errdefer allocator.free(base_data);
const base_object: Object = .{ .type = base_header.objectType(), .data = base_data };
try cache.put(allocator, base_offset, base_object);
@@ -1443,7 +1435,7 @@ fn indexPackHashDelta(
var entry_hasher: Oid.Hasher = .init(format);
var entry_hasher_buffer: [64]u8 = undefined;
var entry_hasher_bw = entry_hasher.writable(&entry_hasher_buffer);
var entry_hasher_bw = entry_hasher.writer(&entry_hasher_buffer);
// Writes to hashers cannot fail.
entry_hasher_bw.print("{s} {d}\x00", .{ @tagName(base_object.type), base_data.len }) catch unreachable;
entry_hasher_bw.writeAll(base_data) catch unreachable;
@@ -1470,13 +1462,11 @@ fn resolveDeltaChain(
const delta_offset = delta_offsets[i];
try pack.seekTo(delta_offset);
var pack_read_buffer: [64]u8 = undefined;
var pack_br = pack.readable(&pack_read_buffer);
const delta_header = try EntryHeader.read(format, &pack_br);
const delta_header = try EntryHeader.read(format, &pack.interface);
_ = delta_header;
var delta_decompress: zlib.Decompressor = .init(&pack_br);
var delta_decompress: zlib.Decompressor = .init(&pack.interface);
var delta_decompress_buffer: [zlib.max_window_len]u8 = undefined;
var delta_reader = delta_decompress.readable(&delta_decompress_buffer);
var delta_reader = delta_decompress.reader(&delta_decompress_buffer);
_ = try readSizeVarInt(&delta_reader); // base object size
const expanded_size = try readSizeVarInt(&delta_reader);
const expanded_alloc_size = std.math.cast(usize, expanded_size) orelse return error.ObjectTooLarge;

View File

@@ -3330,11 +3330,10 @@ fn buildOutputType(
// for the hashing algorithm here and in the cache are the same.
// We are providing our own cache key, because this file has nothing
// to do with the cache manifest.
var file_writer = f.writer();
var file_writer_bw = file_writer.writable(&.{});
var hasher_writer = file_writer_bw.hashed(Cache.Hasher.init("0123456789abcdef"));
var file_writer = f.writer(&.{});
var hasher_writer = file_writer.interface.hashed(Cache.Hasher.init("0123456789abcdef"));
var buffer: [1000]u8 = undefined;
var bw = hasher_writer.writable(&buffer);
var bw = hasher_writer.writer(&buffer);
bw.writeFileAll(.stdin(), .{}) catch |err| switch (err) {
error.WriteFailed => fatal("failed to write {s}: {s}", .{ dump_path, file_writer.err.? }),
else => fatal("failed to pipe stdin to {s}: {s}", .{ dump_path, err }),
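Taken together, the last hunk is a compact summary of the naming scheme after this commit: f.writer(&.{}) constructs an unbuffered file writer, its .interface field is the generic std.io.Writer, .hashed(...) wraps that in a hashing adapter, and the adapter's own writer(&buffer) puts a buffer in front of the whole chain. A condensed sketch of that pipeline, copied from the hunk above with the error-handling switch dropped; the wrapper function is hypothetical, f is assumed to be a std.fs.File, and Cache is the compiler's cache module as used in main.zig:

    fn dumpStdinHashed(f: std.fs.File) !void {
        var file_writer = f.writer(&.{}); // unbuffered File.Writer
        var hasher_writer = file_writer.interface.hashed(Cache.Hasher.init("0123456789abcdef"));
        var buffer: [1000]u8 = undefined;
        var bw = hasher_writer.writer(&buffer); // buffered writer feeding the hasher
        try bw.writeFileAll(.stdin(), .{}); // copy stdin to the file, hashing as it goes
    }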