From 3c98e2c826e1a8650090a889cf3a1560335b2968 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 29 May 2025 17:11:05 -0700 Subject: [PATCH] std: combine BufferedReader into Reader --- lib/compiler/resinator/errors.zig | 6 +- lib/compiler/std-docs.zig | 2 +- lib/docs/wasm/main.zig | 3 +- lib/std/Build/Step/CheckObject.zig | 39 +- lib/std/coff.zig | 14 +- lib/std/compress/flate.zig | 66 +- lib/std/compress/flate/Compress.zig | 10 +- lib/std/compress/flate/Decompress.zig | 18 +- lib/std/compress/lzma.zig | 42 +- lib/std/compress/lzma2.zig | 10 +- lib/std/compress/xz/test.zig | 5 +- lib/std/compress/zstd.zig | 6 +- lib/std/compress/zstd/Decompress.zig | 23 +- lib/std/crypto/codecs/asn1.zig | 6 +- lib/std/crypto/ecdsa.zig | 21 +- lib/std/crypto/tls.zig | 4 +- lib/std/crypto/tls/Client.zig | 6 +- lib/std/debug/Dwarf.zig | 3 +- lib/std/debug/Dwarf/call_frame.zig | 2 +- lib/std/debug/Dwarf/expression.zig | 3 +- lib/std/debug/FixedBufferReader.zig | 4 +- lib/std/debug/SelfInfo.zig | 7 +- lib/std/elf.zig | 6 +- lib/std/fs/File.zig | 29 +- lib/std/http.zig | 4 +- lib/std/http/Client.zig | 4 +- lib/std/http/Server.zig | 4 +- lib/std/io.zig | 26 +- lib/std/io/AllocatingWriter.zig | 2 +- lib/std/io/BufferedReader.zig | 1230 ----------------------- lib/std/io/Reader.zig | 1324 ++++++++++++++++++++++--- lib/std/io/Reader/Limited.zig | 44 +- lib/std/leb128.zig | 23 +- lib/std/net.zig | 4 +- lib/std/process/Child.zig | 10 +- lib/std/tar.zig | 30 +- lib/std/tar/Writer.zig | 8 +- lib/std/tar/test.zig | 23 +- lib/std/tz.zig | 8 +- lib/std/zig/Server.zig | 4 +- lib/std/zig/llvm/BitcodeReader.zig | 14 +- lib/std/zig/system/linux.zig | 5 +- lib/std/zip.zig | 6 +- lib/std/zip/test.zig | 4 +- src/Compilation.zig | 5 +- src/Package/Fetch/git.zig | 18 +- src/Zcu.zig | 4 +- src/arch/x86_64/Disassembler.zig | 7 +- src/libs/glibc.zig | 3 +- src/link/Dwarf.zig | 5 +- src/link/Elf/Object.zig | 11 +- src/link/Elf/eh_frame.zig | 12 +- src/link/MachO/Dwarf.zig | 14 +- src/link/MachO/Dylib.zig | 7 +- src/link/MachO/eh_frame.zig | 27 +- src/link/Wasm.zig | 9 +- src/link/Wasm/Archive.zig | 6 +- src/link/Wasm/Object.zig | 10 +- src/link/riscv.zig | 5 +- 59 files changed, 1524 insertions(+), 1731 deletions(-) delete mode 100644 lib/std/io/BufferedReader.zig diff --git a/lib/compiler/resinator/errors.zig b/lib/compiler/resinator/errors.zig index 9727872367..8aa9e74578 100644 --- a/lib/compiler/resinator/errors.zig +++ b/lib/compiler/resinator/errors.zig @@ -1082,11 +1082,9 @@ const CorrespondingLines = struct { at_eof: bool = false, span: SourceMappings.CorrespondingSpan, file: std.fs.File, - buffered_reader: BufferedReaderType, + buffered_reader: *std.io.Reader, code_page: SupportedCodePage, - const BufferedReaderType = std.io.BufferedReader(512, std.fs.File.Reader); - pub fn init(cwd: std.fs.Dir, err_details: ErrorDetails, line_for_comparison: []const u8, corresponding_span: SourceMappings.CorrespondingSpan, corresponding_file: []const u8) !CorrespondingLines { // We don't do line comparison for this error, so don't print the note if the line // number is different @@ -1105,7 +1103,7 @@ const CorrespondingLines = struct { .buffered_reader = undefined, .code_page = err_details.code_page, }; - corresponding_lines.buffered_reader = BufferedReaderType{ + corresponding_lines.buffered_reader = .{ .unbuffered_reader = corresponding_lines.file.reader(), }; errdefer corresponding_lines.deinit(); diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index 5d1ffd68f1..a094771a18 100644 --- a/lib/compiler/std-docs.zig +++ 
b/lib/compiler/std-docs.zig @@ -390,7 +390,7 @@ fn receiveWasmMessage( gpa: Allocator, arena: Allocator, context: *Context, - br: *std.io.BufferedReader, + br: *std.io.Reader, result: *?Cache.Path, result_error_bundle: *std.zig.ErrorBundle, ) !void { diff --git a/lib/docs/wasm/main.zig b/lib/docs/wasm/main.zig index fc45d77022..614f989c58 100644 --- a/lib/docs/wasm/main.zig +++ b/lib/docs/wasm/main.zig @@ -778,8 +778,7 @@ export fn decl_type_html(decl_index: Decl.Index) String { const Oom = error{OutOfMemory}; fn unpackInner(tar_bytes: []u8) !void { - var br: std.io.BufferedReader = undefined; - br.initFixed(tar_bytes); + var br: std.io.Reader = .fixed(tar_bytes); var file_name_buffer: [1024]u8 = undefined; var link_name_buffer: [1024]u8 = undefined; var it = std.tar.Iterator.init(&br, .{ diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig index 8cf9b78154..3bc03d0d0e 100644 --- a/lib/std/Build/Step/CheckObject.zig +++ b/lib/std/Build/Step/CheckObject.zig @@ -1238,8 +1238,7 @@ const MachODumper = struct { } fn parseRebaseInfo(ctx: ObjectContext, data: []const u8, rebases: *std.ArrayList(u64)) !void { - var br: std.io.BufferedReader = undefined; - br.initFixed(@constCast(data)); + var br: std.io.Reader = .fixed(data); var seg_id: ?u8 = null; var offset: u64 = 0; @@ -1349,7 +1348,7 @@ const MachODumper = struct { } fn parseBindInfo(ctx: ObjectContext, data: []const u8, bindings: *std.ArrayList(Binding)) !void { - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(@constCast(data)); var seg_id: ?u8 = null; @@ -1447,7 +1446,7 @@ const MachODumper = struct { defer arena.deinit(); var exports: std.ArrayList(Export) = .init(arena.allocator()); - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(@constCast(data)); try parseTrieNode(arena.allocator(), &br, "", &exports); @@ -1517,7 +1516,7 @@ const MachODumper = struct { fn parseTrieNode( arena: Allocator, - br: *std.io.BufferedReader, + br: *std.io.Reader, prefix: []const u8, exports: *std.ArrayList(Export), ) !void { @@ -1705,7 +1704,7 @@ const ElfDumper = struct { fn parseAndDumpArchive(step: *Step, check: Check, bytes: []const u8) ![]const u8 { const gpa = step.owner.allocator; - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(@constCast(bytes)); if (!mem.eql(u8, try br.takeArray(elf.ARMAG.len), elf.ARMAG)) return error.InvalidArchiveMagicNumber; @@ -1780,7 +1779,7 @@ const ElfDumper = struct { } fn parseSymtab(ctx: *ArchiveContext, data: []const u8, ptr_width: enum { p32, p64 }) !void { - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(@constCast(data)); const num = switch (ptr_width) { .p32 => try br.takeInt(u32, .big), @@ -1851,7 +1850,7 @@ const ElfDumper = struct { fn parseAndDumpObject(step: *Step, check: Check, bytes: []const u8) ![]const u8 { const gpa = step.owner.allocator; - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(@constCast(bytes)); const hdr = try br.takeStruct(elf.Elf64_Ehdr); @@ -2354,7 +2353,7 @@ const WasmDumper = struct { fn parseAndDump(step: *Step, check: Check, bytes: []const u8) ![]const u8 { const gpa = step.owner.allocator; - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(@constCast(bytes)); const buf = try br.takeArray(8); @@ -2376,10 +2375,10 @@ const WasmDumper = struct { fn parseAndDumpInner( step: *Step, check: 
Check, - br: *std.io.BufferedReader, + br: *std.io.Reader, bw: *std.io.BufferedWriter, ) !void { - var section_br: std.io.BufferedReader = undefined; + var section_br: std.io.Reader = undefined; switch (check.kind) { .headers => while (br.takeEnum(std.wasm.Section, .little)) |section| { section_br.initFixed(try br.take(try br.takeLeb128(u32))); @@ -2396,7 +2395,7 @@ const WasmDumper = struct { fn parseAndDumpSection( step: *Step, section: std.wasm.Section, - br: *std.io.BufferedReader, + br: *std.io.Reader, bw: *std.io.BufferedWriter, ) !void { try bw.print( @@ -2445,7 +2444,7 @@ const WasmDumper = struct { } } - fn parseSection(step: *Step, section: std.wasm.Section, br: *std.io.BufferedReader, entries: u32, bw: *std.io.BufferedWriter) !void { + fn parseSection(step: *Step, section: std.wasm.Section, br: *std.io.Reader, entries: u32, bw: *std.io.BufferedWriter) !void { switch (section) { .type => { var i: u32 = 0; @@ -2576,7 +2575,7 @@ const WasmDumper = struct { } } - fn parseDumpType(step: *Step, comptime E: type, br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !E { + fn parseDumpType(step: *Step, comptime E: type, br: *std.io.Reader, bw: *std.io.BufferedWriter) !E { const tag = br.takeEnum(E, .little) catch |err| switch (err) { error.InvalidEnumTag => return step.fail("invalid wasm type value", .{}), else => |e| return e, @@ -2585,7 +2584,7 @@ const WasmDumper = struct { return tag; } - fn parseDumpLimits(br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void { + fn parseDumpLimits(br: *std.io.Reader, bw: *std.io.BufferedWriter) !void { const flags = try br.takeLeb128(u8); const min = try br.takeLeb128(u32); @@ -2593,7 +2592,7 @@ const WasmDumper = struct { if (flags != 0) try bw.print("max {x}\n", .{try br.takeLeb128(u32)}); } - fn parseDumpInit(step: *Step, br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void { + fn parseDumpInit(step: *Step, br: *std.io.Reader, bw: *std.io.BufferedWriter) !void { const opcode = br.takeEnum(std.wasm.Opcode, .little) catch |err| switch (err) { error.InvalidEnumTag => return step.fail("invalid wasm opcode", .{}), else => |e| return e, @@ -2613,8 +2612,8 @@ const WasmDumper = struct { } /// https://webassembly.github.io/spec/core/appendix/custom.html - fn parseDumpNames(step: *Step, br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void { - var subsection_br: std.io.BufferedReader = undefined; + fn parseDumpNames(step: *Step, br: *std.io.Reader, bw: *std.io.BufferedWriter) !void { + var subsection_br: std.io.Reader = undefined; while (br.seek < br.buffer.len) { switch (try parseDumpType(step, std.wasm.NameSubsection, br, bw)) { // The module name subsection ... 
consists of a single name @@ -2662,7 +2661,7 @@ const WasmDumper = struct { } } - fn parseDumpProducers(br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void { + fn parseDumpProducers(br: *std.io.Reader, bw: *std.io.BufferedWriter) !void { const field_count = try br.takeLeb128(u32); try bw.print( \\fields {d} @@ -2690,7 +2689,7 @@ const WasmDumper = struct { } } - fn parseDumpFeatures(br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void { + fn parseDumpFeatures(br: *std.io.Reader, bw: *std.io.BufferedWriter) !void { const feature_count = try br.takeLeb128(u32); try bw.print( \\features {d} diff --git a/lib/std/coff.zig b/lib/std/coff.zig index 0b9c407e70..3ba6e0bf03 100644 --- a/lib/std/coff.zig +++ b/lib/std/coff.zig @@ -1087,10 +1087,9 @@ pub const Coff = struct { const pe_pointer_offset = 0x3C; const pe_magic = "PE\x00\x00"; - var reader: std.io.BufferedReader = undefined; - reader.initFixed(data[pe_pointer_offset..]); + var reader: std.io.Reader = .fixed(data[pe_pointer_offset..]); const coff_header_offset = try reader.readInt(u32, .little); - reader.initFixed(data[coff_header_offset..]); + reader = .fixed(data[coff_header_offset..]); const magic = try reader.peek(4); const is_image = mem.eql(u8, pe_magic, magic); @@ -1121,16 +1120,15 @@ pub const Coff = struct { if (@intFromEnum(DirectoryEntry.DEBUG) >= data_dirs.len) return null; const debug_dir = data_dirs[@intFromEnum(DirectoryEntry.DEBUG)]; - var reader: std.io.BufferedReader = undefined; - reader.initFixed(self.data); + var reader: std.io.Reader = .fixed(self.data); if (self.is_loaded) { - reader.initFixed(self.data[debug_dir.virtual_address..]); + reader = .fixed(self.data[debug_dir.virtual_address..]); } else { // Find what section the debug_dir is in, in order to convert the RVA to a file offset for (self.getSectionHeaders()) |*sect| { if (debug_dir.virtual_address >= sect.virtual_address and debug_dir.virtual_address < sect.virtual_address + sect.virtual_size) { - reader.initFixed(self.data[sect.pointer_to_raw_data + (debug_dir.virtual_address - sect.virtual_address) ..]); + reader = .fixed(self.data[sect.pointer_to_raw_data + (debug_dir.virtual_address - sect.virtual_address) ..]); break; } } else return error.InvalidDebugDirectory; @@ -1144,7 +1142,7 @@ pub const Coff = struct { const debug_dir_entry = try reader.takeStruct(DebugDirectoryEntry); if (debug_dir_entry.type == .CODEVIEW) { const dir_offset = if (self.is_loaded) debug_dir_entry.address_of_raw_data else debug_dir_entry.pointer_to_raw_data; - reader.initFixed(self.data[dir_offset..]); + reader = .fixed(self.data[dir_offset..]); break; } } else return null; diff --git a/lib/std/compress/flate.zig b/lib/std/compress/flate.zig index 8e57998aad..2c66510f84 100644 --- a/lib/std/compress/flate.zig +++ b/lib/std/compress/flate.zig @@ -229,8 +229,7 @@ test "compress/decompress" { // compress original stream to compressed stream { - var original: std.io.BufferedReader = undefined; - original.initFixed(@constCast(data)); + var original: std.io.Reader = .fixed(data); var compressed: std.io.BufferedWriter = undefined; compressed.initFixed(&cmp_buf); var compress: Compress = .init(&original, .raw); @@ -246,8 +245,7 @@ test "compress/decompress" { } // decompress compressed stream to decompressed stream { - var compressed: std.io.BufferedReader = undefined; - compressed.initFixed(cmp_buf[0..compressed_size]); + var compressed: std.io.Reader = .fixed(cmp_buf[0..compressed_size]); var decompressed: std.io.BufferedWriter = undefined; 
decompressed.initFixed(&dcm_buf); try Decompress.pump(container, &compressed, &decompressed); @@ -267,8 +265,7 @@ test "compress/decompress" { } // decompressor reader interface { - var compressed: std.io.BufferedReader = undefined; - compressed.initFixed(cmp_buf[0..compressed_size]); + var compressed: std.io.Reader = .fixed(cmp_buf[0..compressed_size]); var dcm = Decompress.pump(container, &compressed); var dcm_rdr = dcm.reader(); const n = try dcm_rdr.readAll(&dcm_buf); @@ -287,8 +284,7 @@ test "compress/decompress" { // compress original stream to compressed stream { - var original: std.io.BufferedReader = undefined; - original.initFixed(data); + var original: std.io.Reader = .fixed(data); var compressed: std.io.BufferedWriter = undefined; compressed.initFixed(&cmp_buf); var cmp = try Compress.Huffman.init(container, &compressed); @@ -303,8 +299,7 @@ test "compress/decompress" { } // decompress compressed stream to decompressed stream { - var compressed: std.io.BufferedReader = undefined; - compressed.initFixed(cmp_buf[0..compressed_size]); + var compressed: std.io.Reader = .fixed(cmp_buf[0..compressed_size]); var decompressed: std.io.BufferedWriter = undefined; decompressed.initFixed(&dcm_buf); try Decompress.pump(container, &compressed, &decompressed); @@ -323,8 +318,7 @@ test "compress/decompress" { // compress original stream to compressed stream { - var original: std.io.BufferedReader = undefined; - original.initFixed(data); + var original: std.io.Reader = .fixed(data); var compressed: std.io.BufferedWriter = undefined; compressed.initFixed(&cmp_buf); var cmp = try Compress.SimpleCompressor(.store, container).init(&compressed); @@ -340,8 +334,7 @@ test "compress/decompress" { } // decompress compressed stream to decompressed stream { - var compressed: std.io.BufferedReader = undefined; - compressed.initFixed(cmp_buf[0..compressed_size]); + var compressed: std.io.Reader = .fixed(cmp_buf[0..compressed_size]); var decompressed: std.io.BufferedWriter = undefined; decompressed.initFixed(&dcm_buf); try Decompress.pump(container, &compressed, &decompressed); @@ -353,8 +346,7 @@ test "compress/decompress" { } fn testDecompress(comptime container: Container, compressed: []const u8, expected_plain: []const u8) !void { - var in: std.io.BufferedReader = undefined; - in.initFixed(compressed); + var in: std.io.Reader = .fixed(compressed); var out: std.io.AllocatingWriter = undefined; out.init(testing.allocator); defer out.deinit(); @@ -502,8 +494,7 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const var plain: std.io.BufferedWriter = undefined; plain.initFixed(&buffer2); - var in: std.io.BufferedReader = undefined; - in.initFixed(gzip_data); + var in: std.io.Reader = .fixed(gzip_data); try pkg.decompress(&in, &plain); try testing.expectEqualSlices(u8, plain_data, plain.getWritten()); } @@ -515,12 +506,10 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const var compressed: std.io.BufferedWriter = undefined; compressed.initFixed(&buffer1); - var in: std.io.BufferedReader = undefined; - in.initFixed(plain_data); + var in: std.io.Reader = .fixed(plain_data); try pkg.compress(&in, &compressed, .{}); - var compressed_br: std.io.BufferedReader = undefined; - compressed_br.initFixed(&buffer1); + var compressed_br: std.io.Reader = .fixed(&buffer1); try pkg.decompress(&compressed_br, &plain); try testing.expectEqualSlices(u8, plain_data, plain.getWritten()); } @@ -532,14 +521,12 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, 
plain_data: []const var compressed: std.io.BufferedWriter = undefined; compressed.initFixed(&buffer1); - var in: std.io.BufferedReader = undefined; - in.initFixed(plain_data); + var in: std.io.Reader = .fixed(plain_data); var cmp = try pkg.compressor(&compressed, .{}); try cmp.compress(&in); try cmp.finish(); - var compressed_br: std.io.BufferedReader = undefined; - compressed_br.initFixed(&buffer1); + var compressed_br: std.io.Reader = .fixed(&buffer1); var dcp = pkg.decompressor(&compressed_br); try dcp.decompress(&plain); try testing.expectEqualSlices(u8, plain_data, plain.getWritten()); @@ -554,12 +541,10 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const var compressed: std.io.BufferedWriter = undefined; compressed.initFixed(&buffer1); - var in: std.io.BufferedReader = undefined; - in.initFixed(plain_data); + var in: std.io.Reader = .fixed(plain_data); try pkg.huffman.compress(&in, &compressed); - var compressed_br: std.io.BufferedReader = undefined; - compressed_br.initFixed(&buffer1); + var compressed_br: std.io.Reader = .fixed(&buffer1); try pkg.decompress(&compressed_br, &plain); try testing.expectEqualSlices(u8, plain_data, plain.getWritten()); } @@ -571,14 +556,12 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const var compressed: std.io.BufferedWriter = undefined; compressed.initFixed(&buffer1); - var in: std.io.BufferedReader = undefined; - in.initFixed(plain_data); + var in: std.io.Reader = .fixed(plain_data); var cmp = try pkg.huffman.compressor(&compressed); try cmp.compress(&in); try cmp.finish(); - var compressed_br: std.io.BufferedReader = undefined; - compressed_br.initFixed(&buffer1); + var compressed_br: std.io.Reader = .fixed(&buffer1); try pkg.decompress(&compressed_br, &plain); try testing.expectEqualSlices(u8, plain_data, plain.getWritten()); } @@ -593,12 +576,10 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const var compressed: std.io.BufferedWriter = undefined; compressed.initFixed(&buffer1); - var in: std.io.BufferedReader = undefined; - in.initFixed(plain_data); + var in: std.io.Reader = .fixed(plain_data); try pkg.store.compress(&in, &compressed); - var compressed_br: std.io.BufferedReader = undefined; - compressed_br.initFixed(&buffer1); + var compressed_br: std.io.Reader = .fixed(&buffer1); try pkg.decompress(&compressed_br, &plain); try testing.expectEqualSlices(u8, plain_data, plain.getWritten()); } @@ -610,14 +591,12 @@ fn testInterface(comptime pkg: type, gzip_data: []const u8, plain_data: []const var compressed: std.io.BufferedWriter = undefined; compressed.initFixed(&buffer1); - var in: std.io.BufferedReader = undefined; - in.initFixed(plain_data); + var in: std.io.Reader = .fixed(plain_data); var cmp = try pkg.store.compressor(&compressed); try cmp.compress(&in); try cmp.finish(); - var compressed_br: std.io.BufferedReader = undefined; - compressed_br.initFixed(&buffer1); + var compressed_br: std.io.Reader = .fixed(&buffer1); try pkg.decompress(&compressed_br, &plain); try testing.expectEqualSlices(u8, plain_data, plain.getWritten()); } @@ -650,8 +629,7 @@ test "zlib should not overshoot" { 0x03, 0x00, 0x8b, 0x61, 0x0f, 0xa4, 0x52, 0x5a, 0x94, 0x12, }; - var stream: std.io.BufferedReader = undefined; - stream.initFixed(&data); + var stream: std.io.Reader = .fixed(&data); const reader = stream.reader(); var dcp = Decompress.init(reader); diff --git a/lib/std/compress/flate/Compress.zig b/lib/std/compress/flate/Compress.zig index 417cfdef75..4f04e7582a 100644 
--- a/lib/std/compress/flate/Compress.zig +++ b/lib/std/compress/flate/Compress.zig @@ -59,7 +59,7 @@ const huffman = flate.huffman; lookup: Lookup = .{}, tokens: Tokens = .{}, /// Asserted to have a buffer capacity of at least `flate.max_window_len`. -input: *std.io.BufferedReader, +input: *std.io.Reader, block_writer: BlockWriter, level: LevelArgs, hasher: Container.Hasher, @@ -69,7 +69,7 @@ hasher: Container.Hasher, prev_match: ?Token = null, prev_literal: ?u8 = null, -pub fn readable(c: *Compress, buffer: []u8) std.io.BufferedReader { +pub fn readable(c: *Compress, buffer: []u8) std.io.Reader { return .{ .unbuffered_reader = .{ .context = c, @@ -126,7 +126,7 @@ const LevelArgs = struct { } }; -pub fn init(input: *std.io.BufferedReader, options: Options) Compress { +pub fn init(input: *std.io.Reader, options: Options) Compress { return .{ .input = input, .block_writer = undefined, @@ -1147,7 +1147,7 @@ test "file tokenization" { const data = case.data; for (levels, 0..) |level, i| { // for each compression level - var original: std.io.BufferedReader = undefined; + var original: std.io.Reader = undefined; original.initFixed(data); // buffer for decompressed data @@ -1222,7 +1222,7 @@ test "store simple compressor" { //0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x21, }; - var fbs: std.io.BufferedReader = undefined; + var fbs: std.io.Reader = undefined; fbs.initFixed(data); var al = std.ArrayList(u8).init(testing.allocator); defer al.deinit(); diff --git a/lib/std/compress/flate/Decompress.zig b/lib/std/compress/flate/Decompress.zig index defda8b85a..9ffc2b588f 100644 --- a/lib/std/compress/flate/Decompress.zig +++ b/lib/std/compress/flate/Decompress.zig @@ -24,7 +24,7 @@ const Token = @import("Token.zig"); const testing = std.testing; const Decompress = @This(); -input: *std.io.BufferedReader, +input: *std.io.Reader, // Hashes, produces checksum, of uncompressed data for gzip/zlib footer. hasher: Container.Hasher, @@ -67,7 +67,7 @@ pub const Error = Container.Error || error{ MissingEndOfBlockCode, }; -pub fn init(input: *std.io.BufferedReader, container: Container) Decompress { +pub fn init(input: *std.io.Reader, container: Container) Decompress { return .{ .input = input, .hasher = .init(container), @@ -361,7 +361,7 @@ pub fn reader(self: *Decompress) std.io.Reader { }; } -pub fn readable(self: *Decompress, buffer: []u8) std.io.BufferedReader { +pub fn readable(self: *Decompress, buffer: []u8) std.io.Reader { return reader(self).buffered(buffer); } @@ -727,7 +727,7 @@ test "decompress" { }, }; for (cases) |c| { - var fb: std.io.BufferedReader = undefined; + var fb: std.io.Reader = undefined; fb.initFixed(@constCast(c.in)); var aw: std.io.AllocatingWriter = undefined; aw.init(testing.allocator); @@ -788,7 +788,7 @@ test "gzip decompress" { }, }; for (cases) |c| { - var fb: std.io.BufferedReader = undefined; + var fb: std.io.Reader = undefined; fb.initFixed(@constCast(c.in)); var aw: std.io.AllocatingWriter = undefined; aw.init(testing.allocator); @@ -818,7 +818,7 @@ test "zlib decompress" { }, }; for (cases) |c| { - var fb: std.io.BufferedReader = undefined; + var fb: std.io.Reader = undefined; fb.initFixed(@constCast(c.in)); var aw: std.io.AllocatingWriter = undefined; aw.init(testing.allocator); @@ -880,7 +880,7 @@ test "fuzzing tests" { }; inline for (cases, 0..) 
|c, case_no| { - var in: std.io.BufferedReader = undefined; + var in: std.io.Reader = undefined; in.initFixed(@constCast(@embedFile("testdata/fuzz/" ++ c.input ++ ".input"))); var aw: std.io.AllocatingWriter = undefined; aw.init(testing.allocator); @@ -903,7 +903,7 @@ test "bug 18966" { const input = @embedFile("testdata/fuzz/bug_18966.input"); const expect = @embedFile("testdata/fuzz/bug_18966.expect"); - var in: std.io.BufferedReader = undefined; + var in: std.io.Reader = undefined; in.initFixed(@constCast(input)); var aw: std.io.AllocatingWriter = undefined; aw.init(testing.allocator); @@ -921,7 +921,7 @@ test "reading into empty buffer" { 0b0000_0001, 0b0000_1100, 0x00, 0b1111_0011, 0xff, // deflate fixed buffer header len, nlen 'H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', 0x0a, // non compressed data }; - var in: std.io.BufferedReader = undefined; + var in: std.io.Reader = undefined; in.initFixed(@constCast(input)); var decomp: Decompress = .init(&in, .raw); var decompress_br = decomp.readable(&.{}); diff --git a/lib/std/compress/lzma.zig b/lib/std/compress/lzma.zig index 34ae45d2ce..fbbdc46ae3 100644 --- a/lib/std/compress/lzma.zig +++ b/lib/std/compress/lzma.zig @@ -11,7 +11,7 @@ pub const RangeDecoder = struct { range: u32, code: u32, - pub fn init(rd: *RangeDecoder, br: *std.io.BufferedReader) std.io.Reader.Error!usize { + pub fn init(rd: *RangeDecoder, br: *std.io.Reader) std.io.Reader.Error!usize { const reserved = try br.takeByte(); if (reserved != 0) return error.CorruptInput; rd.* = .{ @@ -25,14 +25,14 @@ pub const RangeDecoder = struct { return self.code == 0; } - inline fn normalize(self: *RangeDecoder, br: *std.io.BufferedReader) !void { + inline fn normalize(self: *RangeDecoder, br: *std.io.Reader) !void { if (self.range < 0x0100_0000) { self.range <<= 8; self.code = (self.code << 8) ^ @as(u32, try br.takeByte()); } } - inline fn getBit(self: *RangeDecoder, br: *std.io.BufferedReader) !bool { + inline fn getBit(self: *RangeDecoder, br: *std.io.Reader) !bool { self.range >>= 1; const bit = self.code >= self.range; @@ -43,7 +43,7 @@ pub const RangeDecoder = struct { return bit; } - pub fn get(self: *RangeDecoder, br: *std.io.BufferedReader, count: usize) !u32 { + pub fn get(self: *RangeDecoder, br: *std.io.Reader, count: usize) !u32 { var result: u32 = 0; var i: usize = 0; while (i < count) : (i += 1) @@ -51,7 +51,7 @@ pub const RangeDecoder = struct { return result; } - pub inline fn decodeBit(self: *RangeDecoder, br: *std.io.BufferedReader, prob: *u16, update: bool) !bool { + pub inline fn decodeBit(self: *RangeDecoder, br: *std.io.Reader, prob: *u16, update: bool) !bool { const bound = (self.range >> 11) * prob.*; if (self.code < bound) { @@ -74,7 +74,7 @@ pub const RangeDecoder = struct { fn parseBitTree( self: *RangeDecoder, - br: *std.io.BufferedReader, + br: *std.io.Reader, num_bits: u5, probs: []u16, update: bool, @@ -90,7 +90,7 @@ pub const RangeDecoder = struct { pub fn parseReverseBitTree( self: *RangeDecoder, - br: *std.io.BufferedReader, + br: *std.io.Reader, num_bits: u5, probs: []u16, offset: usize, @@ -117,7 +117,7 @@ pub const LenDecoder = struct { pub fn decode( self: *LenDecoder, - br: *std.io.BufferedReader, + br: *std.io.Reader, decoder: *RangeDecoder, pos_state: usize, update: bool, @@ -148,7 +148,7 @@ pub fn BitTree(comptime num_bits: usize) type { pub fn parse( self: *Self, - br: *std.io.BufferedReader, + br: *std.io.Reader, decoder: *RangeDecoder, update: bool, ) !u32 { @@ -157,7 +157,7 @@ pub fn BitTree(comptime num_bits: usize) type { 
pub fn parseReverse( self: *Self, - br: *std.io.BufferedReader, + br: *std.io.Reader, decoder: *RangeDecoder, update: bool, ) !u32 { @@ -222,7 +222,7 @@ pub const Decode = struct { dict_size: u32, unpacked_size: ?u64, - pub fn readHeader(br: *std.io.BufferedReader, options: Options) std.io.Reader.Error!Params { + pub fn readHeader(br: *std.io.Reader, options: Options) std.io.Reader.Error!Params { var props = try br.readByte(); if (props >= 225) { return error.CorruptInput; @@ -319,7 +319,7 @@ pub const Decode = struct { fn processNextInner( self: *Decode, allocator: Allocator, - br: *std.io.BufferedReader, + br: *std.io.Reader, bw: *std.io.BufferedWriter, buffer: anytype, decoder: *RangeDecoder, @@ -416,7 +416,7 @@ pub const Decode = struct { fn processNext( self: *Decode, allocator: Allocator, - br: *std.io.BufferedReader, + br: *std.io.Reader, bw: *std.io.BufferedWriter, buffer: anytype, decoder: *RangeDecoder, @@ -428,7 +428,7 @@ pub const Decode = struct { pub fn process( self: *Decode, allocator: Allocator, - br: *std.io.BufferedReader, + br: *std.io.Reader, bw: *std.io.BufferedWriter, buffer: anytype, decoder: *RangeDecoder, @@ -460,7 +460,7 @@ pub const Decode = struct { fn decodeLiteral( self: *Decode, - br: *std.io.BufferedReader, + br: *std.io.Reader, buffer: anytype, decoder: *RangeDecoder, update: bool, @@ -502,7 +502,7 @@ pub const Decode = struct { fn decodeDistance( self: *Decode, - br: *std.io.BufferedReader, + br: *std.io.Reader, decoder: *RangeDecoder, length: usize, update: bool, @@ -542,19 +542,19 @@ pub const Decompress = struct { error{ CorruptInput, EndOfStream, Overflow }; allocator: Allocator, - in_reader: *std.io.BufferedReader, + in_reader: *std.io.Reader, to_read: std.ArrayListUnmanaged(u8), buffer: LzCircularBuffer, decoder: RangeDecoder, state: Decode, - pub fn initOptions(allocator: Allocator, br: *std.io.BufferedReader, options: Decode.Options) !Decompress { + pub fn initOptions(allocator: Allocator, br: *std.io.Reader, options: Decode.Options) !Decompress { const params = try Decode.Params.readHeader(br, options); return init(allocator, br, params, options.memlimit); } - pub fn init(allocator: Allocator, source: *std.io.BufferedReader, params: Decode.Params, memlimit: ?usize) !Decompress { + pub fn init(allocator: Allocator, source: *std.io.Reader, params: Decode.Params, memlimit: ?usize) !Decompress { return .{ .allocator = allocator, .in_reader = source, @@ -839,7 +839,7 @@ test "Vec2D get addition overflow" { fn testDecompress(compressed: []const u8) ![]u8 { const allocator = std.testing.allocator; - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(compressed); var decompressor = try Decompress.initOptions(allocator, &br, .{}); defer decompressor.deinit(); @@ -927,7 +927,7 @@ test "too small uncompressed size in header" { test "reading one byte" { const compressed = @embedFile("testdata/good-known_size-with_eopm.lzma"); - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(compressed); var decompressor = try Decompress.initOptions(std.testing.allocator, &br, .{}); defer decompressor.deinit(); diff --git a/lib/std/compress/lzma2.zig b/lib/std/compress/lzma2.zig index e71ce93172..4b83f2afb2 100644 --- a/lib/std/compress/lzma2.zig +++ b/lib/std/compress/lzma2.zig @@ -2,7 +2,7 @@ const std = @import("../std.zig"); const Allocator = std.mem.Allocator; const lzma = std.compress.lzma; -pub fn decompress(gpa: Allocator, reader: *std.io.BufferedReader, writer: 
*std.io.BufferedWriter) std.io.Reader.StreamError!void { +pub fn decompress(gpa: Allocator, reader: *std.io.Reader, writer: *std.io.BufferedWriter) std.io.Reader.StreamError!void { var decoder = try Decode.init(gpa); defer decoder.deinit(gpa); return decoder.decompress(gpa, reader, writer); @@ -33,7 +33,7 @@ pub const Decode = struct { pub fn decompress( self: *Decode, allocator: Allocator, - reader: *std.io.BufferedReader, + reader: *std.io.Reader, writer: *std.io.BufferedWriter, ) !void { var accum = LzAccumBuffer.init(std.math.maxInt(usize)); @@ -56,7 +56,7 @@ pub const Decode = struct { fn parseLzma( self: *Decode, allocator: Allocator, - br: *std.io.BufferedReader, + br: *std.io.Reader, writer: *std.io.BufferedWriter, accum: *LzAccumBuffer, status: u8, @@ -149,7 +149,7 @@ pub const Decode = struct { fn parseUncompressed( allocator: Allocator, - reader: *std.io.BufferedReader, + reader: *std.io.Reader, writer: *std.io.BufferedWriter, accum: *LzAccumBuffer, reset_dict: bool, @@ -276,7 +276,7 @@ test decompress { 0x01, 0x00, 0x05, 0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x0A, 0x02, 0x00, 0x06, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x21, 0x0A, 0x00, }; - var stream: std.io.BufferedReader = undefined; + var stream: std.io.Reader = undefined; stream.initFixed(&compressed); var decomp: std.io.AllocatingWriter = undefined; const decomp_bw = decomp.init(std.testing.allocator); diff --git a/lib/std/compress/xz/test.zig b/lib/std/compress/xz/test.zig index 02f74e4421..90d3fecf4d 100644 --- a/lib/std/compress/xz/test.zig +++ b/lib/std/compress/xz/test.zig @@ -3,10 +3,9 @@ const testing = std.testing; const xz = std.compress.xz; fn decompress(data: []const u8) ![]u8 { - var in_stream: std.io.BufferedReader = undefined; - in_stream.initFixed(data); + var r: std.io.Reader = .fixed(data); - var xz_stream = try xz.decompress(testing.allocator, &in_stream); + var xz_stream = try xz.decompress(testing.allocator, &r); defer xz_stream.deinit(); return xz_stream.reader().readAllAlloc(testing.allocator, std.math.maxInt(usize)); diff --git a/lib/std/compress/zstd.zig b/lib/std/compress/zstd.zig index 2ed25e0931..8dea3db5d9 100644 --- a/lib/std/compress/zstd.zig +++ b/lib/std/compress/zstd.zig @@ -82,8 +82,7 @@ fn testDecompress(gpa: std.mem.Allocator, compressed: []const u8) ![]u8 { var out: std.ArrayListUnmanaged(u8) = .empty; defer out.deinit(gpa); - var in: std.io.BufferedReader = undefined; - in.initFixed(@constCast(compressed)); + var in: std.io.Reader = .fixed(compressed); var zstd_stream: Decompress = .init(&in, .{}); try zstd_stream.reader().readRemainingArrayList(gpa, null, &out, .unlimited, default_window_len); @@ -103,8 +102,7 @@ fn testExpectDecompressError(err: anyerror, compressed: []const u8) !void { var out: std.ArrayListUnmanaged(u8) = .empty; defer out.deinit(gpa); - var in: std.io.BufferedReader = undefined; - in.initFixed(@constCast(compressed)); + var in: std.io.Reader = .fixed(compressed); var zstd_stream: Decompress = .init(&in, .{}); try std.testing.expectError( error.ReadFailed, diff --git a/lib/std/compress/zstd/Decompress.zig b/lib/std/compress/zstd/Decompress.zig index 8cead1ee62..face1a364a 100644 --- a/lib/std/compress/zstd/Decompress.zig +++ b/lib/std/compress/zstd/Decompress.zig @@ -4,10 +4,9 @@ const assert = std.debug.assert; const Reader = std.io.Reader; const Limit = std.io.Limit; const BufferedWriter = std.io.BufferedWriter; -const BufferedReader = std.io.BufferedReader; const zstd = @import("../zstd.zig"); -input: *BufferedReader, +input: *Reader, state: State, verify_checksum: bool, err: 
?Error = null, @@ -63,7 +62,7 @@ pub const Error = error{ WindowSizeUnknown, }; -pub fn init(input: *BufferedReader, options: Options) Decompress { +pub fn init(input: *Reader, options: Options) Decompress { return .{ .input = input, .state = .new_frame, @@ -305,7 +304,7 @@ pub const Frame = struct { pub const DecodeError = Reader.Error || error{ReservedBitSet}; - pub fn decode(in: *BufferedReader) DecodeError!Header { + pub fn decode(in: *Reader) DecodeError!Header { const descriptor: Descriptor = @bitCast(try in.takeByte()); if (descriptor.reserved) return error.ReservedBitSet; @@ -446,7 +445,7 @@ pub const Frame = struct { /// FSE tables from `in`. pub fn prepare( self: *Decode, - in: *BufferedReader, + in: *Reader, remaining: *Limit, literals: LiteralsSection, sequences_header: SequencesSection.Header, @@ -536,7 +535,7 @@ pub const Frame = struct { /// TODO: don't use `@field` fn updateFseTable( self: *Decode, - in: *BufferedReader, + in: *Reader, remaining: *Limit, comptime choice: DataType, mode: SequencesSection.Header.Mode, @@ -858,7 +857,7 @@ pub const LiteralsSection = struct { compressed_size: ?u18, /// Decode a literals section header. - pub fn decode(in: *BufferedReader, remaining: *Limit) !Header { + pub fn decode(in: *Reader, remaining: *Limit) !Header { remaining.* = remaining.subtract(1) orelse return error.EndOfStream; const byte0 = try in.takeByte(); const block_type: BlockType = @enumFromInt(byte0 & 0b11); @@ -965,7 +964,7 @@ pub const LiteralsSection = struct { MissingStartBit, }; - pub fn decode(in: *BufferedReader, remaining: *Limit) HuffmanTree.DecodeError!HuffmanTree { + pub fn decode(in: *Reader, remaining: *Limit) HuffmanTree.DecodeError!HuffmanTree { remaining.* = remaining.subtract(1) orelse return error.EndOfStream; const header = try in.takeByte(); if (header < 128) { @@ -976,7 +975,7 @@ pub const LiteralsSection = struct { } fn decodeDirect( - in: *BufferedReader, + in: *Reader, remaining: *Limit, encoded_symbol_count: usize, ) HuffmanTree.DecodeError!HuffmanTree { @@ -993,7 +992,7 @@ pub const LiteralsSection = struct { } fn decodeFse( - in: *BufferedReader, + in: *Reader, remaining: *Limit, compressed_size: usize, ) HuffmanTree.DecodeError!HuffmanTree { @@ -1162,7 +1161,7 @@ pub const LiteralsSection = struct { MissingStartBit, }; - pub fn decode(in: *BufferedReader, remaining: *Limit, buffer: []u8) DecodeError!LiteralsSection { + pub fn decode(in: *Reader, remaining: *Limit, buffer: []u8) DecodeError!LiteralsSection { const header = try Header.decode(in, remaining); switch (header.block_type) { .raw => { @@ -1233,7 +1232,7 @@ pub const SequencesSection = struct { ReadFailed, }; - pub fn decode(in: *BufferedReader, remaining: *Limit) DecodeError!Header { + pub fn decode(in: *Reader, remaining: *Limit) DecodeError!Header { var sequence_count: u24 = undefined; remaining.* = remaining.subtract(1) orelse return error.EndOfStream; diff --git a/lib/std/crypto/codecs/asn1.zig b/lib/std/crypto/codecs/asn1.zig index 70580d12da..20d22c0236 100644 --- a/lib/std/crypto/codecs/asn1.zig +++ b/lib/std/crypto/codecs/asn1.zig @@ -154,8 +154,7 @@ pub const Tag = struct { test Tag { const buf = [_]u8{0xa3}; - var stream: std.io.BufferedReader = undefined; - stream.initFixed(&buf); + var stream: std.io.Reader = .fixed(&buf); const t = Tag.decode(stream.reader()); try std.testing.expectEqual(Tag.init(@enumFromInt(3), true, .context_specific), t); } @@ -185,8 +184,7 @@ pub const Element = struct { /// - Ensures length is within `bytes` /// - Ensures length is less than 
`std.math.maxInt(Index)` pub fn decode(bytes: []const u8, index: Index) DecodeError!Element { - var reader: std.io.BufferedReader = undefined; - reader.initFixed(bytes[index..]); + var reader: std.io.Reader = .fixed(bytes[index..]); const tag = try Tag.decode(reader); const size_or_len_size = try reader.readByte(); diff --git a/lib/std/crypto/ecdsa.zig b/lib/std/crypto/ecdsa.zig index ad7b682b7e..e88a7a54c7 100644 --- a/lib/std/crypto/ecdsa.zig +++ b/lib/std/crypto/ecdsa.zig @@ -155,20 +155,20 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type { } // Read a DER-encoded integer. - // Asserts `br` has storage capacity >= 2. - fn readDerInt(out: []u8, br: *std.io.BufferedReader) EncodingError!void { - const buf = br.take(2) catch return error.InvalidEncoding; + // Asserts `r` has storage capacity >= 2. + fn readDerInt(out: []u8, r: *std.io.Reader) EncodingError!void { + const buf = r.take(2) catch return error.InvalidEncoding; if (buf[0] != 0x02) return error.InvalidEncoding; var expected_len: usize = buf[1]; if (expected_len == 0 or expected_len > 1 + out.len) return error.InvalidEncoding; var has_top_bit = false; if (expected_len == 1 + out.len) { - if ((br.takeByte() catch return error.InvalidEncoding) != 0) return error.InvalidEncoding; + if ((r.takeByte() catch return error.InvalidEncoding) != 0) return error.InvalidEncoding; expected_len -= 1; has_top_bit = true; } const out_slice = out[out.len - expected_len ..]; - br.readSlice(out_slice) catch return error.InvalidEncoding; + r.readSlice(out_slice) catch return error.InvalidEncoding; if (@intFromBool(has_top_bit) != out[0] >> 7) return error.InvalidEncoding; } @@ -176,14 +176,13 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type { /// Returns InvalidEncoding if the DER encoding is invalid. pub fn fromDer(der: []const u8) EncodingError!Signature { if (der.len < 2) return error.InvalidEncoding; - var br: std.io.BufferedReader = undefined; - br.initFixed(@constCast(der)); - const buf = br.take(2) catch return error.InvalidEncoding; + var r: std.io.Reader = .fixed(der); + const buf = r.take(2) catch return error.InvalidEncoding; if (buf[0] != 0x30 or @as(usize, buf[1]) + 2 != der.len) return error.InvalidEncoding; var sig: Signature = mem.zeroInit(Signature, .{}); - try readDerInt(&sig.r, &br); - try readDerInt(&sig.s, &br); - if (br.seek != der.len) return error.InvalidEncoding; + try readDerInt(&sig.r, &r); + try readDerInt(&sig.s, &r); + if (r.seek != der.len) return error.InvalidEncoding; return sig; } }; diff --git a/lib/std/crypto/tls.zig b/lib/std/crypto/tls.zig index 73a5fef528..64fcf15896 100644 --- a/lib/std/crypto/tls.zig +++ b/lib/std/crypto/tls.zig @@ -655,7 +655,7 @@ pub const Decoder = struct { } /// Use this function to increase `their_end`. - pub fn readAtLeast(d: *Decoder, stream: *std.io.BufferedReader, their_amt: usize) !void { + pub fn readAtLeast(d: *Decoder, stream: *std.io.Reader, their_amt: usize) !void { assert(!d.disable_reads); const existing_amt = d.cap - d.idx; d.their_end = d.idx + their_amt; @@ -672,7 +672,7 @@ pub const Decoder = struct { /// Same as `readAtLeast` but also increases `our_end` by exactly `our_amt`. /// Use when `our_amt` is calculated by us, not by them. 
- pub fn readAtLeastOurAmt(d: *Decoder, stream: *std.io.BufferedReader, our_amt: usize) !void { + pub fn readAtLeastOurAmt(d: *Decoder, stream: *std.io.Reader, our_amt: usize) !void { assert(!d.disable_reads); try readAtLeast(d, stream, our_amt); d.our_end = d.idx + our_amt; diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig index 9e6d696400..971e2a4171 100644 --- a/lib/std/crypto/tls/Client.zig +++ b/lib/std/crypto/tls/Client.zig @@ -21,7 +21,7 @@ const array = tls.array; /// here via `reader`. /// /// The buffer is asserted to have capacity at least `min_buffer_len`. -input: *std.io.BufferedReader, +input: *std.io.Reader, /// The encrypted stream from the client to the server. Bytes are pushed here /// via `writer`. @@ -85,7 +85,7 @@ pub const SslKeyLog = struct { } }; -/// The `std.io.BufferedReader` supplied to `init` requires a buffer capacity +/// The `std.io.Reader` supplied to `init` requires a buffer capacity /// at least this amount. pub const min_buffer_len = tls.max_ciphertext_record_len; @@ -175,7 +175,7 @@ const InitError = error{ /// `input` is asserted to have buffer capacity at least `min_buffer_len`. pub fn init( client: *Client, - input: *std.io.BufferedReader, + input: *std.io.Reader, output: *std.io.BufferedWriter, options: Options, ) InitError!void { diff --git a/lib/std/debug/Dwarf.zig b/lib/std/debug/Dwarf.zig index 1b52e1a589..e11e2a4fb5 100644 --- a/lib/std/debug/Dwarf.zig +++ b/lib/std/debug/Dwarf.zig @@ -2235,8 +2235,7 @@ pub const ElfModule = struct { const section_bytes = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size); sections[section_index.?] = if ((shdr.sh_flags & elf.SHF_COMPRESSED) > 0) blk: { - var section_reader: std.io.BufferedReader = undefined; - section_reader.initFixed(@constCast(section_bytes)); + var section_reader: std.io.Reader = .fixed(section_bytes); const chdr = section_reader.takeStruct(elf.Chdr) catch continue; if (chdr.ch_type != .ZLIB) continue; const ch_size = chdr.ch_size; diff --git a/lib/std/debug/Dwarf/call_frame.zig b/lib/std/debug/Dwarf/call_frame.zig index 73bf255dae..1aac7e7206 100644 --- a/lib/std/debug/Dwarf/call_frame.zig +++ b/lib/std/debug/Dwarf/call_frame.zig @@ -136,7 +136,7 @@ pub const Instruction = union(Opcode) { }, pub fn read( - reader: *std.io.BufferedReader, + reader: *std.io.Reader, addr_size_bytes: u8, endian: std.builtin.Endian, ) !Instruction { diff --git a/lib/std/debug/Dwarf/expression.zig b/lib/std/debug/Dwarf/expression.zig index fa3b09bd7c..88d8e9f7f8 100644 --- a/lib/std/debug/Dwarf/expression.zig +++ b/lib/std/debug/Dwarf/expression.zig @@ -774,8 +774,7 @@ pub fn StackMachine(comptime options: Options) type { } fn nextLeb128(expression: []const u8, i: *usize, comptime I: type) !I { - var br: std.io.BufferedReader = undefined; - br.initFixed(@constCast(expression)); + var br: std.io.Reader = .fixed(expression); br.seek = i.*; assert(br.seek <= br.end); const result = br.takeLeb128(I) catch |err| switch (err) { diff --git a/lib/std/debug/FixedBufferReader.zig b/lib/std/debug/FixedBufferReader.zig index 035495ca55..939586ede9 100644 --- a/lib/std/debug/FixedBufferReader.zig +++ b/lib/std/debug/FixedBufferReader.zig @@ -1,6 +1,6 @@ //! Optimized for performance in debug builds. 
-// TODO I'm pretty sure this can be deleted thanks to the new std.io.BufferedReader semantics +// TODO I'm pretty sure this can be deleted thanks to the new std.io.Reader semantics const std = @import("../std.zig"); const MemoryAccessor = std.debug.MemoryAccessor; @@ -52,7 +52,7 @@ pub fn readIntChecked( } pub fn readLeb128(fbr: *FixedBufferReader, comptime T: type) Error!T { - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(@constCast(fbr.buf)); br.seek = fbr.pos; const result = br.takeLeb128(T); diff --git a/lib/std/debug/SelfInfo.zig b/lib/std/debug/SelfInfo.zig index 28e4e251e4..4f6f7d5130 100644 --- a/lib/std/debug/SelfInfo.zig +++ b/lib/std/debug/SelfInfo.zig @@ -2025,9 +2025,10 @@ pub const VirtualMachine = struct { assert(self.cie_row == null); if (pc < fde.pc_begin or pc >= fde.pc_begin + fde.pc_range) return error.AddressOutOfRange; - var readers: [2]std.io.BufferedReader = undefined; - readers[0].initFixed(@constCast(cie.initial_instructions)); - readers[1].initFixed(@constCast(fde.instructions)); + var readers: [2]std.io.Reader = .{ + .fixed(cie.initial_instructions), + .fixed(fde.instructions), + }; var prev_row: Row = self.current_row; for (&readers, [2]bool{ true, false }) |*reader, is_initial| { diff --git a/lib/std/elf.zig b/lib/std/elf.zig index 63f5d156dd..aad018a540 100644 --- a/lib/std/elf.zig +++ b/lib/std/elf.zig @@ -510,10 +510,10 @@ pub const Header = struct { pub const ReadError = std.io.Reader.Error || ParseError; - pub fn read(br: *std.io.BufferedReader) ReadError!Header { - const buf = try br.peek(@sizeOf(Elf64_Ehdr)); + pub fn read(r: *std.io.Reader) ReadError!Header { + const buf = try r.peek(@sizeOf(Elf64_Ehdr)); const result = try parse(@ptrCast(buf)); - br.toss(if (result.is_64) @sizeOf(Elf64_Ehdr) else @sizeOf(Elf32_Ehdr)); + r.toss(if (result.is_64) @sizeOf(Elf64_Ehdr) else @sizeOf(Elf32_Ehdr)); return result; } diff --git a/lib/std/fs/File.zig b/lib/std/fs/File.zig index 302a49b52f..2837535d57 100644 --- a/lib/std/fs/File.zig +++ b/lib/std/fs/File.zig @@ -904,6 +904,7 @@ pub const Reader = struct { size: ?u64 = null, size_err: ?GetEndPosError = null, seek_err: ?Reader.SeekError = null, + interface: std.io.Reader, pub const SeekError = File.SeekError || error{ /// Seeking fell back to reading, and reached the end before the requested seek position. 
@@ -940,18 +941,24 @@ pub const Reader = struct { } }; - pub fn interface(r: *Reader) std.io.Reader { + pub fn initInterface(buffer: []u8) std.io.Reader { return .{ - .context = r, + .context = undefined, .vtable = &.{ - .read = Reader.stream, + .stream = Reader.stream, .discard = Reader.discard, }, + .buffer = buffer, + .seek = 0, + .end = 0, }; } - pub fn readable(r: *Reader, buffer: []u8) std.io.BufferedReader { - return interface(r).buffered(buffer); + pub fn init(file: File, buffer: []u8) Reader { + return .{ + .file = file, + .interface = initInterface(buffer), + }; } pub fn getSize(r: *Reader) GetEndPosError!u64 { @@ -1021,11 +1028,11 @@ pub const Reader = struct { const max_buffers_len = 16; fn stream( - context: ?*anyopaque, + io_reader: *std.io.Reader, bw: *BufferedWriter, limit: std.io.Limit, ) std.io.Reader.StreamError!usize { - const r: *Reader = @ptrCast(@alignCast(context)); + const r: *Reader = @fieldParentPtr("interface", io_reader); switch (r.mode) { .positional, .streaming => return bw.writeFile(r, limit, &.{}, 0) catch |write_err| switch (write_err) { error.ReadFailed => return error.ReadFailed, @@ -1051,8 +1058,8 @@ pub const Reader = struct { } } - fn discard(context: ?*anyopaque, limit: std.io.Limit) std.io.Reader.Error!usize { - const r: *Reader = @ptrCast(@alignCast(context)); + fn discard(io_reader: *std.io.Reader, limit: std.io.Limit) std.io.Reader.Error!usize { + const r: *Reader = @fieldParentPtr("interface", io_reader); const file = r.file; const pos = r.pos; switch (r.mode) { @@ -1357,8 +1364,8 @@ pub const Writer = struct { /// /// Positional is more threadsafe, since the global seek position is not /// affected. -pub fn reader(file: File) Reader { - return .{ .file = file }; +pub fn reader(file: File, buffer: []u8) Reader { + return .init(file, buffer); } /// Positional is more threadsafe, since the global seek position is not diff --git a/lib/std/http.zig b/lib/std/http.zig index ac3ccdf44a..dd9177d97f 100644 --- a/lib/std/http.zig +++ b/lib/std/http.zig @@ -325,7 +325,7 @@ pub const Header = struct { }; pub const Reader = struct { - in: *std.io.BufferedReader, + in: *std.io.Reader, /// Keeps track of whether the stream is ready to accept a new request, /// making invalid API usage cause assertion failures rather than HTTP /// protocol violations. @@ -703,7 +703,7 @@ pub const Reader = struct { pub const Decompressor = struct { compression: Compression, - buffered_reader: std.io.BufferedReader, + buffered_reader: std.io.Reader, pub const Compression = union(enum) { deflate: std.compress.flate.Decompressor, diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig index 766390f078..2fc7432bd1 100644 --- a/lib/std/http/Client.zig +++ b/lib/std/http/Client.zig @@ -232,7 +232,7 @@ pub const Connection = struct { writer: std.io.BufferedWriter, /// HTTP protocol from server to client. /// This either comes directly from `stream_reader`, or from a TLS client. - reader: std.io.BufferedReader, + reader: std.io.Reader, /// Entry in `ConnectionPool.used` or `ConnectionPool.free`. pool_node: std.DoublyLinkedList.Node, port: u16, @@ -299,7 +299,7 @@ pub const Connection = struct { /// Data from `client` to `Connection.stream`. writer: std.io.BufferedWriter, /// Data from `Connection.stream` to `client`. 
- reader: std.io.BufferedReader, + reader: std.io.Reader, client: std.crypto.tls.Client, connection: Connection, diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig index cef6120740..67d7b9ec12 100644 --- a/lib/std/http/Server.zig +++ b/lib/std/http/Server.zig @@ -20,7 +20,7 @@ reader: http.Reader, /// header, otherwise `receiveHead` returns `error.HttpHeadersOversize`. /// /// The returned `Server` is ready for `receiveHead` to be called. -pub fn init(in: *std.io.BufferedReader, out: *std.io.BufferedWriter) Server { +pub fn init(in: *std.io.Reader, out: *std.io.BufferedWriter) Server { return .{ .reader = .{ .in = in, @@ -610,7 +610,7 @@ pub const Request = struct { /// See https://tools.ietf.org/html/rfc6455 pub const WebSocket = struct { key: []const u8, - input: *std.io.BufferedReader, + input: *std.io.Reader, output: *std.io.BufferedWriter, pub const Header0 = packed struct(u8) { diff --git a/lib/std/io.zig b/lib/std/io.zig index 36dd3e681b..e907524c0b 100644 --- a/lib/std/io.zig +++ b/lib/std/io.zig @@ -72,8 +72,6 @@ pub const Limit = enum(usize) { pub const Reader = @import("io/Reader.zig"); pub const Writer = @import("io/Writer.zig"); -pub const BufferedReader = @import("io/BufferedReader.zig"); -pub const BufferedWriter = @import("io/BufferedWriter.zig"); pub const AllocatingWriter = @import("io/AllocatingWriter.zig"); pub const ChangeDetectionStream = @import("io/change_detection_stream.zig").ChangeDetectionStream; @@ -131,7 +129,7 @@ pub fn Poller(comptime StreamEnum: type) type { const PollFd = if (is_windows) void else posix.pollfd; gpa: Allocator, - readers: [enum_fields.len]BufferedReader, + readers: [enum_fields.len]Reader, poll_fds: [enum_fields.len]PollFd, windows: if (is_windows) struct { first_read_done: bool, @@ -163,7 +161,7 @@ pub fn Poller(comptime StreamEnum: type) type { _ = windows.kernel32.CancelIo(h); } } - inline for (&self.readers) |*br| gpa.free(br.buffer); + inline for (&self.readers) |*r| gpa.free(r.buffer); self.* = undefined; } @@ -183,7 +181,7 @@ pub fn Poller(comptime StreamEnum: type) type { } } - pub inline fn reader(self: *Self, comptime which: StreamEnum) *BufferedReader { + pub inline fn reader(self: *Self, comptime which: StreamEnum) *Reader { return &self.readers[@intFromEnum(which)]; } @@ -295,18 +293,18 @@ pub fn Poller(comptime StreamEnum: type) type { } var keep_polling = false; - inline for (&self.poll_fds, &self.readers) |*poll_fd, *br| { + inline for (&self.poll_fds, &self.readers) |*poll_fd, *r| { // Try reading whatever is available before checking the error // conditions. // It's still possible to read after a POLL.HUP is received, // always check if there's some data waiting to be read first. if (poll_fd.revents & posix.POLL.IN != 0) { - const buf = try br.writableSliceGreedyAlloc(gpa, bump_amt); + const buf = try r.writableSliceGreedyAlloc(gpa, bump_amt); const amt = posix.read(poll_fd.fd, buf) catch |err| switch (err) { error.BrokenPipe => 0, // Handle the same as EOF. else => |e| return e, }; - br.advanceBufferEnd(amt); + r.advanceBufferEnd(amt); if (amt == 0) { // Remove the fd when the EOF condition is met. 
poll_fd.fd = -1; @@ -337,14 +335,14 @@ var win_dummy_bytes_read: u32 = undefined; fn windowsAsyncReadToFifoAndQueueSmallRead( handle: windows.HANDLE, overlapped: *windows.OVERLAPPED, - br: *BufferedReader, + r: *Reader, small_buf: *[128]u8, bump_amt: usize, ) !enum { empty, populated, closed_populated, closed } { var read_any_data = false; while (true) { const fifo_read_pending = while (true) { - const buf = try br.writableWithSize(bump_amt); + const buf = try r.writableWithSize(bump_amt); const buf_len = math.cast(u32, buf.len) orelse math.maxInt(u32); if (0 == windows.kernel32.ReadFile( @@ -366,7 +364,7 @@ fn windowsAsyncReadToFifoAndQueueSmallRead( }; read_any_data = true; - br.update(num_bytes_read); + r.update(num_bytes_read); if (num_bytes_read == buf_len) { // We filled the buffer, so there's probably more data available. @@ -396,7 +394,7 @@ fn windowsAsyncReadToFifoAndQueueSmallRead( .aborted => break :cancel_read, }; read_any_data = true; - br.update(num_bytes_read); + r.update(num_bytes_read); } // Try to queue the 1-byte read. @@ -421,7 +419,7 @@ fn windowsAsyncReadToFifoAndQueueSmallRead( .closed => return if (read_any_data) .closed_populated else .closed, .aborted => unreachable, }; - try br.write(small_buf[0..num_bytes_read]); + try r.write(small_buf[0..num_bytes_read]); read_any_data = true; } } @@ -488,8 +486,6 @@ pub fn PollFiles(comptime StreamEnum: type) type { test { _ = AllocatingWriter; - _ = BufferedReader; - _ = BufferedWriter; _ = Reader; _ = Writer; _ = @import("io/test.zig"); diff --git a/lib/std/io/AllocatingWriter.zig b/lib/std/io/AllocatingWriter.zig index 31e4a846af..dc10620e78 100644 --- a/lib/std/io/AllocatingWriter.zig +++ b/lib/std/io/AllocatingWriter.zig @@ -6,7 +6,7 @@ //! `std.io.BufferedWriter` state such that it writes to the unused capacity of //! an array list, filling it up completely before making a call through the //! vtable, causing a resize. Consequently, the same, optimized, non-generic -//! machine code that uses `std.io.BufferedReader`, such as formatted printing, +//! machine code that uses `std.io.Reader`, such as formatted printing, //! takes the hot paths when using this API. const std = @import("../std.zig"); diff --git a/lib/std/io/BufferedReader.zig b/lib/std/io/BufferedReader.zig deleted file mode 100644 index e4a41e1670..0000000000 --- a/lib/std/io/BufferedReader.zig +++ /dev/null @@ -1,1230 +0,0 @@ -const builtin = @import("builtin"); -const native_endian = builtin.target.cpu.arch.endian(); - -const std = @import("../std.zig"); -const assert = std.debug.assert; -const testing = std.testing; -const BufferedWriter = std.io.BufferedWriter; -const Reader = std.io.Reader; -const Writer = std.io.Writer; -const Allocator = std.mem.Allocator; -const ArrayList = std.ArrayListUnmanaged; -const Limit = std.io.Limit; - -const BufferedReader = @This(); - -unbuffered_reader: Reader, -buffer: []u8, -/// In `buffer` before this are buffered bytes, after this is `undefined`. -end: usize, -/// Number of bytes which have been consumed from `buffer`. -seek: usize, - -/// Constructs `br` such that it will read from `buffer` and then end. -/// -/// Most methods do not require mutating `buffer`. Those that do are marked, -/// and if they are avoided then `buffer` can be safely used with `@constCast`. 
-pub fn initFixed(br: *BufferedReader, buffer: []u8) void { - br.* = .{ - .unbuffered_reader = .ending, - .buffer = buffer, - .end = buffer.len, - .seek = 0, - }; -} - -pub fn bufferContents(br: *BufferedReader) []u8 { - return br.buffer[br.seek..br.end]; -} - -pub fn bufferedLen(br: *const BufferedReader) usize { - return br.end - br.seek; -} - -/// Although `BufferedReader` can easily satisfy the `Reader` interface, it's -/// generally more practical to pass a `BufferedReader` instance itself around, -/// since it will result in fewer calls across vtable boundaries. -pub fn reader(br: *BufferedReader) Reader { - return .{ - .context = br, - .vtable = &.{ - .read = passthruRead, - .readVec = passthruReadVec, - .discard = passthruDiscard, - }, - }; -} - -pub fn hashed(br: *BufferedReader, hasher: anytype) Reader.Hashed(@TypeOf(hasher)) { - return .{ .in = br, .hasher = hasher }; -} - -/// Equivalent semantics to `std.io.Reader.VTable.readVec`. -pub fn readVec(br: *BufferedReader, data: []const []u8) Reader.Error!usize { - return readVecLimit(br, data, .unlimited); -} - -/// Equivalent semantics to `std.io.Reader.VTable.read`. -pub fn read(br: *BufferedReader, bw: *BufferedWriter, limit: Limit) Reader.StreamError!usize { - return passthruRead(br, bw, limit); -} - -/// Equivalent semantics to `std.io.Reader.VTable.discard`. -pub fn discard(br: *BufferedReader, limit: Limit) Reader.Error!usize { - return passthruDiscard(br, limit); -} - -pub fn readVecAll(br: *BufferedReader, data: [][]u8) Reader.Error!void { - var index: usize = 0; - var truncate: usize = 0; - while (index < data.len) { - { - const untruncated = data[index]; - data[index] = untruncated[truncate..]; - defer data[index] = untruncated; - truncate += try br.readVec(data[index..]); - } - while (index < data.len and truncate >= data[index].len) { - truncate -= data[index].len; - index += 1; - } - } -} - -/// "Pump" data from the reader to the writer. -pub fn readAll(br: *BufferedReader, bw: *BufferedWriter, limit: Limit) Reader.StreamError!void { - var remaining = limit; - while (remaining.nonzero()) { - const n = try br.read(bw, remaining); - remaining = remaining.subtract(n).?; - } -} - -/// "Pump" data from the reader to the writer, handling `error.EndOfStream` as -/// a success case. -/// -/// Returns total number of bytes written to `bw`. -pub fn readRemaining(br: *BufferedReader, bw: *BufferedWriter) Reader.StreamRemainingError!usize { - var offset: usize = 0; - while (true) { - offset += br.read(bw, .unlimited) catch |err| switch (err) { - error.EndOfStream => return offset, - else => |e| return e, - }; - } -} - -/// Equivalent to `readVec` but reads at most `limit` bytes. -pub fn readVecLimit(br: *BufferedReader, data: []const []u8, limit: Limit) Reader.Error!usize { - assert(@intFromEnum(Limit.unlimited) == std.math.maxInt(usize)); - var remaining = @intFromEnum(limit); - for (data, 0..) |buf, i| { - const buffered = br.buffer[br.seek..br.end]; - const copy_len = @min(buffered.len, buf.len, remaining); - @memcpy(buf[0..copy_len], buffered[0..copy_len]); - br.seek += copy_len; - remaining -= copy_len; - if (remaining == 0) break; - if (buf.len - copy_len == 0) continue; - - br.seek = 0; - br.end = 0; - var vecs: [8][]u8 = undefined; // Arbitrarily chosen value. 
- const available_remaining_buf = buf[copy_len..]; - vecs[0] = available_remaining_buf[0..@min(available_remaining_buf.len, remaining)]; - const vec_start_remaining = remaining; - remaining -= vecs[0].len; - var vecs_i: usize = 1; - var data_i: usize = i + 1; - while (true) { - if (vecs.len - vecs_i == 0) { - const n = try br.unbuffered_reader.readVec(&vecs); - return @intFromEnum(limit) - vec_start_remaining + n; - } - if (remaining == 0 or data.len - data_i == 0) { - vecs[vecs_i] = br.buffer; - vecs_i += 1; - const n = try br.unbuffered_reader.readVec(vecs[0..vecs_i]); - const cutoff = vec_start_remaining - remaining; - if (n > cutoff) { - br.end = n - cutoff; - return @intFromEnum(limit) - remaining; - } else { - return @intFromEnum(limit) - vec_start_remaining + n; - } - } - if (data[data_i].len == 0) { - data_i += 1; - continue; - } - const data_elem = data[data_i]; - vecs[vecs_i] = data_elem[0..@min(data_elem.len, remaining)]; - remaining -= vecs[vecs_i].len; - vecs_i += 1; - data_i += 1; - } - } - return @intFromEnum(limit) - remaining; -} - -fn passthruRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) Reader.StreamError!usize { - const br: *BufferedReader = @alignCast(@ptrCast(context)); - const buffer = limit.slice(br.buffer[br.seek..br.end]); - if (buffer.len > 0) { - const n = try bw.write(buffer); - br.seek += n; - return n; - } - return br.unbuffered_reader.read(bw, limit); -} - -fn passthruDiscard(context: ?*anyopaque, limit: Limit) Reader.Error!usize { - const br: *BufferedReader = @alignCast(@ptrCast(context)); - const buffered_len = br.end - br.seek; - const remaining: Limit = if (limit.toInt()) |n| l: { - if (buffered_len >= n) { - br.seek += n; - return n; - } - break :l .limited(n - buffered_len); - } else .unlimited; - br.seek = 0; - br.end = 0; - const n = if (br.unbuffered_reader.discard) |f| try f(remaining) else try br.defaultDiscard(remaining); - return buffered_len + n; -} - -fn passthruReadVec(context: ?*anyopaque, data: []const []u8) Reader.Error!usize { - const br: *BufferedReader = @alignCast(@ptrCast(context)); - return readVecLimit(br, data, .unlimited); -} - -fn defaultDiscard(br: *BufferedReader, limit: Limit) Reader.Error!usize { - assert(br.seek == 0); - assert(br.end == 0); - var bw: BufferedWriter = .{ - .unbuffered_writer = .discarding, - .buffer = br.buffer, - }; - const n = br.read(&bw, limit) catch |err| switch (err) { - error.WriteFailed => unreachable, - error.ReadFailed => return error.ReadFailed, - error.EndOfStream => return error.EndOfStream, - }; - if (n > @intFromEnum(limit)) { - const over_amt = n - @intFromEnum(limit); - assert(over_amt <= bw.buffer.end); // limit may be exceeded only by an amount within buffer capacity. - br.seek = bw.end - over_amt; - br.end = bw.end; - return @intFromEnum(limit); - } - return n; -} - -/// Returns the next `len` bytes from `unbuffered_reader`, filling the buffer as -/// necessary. -/// -/// Invalidates previously returned values from `peek`. -/// -/// Asserts that the `BufferedReader` was initialized with a buffer capacity at -/// least as big as `len`. -/// -/// If there are fewer than `len` bytes left in the stream, `error.EndOfStream` -/// is returned instead. -/// -/// See also: -/// * `peek` -/// * `toss` -pub fn peek(br: *BufferedReader, n: usize) Reader.Error![]u8 { - try br.fill(n); - return br.buffer[br.seek..][0..n]; -} - -/// Returns all the next buffered bytes from `unbuffered_reader`, after filling -/// the buffer to ensure it contains at least `n` bytes. 
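-///
-/// For example, a sketch that inspects whatever is buffered once at least
-/// four bytes are available:
-///
-///     const bytes = try br.peekGreedy(4); // bytes.len >= 4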
-/// -/// Invalidates previously returned values from `peek` and `peekGreedy`. -/// -/// Asserts that the `BufferedReader` was initialized with a buffer capacity at -/// least as big as `n`. -/// -/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream` -/// is returned instead. -/// -/// See also: -/// * `peek` -/// * `toss` -pub fn peekGreedy(br: *BufferedReader, n: usize) Reader.Error![]u8 { - try br.fill(n); - return br.buffer[br.seek..br.end]; -} - -/// Skips the next `n` bytes from the stream, advancing the seek position. This -/// is typically and safely used after `peek`. -/// -/// Asserts that the number of bytes buffered is at least as many as `n`. -/// -/// The "tossed" memory remains alive until a "peek" operation occurs. -/// -/// See also: -/// * `peek`. -/// * `discard`. -pub fn toss(br: *BufferedReader, n: usize) void { - br.seek += n; - assert(br.seek <= br.end); -} - -/// Equivalent to `toss(br.bufferedLen())`. -pub fn tossAll(br: *BufferedReader) void { - br.seek = 0; - br.end = 0; -} - -/// Equivalent to `peek` followed by `toss`. -/// -/// The data returned is invalidated by the next call to `take`, `peek`, -/// `fill`, and functions with those prefixes. -pub fn take(br: *BufferedReader, n: usize) Reader.Error![]u8 { - const result = try br.peek(n); - br.toss(n); - return result; -} - -/// Returns the next `n` bytes from `unbuffered_reader` as an array, filling -/// the buffer as necessary and advancing the seek position `n` bytes. -/// -/// Asserts that the `BufferedReader` was initialized with a buffer capacity at -/// least as big as `n`. -/// -/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream` -/// is returned instead. -/// -/// See also: -/// * `take` -pub fn takeArray(br: *BufferedReader, comptime n: usize) Reader.Error!*[n]u8 { - return (try br.take(n))[0..n]; -} - -/// Returns the next `n` bytes from `unbuffered_reader` as an array, filling -/// the buffer as necessary, without advancing the seek position. -/// -/// Asserts that the `BufferedReader` was initialized with a buffer capacity at -/// least as big as `n`. -/// -/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream` -/// is returned instead. -/// -/// See also: -/// * `peek` -/// * `takeArray` -pub fn peekArray(br: *BufferedReader, comptime n: usize) Reader.Error!*[n]u8 { - return (try br.peek(n))[0..n]; -} - -/// Skips the next `n` bytes from the stream, advancing the seek position. -/// -/// Unlike `toss` which is infallible, in this function `n` can be any amount. -/// -/// Returns `error.EndOfStream` if fewer than `n` bytes could be discarded. -/// -/// See also: -/// * `toss` -/// * `discardRemaining` -/// * `discardShort` -/// * `discard` -pub fn discardAll(br: *BufferedReader, n: usize) Reader.Error!void { - if ((try br.discardShort(n)) != n) return error.EndOfStream; -} - -pub fn discardAll64(br: *BufferedReader, n: u64) Reader.Error!void { - var remaining: u64 = n; - while (remaining > 0) { - const limited = std.math.cast(usize, remaining) orelse std.math.maxInt(usize); - try discardAll(br, limited); - remaining -= limited; - } -} - -/// Skips the next `n` bytes from the stream, advancing the seek position. -/// -/// Unlike `toss` which is infallible, in this function `n` can be any amount. -/// -/// Returns the number of bytes discarded, which is less than `n` if and only -/// if the stream reached the end. 
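-///
-/// Sketch: a short count signals end of stream rather than an error:
-///
-///     br.initFixed(@constCast("abcd"));
-///     const n = try br.discardShort(10); // n == 4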
-/// -/// See also: -/// * `discardAll` -/// * `discardRemaining` -/// * `discard` -pub fn discardShort(br: *BufferedReader, n: usize) Reader.ShortError!usize { - const proposed_seek = br.seek + n; - if (proposed_seek <= br.end) { - @branchHint(.likely); - br.seek = proposed_seek; - return n; - } - var remaining = n - (br.end - br.seek); - br.end = 0; - br.seek = 0; - while (true) { - const discard_len = br.unbuffered_reader.discard(.limited(remaining)) catch |err| switch (err) { - error.EndOfStream => return n - remaining, - error.ReadFailed => return error.ReadFailed, - }; - remaining -= discard_len; - if (remaining == 0) return n; - } -} - -/// Reads the stream until the end, ignoring all the data. -/// Returns the number of bytes discarded. -pub fn discardRemaining(br: *BufferedReader) Reader.ShortError!usize { - const buffered_len = br.end; - br.end = 0; - return buffered_len + try br.unbuffered_reader.discardRemaining(); -} - -/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing -/// the seek position. -/// -/// Invalidates previously returned values from `peek`. -/// -/// If the provided buffer cannot be filled completely, `error.EndOfStream` is -/// returned instead. -/// -/// See also: -/// * `peek` -/// * `readSliceShort` -pub fn readSlice(br: *BufferedReader, buffer: []u8) Reader.Error!void { - const n = try readSliceShort(br, buffer); - if (n != buffer.len) return error.EndOfStream; -} - -/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing -/// the seek position. -/// -/// Invalidates previously returned values from `peek`. -/// -/// Returns the number of bytes read, which is less than `buffer.len` if and -/// only if the stream reached the end. -/// -/// See also: -/// * `readSlice` -pub fn readSliceShort(br: *BufferedReader, buffer: []u8) Reader.ShortError!usize { - const in_buffer = br.buffer[br.seek..br.end]; - const copy_len = @min(buffer.len, in_buffer.len); - @memcpy(buffer[0..copy_len], in_buffer[0..copy_len]); - if (buffer.len - copy_len == 0) { - br.seek += copy_len; - return buffer.len; - } - var i: usize = copy_len; - br.end = 0; - br.seek = 0; - while (true) { - const remaining = buffer[i..]; - const n = br.unbuffered_reader.readVec(&.{ remaining, br.buffer }) catch |err| switch (err) { - error.EndOfStream => return i, - error.ReadFailed => return error.ReadFailed, - }; - if (n < remaining.len) { - i += n; - continue; - } - br.end = n - remaining.len; - return buffer.len; - } -} - -/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing -/// the seek position. -/// -/// Invalidates previously returned values from `peek`. -/// -/// If the provided buffer cannot be filled completely, `error.EndOfStream` is -/// returned instead. -/// -/// The function is inline to avoid the dead code in case `endian` is -/// comptime-known and matches host endianness. -/// -/// See also: -/// * `readSlice` -/// * `readSliceEndianAlloc` -pub inline fn readSliceEndian( - br: *BufferedReader, - comptime Elem: type, - buffer: []Elem, - endian: std.builtin.Endian, -) Reader.Error!void { - try readSlice(br, @ptrCast(buffer)); - if (native_endian != endian) for (buffer) |*elem| std.mem.byteSwapAllFields(Elem, elem); -} - -pub const ReadAllocError = Reader.Error || Allocator.Error; - -/// The function is inline to avoid the dead code in case `endian` is -/// comptime-known and matches host endianness. 
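-///
-/// Hypothetical sketch (`gpa` stands in for any allocator): read two
-/// little-endian `u16` values into a caller-owned slice:
-///
-///     const vals = try br.readSliceEndianAlloc(gpa, u16, 2, .little);
-///     defer gpa.free(vals);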
-pub inline fn readSliceEndianAlloc( - br: *BufferedReader, - allocator: Allocator, - comptime Elem: type, - len: usize, - endian: std.builtin.Endian, -) ReadAllocError![]Elem { - const dest = try allocator.alloc(Elem, len); - errdefer allocator.free(dest); - try readSlice(br, @ptrCast(dest)); - if (native_endian != endian) for (dest) |*elem| std.mem.byteSwapAllFields(Elem, elem); - return dest; -} - -pub fn readSliceAlloc(br: *BufferedReader, allocator: Allocator, len: usize) ReadAllocError![]u8 { - const dest = try allocator.alloc(u8, len); - errdefer allocator.free(dest); - try readSlice(br, dest); - return dest; -} - -/// Transfers all bytes from the current position to the end of the stream, up -/// to `limit`, returning them as a caller-owned allocated slice. -/// -/// If `limit` is exceeded, returns `error.StreamTooLong`. In such case, the -/// stream is advanced an unspecified amount, and the consumed data is -/// unrecoverable. The other function listed below does not have this caveat. -/// -/// Asserts `br` was initialized with at least one byte of storage capacity. -/// -/// See also: -/// * `readRemainingArrayList` -pub fn readRemainingAlloc(r: Reader, gpa: Allocator, limit: Limit) Reader.LimitedAllocError![]u8 { - var buffer: ArrayList(u8) = .empty; - defer buffer.deinit(gpa); - try readRemainingArrayList(r, gpa, null, &buffer, limit); - return buffer.toOwnedSlice(gpa); -} - -/// Transfers all bytes from the current position to the end of the stream, up -/// to `limit`, appending them to `list`. -/// -/// If `limit` would be exceeded, `error.StreamTooLong` is returned instead. In -/// such case, the stream is in a well-defined state. The next byte that would -/// be read will be the first one to exceed `limit`, and all preceeding bytes -/// have been appended to `list`. -/// -/// Asserts `br` was initialized with at least one byte of storage capacity. -/// -/// See also: -/// * `readRemainingAlloc` -pub fn readRemainingArrayList( - br: *BufferedReader, - gpa: Allocator, - comptime alignment: ?std.mem.Alignment, - list: *std.ArrayListAlignedUnmanaged(u8, alignment), - limit: Limit, -) Reader.LimitedAllocError!void { - const buffer = br.buffer; - const buffered = buffer[br.seek..br.end]; - const copy_len = limit.minInt(buffered.len); - try list.ensureUnusedCapacity(gpa, copy_len); - @memcpy(list.unusedCapacitySlice()[0..copy_len], buffer[0..copy_len]); - list.items.len += copy_len; - br.seek += copy_len; - if (copy_len == buffered.len) { - br.seek = 0; - br.end = 0; - } - var remaining = limit.subtract(copy_len).?; - while (true) { - try list.ensureUnusedCapacity(gpa, 1); - const dest = remaining.slice(list.unusedCapacitySlice()); - const additional_buffer = if (@intFromEnum(remaining) == dest.len) buffer else &.{}; - const n = br.unbuffered_reader.readVec(&.{ dest, additional_buffer }) catch |err| switch (err) { - error.EndOfStream => break, - error.ReadFailed => return error.ReadFailed, - }; - if (n >= dest.len) { - br.end = n - dest.len; - list.items.len += dest.len; - if (n == dest.len) return; - return error.StreamTooLong; - } - list.items.len += n; - remaining = remaining.subtract(n).?; - } -} - -pub const DelimiterError = error{ - /// See the `Reader` implementation for detailed diagnostics. - ReadFailed, - /// For "inclusive" functions, stream ended before the delimiter was found. - /// For "exclusive" functions, stream ended and there are no more bytes to - /// return. 
- EndOfStream, - /// The delimiter was not found within a number of bytes matching the - /// capacity of the `BufferedReader`. - StreamTooLong, -}; - -/// Returns a slice of the next bytes of buffered data from the stream until -/// `sentinel` is found, advancing the seek position. -/// -/// Returned slice has a sentinel. -/// -/// Invalidates previously returned values from `peek`. -/// -/// See also: -/// * `peekSentinel` -/// * `takeDelimiterExclusive` -/// * `takeDelimiterInclusive` -pub fn takeSentinel(br: *BufferedReader, comptime sentinel: u8) DelimiterError![:sentinel]u8 { - const result = try br.peekSentinel(sentinel); - br.toss(result.len + 1); - return result; -} - -pub fn peekSentinel(br: *BufferedReader, comptime sentinel: u8) DelimiterError![:sentinel]u8 { - const result = try br.peekDelimiterInclusive(sentinel); - return result[0 .. result.len - 1 :sentinel]; -} - -/// Returns a slice of the next bytes of buffered data from the stream until -/// `delimiter` is found, advancing the seek position. -/// -/// Returned slice includes the delimiter as the last byte. -/// -/// Invalidates previously returned values from `peek`. -/// -/// See also: -/// * `takeSentinel` -/// * `takeDelimiterExclusive` -/// * `peekDelimiterInclusive` -pub fn takeDelimiterInclusive(br: *BufferedReader, delimiter: u8) DelimiterError![]u8 { - const result = try br.peekDelimiterInclusive(delimiter); - br.toss(result.len); - return result; -} - -/// Returns a slice of the next bytes of buffered data from the stream until -/// `delimiter` is found, without advancing the seek position. -/// -/// Returned slice includes the delimiter as the last byte. -/// -/// Invalidates previously returned values from `peek`. -/// -/// See also: -/// * `peekSentinel` -/// * `peekDelimiterExclusive` -/// * `takeDelimiterInclusive` -pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) DelimiterError![]u8 { - const buffer = br.buffer[0..br.end]; - const seek = br.seek; - if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| { - @branchHint(.likely); - return buffer[seek .. end + 1]; - } - if (seek > 0) { - const remainder = buffer[seek..]; - std.mem.copyForwards(u8, buffer[0..remainder.len], remainder); - br.end = remainder.len; - br.seek = 0; - } - while (br.end < br.buffer.len) { - const n = try br.unbuffered_reader.readVec(&.{br.buffer[br.end..]}); - const prev_end = br.end; - br.end = prev_end + n; - if (std.mem.indexOfScalarPos(u8, br.buffer[0..br.end], prev_end, delimiter)) |end| { - return br.buffer[0 .. end + 1]; - } - } - return error.StreamTooLong; -} - -/// Returns a slice of the next bytes of buffered data from the stream until -/// `delimiter` is found, advancing the seek position. -/// -/// Returned slice excludes the delimiter. End-of-stream is treated equivalent -/// to a delimiter, unless it would result in a length 0 return value, in which -/// case `error.EndOfStream` is returned instead. -/// -/// If the delimiter is not found within a number of bytes matching the -/// capacity of this `BufferedReader`, `error.StreamTooLong` is returned. In -/// such case, the stream state is unmodified as if this function was never -/// called. -/// -/// Invalidates previously returned values from `peek`. 
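-///
-/// Sketch: iterate over lines, treating an unterminated final line as a
-/// line (`process` is hypothetical):
-///
-///     while (br.takeDelimiterExclusive('\n')) |line| {
-///         process(line);
-///     } else |err| switch (err) {
-///         error.EndOfStream => {},
-///         else => |e| return e,
-///     }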
-/// -/// See also: -/// * `takeDelimiterInclusive` -/// * `peekDelimiterExclusive` -pub fn takeDelimiterExclusive(br: *BufferedReader, delimiter: u8) DelimiterError![]u8 { - const result = br.peekDelimiterInclusive(delimiter) catch |err| switch (err) { - error.EndOfStream => { - if (br.end == 0) return error.EndOfStream; - br.toss(br.end); - return br.buffer[0..br.end]; - }, - else => |e| return e, - }; - br.toss(result.len); - return result[0 .. result.len - 1]; -} - -/// Returns a slice of the next bytes of buffered data from the stream until -/// `delimiter` is found, without advancing the seek position. -/// -/// Returned slice excludes the delimiter. End-of-stream is treated equivalent -/// to a delimiter, unless it would result in a length 0 return value, in which -/// case `error.EndOfStream` is returned instead. -/// -/// If the delimiter is not found within a number of bytes matching the -/// capacity of this `BufferedReader`, `error.StreamTooLong` is returned. In -/// such case, the stream state is unmodified as if this function was never -/// called. -/// -/// Invalidates previously returned values from `peek`. -/// -/// See also: -/// * `peekDelimiterInclusive` -/// * `takeDelimiterExclusive` -pub fn peekDelimiterExclusive(br: *BufferedReader, delimiter: u8) DelimiterError![]u8 { - const result = br.peekDelimiterInclusive(delimiter) catch |err| switch (err) { - error.EndOfStream => { - if (br.end == 0) return error.EndOfStream; - return br.buffer[0..br.end]; - }, - else => |e| return e, - }; - return result[0 .. result.len - 1]; -} - -/// Appends to `bw` contents by reading from the stream until `delimiter` is -/// found. Does not write the delimiter itself. -/// -/// Returns number of bytes streamed. -pub fn readDelimiter(br: *BufferedReader, bw: *BufferedWriter, delimiter: u8) Reader.StreamError!usize { - const amount, const to = try br.readAny(bw, delimiter, .unlimited); - return switch (to) { - .delimiter => amount, - .limit => unreachable, - .end => error.EndOfStream, - }; -} - -/// Appends to `bw` contents by reading from the stream until `delimiter` is found. -/// Does not write the delimiter itself. -/// -/// Succeeds if stream ends before delimiter found. -/// -/// Returns number of bytes streamed. The end is not signaled to the writer. -pub fn readDelimiterEnding( - br: *BufferedReader, - bw: *BufferedWriter, - delimiter: u8, -) Reader.StreamRemainingError!usize { - const amount, const to = try br.readAny(bw, delimiter, .unlimited); - return switch (to) { - .delimiter, .end => amount, - .limit => unreachable, - }; -} - -pub const StreamDelimiterLimitedError = Reader.StreamRemainingError || error{ - /// Stream ended before the delimiter was found. - EndOfStream, - /// The delimiter was not found within the limit. - StreamTooLong, -}; - -/// Appends to `bw` contents by reading from the stream until `delimiter` is found. -/// Does not write the delimiter itself. -/// -/// Returns number of bytes streamed. 
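-///
-/// Sketch (`bw` stands in for any writer): copy one line, refusing lines
-/// longer than 80 bytes:
-///
-///     const n = try br.readDelimiterLimit(&bw, '\n', .limited(80));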
-pub fn readDelimiterLimit( - br: *BufferedReader, - bw: *BufferedWriter, - delimiter: u8, - limit: Limit, -) StreamDelimiterLimitedError!usize { - const amount, const to = try br.readAny(bw, delimiter, limit); - return switch (to) { - .delimiter => amount, - .limit => error.StreamTooLong, - .end => error.EndOfStream, - }; -} - -fn readAny( - br: *BufferedReader, - bw: *BufferedWriter, - delimiter: ?u8, - limit: Limit, -) Reader.StreamRemainingError!struct { usize, enum { delimiter, limit, end } } { - var amount: usize = 0; - var remaining = limit; - while (remaining.nonzero()) { - const available = remaining.slice(br.peekGreedy(1) catch |err| switch (err) { - error.ReadFailed => |e| return e, - error.EndOfStream => return .{ amount, .end }, - }); - if (delimiter) |d| if (std.mem.indexOfScalar(u8, available, d)) |delimiter_index| { - try bw.writeAll(available[0..delimiter_index]); - br.toss(delimiter_index + 1); - return .{ amount + delimiter_index, .delimiter }; - }; - try bw.writeAll(available); - br.toss(available.len); - amount += available.len; - remaining = remaining.subtract(available.len).?; - } - return .{ amount, .limit }; -} - -/// Reads from the stream until specified byte is found, discarding all data, -/// including the delimiter. -/// -/// If end of stream is found, this function succeeds. -pub fn discardDelimiterInclusive(br: *BufferedReader, delimiter: u8) Reader.Error!void { - _ = br; - _ = delimiter; - @panic("TODO"); -} - -/// Reads from the stream until specified byte is found, discarding all data, -/// excluding the delimiter. -/// -/// Succeeds if stream ends before delimiter found. -pub fn discardDelimiterExclusive(br: *BufferedReader, delimiter: u8) Reader.ShortError!void { - _ = br; - _ = delimiter; - @panic("TODO"); -} - -/// Fills the buffer such that it contains at least `n` bytes, without -/// advancing the seek position. -/// -/// Returns `error.EndOfStream` if and only if there are fewer than `n` bytes -/// remaining. -/// -/// Asserts buffer capacity is at least `n`. -pub fn fill(br: *BufferedReader, n: usize) Reader.Error!void { - assert(n <= br.buffer.len); - if (br.seek + n <= br.end) { - @branchHint(.likely); - return; - } - rebaseCapacity(br, n); - while (br.end < br.seek + n) { - br.end += try br.unbuffered_reader.readVec(&.{br.buffer[br.end..]}); - } -} - -/// Without advancing the seek position, does exactly one underlying read, filling the buffer as -/// much as possible. This may result in zero bytes added to the buffer, which is not an end of -/// stream condition. End of stream is communicated via returning `error.EndOfStream`. -/// -/// Asserts buffer capacity is at least 1. -pub fn fillMore(br: *BufferedReader) Reader.Error!void { - rebaseCapacity(br, 1); - br.end += try br.unbuffered_reader.readVec(&.{br.buffer[br.end..]}); -} - -/// Returns the next byte from the stream or returns `error.EndOfStream`. -/// -/// Does not advance the seek position. -/// -/// Asserts the buffer capacity is nonzero. -pub fn peekByte(br: *BufferedReader) Reader.Error!u8 { - const buffer = br.buffer[0..br.end]; - const seek = br.seek; - if (seek >= buffer.len) { - @branchHint(.unlikely); - try fill(br, 1); - } - return buffer[seek]; -} - -/// Reads 1 byte from the stream or returns `error.EndOfStream`. -/// -/// Asserts the buffer capacity is nonzero. -pub fn takeByte(br: *BufferedReader) Reader.Error!u8 { - const result = try peekByte(br); - br.seek += 1; - return result; -} - -/// Same as `takeByte` except the returned byte is signed. 
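-///
-/// Sketch: the byte 0xFF reads back as -1:
-///
-///     br.initFixed(@constCast("\xff"));
-///     const v = try br.takeByteSigned(); // v == -1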
-pub fn takeByteSigned(br: *BufferedReader) Reader.Error!i8 { - return @bitCast(try br.takeByte()); -} - -/// Asserts the buffer was initialized with a capacity at least `@bitSizeOf(T) / 8`. -pub inline fn takeInt(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) Reader.Error!T { - const n = @divExact(@typeInfo(T).int.bits, 8); - return std.mem.readInt(T, try br.takeArray(n), endian); -} - -/// Asserts the buffer was initialized with a capacity at least `n`. -pub fn takeVarInt(br: *BufferedReader, comptime Int: type, endian: std.builtin.Endian, n: usize) Reader.Error!Int { - assert(n <= @sizeOf(Int)); - return std.mem.readVarInt(Int, try br.take(n), endian); -} - -/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`. -/// -/// Advances the seek position. -/// -/// See also: -/// * `peekStruct` -pub fn takeStruct(br: *BufferedReader, comptime T: type) Reader.Error!*align(1) T { - // Only extern and packed structs have defined in-memory layout. - comptime assert(@typeInfo(T).@"struct".layout != .auto); - return @ptrCast(try br.takeArray(@sizeOf(T))); -} - -/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`. -/// -/// Does not advance the seek position. -/// -/// See also: -/// * `takeStruct` -pub fn peekStruct(br: *BufferedReader, comptime T: type) Reader.Error!*align(1) T { - // Only extern and packed structs have defined in-memory layout. - comptime assert(@typeInfo(T).@"struct".layout != .auto); - return @ptrCast(try br.peekArray(@sizeOf(T))); -} - -/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`. -/// -/// This function is inline to avoid referencing `std.mem.byteSwapAllFields` -/// when `endian` is comptime-known and matches the host endianness. -pub inline fn takeStructEndian(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) Reader.Error!T { - var res = (try br.takeStruct(T)).*; - if (native_endian != endian) std.mem.byteSwapAllFields(T, &res); - return res; -} - -/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`. -/// -/// This function is inline to avoid referencing `std.mem.byteSwapAllFields` -/// when `endian` is comptime-known and matches the host endianness. -pub inline fn peekStructEndian(br: *BufferedReader, comptime T: type, endian: std.builtin.Endian) Reader.Error!T { - var res = (try br.peekStruct(T)).*; - if (native_endian != endian) std.mem.byteSwapAllFields(T, &res); - return res; -} - -pub const TakeEnumError = Reader.Error || error{InvalidEnumTag}; - -/// Reads an integer with the same size as the given enum's tag type. If the -/// integer matches an enum tag, casts the integer to the enum tag and returns -/// it. Otherwise, returns `error.InvalidEnumTag`. -/// -/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`. -pub fn takeEnum(br: *BufferedReader, comptime Enum: type, endian: std.builtin.Endian) TakeEnumError!Enum { - const Tag = @typeInfo(Enum).@"enum".tag_type; - const int = try br.takeInt(Tag, endian); - return std.meta.intToEnum(Enum, int); -} - -/// Reads an integer with the same size as the given nonexhaustive enum's tag type. -/// -/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`. 
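-///
-/// Sketch with a hypothetical nonexhaustive enum:
-///
-///     const Color = enum(u8) { red, green, _ };
-///     const c = try br.takeEnumNonexhaustive(Color, .little);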
-pub fn takeEnumNonexhaustive(br: *BufferedReader, comptime Enum: type, endian: std.builtin.Endian) Reader.Error!Enum { - const info = @typeInfo(Enum).@"enum"; - comptime assert(!info.is_exhaustive); - comptime assert(@bitSizeOf(info.tag_type) == @sizeOf(info.tag_type) * 8); - return takeEnum(br, Enum, endian) catch |err| switch (err) { - error.InvalidEnumTag => unreachable, - else => |e| return e, - }; -} - -pub const TakeLeb128Error = Reader.Error || error{Overflow}; - -/// Read a single LEB128 value as type T, or `error.Overflow` if the value cannot fit. -pub fn takeLeb128(br: *BufferedReader, comptime Result: type) TakeLeb128Error!Result { - const result_info = @typeInfo(Result).int; - return std.math.cast(Result, try br.takeMultipleOf7Leb128(@Type(.{ .int = .{ - .signedness = result_info.signedness, - .bits = std.mem.alignForwardAnyAlign(u16, result_info.bits, 7), - } }))) orelse error.Overflow; -} - -pub fn expandTotalCapacity(br: *BufferedReader, allocator: Allocator, n: usize) Allocator.Error!void { - if (n <= br.buffer.len) return; - if (br.seek > 0) rebase(br); - var list: ArrayList(u8) = .{ - .items = br.buffer[0..br.end], - .capacity = br.buffer.len, - }; - defer br.buffer = list.allocatedSlice(); - try list.ensureTotalCapacity(allocator, n); -} - -pub const FillAllocError = Reader.Error || Allocator.Error; - -pub fn fillAlloc(br: *BufferedReader, allocator: Allocator, n: usize) FillAllocError!void { - try expandTotalCapacity(br, allocator, n); - return fill(br, n); -} - -/// Returns a slice into the unused capacity of `buffer` with at least -/// `min_len` bytes, extending `buffer` by resizing it with `gpa` as necessary. -/// -/// After calling this function, typically the caller will follow up with a -/// call to `advanceBufferEnd` to report the actual number of bytes buffered. -pub fn writableSliceGreedyAlloc(br: *BufferedReader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 { - { - const unused = br.buffer[br.end..]; - if (unused.len >= min_len) return unused; - } - if (br.seek > 0) rebase(br); - { - var list: ArrayList(u8) = .{ - .items = br.buffer[0..br.end], - .capacity = br.buffer.len, - }; - defer br.buffer = list.allocatedSlice(); - try list.ensureUnusedCapacity(allocator, min_len); - } - const unused = br.buffer[br.end..]; - assert(unused.len >= min_len); - return unused; -} - -/// After writing directly into the unused capacity of `buffer`, this function -/// updates `end` so that users of `BufferedReader` can receive the data. -pub fn advanceBufferEnd(br: *BufferedReader, n: usize) void { - assert(n <= br.buffer.len - br.end); - br.end += n; -} - -fn takeMultipleOf7Leb128(br: *BufferedReader, comptime Result: type) TakeLeb128Error!Result { - const result_info = @typeInfo(Result).int; - comptime assert(result_info.bits % 7 == 0); - var remaining_bits: std.math.Log2IntCeil(Result) = result_info.bits; - const UnsignedResult = @Type(.{ .int = .{ - .signedness = .unsigned, - .bits = result_info.bits, - } }); - var result: UnsignedResult = 0; - var fits = true; - while (true) { - const buffer: []const packed struct(u8) { bits: u7, more: bool } = @ptrCast(try br.peekGreedy(1)); - for (buffer, 1..) 
|byte, len| { - if (remaining_bits > 0) { - result = @shlExact(@as(UnsignedResult, byte.bits), result_info.bits - 7) | - if (result_info.bits > 7) @shrExact(result, 7) else 0; - remaining_bits -= 7; - } else if (fits) fits = switch (result_info.signedness) { - .signed => @as(i7, @bitCast(byte.bits)) == - @as(i7, @truncate(@as(Result, @bitCast(result)) >> (result_info.bits - 1))), - .unsigned => byte.bits == 0, - }; - if (byte.more) continue; - br.toss(len); - return if (fits) @as(Result, @bitCast(result)) >> remaining_bits else error.Overflow; - } - br.toss(buffer.len); - } -} - -/// Left-aligns data such that `br.seek` becomes zero. -pub fn rebase(br: *BufferedReader) void { - const data = br.buffer[br.seek..br.end]; - const dest = br.buffer[0..data.len]; - std.mem.copyForwards(u8, dest, data); - br.seek = 0; - br.end = data.len; -} - -/// Ensures `capacity` more data can be buffered without rebasing, by rebasing -/// if necessary. -/// -/// Asserts `capacity` is within the buffer capacity. -pub fn rebaseCapacity(br: *BufferedReader, capacity: usize) void { - if (br.end > br.buffer.len - capacity) rebase(br); -} - -/// Advances the stream and decreases the size of the storage buffer by `n`, -/// returning the range of bytes no longer accessible by `br`. -/// -/// This action can be undone by `restitute`. -/// -/// Asserts there are at least `n` buffered bytes already. -/// -/// Asserts that `br.seek` is zero, i.e. the buffer is in a rebased state. -pub fn steal(br: *BufferedReader, n: usize) []u8 { - assert(br.seek == 0); - assert(n <= br.end); - const stolen = br.buffer[0..n]; - br.buffer = br.buffer[n..]; - br.end -= n; - return stolen; -} - -/// Expands the storage buffer, undoing the effects of `steal` -/// Assumes that `n` does not exceed the total number of stolen bytes. -pub fn restitute(br: *BufferedReader, n: usize) void { - br.buffer = (br.buffer.ptr - n)[0 .. 
br.buffer.len + n]; - br.end += n; - br.seek += n; -} - -test initFixed { - var br: BufferedReader = undefined; - br.initFixed("a\x02"); - try testing.expect((try br.takeByte()) == 'a'); - try testing.expect((try br.takeEnum(enum(u8) { - a = 0, - b = 99, - c = 2, - d = 3, - }, builtin.cpu.arch.endian())) == .c); - try testing.expectError(error.EndOfStream, br.takeByte()); -} - -test peek { - return error.Unimplemented; -} - -test peekGreedy { - return error.Unimplemented; -} - -test toss { - return error.Unimplemented; -} - -test take { - return error.Unimplemented; -} - -test takeArray { - return error.Unimplemented; -} - -test peekArray { - return error.Unimplemented; -} - -test discardAll { - var br: BufferedReader = undefined; - br.initFixed("foobar"); - try br.discard(3); - try testing.expectEqualStrings("bar", try br.take(3)); - try br.discard(0); - try testing.expectError(error.EndOfStream, br.discard(1)); -} - -test discardRemaining { - return error.Unimplemented; -} - -test read { - return error.Unimplemented; -} - -test takeSentinel { - return error.Unimplemented; -} - -test peekSentinel { - return error.Unimplemented; -} - -test takeDelimiterInclusive { - return error.Unimplemented; -} - -test peekDelimiterInclusive { - return error.Unimplemented; -} - -test takeDelimiterExclusive { - return error.Unimplemented; -} - -test peekDelimiterExclusive { - return error.Unimplemented; -} - -test readDelimiter { - return error.Unimplemented; -} - -test readDelimiterEnding { - return error.Unimplemented; -} - -test readDelimiterLimit { - return error.Unimplemented; -} - -test discardDelimiterExclusive { - return error.Unimplemented; -} - -test discardDelimiterInclusive { - return error.Unimplemented; -} - -test fill { - return error.Unimplemented; -} - -test takeByte { - return error.Unimplemented; -} - -test takeByteSigned { - return error.Unimplemented; -} - -test takeInt { - return error.Unimplemented; -} - -test takeVarInt { - return error.Unimplemented; -} - -test takeStruct { - return error.Unimplemented; -} - -test peekStruct { - return error.Unimplemented; -} - -test takeStructEndian { - return error.Unimplemented; -} - -test peekStructEndian { - return error.Unimplemented; -} - -test takeEnum { - return error.Unimplemented; -} - -test takeLeb128 { - return error.Unimplemented; -} - -test readSliceShort { - return error.Unimplemented; -} - -test readVec { - return error.Unimplemented; -} - -test "expected error.EndOfStream" { - // Unit test inspired by https://github.com/ziglang/zig/issues/17733 - var br: std.io.BufferedReader = undefined; - br.initFixed(""); - try std.testing.expectError(error.EndOfStream, br.readEnum(enum(u8) { a, b }, .little)); - try std.testing.expectError(error.EndOfStream, br.isBytes("foo")); -} diff --git a/lib/std/io/Reader.zig b/lib/std/io/Reader.zig index d91edebfc2..27d9170e3b 100644 --- a/lib/std/io/Reader.zig +++ b/lib/std/io/Reader.zig @@ -1,8 +1,12 @@ -const std = @import("../std.zig"); const Reader = @This(); + +const builtin = @import("builtin"); +const native_endian = builtin.target.cpu.arch.endian(); + +const std = @import("../std.zig"); +const Writer = std.io.Writer; const assert = std.debug.assert; -const BufferedWriter = std.io.BufferedWriter; -const BufferedReader = std.io.BufferedReader; +const testing = std.testing; const Allocator = std.mem.Allocator; const ArrayList = std.ArrayListUnmanaged; const Limit = std.io.Limit; @@ -11,9 +15,14 @@ pub const Limited = @import("Reader/Limited.zig"); context: ?*anyopaque, vtable: *const VTable, 
+buffer: []u8,
+/// Number of bytes which have been consumed from `buffer`.
+seek: usize,
+/// In `buffer` before this are buffered bytes, after this is `undefined`.
+end: usize,
 
 pub const VTable = struct {
-    /// Writes bytes from the internally tracked stream position to `bw`.
+    /// Writes bytes from the internally tracked logical position to `w`.
     ///
     /// Returns the number of bytes written, which will be at minimum `0` and
     /// at most `limit`. The number returned, including zero, does not indicate
@@ -26,7 +35,7 @@ pub const VTable = struct {
     /// Implementations are encouraged to utilize mandatory minimum buffer
     /// sizes combined with short reads (returning a value less than `limit`)
     /// in order to minimize complexity.
-    read: *const fn (context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) StreamError!usize,
+    stream: *const fn (r: *Reader, w: *Writer, limit: Limit) StreamError!usize,
 
     /// Consumes bytes from the internally tracked stream position without
     /// providing access to them.
@@ -42,11 +51,10 @@ pub const VTable = struct {
     /// sizes combined with short reads (returning a value less than `limit`)
     /// in order to minimize complexity.
    ///
-    /// If an implementation sets this to `null`, a default implementation is
-    /// provided which is based on calling `read`, borrowing
-    /// `BufferedReader.buffer` to construct a temporary `BufferedWriter` and
-    /// ignoring the written data.
-    discard: ?*const fn (context: ?*anyopaque, limit: Limit) Error!usize = null,
+    /// The default implementation is based on calling `stream`, borrowing
+    /// `buffer` to construct a temporary `Writer` and ignoring the written
+    /// data.
+    discard: *const fn (r: *Reader, limit: Limit) Error!usize = defaultDiscard,
 };
 
 pub const StreamError = error{
@@ -77,26 +85,97 @@ pub const ShortError = error{
     ReadFailed,
 };
 
-pub fn read(r: Reader, bw: *BufferedWriter, limit: Limit) StreamError!usize {
-    const before = bw.count;
-    const n = try r.vtable.read(r.context, bw, limit);
+pub const failing: Reader = .{
+    .context = undefined,
+    .vtable = &.{
+        .stream = failingRead,
+        .discard = failingDiscard,
+    },
+    .buffer = &.{},
+    .seek = 0,
+    .end = 0,
+};
+
+pub const ending: Reader = .fixed(&.{});
+
+pub fn limited(r: *Reader, limit: Limit, buffer: []u8) Limited {
+    return Limited.init(r, limit, buffer);
+}
+
+/// Constructs a `Reader` such that it will read from `buffer` and then end.
+pub fn fixed(buffer: []const u8) Reader {
+    return .{
+        .context = undefined,
+        .vtable = &.{
+            .stream = endingRead,
+            .discard = endingDiscard,
+        },
+        // This cast is safe because all potential writes to it will instead
+        // return `error.EndOfStream`.
+        .buffer = @constCast(buffer),
+        .end = buffer.len,
+        .seek = 0,
+    };
+}
+
+pub fn stream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
+    const buffer = limit.slice(r.buffer[r.seek..r.end]);
+    if (buffer.len > 0) {
+        @branchHint(.likely);
+        const n = try w.write(buffer);
+        r.seek += n;
+        return n;
+    }
+    const before = w.count;
+    const n = try r.vtable.stream(r, w, limit);
     assert(n <= @intFromEnum(limit));
-    assert(bw.count == before + n);
+    assert(w.count == before + n);
     return n;
 }
 
-pub fn discard(r: Reader, limit: Limit) Error!usize {
-    const n = try r.vtable.discard(r.context, limit);
-    assert(n <= @intFromEnum(limit));
+pub fn discard(r: *Reader, limit: Limit) Error!usize {
+    const buffered_len = r.end - r.seek;
+    const remaining: Limit = if (limit.toInt()) |n| l: {
+        if (buffered_len >= n) {
+            r.seek += n;
+            return n;
+        }
+        break :l .limited(n - buffered_len);
+    } else .unlimited;
+    r.seek = 0;
+    r.end = 0;
+    const n = try r.vtable.discard(r, remaining);
+    assert(n <= @intFromEnum(remaining));
+    return buffered_len + n;
+}
+
+pub fn defaultDiscard(r: *Reader, limit: Limit) Error!usize {
+    assert(r.seek == 0);
+    assert(r.end == 0);
+    var w: Writer = .discarding(r.buffer);
+    const n = r.stream(&w, limit) catch |err| switch (err) {
+        error.WriteFailed => unreachable,
+        error.ReadFailed => return error.ReadFailed,
+        error.EndOfStream => return error.EndOfStream,
+    };
+    if (n > @intFromEnum(limit)) {
+        const over_amt = n - @intFromEnum(limit);
+        assert(over_amt <= w.buffer.len); // limit may be exceeded only by an amount within buffer capacity.
+        r.seek = w.end - over_amt;
+        r.end = w.end;
+        return @intFromEnum(limit);
+    }
     return n;
 }
 
-/// Returns total number of bytes written to `bw`.
-pub fn readRemaining(r: Reader, bw: *BufferedWriter) StreamRemainingError!usize {
-    const readFn = r.vtable.read;
+/// "Pump" data from the reader to the writer, handling `error.EndOfStream` as
+/// a success case.
+///
+/// Returns total number of bytes written to `w`.
+pub fn streamRemaining(r: *Reader, w: *Writer) StreamRemainingError!usize {
     var offset: usize = 0;
     while (true) {
-        offset += readFn(r.context, bw, .unlimited) catch |err| switch (err) {
+        offset += r.stream(w, .unlimited) catch |err| switch (err) {
             error.EndOfStream => return offset,
             else => |e| return e,
         };
@@ -105,11 +184,12 @@ pub fn readRemaining(r: Reader, bw: *BufferedWriter) StreamRemainingError!usize
 
 /// Consumes the stream until the end, ignoring all the data, returning the
 /// number of bytes discarded.
-pub fn discardRemaining(r: Reader) ShortError!usize {
-    const discardFn = r.vtable.discard;
-    var offset: usize = 0;
+pub fn discardRemaining(r: *Reader) ShortError!usize {
+    var offset: usize = r.end - r.seek;
+    r.seek = 0;
+    r.end = 0;
     while (true) {
-        offset += discardFn(r.context, .unlimited) catch |err| switch (err) {
+        offset += r.vtable.discard(r, .unlimited) catch |err| switch (err) {
             error.EndOfStream => return offset,
             else => |e| return e,
         };
@@ -121,112 +201,1133 @@ pub const LimitedAllocError = Allocator.Error || ShortError || error{StreamTooLo
 
 /// Transfers all bytes from the current position to the end of the stream, up
 /// to `limit`, returning them as a caller-owned allocated slice.
 ///
-/// If `limit` is exceeded, returns `error.StreamTooLong`. In such case, the
-/// stream is advanced one byte beyond the limit, and the consumed data is
-/// unrecoverable. Other functions listed below do not have this caveat.
+/// If `limit` would be exceeded, `error.StreamTooLong` is returned instead. In
+/// such case, the next byte that would be read will be the first one to exceed
+/// `limit`, and all preceding bytes have been discarded.
+///
+/// Asserts `buffer` has nonzero capacity.
 ///
 /// See also:
-/// * `readRemainingArrayList`
-/// * `BufferedReader.readRemainingArrayList`
-pub fn readRemainingAlloc(r: Reader, gpa: Allocator, limit: Limit) LimitedAllocError![]u8 {
+/// * `appendRemaining`
+pub fn allocRemaining(r: *Reader, gpa: Allocator, limit: Limit) LimitedAllocError![]u8 {
     var buffer: ArrayList(u8) = .empty;
     defer buffer.deinit(gpa);
-    try readRemainingArrayList(r, gpa, null, &buffer, limit, 1);
+    try appendRemaining(r, gpa, null, &buffer, limit);
     return buffer.toOwnedSlice(gpa);
 }
 
 /// Transfers all bytes from the current position to the end of the stream, up
 /// to `limit`, appending them to `list`.
 ///
-/// If `limit` is exceeded:
-/// * The array list's length is increased by exactly one byte past `limit`.
-/// * The stream seek position is advanced by exactly one byte past `limit`.
-/// * `error.StreamTooLong` is returned.
+/// If `limit` would be exceeded, `error.StreamTooLong` is returned instead. In
+/// such case, the next byte that would be read will be the first one to exceed
+/// `limit`, and all preceding bytes have been appended to `list`.
 ///
-/// The other function listed below has different semantics for an exceeded
-/// limit.
+/// Asserts `buffer` has nonzero capacity.
 ///
 /// See also:
-/// * `BufferedReader.readRemainingArrayList`
-pub fn readRemainingArrayList(
-    r: Reader,
+/// * `allocRemaining`
+pub fn appendRemaining(
+    r: *Reader,
     gpa: Allocator,
     comptime alignment: ?std.mem.Alignment,
     list: *std.ArrayListAlignedUnmanaged(u8, alignment),
     limit: Limit,
-    minimum_buffer_size: usize,
 ) LimitedAllocError!void {
-    var remaining = limit;
+    const buffer = r.buffer;
+    const buffered = buffer[r.seek..r.end];
+    const copy_len = limit.minInt(buffered.len);
+    try list.ensureUnusedCapacity(gpa, copy_len);
+    @memcpy(list.unusedCapacitySlice()[0..copy_len], buffered[0..copy_len]);
+    list.items.len += copy_len;
+    r.seek += copy_len;
+    if (copy_len == buffered.len) {
+        r.seek = 0;
+        r.end = 0;
+    }
+    var remaining = limit.subtract(copy_len).?;
     while (true) {
-        try list.ensureUnusedCapacity(gpa, minimum_buffer_size);
-        const buffer = remaining.slice1(list.unusedCapacitySlice());
-        const n = r.vtable.readVec(r.context, &.{buffer}) catch |err| switch (err) {
-            error.EndOfStream => return,
+        try list.ensureUnusedCapacity(gpa, 1);
+        const dest = remaining.slice(list.unusedCapacitySlice());
+        const additional_buffer = if (@intFromEnum(remaining) == dest.len) buffer else &.{};
+        const n = readVec(r, &.{ dest, additional_buffer }) catch |err| switch (err) {
+            error.EndOfStream => break,
             error.ReadFailed => return error.ReadFailed,
         };
+        if (n >= dest.len) {
+            r.end = n - dest.len;
+            list.items.len += dest.len;
+            if (n == dest.len) return;
+            return error.StreamTooLong;
+        }
         list.items.len += n;
-        remaining = remaining.subtract(n) orelse return error.StreamTooLong;
+        remaining = remaining.subtract(n).?;
     }
 }
 
-pub const failing: Reader = .{
-    .context = undefined,
-    .vtable = &.{
-        .read = failingRead,
-        .discard = failingDiscard,
-    },
-};
-
-pub const ending: Reader = .{
-    .context = undefined,
-    .vtable = &.{
-        .read = endingRead,
-        .discard = endingDiscard,
-    },
-};
-
-pub fn unbuffered(r: Reader) BufferedReader {
-    return buffered(r, &.{});
+/// Writes bytes from the internally tracked stream position to `data`.
+/// +/// Returns the number of bytes written, which will be at minimum `0` and +/// at most the sum of each data slice length. The number of bytes read, +/// including zero, does not indicate end of stream. +/// +/// The reader's internal logical seek position moves forward in accordance +/// with the number of bytes returned from this function. +pub fn readVec(r: *Reader, data: []const []u8) Error!usize { + return readVecLimit(r, data, .unlimited); } -pub fn buffered(r: Reader, buffer: []u8) BufferedReader { - return .{ - .unbuffered_reader = r, - .seek = 0, - .buffer = buffer, - .end = 0, +/// Equivalent to `readVec` but reads at most `limit` bytes. +/// +/// This ultimately will lower to a call to `stream`, but it must ensure +/// that the buffer used has at least as much capacity, in case that function +/// depends on a minimum buffer capacity. It also ensures that if the `stream` +/// implementation calls `Writer.writableVector`, it will get this data slice +/// along with the buffer at the end. +pub fn readVecLimit(r: *Reader, data: []const []u8, limit: Limit) Error!usize { + comptime assert(@intFromEnum(Limit.unlimited) == std.math.maxInt(usize)); + var remaining = @intFromEnum(limit); + for (data, 0..) |buf, i| { + const buffered = r.buffer[r.seek..r.end]; + const copy_len = @min(buffered.len, buf.len, remaining); + @memcpy(buf[0..copy_len], buffered[0..copy_len]); + r.seek += copy_len; + remaining -= copy_len; + if (remaining == 0) break; + if (buf.len - copy_len == 0) continue; + + r.seek = 0; + r.end = 0; + var vecs: [8][]u8 = undefined; // Arbitrarily chosen value. + const available_remaining_buf = buf[copy_len..]; + vecs[0] = available_remaining_buf[0..@min(available_remaining_buf.len, remaining)]; + const vec_start_remaining = remaining; + remaining -= vecs[0].len; + var vecs_i: usize = 1; + var data_i: usize = i + 1; + while (true) { + if (vecs.len - vecs_i == 0) { + const n = try r.unbuffered_reader.readVec(&vecs); + return @intFromEnum(limit) - vec_start_remaining + n; + } + if (remaining == 0 or data.len - data_i == 0) { + vecs[vecs_i] = r.buffer; + vecs_i += 1; + const n = try r.unbuffered_reader.readVec(vecs[0..vecs_i]); + const cutoff = vec_start_remaining - remaining; + if (n > cutoff) { + r.end = n - cutoff; + return @intFromEnum(limit) - remaining; + } else { + return @intFromEnum(limit) - vec_start_remaining + n; + } + } + if (data[data_i].len == 0) { + data_i += 1; + continue; + } + const data_elem = data[data_i]; + vecs[vecs_i] = data_elem[0..@min(data_elem.len, remaining)]; + remaining -= vecs[vecs_i].len; + vecs_i += 1; + data_i += 1; + } + } + return @intFromEnum(limit) - remaining; +} + +pub fn bufferContents(r: *Reader) []u8 { + return r.buffer[r.seek..r.end]; +} + +pub fn bufferedLen(r: *const Reader) usize { + return r.end - r.seek; +} + +pub fn hashed(r: *Reader, hasher: anytype) Hashed(@TypeOf(hasher)) { + return .{ .in = r, .hasher = hasher }; +} + +pub fn readVecAll(r: *Reader, data: [][]u8) Error!void { + var index: usize = 0; + var truncate: usize = 0; + while (index < data.len) { + { + const untruncated = data[index]; + data[index] = untruncated[truncate..]; + defer data[index] = untruncated; + truncate += try r.readVec(data[index..]); + } + while (index < data.len and truncate >= data[index].len) { + truncate -= data[index].len; + index += 1; + } + } +} + +/// "Pump" data from the reader to the writer. 
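+///
+/// Sketch (`w` stands in for any writer): move exactly 16 bytes, failing
+/// with `error.EndOfStream` if the stream ends first:
+///
+///     try r.readAll(&w, .limited(16));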
+pub fn readAll(r: *Reader, bw: *Writer, limit: Limit) StreamError!void {
+    var remaining = limit;
+    while (remaining.nonzero()) {
+        const n = try r.stream(bw, remaining);
+        remaining = remaining.subtract(n).?;
+    }
+}
+
+/// Returns the next `n` bytes from the underlying stream, filling the buffer
+/// as necessary.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// Asserts that the `Reader` was initialized with a buffer capacity at
+/// least as big as `n`.
+///
+/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
+/// is returned instead.
+///
+/// See also:
+/// * `peek`
+/// * `toss`
+pub fn peek(r: *Reader, n: usize) Error![]u8 {
+    try r.fill(n);
+    return r.buffer[r.seek..][0..n];
+}
+
+/// Returns all the next buffered bytes from the underlying stream, after
+/// filling the buffer to ensure it contains at least `n` bytes.
+///
+/// Invalidates previously returned values from `peek` and `peekGreedy`.
+///
+/// Asserts that the `Reader` was initialized with a buffer capacity at
+/// least as big as `n`.
+///
+/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
+/// is returned instead.
+///
+/// See also:
+/// * `peek`
+/// * `toss`
+pub fn peekGreedy(r: *Reader, n: usize) Error![]u8 {
+    try r.fill(n);
+    return r.buffer[r.seek..r.end];
+}
+
+/// Skips the next `n` bytes from the stream, advancing the seek position. This
+/// is typically and safely used after `peek`.
+///
+/// Asserts that the number of bytes buffered is at least as many as `n`.
+///
+/// The "tossed" memory remains alive until a "peek" operation occurs.
+///
+/// See also:
+/// * `peek`.
+/// * `discard`.
+pub fn toss(r: *Reader, n: usize) void {
+    r.seek += n;
+    assert(r.seek <= r.end);
+}
+
+/// Equivalent to `toss(r.bufferedLen())`.
+pub fn tossAll(r: *Reader) void {
+    r.seek = 0;
+    r.end = 0;
+}
+
+/// Equivalent to `peek` followed by `toss`.
+///
+/// The data returned is invalidated by the next call to `take`, `peek`,
+/// `fill`, and functions with those prefixes.
+pub fn take(r: *Reader, n: usize) Error![]u8 {
+    const result = try r.peek(n);
+    r.toss(n);
+    return result;
+}
+
+/// Returns the next `n` bytes from the underlying stream as an array, filling
+/// the buffer as necessary and advancing the seek position `n` bytes.
+///
+/// Asserts that the `Reader` was initialized with a buffer capacity at
+/// least as big as `n`.
+///
+/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
+/// is returned instead.
+///
+/// See also:
+/// * `take`
+pub fn takeArray(r: *Reader, comptime n: usize) Error!*[n]u8 {
+    return (try r.take(n))[0..n];
+}
+
+/// Returns the next `n` bytes from the underlying stream as an array, filling
+/// the buffer as necessary, without advancing the seek position.
+///
+/// Asserts that the `Reader` was initialized with a buffer capacity at
+/// least as big as `n`.
+///
+/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
+/// is returned instead.
+///
+/// See also:
+/// * `peek`
+/// * `takeArray`
+pub fn peekArray(r: *Reader, comptime n: usize) Error!*[n]u8 {
+    return (try r.peek(n))[0..n];
+}
+
+/// Skips the next `n` bytes from the stream, advancing the seek position.
+///
+/// Unlike `toss` which is infallible, in this function `n` can be any amount.
+///
+/// Returns `error.EndOfStream` if fewer than `n` bytes could be discarded.
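+///
+/// Sketch (`Header` is a hypothetical extern struct): skip a fixed-size
+/// header before parsing the body:
+///
+///     try r.discardAll(@sizeOf(Header));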
+/// +/// See also: +/// * `toss` +/// * `discardRemaining` +/// * `discardShort` +/// * `discard` +pub fn discardAll(r: *Reader, n: usize) Error!void { + if ((try r.discardShort(n)) != n) return error.EndOfStream; +} + +pub fn discardAll64(r: *Reader, n: u64) Error!void { + var remaining: u64 = n; + while (remaining > 0) { + const limited_remaining = std.math.cast(usize, remaining) orelse std.math.maxInt(usize); + try discardAll(r, limited_remaining); + remaining -= limited_remaining; + } +} + +/// Skips the next `n` bytes from the stream, advancing the seek position. +/// +/// Unlike `toss` which is infallible, in this function `n` can be any amount. +/// +/// Returns the number of bytes discarded, which is less than `n` if and only +/// if the stream reached the end. +/// +/// See also: +/// * `discardAll` +/// * `discardRemaining` +/// * `discard` +pub fn discardShort(r: *Reader, n: usize) ShortError!usize { + const proposed_seek = r.seek + n; + if (proposed_seek <= r.end) { + @branchHint(.likely); + r.seek = proposed_seek; + return n; + } + var remaining = n - (r.end - r.seek); + r.end = 0; + r.seek = 0; + while (true) { + const discard_len = r.unbuffered_reader.discard(.limited(remaining)) catch |err| switch (err) { + error.EndOfStream => return n - remaining, + error.ReadFailed => return error.ReadFailed, + }; + remaining -= discard_len; + if (remaining == 0) return n; + } +} + +/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing +/// the seek position. +/// +/// Invalidates previously returned values from `peek`. +/// +/// If the provided buffer cannot be filled completely, `error.EndOfStream` is +/// returned instead. +/// +/// See also: +/// * `peek` +/// * `readSliceShort` +pub fn readSlice(r: *Reader, buffer: []u8) Error!void { + const n = try readSliceShort(r, buffer); + if (n != buffer.len) return error.EndOfStream; +} + +/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing +/// the seek position. +/// +/// Invalidates previously returned values from `peek`. +/// +/// Returns the number of bytes read, which is less than `buffer.len` if and +/// only if the stream reached the end. +/// +/// See also: +/// * `readSlice` +pub fn readSliceShort(r: *Reader, buffer: []u8) ShortError!usize { + const in_buffer = r.buffer[r.seek..r.end]; + const copy_len = @min(buffer.len, in_buffer.len); + @memcpy(buffer[0..copy_len], in_buffer[0..copy_len]); + if (buffer.len - copy_len == 0) { + r.seek += copy_len; + return buffer.len; + } + var i: usize = copy_len; + r.end = 0; + r.seek = 0; + while (true) { + const remaining = buffer[i..]; + const n = r.unbuffered_reader.readVec(&.{ remaining, r.buffer }) catch |err| switch (err) { + error.EndOfStream => return i, + error.ReadFailed => return error.ReadFailed, + }; + if (n < remaining.len) { + i += n; + continue; + } + r.end = n - remaining.len; + return buffer.len; + } +} + +/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing +/// the seek position. +/// +/// Invalidates previously returned values from `peek`. +/// +/// If the provided buffer cannot be filled completely, `error.EndOfStream` is +/// returned instead. +/// +/// The function is inline to avoid the dead code in case `endian` is +/// comptime-known and matches host endianness. 
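+///
+/// Sketch: fill a small table of `u32` values stored big-endian in the
+/// stream:
+///
+///     var table: [4]u32 = undefined;
+///     try r.readSliceEndian(u32, &table, .big);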
+/// +/// See also: +/// * `readSlice` +/// * `readSliceEndianAlloc` +pub inline fn readSliceEndian( + r: *Reader, + comptime Elem: type, + buffer: []Elem, + endian: std.builtin.Endian, +) Error!void { + try readSlice(r, @ptrCast(buffer)); + if (native_endian != endian) for (buffer) |*elem| std.mem.byteSwapAllFields(Elem, elem); +} + +pub const ReadAllocError = Error || Allocator.Error; + +/// The function is inline to avoid the dead code in case `endian` is +/// comptime-known and matches host endianness. +pub inline fn readSliceEndianAlloc( + r: *Reader, + allocator: Allocator, + comptime Elem: type, + len: usize, + endian: std.builtin.Endian, +) ReadAllocError![]Elem { + const dest = try allocator.alloc(Elem, len); + errdefer allocator.free(dest); + try readSlice(r, @ptrCast(dest)); + if (native_endian != endian) for (dest) |*elem| std.mem.byteSwapAllFields(Elem, elem); + return dest; +} + +pub fn readSliceAlloc(r: *Reader, allocator: Allocator, len: usize) ReadAllocError![]u8 { + const dest = try allocator.alloc(u8, len); + errdefer allocator.free(dest); + try readSlice(r, dest); + return dest; +} + +pub const DelimiterError = error{ + /// See the `Reader` implementation for detailed diagnostics. + ReadFailed, + /// For "inclusive" functions, stream ended before the delimiter was found. + /// For "exclusive" functions, stream ended and there are no more bytes to + /// return. + EndOfStream, + /// The delimiter was not found within a number of bytes matching the + /// capacity of the `Reader`. + StreamTooLong, +}; + +/// Returns a slice of the next bytes of buffered data from the stream until +/// `sentinel` is found, advancing the seek position. +/// +/// Returned slice has a sentinel. +/// +/// Invalidates previously returned values from `peek`. +/// +/// See also: +/// * `peekSentinel` +/// * `takeDelimiterExclusive` +/// * `takeDelimiterInclusive` +pub fn takeSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel]u8 { + const result = try r.peekSentinel(sentinel); + r.toss(result.len + 1); + return result; +} + +pub fn peekSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel]u8 { + const result = try r.peekDelimiterInclusive(sentinel); + return result[0 .. result.len - 1 :sentinel]; +} + +/// Returns a slice of the next bytes of buffered data from the stream until +/// `delimiter` is found, advancing the seek position. +/// +/// Returned slice includes the delimiter as the last byte. +/// +/// Invalidates previously returned values from `peek`. +/// +/// See also: +/// * `takeSentinel` +/// * `takeDelimiterExclusive` +/// * `peekDelimiterInclusive` +pub fn takeDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 { + const result = try r.peekDelimiterInclusive(delimiter); + r.toss(result.len); + return result; +} + +/// Returns a slice of the next bytes of buffered data from the stream until +/// `delimiter` is found, without advancing the seek position. +/// +/// Returned slice includes the delimiter as the last byte. +/// +/// Invalidates previously returned values from `peek`. +/// +/// See also: +/// * `peekSentinel` +/// * `peekDelimiterExclusive` +/// * `takeDelimiterInclusive` +pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 { + const buffer = r.buffer[0..r.end]; + const seek = r.seek; + if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| { + @branchHint(.likely); + return buffer[seek .. 
end + 1];
+    }
+    if (seek > 0) {
+        const remainder = buffer[seek..];
+        std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
+        r.end = remainder.len;
+        r.seek = 0;
+    }
+    while (r.end < r.buffer.len) {
+        const n = try r.unbuffered_reader.readVec(&.{r.buffer[r.end..]});
+        const prev_end = r.end;
+        r.end = prev_end + n;
+        if (std.mem.indexOfScalarPos(u8, r.buffer[0..r.end], prev_end, delimiter)) |end| {
+            return r.buffer[0 .. end + 1];
+        }
+    }
+    return error.StreamTooLong;
+}
+
+/// Returns a slice of the next bytes of buffered data from the stream until
+/// `delimiter` is found, advancing the seek position.
+///
+/// Returned slice excludes the delimiter. End-of-stream is treated as
+/// equivalent to a delimiter, unless it would result in a length 0 return
+/// value, in which case `error.EndOfStream` is returned instead.
+///
+/// If the delimiter is not found within a number of bytes matching the
+/// capacity of this `Reader`, `error.StreamTooLong` is returned. In that
+/// case, the seek position is left unchanged, although the buffer may have
+/// been refilled.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// See also:
+/// * `takeDelimiterInclusive`
+/// * `peekDelimiterExclusive`
+pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
+    const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
+        error.EndOfStream => {
+            if (r.end == 0) return error.EndOfStream;
+            r.toss(r.end);
+            return r.buffer[0..r.end];
+        },
+        else => |e| return e,
+    };
+    r.toss(result.len);
+    return result[0 .. result.len - 1];
+}
+
+/// Returns a slice of the next bytes of buffered data from the stream until
+/// `delimiter` is found, without advancing the seek position.
+///
+/// Returned slice excludes the delimiter. End-of-stream is treated as
+/// equivalent to a delimiter, unless it would result in a length 0 return
+/// value, in which case `error.EndOfStream` is returned instead.
+///
+/// If the delimiter is not found within a number of bytes matching the
+/// capacity of this `Reader`, `error.StreamTooLong` is returned. In that
+/// case, the seek position is left unchanged, although the buffer may have
+/// been refilled.
+///
+/// Invalidates previously returned values from `peek`.
+///
+/// See also:
+/// * `peekDelimiterInclusive`
+/// * `takeDelimiterExclusive`
+pub fn peekDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
+    const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
+        error.EndOfStream => {
+            if (r.end == 0) return error.EndOfStream;
+            return r.buffer[0..r.end];
+        },
+        else => |e| return e,
+    };
+    return result[0 .. result.len - 1];
+}
+
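+// Not part of the original change: an illustrative sketch of streaming up to
+// a delimiter with `readDelimiterEnding` (defined below), assuming the
+// `fixed` reader initializer and the `Writer.initFixed` initializer used
+// elsewhere in this patch.
+test "readDelimiterEnding streams up to the delimiter" {
+    var r: Reader = .fixed("abc|rest");
+    var out: [8]u8 = undefined;
+    var bw: Writer = undefined;
+    bw.initFixed(&out);
+    try testing.expectEqual(@as(usize, 3), try r.readDelimiterEnding(&bw, '|'));
+    try testing.expectEqualStrings("abc", out[0..3]);
+}
+
+/// Appends to `bw` contents by reading from the stream until `delimiter` is
+/// found. Does not write the delimiter itself.
+///
+/// Returns the number of bytes streamed, or `error.EndOfStream` if the stream
+/// ends before the delimiter is found.
+pub fn readDelimiter(r: *Reader, bw: *Writer, delimiter: u8) StreamError!usize {
+    const amount, const to = try r.readAny(bw, delimiter, .unlimited);
+    return switch (to) {
+        .delimiter => amount,
+        .limit => unreachable,
+        .end => error.EndOfStream,
     };
 }
 
-pub fn limited(r: Reader, limit: Limit) Limited {
-    return .{
-        .unlimited_reader = r,
-        .remaining = limit,
+/// Appends to `bw` contents by reading from the stream until `delimiter` is
+/// found. Does not write the delimiter itself.
+///
+/// Succeeds if the stream ends before the delimiter is found.
+///
+/// Returns the number of bytes streamed. The end is not signaled to the
+/// writer.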
+pub fn readDelimiterEnding(
+    r: *Reader,
+    bw: *Writer,
+    delimiter: u8,
+) StreamRemainingError!usize {
+    const amount, const to = try r.readAny(bw, delimiter, .unlimited);
+    return switch (to) {
+        .delimiter, .end => amount,
+        .limit => unreachable,
     };
 }
 
-fn endingRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) StreamError!usize {
-    _ = context;
-    _ = bw;
+pub const StreamDelimiterLimitedError = StreamRemainingError || error{
+    /// Stream ended before the delimiter was found.
+    EndOfStream,
+    /// The delimiter was not found within the limit.
+    StreamTooLong,
+};
+
+/// Appends to `bw` contents by reading from the stream until `delimiter` is
+/// found. Does not write the delimiter itself.
+///
+/// Returns the number of bytes streamed, `error.EndOfStream` if the stream
+/// ends first, or `error.StreamTooLong` if the delimiter is not found within
+/// `limit` bytes.
+pub fn readDelimiterLimit(
+    r: *Reader,
+    bw: *Writer,
+    delimiter: u8,
+    limit: Limit,
+) StreamDelimiterLimitedError!usize {
+    const amount, const to = try r.readAny(bw, delimiter, limit);
+    return switch (to) {
+        .delimiter => amount,
+        .limit => error.StreamTooLong,
+        .end => error.EndOfStream,
+    };
+}
+
+fn readAny(
+    r: *Reader,
+    bw: *Writer,
+    delimiter: ?u8,
+    limit: Limit,
+) StreamRemainingError!struct { usize, enum { delimiter, limit, end } } {
+    var amount: usize = 0;
+    var remaining = limit;
+    while (remaining.nonzero()) {
+        const available = remaining.slice(r.peekGreedy(1) catch |err| switch (err) {
+            error.ReadFailed => |e| return e,
+            error.EndOfStream => return .{ amount, .end },
+        });
+        if (delimiter) |d| if (std.mem.indexOfScalar(u8, available, d)) |delimiter_index| {
+            try bw.writeAll(available[0..delimiter_index]);
+            r.toss(delimiter_index + 1);
+            return .{ amount + delimiter_index, .delimiter };
+        };
+        try bw.writeAll(available);
+        r.toss(available.len);
+        amount += available.len;
+        remaining = remaining.subtract(available.len).?;
+    }
+    return .{ amount, .limit };
+}
+
+/// Reads from the stream until specified byte is found, discarding all data,
+/// including the delimiter.
+///
+/// Returns `error.EndOfStream` if the stream ends before the delimiter is
+/// found.
+pub fn discardDelimiterInclusive(r: *Reader, delimiter: u8) Error!void {
+    // Straightforward implementation following the same scan-and-toss pattern
+    // as `readAny` above; the TODO stub is filled in here.
+    while (true) {
+        const available = try r.peekGreedy(1);
+        if (std.mem.indexOfScalar(u8, available, delimiter)) |i| {
+            r.toss(i + 1);
+            return;
+        }
+        r.toss(available.len);
+    }
+}
+
+/// Reads from the stream until specified byte is found, discarding all data,
+/// excluding the delimiter.
+///
+/// Succeeds if the stream ends before the delimiter is found.
+pub fn discardDelimiterExclusive(r: *Reader, delimiter: u8) ShortError!void {
+    while (true) {
+        const available = r.peekGreedy(1) catch |err| switch (err) {
+            error.EndOfStream => return,
+            error.ReadFailed => |e| return e,
+        };
+        if (std.mem.indexOfScalar(u8, available, delimiter)) |i| {
+            r.toss(i);
+            return;
+        }
+        r.toss(available.len);
+    }
+}
+
+/// Fills the buffer such that it contains at least `n` bytes, without
+/// advancing the seek position.
+///
+/// Returns `error.EndOfStream` if and only if there are fewer than `n` bytes
+/// remaining.
+///
+/// Asserts buffer capacity is at least `n`.
+pub fn fill(r: *Reader, n: usize) Error!void {
+    assert(n <= r.buffer.len);
+    if (r.seek + n <= r.end) {
+        @branchHint(.likely);
+        return;
+    }
+    rebaseCapacity(r, n);
+    while (r.end < r.seek + n) {
+        r.end += try r.unbuffered_reader.readVec(&.{r.buffer[r.end..]});
+    }
+}
+
+/// Without advancing the seek position, does exactly one underlying read,
+/// filling the buffer as much as possible. This may result in zero bytes
+/// added to the buffer, which is not an end of stream condition. End of
+/// stream is communicated via returning `error.EndOfStream`.
+///
+/// Asserts buffer capacity is at least 1.
+pub fn fillMore(r: *Reader) Error!void {
+    rebaseCapacity(r, 1);
+    r.end += try r.unbuffered_reader.readVec(&.{r.buffer[r.end..]});
+}
+
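+// Not part of the original change: an illustrative check of the two discard
+// variants implemented above, assuming the `fixed` initializer used elsewhere
+// in this patch.
+test "discardDelimiterInclusive and discardDelimiterExclusive" {
+    var r: Reader = .fixed("skip\nkeep");
+    try r.discardDelimiterInclusive('\n');
+    try testing.expectEqualStrings("keep", try r.take(4));
+
+    // The exclusive variant succeeds at end of stream without a delimiter.
+    r = .fixed("no delimiter");
+    try r.discardDelimiterExclusive('\n');
+    try testing.expectError(error.EndOfStream, r.takeByte());
+}
+
+/// Returns the next byte from the stream or returns `error.EndOfStream`. 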
+///
+/// Does not advance the seek position.
+///
+/// Asserts the buffer capacity is nonzero.
+pub fn peekByte(r: *Reader) Error!u8 {
+    if (r.seek >= r.end) {
+        @branchHint(.unlikely);
+        // `fill` may rebase the buffer, so `r.seek` and `r.end` must be
+        // re-read after it returns rather than cached beforehand.
+        try fill(r, 1);
+    }
+    return r.buffer[r.seek];
+}
+
+/// Reads 1 byte from the stream or returns `error.EndOfStream`.
+///
+/// Asserts the buffer capacity is nonzero.
+pub fn takeByte(r: *Reader) Error!u8 {
+    const result = try peekByte(r);
+    r.seek += 1;
+    return result;
+}
+
+/// Same as `takeByte` except the returned byte is signed.
+pub fn takeByteSigned(r: *Reader) Error!i8 {
+    return @bitCast(try r.takeByte());
+}
+
+/// Asserts the buffer was initialized with a capacity at least `@bitSizeOf(T) / 8`.
+pub inline fn takeInt(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T {
+    const n = @divExact(@typeInfo(T).int.bits, 8);
+    return std.mem.readInt(T, try r.takeArray(n), endian);
+}
+
+/// Asserts the buffer was initialized with a capacity at least `n`.
+pub fn takeVarInt(r: *Reader, comptime Int: type, endian: std.builtin.Endian, n: usize) Error!Int {
+    assert(n <= @sizeOf(Int));
+    return std.mem.readVarInt(Int, try r.take(n), endian);
+}
+
+/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
+///
+/// Advances the seek position.
+///
+/// See also:
+/// * `peekStruct`
+pub fn takeStruct(r: *Reader, comptime T: type) Error!*align(1) T {
+    // Only extern and packed structs have defined in-memory layout.
+    comptime assert(@typeInfo(T).@"struct".layout != .auto);
+    return @ptrCast(try r.takeArray(@sizeOf(T)));
+}
+
+/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
+///
+/// Does not advance the seek position.
+///
+/// See also:
+/// * `takeStruct`
+pub fn peekStruct(r: *Reader, comptime T: type) Error!*align(1) T {
+    // Only extern and packed structs have defined in-memory layout.
+    comptime assert(@typeInfo(T).@"struct".layout != .auto);
+    return @ptrCast(try r.peekArray(@sizeOf(T)));
+}
+
+/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
+///
+/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
+/// when `endian` is comptime-known and matches the host endianness.
+pub inline fn takeStructEndian(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T {
+    var res = (try r.takeStruct(T)).*;
+    if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
+    return res;
+}
+
+/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
+///
+/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
+/// when `endian` is comptime-known and matches the host endianness.
+pub inline fn peekStructEndian(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T {
+    var res = (try r.peekStruct(T)).*;
+    if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
+    return res;
+}
+
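+// Not part of the original change: an illustrative decode of a fixed-layout
+// header with `takeStructEndian`. The `Header` struct is hypothetical; the
+// helper and the `fixed` initializer are the ones defined in this patch.
+test "takeStructEndian decodes an extern struct" {
+    const Header = extern struct { magic: u16, len: u16 };
+    var r: Reader = .fixed("\x34\x12\x08\x00");
+    const h = try r.takeStructEndian(Header, .little);
+    try testing.expectEqual(@as(u16, 0x1234), h.magic);
+    try testing.expectEqual(@as(u16, 8), h.len);
+    try testing.expectError(error.EndOfStream, r.takeByte());
+}
+
+pub const TakeEnumError = Error || error{InvalidEnumTag};
+
+/// Reads an integer with the same size as the given enum's tag type. If the
+/// integer matches an enum tag, casts the integer to the enum tag and returns
+/// it. Otherwise, returns `error.InvalidEnumTag`.
+///
+/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`. 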
+pub fn takeEnum(r: *Reader, comptime Enum: type, endian: std.builtin.Endian) TakeEnumError!Enum {
+    const Tag = @typeInfo(Enum).@"enum".tag_type;
+    const int = try r.takeInt(Tag, endian);
+    return std.meta.intToEnum(Enum, int);
+}
+
+/// Reads an integer with the same size as the given nonexhaustive enum's tag
+/// type and casts it to the enum. Cannot fail with `error.InvalidEnumTag`
+/// because every tag value is valid for a nonexhaustive enum.
+///
+/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`.
+pub fn takeEnumNonexhaustive(r: *Reader, comptime Enum: type, endian: std.builtin.Endian) Error!Enum {
+    const info = @typeInfo(Enum).@"enum";
+    comptime assert(!info.is_exhaustive);
+    comptime assert(@bitSizeOf(info.tag_type) == @sizeOf(info.tag_type) * 8);
+    return takeEnum(r, Enum, endian) catch |err| switch (err) {
+        error.InvalidEnumTag => unreachable,
+        else => |e| return e,
+    };
+}
+
+pub const TakeLeb128Error = Error || error{Overflow};
+
+/// Read a single LEB128 value as type `Result`, or `error.Overflow` if the
+/// value cannot fit.
+pub fn takeLeb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
+    const result_info = @typeInfo(Result).int;
+    return std.math.cast(Result, try r.takeMultipleOf7Leb128(@Type(.{ .int = .{
+        .signedness = result_info.signedness,
+        .bits = std.mem.alignForwardAnyAlign(u16, result_info.bits, 7),
+    } }))) orelse error.Overflow;
+}
+
+/// Grows the total buffer capacity to at least `n` bytes, rebasing and
+/// reallocating with `allocator` as necessary. Buffered data is preserved.
+pub fn expandTotalCapacity(r: *Reader, allocator: Allocator, n: usize) Allocator.Error!void {
+    if (n <= r.buffer.len) return;
+    if (r.seek > 0) rebase(r);
+    var list: ArrayList(u8) = .{
+        .items = r.buffer[0..r.end],
+        .capacity = r.buffer.len,
+    };
+    defer r.buffer = list.allocatedSlice();
+    try list.ensureTotalCapacity(allocator, n);
+}
+
+pub const FillAllocError = Error || Allocator.Error;
+
+/// Same as `fill`, except the buffer capacity is expanded as needed rather
+/// than asserted.
+pub fn fillAlloc(r: *Reader, allocator: Allocator, n: usize) FillAllocError!void {
+    try expandTotalCapacity(r, allocator, n);
+    return fill(r, n);
+}
+
+/// Returns a slice into the unused capacity of `buffer` with at least
+/// `min_len` bytes, extending `buffer` by resizing it with `allocator` as
+/// necessary.
+///
+/// After calling this function, typically the caller will follow up with a
+/// call to `advanceBufferEnd` to report the actual number of bytes buffered.
+pub fn writableSliceGreedyAlloc(r: *Reader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 {
+    {
+        const unused = r.buffer[r.end..];
+        if (unused.len >= min_len) return unused;
+    }
+    if (r.seek > 0) rebase(r);
+    {
+        var list: ArrayList(u8) = .{
+            .items = r.buffer[0..r.end],
+            .capacity = r.buffer.len,
+        };
+        defer r.buffer = list.allocatedSlice();
+        try list.ensureUnusedCapacity(allocator, min_len);
+    }
+    const unused = r.buffer[r.end..];
+    assert(unused.len >= min_len);
+    return unused;
+}
+
+/// After writing directly into the unused capacity of `buffer`, this function
+/// updates `end` so that users of `Reader` can receive the data.
+pub fn advanceBufferEnd(r: *Reader, n: usize) void {
+    assert(n <= r.buffer.len - r.end);
+    r.end += n;
+}
+
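+// Not part of the original change: an illustrative decode of the classic
+// unsigned LEB128 example bytes with `takeLeb128` (implemented via the
+// multiple-of-7 helper below), assuming the `fixed` initializer used
+// elsewhere in this patch.
+test "takeLeb128 decodes a multi-byte value" {
+    // 0xe5 0x8e 0x26 is the LEB128 encoding of 624485.
+    var r: Reader = .fixed("\xe5\x8e\x26");
+    try testing.expectEqual(@as(u32, 624485), try r.takeLeb128(u32));
+    try testing.expectError(error.EndOfStream, r.takeLeb128(u32));
+}
+
+fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
+    const result_info = @typeInfo(Result).int;
+    comptime assert(result_info.bits % 7 == 0);
+    var remaining_bits: std.math.Log2IntCeil(Result) = result_info.bits;
+    const UnsignedResult = @Type(.{ .int = .{
+        .signedness = .unsigned,
+        .bits = result_info.bits,
+    } });
+    var result: UnsignedResult = 0;
+    var fits = true;
+    while (true) {
+        const buffer: []const packed struct(u8) { bits: u7, more: bool } = @ptrCast(try r.peekGreedy(1));
+        for (buffer, 1..) 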
|byte, len| { + if (remaining_bits > 0) { + result = @shlExact(@as(UnsignedResult, byte.bits), result_info.bits - 7) | + if (result_info.bits > 7) @shrExact(result, 7) else 0; + remaining_bits -= 7; + } else if (fits) fits = switch (result_info.signedness) { + .signed => @as(i7, @bitCast(byte.bits)) == + @as(i7, @truncate(@as(Result, @bitCast(result)) >> (result_info.bits - 1))), + .unsigned => byte.bits == 0, + }; + if (byte.more) continue; + r.toss(len); + return if (fits) @as(Result, @bitCast(result)) >> remaining_bits else error.Overflow; + } + r.toss(buffer.len); + } +} + +/// Left-aligns data such that `r.seek` becomes zero. +pub fn rebase(r: *Reader) void { + const data = r.buffer[r.seek..r.end]; + const dest = r.buffer[0..data.len]; + std.mem.copyForwards(u8, dest, data); + r.seek = 0; + r.end = data.len; +} + +/// Ensures `capacity` more data can be buffered without rebasing, by rebasing +/// if necessary. +/// +/// Asserts `capacity` is within the buffer capacity. +pub fn rebaseCapacity(r: *Reader, capacity: usize) void { + if (r.end > r.buffer.len - capacity) rebase(r); +} + +/// Advances the stream and decreases the size of the storage buffer by `n`, +/// returning the range of bytes no longer accessible by `r`. +/// +/// This action can be undone by `restitute`. +/// +/// Asserts there are at least `n` buffered bytes already. +/// +/// Asserts that `r.seek` is zero, i.e. the buffer is in a rebased state. +pub fn steal(r: *Reader, n: usize) []u8 { + assert(r.seek == 0); + assert(n <= r.end); + const stolen = r.buffer[0..n]; + r.buffer = r.buffer[n..]; + r.end -= n; + return stolen; +} + +/// Expands the storage buffer, undoing the effects of `steal` +/// Assumes that `n` does not exceed the total number of stolen bytes. +pub fn restitute(r: *Reader, n: usize) void { + r.buffer = (r.buffer.ptr - n)[0 .. 
r.buffer.len + n];
+    r.end += n;
+    r.seek += n;
+}
+
+test fixed {
+    var r: Reader = .fixed("a\x02");
+    try testing.expect((try r.takeByte()) == 'a');
+    try testing.expect((try r.takeEnum(enum(u8) {
+        a = 0,
+        b = 99,
+        c = 2,
+        d = 3,
+    }, builtin.cpu.arch.endian())) == .c);
+    try testing.expectError(error.EndOfStream, r.takeByte());
+}
+
+test peek {
+    return error.Unimplemented;
+}
+
+test peekGreedy {
+    return error.Unimplemented;
+}
+
+test toss {
+    return error.Unimplemented;
+}
+
+test take {
+    return error.Unimplemented;
+}
+
+test takeArray {
+    return error.Unimplemented;
+}
+
+test peekArray {
+    return error.Unimplemented;
+}
+
+test discardAll {
+    var r: Reader = .fixed("foobar");
+    try r.discardAll(3);
+    try testing.expectEqualStrings("bar", try r.take(3));
+    try r.discardAll(0);
+    try testing.expectError(error.EndOfStream, r.discardAll(1));
+}
+
+test discardRemaining {
+    return error.Unimplemented;
+}
+
+test stream {
+    return error.Unimplemented;
+}
+
+test takeSentinel {
+    return error.Unimplemented;
+}
+
+test peekSentinel {
+    return error.Unimplemented;
+}
+
+test takeDelimiterInclusive {
+    return error.Unimplemented;
+}
+
+test peekDelimiterInclusive {
+    return error.Unimplemented;
+}
+
+test takeDelimiterExclusive {
+    return error.Unimplemented;
+}
+
+test peekDelimiterExclusive {
+    return error.Unimplemented;
+}
+
+test readDelimiter {
+    return error.Unimplemented;
+}
+
+test readDelimiterEnding {
+    return error.Unimplemented;
+}
+
+test readDelimiterLimit {
+    return error.Unimplemented;
+}
+
+test discardDelimiterExclusive {
+    return error.Unimplemented;
+}
+
+test discardDelimiterInclusive {
+    return error.Unimplemented;
+}
+
+test fill {
+    return error.Unimplemented;
+}
+
+test takeByte {
+    return error.Unimplemented;
+}
+
+test takeByteSigned {
+    return error.Unimplemented;
+}
+
+test takeInt {
+    return error.Unimplemented;
+}
+
+test takeVarInt {
+    return error.Unimplemented;
+}
+
+test takeStruct {
+    return error.Unimplemented;
+}
+
+test peekStruct {
+    return error.Unimplemented;
+}
+
+test takeStructEndian {
+    return error.Unimplemented;
+}
+
+test peekStructEndian {
+    return error.Unimplemented;
+}
+
+test takeEnum {
+    return error.Unimplemented;
+}
+
+test takeLeb128 {
+    return error.Unimplemented;
+}
+
+test readSliceShort {
+    return error.Unimplemented;
+}
+
+test readVec {
+    return error.Unimplemented;
+}
+
+test "expected error.EndOfStream" {
+    // Unit test inspired by https://github.com/ziglang/zig/issues/17733
+    var r: std.io.Reader = .fixed("");
+    try std.testing.expectError(error.EndOfStream, r.readEnum(enum(u8) { a, b }, .little));
+    try std.testing.expectError(error.EndOfStream, r.isBytes("foo"));
+}
+
+fn endingRead(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
+    _ = r;
+    _ = w;
     _ = limit;
     return error.EndOfStream;
 }
 
-fn endingDiscard(context: ?*anyopaque, limit: Limit) Error!usize {
-    _ = context;
+fn endingDiscard(r: *Reader, limit: Limit) Error!usize {
+    _ = r;
     _ = limit;
     return error.EndOfStream;
 }
 
-fn failingRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) StreamError!usize {
-    _ = context;
-    _ = bw;
+fn failingRead(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
+    _ = r;
+    _ = w;
     _ = limit;
     return error.ReadFailed;
 }
 
-fn failingDiscard(context: ?*anyopaque, limit: Limit) Error!usize {
-    _ = context;
+fn failingDiscard(r: *Reader, limit: Limit) Error!usize {
+    _ = r;
     _ = limit;
     return error.ReadFailed;
 }
 
@@ -270,46 +1371,31 @@ test "readAlloc when the backing reader provides one byte at a time" {
 /// implementation details.
 pub fn Hashed(comptime Hasher: type) type {
     return struct {
-        in: *BufferedReader,
+        in: *Reader,
         hasher: Hasher,
+        interface: Reader,
 
-        pub fn readable(this: *@This(), buffer: []u8) BufferedReader {
+        pub fn init(in: *Reader, hasher: Hasher, buffer: []u8) @This() {
             return .{
-                .unbuffered_reader = .{
-                    .context = this,
+                .in = in,
+                .hasher = hasher,
+                .interface = .{
                     .vtable = &.{
                         .read = @This().read,
                         .discard = @This().discard,
                     },
+                    .buffer = buffer,
+                    .end = 0,
+                    .seek = 0,
                 },
-                .buffer = buffer,
-                .end = 0,
-                .seek = 0,
             };
         }
 
-        fn read(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) StreamError!usize {
-            const this: *@This() = @alignCast(@ptrCast(context));
-            const slice = limit.slice(try bw.writableSliceGreedy(1));
-            const n = try this.in.readVec(&.{slice});
-            this.hasher.update(slice[0..n]);
-            bw.advance(n);
-            return n;
-        }
-
-        fn discard(context: ?*anyopaque, limit: Limit) Error!usize {
-            const this: *@This() = @alignCast(@ptrCast(context));
-            var bw = this.hasher.writable(&.{});
-            const n = this.in.read(&bw, limit) catch |err| switch (err) {
-                error.WriteFailed => unreachable,
-                else => |e| return e,
-            };
-            return n;
-        }
-
-        fn readVec(context: ?*anyopaque, data: []const []u8) Error!usize {
-            const this: *@This() = @alignCast(@ptrCast(context));
+        fn read(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
+            const this: *@This() = @alignCast(@fieldParentPtr("interface", r));
+            const data = w.writableVector(limit);
             const n = try this.in.readVec(data);
+            w.advanceVector(n);
             var remaining: usize = n;
             for (data) |slice| {
                 if (remaining < slice.len) {
@@ -323,5 +1409,15 @@ pub fn Hashed(comptime Hasher: type) type {
             assert(remaining == 0);
             return n;
         }
+
+        fn discard(r: *Reader, limit: Limit) Error!usize {
+            const this: *@This() = @alignCast(@fieldParentPtr("interface", r));
+            var bw = this.hasher.writable(&.{});
+            const n = this.in.read(&bw, limit) catch |err| switch (err) {
+                error.WriteFailed => unreachable,
+                else => |e| return e,
+            };
+            return n;
+        }
     };
 }
diff --git a/lib/std/io/Reader/Limited.zig b/lib/std/io/Reader/Limited.zig
index 257bf88348..0c6d3d32ea 100644
--- a/lib/std/io/Reader/Limited.zig
+++ b/lib/std/io/Reader/Limited.zig
@@ -5,21 +5,27 @@ const Reader = std.io.Reader;
 const BufferedWriter = std.io.BufferedWriter;
 const Limit = std.io.Limit;
 
-unlimited_reader: Reader,
+unlimited: *Reader,
 remaining: Limit,
+interface: Reader,
 
-pub fn reader(l: *Limited) Reader {
+pub fn init(reader: *Reader, limit: Limit, buffer: []u8) Limited {
     return .{
-        .context = l,
-        .vtable = &.{
-            .read = passthruRead,
-            .readVec = passthruReadVec,
-            .discard = passthruDiscard,
+        .unlimited = reader,
+        .remaining = limit,
+        .interface = .{
+            .vtable = &.{
+                .stream = stream,
+                .discard = discard,
+            },
+            .buffer = buffer,
+            .seek = 0,
+            .end = 0,
         },
     };
 }
 
-fn passthruRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) Reader.StreamError!usize {
-    const l: *Limited = @alignCast(@ptrCast(context));
+fn stream(r: *Reader, w: *BufferedWriter, limit: Limit) Reader.StreamError!usize {
+    const l: *Limited = @alignCast(@fieldParentPtr("interface", r));
     const combined_limit = limit.min(l.remaining);
-    const n = try l.unlimited_reader.read(bw, combined_limit);
+    const n = try l.unlimited.read(w, combined_limit);
@@ -27,30 +33,10 @@ fn passthruRead(context: ?*anyopaque, bw: *BufferedWriter, limit: Limit) Reader.
     return n;
 }
 
-fn passthruDiscard(context: ?*anyopaque, limit: Limit) Reader.Error!usize {
-    const l: *Limited = @alignCast(@ptrCast(context));
+fn discard(r: *Reader, limit: Limit) Reader.Error!usize {
+    const l: *Limited = @alignCast(@fieldParentPtr("interface", r));
     const combined_limit = limit.min(l.remaining);
-    const n = try l.unlimited_reader.discard(combined_limit);
+    const n = try l.unlimited.discard(combined_limit);
     l.remaining = l.remaining.subtract(n).?;
     return n;
 }
-
-fn passthruReadVec(context: ?*anyopaque, data: []const []u8) Reader.Error!usize {
-    const l: *Limited = @alignCast(@ptrCast(context));
-    if (data.len == 0) return 0;
-    if (data[0].len >= @intFromEnum(l.remaining)) {
-        const n = try l.unlimited_reader.readVec(&.{l.remaining.slice(data[0])});
-        l.remaining = l.remaining.subtract(n).?;
-        return n;
-    }
-    var total: usize = 0;
-    for (data, 0..) |buf, i| {
-        total += buf.len;
-        if (total > @intFromEnum(l.remaining)) {
-            const n = try l.unlimited_reader.readVec(data[0..i]);
-            l.remaining = l.remaining.subtract(n).?;
-            return n;
-        }
-    }
-    return 0;
-}
diff --git a/lib/std/leb128.zig b/lib/std/leb128.zig
index be53fba57a..861125fe31 100644
--- a/lib/std/leb128.zig
+++ b/lib/std/leb128.zig
@@ -114,32 +114,27 @@ test writeSignedFixed {
 }
 
 fn test_read_stream_ileb128(comptime T: type, encoded: []const u8) !T {
-    var br: std.io.BufferedReader = undefined;
-    br.initFixed(encoded);
+    var br: std.io.Reader = .fixed(encoded);
     return br.takeIleb128(T);
 }
 
 fn test_read_stream_uleb128(comptime T: type, encoded: []const u8) !T {
-    var br: std.io.BufferedReader = undefined;
-    br.initFixed(encoded);
+    var br: std.io.Reader = .fixed(encoded);
     return br.takeUleb128(T);
 }
 
 fn test_read_ileb128(comptime T: type, encoded: []const u8) !T {
-    var br: std.io.BufferedReader = undefined;
-    br.initFixed(encoded);
+    var br: std.io.Reader = .fixed(encoded);
     return br.readIleb128(T);
 }
 
 fn test_read_uleb128(comptime T: type, encoded: []const u8) !T {
-    var br: std.io.BufferedReader = undefined;
-    br.initFixed(encoded);
+    var br: std.io.Reader = .fixed(encoded);
    return br.readUleb128(T);
 }
 
 fn test_read_ileb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) !void {
-    var br: std.io.BufferedReader = undefined;
-    br.initFixed(encoded);
+    var br: std.io.Reader = .fixed(encoded);
     var i: usize = 0;
     while (i < N) : (i += 1) {
         _ = try br.readIleb128(T);
@@ -147,8 +142,7 @@ fn test_read_ileb128_seq(comptime T: type, comptime N: usize, encoded: []const u
 }
 
 fn test_read_uleb128_seq(comptime T: type, comptime N: usize, encoded: []const u8) !void {
-    var br: std.io.BufferedReader = undefined;
-    br.initFixed(encoded);
+    var br: std.io.Reader = .fixed(encoded);
     var i: usize = 0;
     while (i < N) : (i += 1) {
         _ = try br.readUleb128(T);
@@ -248,7 +242,7 @@ fn test_write_leb128(value: anytype) !void {
     const t_signed = signedness == .signed;
 
     const writeStream = if (t_signed) std.io.BufferedWriter.writeIleb128 else std.io.BufferedWriter.writeUleb128;
-    const readStream = if (t_signed) std.io.BufferedReader.readIleb128 else std.io.BufferedReader.readUleb128;
+    const readStream = if (t_signed) std.io.Reader.readIleb128 else std.io.Reader.readUleb128;
 
     // decode to a larger bit size too, to ensure sign extension
     // is working as expected
@@ -275,8 +269,7 @@ fn test_write_leb128(value: anytype) !void {
     try testing.expect(bw.buffer.items.len == bytes_needed);
 
     // stream read
-    var br: std.io.BufferedReader = undefined;
-    br.initFixed(&buf);
+    var br: std.io.Reader = .fixed(&buf);
     const sr = try readStream(&br, T);
     try testing.expect(br.seek == bytes_needed);
     try testing.expect(sr == value);
diff --git a/lib/std/net.zig 
b/lib/std/net.zig index 224d38e617..a33898d3b6 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -1378,7 +1378,7 @@ fn parseHosts( name: []const u8, family: posix.sa_family_t, port: u16, - br: *std.io.BufferedReader, + br: *std.io.Reader, ) error{ OutOfMemory, ReadFailed }!void { while (true) { const line = br.takeDelimiterExclusive('\n') catch |err| switch (err) { @@ -1592,7 +1592,7 @@ const ResolvConf = struct { }; } - fn parse(rc: *ResolvConf, br: *std.io.BufferedReader) !void { + fn parse(rc: *ResolvConf, br: *std.io.Reader) !void { const gpa = rc.gpa; while (br.takeSentinel('\n')) |line_with_comment| { const line = line: { diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index e27a64f31a..617ea966ab 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -348,15 +348,15 @@ pub const RunResult = struct { stderr: []u8, }; -fn writeBufferedReaderToArrayList(allocator: Allocator, list: *std.ArrayListUnmanaged(u8), br: *std.io.BufferedReader) !void { - assert(br.seek == 0); +fn writeBufferedReaderToArrayList(allocator: Allocator, list: *std.ArrayListUnmanaged(u8), r: *std.io.Reader) !void { + assert(r.seek == 0); if (list.capacity == 0) { list.* = .{ - .items = br.bufferContents(), - .capacity = br.buffer.len, + .items = r.bufferContents(), + .capacity = r.buffer.len, }; } else { - try list.appendSlice(allocator, br.bufferContents()); + try list.appendSlice(allocator, r.bufferContents()); } } diff --git a/lib/std/tar.zig b/lib/std/tar.zig index 37d51c04fc..ff7baf9d69 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -302,7 +302,7 @@ pub const FileKind = enum { /// Iterator over entries in the tar file represented by reader. pub const Iterator = struct { - reader: *std.io.BufferedReader, + reader: *std.io.Reader, diagnostics: ?*Diagnostics = null, // buffers for heeader and file attributes @@ -328,7 +328,7 @@ pub const Iterator = struct { /// Iterates over files in tar archive. /// `next` returns each file in tar archive. - pub fn init(reader: *std.io.BufferedReader, options: Options) Iterator { + pub fn init(reader: *std.io.Reader, options: Options) Iterator { return .{ .reader = reader, .diagnostics = options.diagnostics, @@ -345,7 +345,7 @@ pub const Iterator = struct { kind: FileKind = .file, unread_bytes: *u64, - parent_reader: *std.io.BufferedReader, + parent_reader: *std.io.Reader, pub fn reader(self: *File) std.io.Reader { return .{ @@ -537,14 +537,14 @@ const pax_max_size_attr_len = 64; pub const PaxIterator = struct { size: usize, // cumulative size of all pax attributes - reader: *std.io.BufferedReader, + reader: *std.io.Reader, const Self = @This(); const Attribute = struct { kind: PaxAttributeKind, len: usize, // length of the attribute value - reader: *std.io.BufferedReader, // reader positioned at value start + reader: *std.io.Reader, // reader positioned at value start // Copies pax attribute value into destination buffer. // Must be called with destination buffer of size at least Attribute.len. @@ -611,13 +611,13 @@ pub const PaxIterator = struct { } // Checks that each record ends with new line. - fn validateAttributeEnding(reader: *std.io.BufferedReader) !void { + fn validateAttributeEnding(reader: *std.io.Reader) !void { if (try reader.takeByte() != '\n') return error.PaxInvalidAttributeEnd; } }; /// Saves tar file content to the file systems. 
-pub fn pipeToFileSystem(dir: std.fs.Dir, reader: *std.io.BufferedReader, options: PipeOptions) !void { +pub fn pipeToFileSystem(dir: std.fs.Dir, reader: *std.io.Reader, options: PipeOptions) !void { var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined; var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined; var iter: Iterator = .init(reader, .{ @@ -818,7 +818,7 @@ test PaxIterator { var buffer: [1024]u8 = undefined; outer: for (cases) |case| { - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(case.data); var iter: PaxIterator = .init(&br, case.data.len); @@ -955,7 +955,7 @@ test Iterator { // example/empty/ const data = @embedFile("tar/testdata/example.tar"); - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(data); // User provided buffers to the iterator @@ -1015,7 +1015,7 @@ test pipeToFileSystem { // example/empty/ const data = @embedFile("tar/testdata/example.tar"); - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(data); var tmp = testing.tmpDir(.{ .no_follow = true }); @@ -1047,7 +1047,7 @@ test pipeToFileSystem { test "pipeToFileSystem root_dir" { const data = @embedFile("tar/testdata/example.tar"); - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(data); // with strip_components = 1 @@ -1096,7 +1096,7 @@ test "pipeToFileSystem root_dir" { test "findRoot with single file archive" { const data = @embedFile("tar/testdata/22752.tar"); - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(data); var tmp = testing.tmpDir(.{}); @@ -1111,7 +1111,7 @@ test "findRoot with single file archive" { test "findRoot without explicit root dir" { const data = @embedFile("tar/testdata/19820.tar"); - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(data); var tmp = testing.tmpDir(.{}); @@ -1126,7 +1126,7 @@ test "findRoot without explicit root dir" { test "pipeToFileSystem strip_components" { const data = @embedFile("tar/testdata/example.tar"); - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(data); var tmp = testing.tmpDir(.{ .no_follow = true }); @@ -1188,7 +1188,7 @@ test "executable bit" { const data = @embedFile("tar/testdata/example.tar"); for ([_]PipeOptions.ModeMode{ .ignore, .executable_bit_only }) |opt| { - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(data); var tmp = testing.tmpDir(.{ .no_follow = true }); diff --git a/lib/std/tar/Writer.zig b/lib/std/tar/Writer.zig index 3b9653eef2..a108674db2 100644 --- a/lib/std/tar/Writer.zig +++ b/lib/std/tar/Writer.zig @@ -67,7 +67,7 @@ pub fn writeFileStream( w: *Writer, sub_path: []const u8, size: usize, - reader: *std.io.BufferedReader, + reader: *std.io.Reader, options: Options, ) std.io.Reader.StreamError!void { try w.writeHeader(.regular, sub_path, "", @intCast(size), options); @@ -441,7 +441,7 @@ test "write files" { for (files) |file| try wrt.writeFileBytes(file.path, file.content, .{}); - var input: std.io.BufferedReader = undefined; + var input: std.io.Reader = undefined; input.initFixed(output.getWritten()); var iter = std.tar.iterator(&input, .{ .file_name_buffer = &file_name_buffer, @@ -476,12 +476,12 @@ test "write files" { var wrt: Writer = .{ .underlying_writer = &output.buffered_writer }; defer output.deinit(); for (files) |file| { - var content: std.io.BufferedReader = 
undefined; + var content: std.io.Reader = undefined; content.initFixed(file.content); try wrt.writeFileStream(file.path, file.content.len, &content, .{}); } - var input: std.io.BufferedReader = undefined; + var input: std.io.Reader = undefined; input.initFixed(output.getWritten()); var iter = std.tar.iterator(&input, .{ .file_name_buffer = &file_name_buffer, diff --git a/lib/std/tar/test.zig b/lib/std/tar/test.zig index cd72b23139..f3b55ae523 100644 --- a/lib/std/tar/test.zig +++ b/lib/std/tar/test.zig @@ -346,8 +346,7 @@ test "run test cases" { var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined; for (cases) |case| { - var br: std.io.BufferedReader = undefined; - br.initFixed(case.data); + var br: std.io.Reader = .fixed(case.data); var iter = tar.iterator(&br, .{ .file_name_buffer = &file_name_buffer, .link_name_buffer = &link_name_buffer, @@ -391,8 +390,7 @@ test "pax/gnu long names with small buffer" { const long_name_cases = [_]Case{ cases[11], cases[25], cases[28] }; for (long_name_cases) |case| { - var br: std.io.BufferedReader = undefined; - br.initFixed(case.data); + var br: std.io.Reader = .fixed(case.data); var iter = tar.iterator(&br, .{ .file_name_buffer = &min_file_name_buffer, .link_name_buffer = &min_link_name_buffer, @@ -413,8 +411,7 @@ test "insufficient buffer in Header name filed" { var min_file_name_buffer: [9]u8 = undefined; var min_link_name_buffer: [100]u8 = undefined; - var br: std.io.BufferedReader = undefined; - br.initFixed(cases[0].data); + var br: std.io.Reader = .fixed(cases[0].data); var iter = tar.iterator(&br, .{ .file_name_buffer = &min_file_name_buffer, .link_name_buffer = &min_link_name_buffer, @@ -469,22 +466,21 @@ test "should not overwrite existing file" { // This ensures that file is not overwritten. // const data = @embedFile("testdata/overwrite_file.tar"); - var br: std.io.BufferedReader = undefined; - br.initFixed(data); + var r: std.io.Reader = .fixed(data); // Unpack with strip_components = 1 should fail var root = std.testing.tmpDir(.{}); defer root.cleanup(); try testing.expectError( error.PathAlreadyExists, - tar.pipeToFileSystem(root.dir, &br, .{ .mode_mode = .ignore, .strip_components = 1 }), + tar.pipeToFileSystem(root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }), ); // Unpack with strip_components = 0 should pass - br.initFixed(data); + r = .fixed(data); var root2 = std.testing.tmpDir(.{}); defer root2.cleanup(); - try tar.pipeToFileSystem(root2.dir, &br, .{ .mode_mode = .ignore, .strip_components = 0 }); + try tar.pipeToFileSystem(root2.dir, &r, .{ .mode_mode = .ignore, .strip_components = 0 }); } test "case sensitivity" { @@ -498,13 +494,12 @@ test "case sensitivity" { // 18089/alacritty/Darkermatrix.yml // const data = @embedFile("testdata/18089.tar"); - var br: std.io.BufferedReader = undefined; - br.initFixed(data); + var r: std.io.Reader = .fixed(data); var root = std.testing.tmpDir(.{}); defer root.cleanup(); - tar.pipeToFileSystem(root.dir, &br, .{ .mode_mode = .ignore, .strip_components = 1 }) catch |err| { + tar.pipeToFileSystem(root.dir, &r, .{ .mode_mode = .ignore, .strip_components = 1 }) catch |err| { // on case insensitive fs we fail on overwrite existing file try testing.expectEqual(error.PathAlreadyExists, err); return; diff --git a/lib/std/tz.zig b/lib/std/tz.zig index 5ff58f1a5b..8f1accb3a5 100644 --- a/lib/std/tz.zig +++ b/lib/std/tz.zig @@ -54,7 +54,7 @@ pub const Tz = struct { }, }; - pub fn parse(allocator: std.mem.Allocator, reader: *std.io.BufferedReader) !Tz { + pub fn parse(allocator: 
std.mem.Allocator, reader: *std.io.Reader) !Tz { var legacy_header = try reader.takeStruct(Header); if (!std.mem.eql(u8, &legacy_header.magic, "TZif")) return error.BadHeader; if (legacy_header.version != 0 and legacy_header.version != '2' and legacy_header.version != '3') return error.BadVersion; @@ -215,7 +215,7 @@ pub const Tz = struct { test "slim" { const data = @embedFile("tz/asia_tokyo.tzif"); - var in_stream: std.io.BufferedReader = undefined; + var in_stream: std.io.Reader = undefined; in_stream.initFixed(data); var tz = try std.Tz.parse(std.testing.allocator, &in_stream); @@ -229,7 +229,7 @@ test "slim" { test "fat" { const data = @embedFile("tz/antarctica_davis.tzif"); - var in_stream: std.io.BufferedReader = undefined; + var in_stream: std.io.Reader = undefined; in_stream.initFixed(data); var tz = try std.Tz.parse(std.testing.allocator, &in_stream); @@ -243,7 +243,7 @@ test "fat" { test "legacy" { // Taken from Slackware 8.0, from 2001 const data = @embedFile("tz/europe_vatican.tzif"); - var in_stream: std.io.BufferedReader = undefined; + var in_stream: std.io.Reader = undefined; in_stream.initFixed(data); var tz = try std.Tz.parse(std.testing.allocator, &in_stream); diff --git a/lib/std/zig/Server.zig b/lib/std/zig/Server.zig index 7041778542..fef8ffd20f 100644 --- a/lib/std/zig/Server.zig +++ b/lib/std/zig/Server.zig @@ -1,4 +1,4 @@ -in: *std.io.BufferedReader, +in: *std.io.Reader, out: *std.io.BufferedWriter, pub const Message = struct { @@ -93,7 +93,7 @@ pub const Message = struct { }; pub const Options = struct { - in: *std.io.BufferedReader, + in: *std.io.Reader, out: *std.io.BufferedWriter, zig_version: []const u8, }; diff --git a/lib/std/zig/llvm/BitcodeReader.zig b/lib/std/zig/llvm/BitcodeReader.zig index 691ca29d24..05cb236d98 100644 --- a/lib/std/zig/llvm/BitcodeReader.zig +++ b/lib/std/zig/llvm/BitcodeReader.zig @@ -1,6 +1,6 @@ allocator: std.mem.Allocator, record_arena: std.heap.ArenaAllocator.State, -br: *std.io.BufferedReader, +reader: *std.io.Reader, keep_names: bool, bit_buffer: u32, bit_offset: u5, @@ -93,14 +93,14 @@ pub const Record = struct { }; pub const InitOptions = struct { - br: *std.io.BufferedReader, + reader: *std.io.Reader, keep_names: bool = false, }; pub fn init(allocator: std.mem.Allocator, options: InitOptions) BitcodeReader { return .{ .allocator = allocator, .record_arena = .{}, - .br = options.br, + .reader = options.reader, .keep_names = options.keep_names, .bit_buffer = 0, .bit_offset = 0, @@ -172,7 +172,7 @@ pub fn next(bc: *BitcodeReader) !?Item { pub fn skipBlock(bc: *BitcodeReader, block: Block) !void { assert(bc.bit_offset == 0); - try bc.br.discard(4 * @as(u34, block.len)); + try bc.reader.discard(4 * @as(u34, block.len)); try bc.endBlock(); } @@ -371,17 +371,17 @@ fn align32Bits(bc: *BitcodeReader) void { fn read32Bits(bc: *BitcodeReader) !u32 { assert(bc.bit_offset == 0); - return bc.br.takeInt(u32, .little); + return bc.reader.takeInt(u32, .little); } fn readBytes(bc: *BitcodeReader, bytes: []u8) !void { assert(bc.bit_offset == 0); - try bc.br.read(bytes); + try bc.reader.read(bytes); const trailing_bytes = bytes.len % 4; if (trailing_bytes > 0) { var bit_buffer: [4]u8 = @splat(0); - try bc.br.read(bit_buffer[trailing_bytes..]); + try bc.reader.read(bit_buffer[trailing_bytes..]); bc.bit_buffer = std.mem.readInt(u32, &bit_buffer, .little); bc.bit_offset = @intCast(8 * trailing_bytes); } diff --git a/lib/std/zig/system/linux.zig b/lib/std/zig/system/linux.zig index 8d8b00ff83..7233bbbb6d 100644 --- a/lib/std/zig/system/linux.zig +++ 
b/lib/std/zig/system/linux.zig @@ -342,9 +342,8 @@ fn testParser( expected_model: *const Target.Cpu.Model, input: []const u8, ) !void { - var br: std.io.BufferedReader = undefined; - br.initFixed(@constCast(input)); - const result = try parser.parse(arch, &br); + var r: std.io.Reader = .fixed(input); + const result = try parser.parse(arch, &r); try testing.expectEqual(expected_model, result.?.model); try testing.expect(expected_model.features.eql(result.?.features)); } diff --git a/lib/std/zip.zig b/lib/std/zip.zig index d71927675c..ff20f3847d 100644 --- a/lib/std/zip.zig +++ b/lib/std/zip.zig @@ -161,14 +161,14 @@ pub const EndRecord = extern struct { pub const Decompress = union { inflate: std.compress.flate.Decompress, - store: *std.io.BufferedReader, + store: *std.io.Reader, fn readable( d: *Decompress, - reader: *std.io.BufferedReader, + reader: *std.io.Reader, method: CompressionMethod, buffer: []u8, - ) std.io.BufferedReader { + ) std.io.Reader { switch (method) { .store => { d.* = .{ .store = reader }; diff --git a/lib/std/zip/test.zig b/lib/std/zip/test.zig index 49ebffe15b..a02ee1ab2b 100644 --- a/lib/std/zip/test.zig +++ b/lib/std/zip/test.zig @@ -51,7 +51,7 @@ const FileStore = struct { uncompressed_size: usize, }; -fn makeZip(file_writer: *std.fs.File.Writer, files: []const File, options: WriteZipOptions) !std.io.BufferedReader { +fn makeZip(file_writer: *std.fs.File.Writer, files: []const File, options: WriteZipOptions) !std.io.Reader { const store = try std.testing.allocator.alloc(FileStore, files.len); defer std.testing.allocator.free(store); return makeZipWithStore(file_writer, files, options, store); @@ -198,7 +198,7 @@ const Zipper = struct { }, .deflate => { const offset = writer.count; - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(@constCast(opt.content)); var compress: std.compress.flate.Compress = .init(&br, .{}); var compress_br = compress.readable(&.{}); diff --git a/src/Compilation.zig b/src/Compilation.zig index 7e650c4ab7..ea8d63e6d2 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1076,9 +1076,8 @@ pub const CObject = struct { var buffer: [1024]u8 = undefined; const file = try std.fs.cwd().openFile(path, .{}); defer file.close(); - var br: std.io.BufferedReader = undefined; - br.init(file.reader(), &buffer); - var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .br = &br }); + var file_reader = file.reader(&buffer); + var bc = std.zig.llvm.BitcodeReader.init(gpa, .{ .reader = &file_reader.interface }); defer bc.deinit(); var file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty; diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index 89ef199996..c6cdf82774 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -79,7 +79,7 @@ pub const Oid = union(Format) { }; } - pub fn readBytes(oid_format: Format, reader: *std.io.BufferedReader) std.io.Reader.Error!Oid { + pub fn readBytes(oid_format: Format, reader: *std.io.Reader) std.io.Reader.Error!Oid { return switch (oid_format) { .sha1 => { var result: Oid = .{ .sha1 = undefined }; @@ -593,7 +593,7 @@ const Packet = union(enum) { const max_data_length = 65516; /// Reads a packet in pkt-line format. 
- fn read(reader: *std.io.BufferedReader, buf: *[max_data_length]u8) !Packet { + fn read(reader: *std.io.Reader, buf: *[max_data_length]u8) !Packet { const length = std.fmt.parseUnsigned(u16, &try reader.readBytesNoEof(4), 16) catch return error.InvalidPacket; switch (length) { 0 => return .flush, @@ -1107,7 +1107,7 @@ const PackHeader = struct { const signature = "PACK"; const supported_version = 2; - fn read(reader: *std.io.BufferedReader) !PackHeader { + fn read(reader: *std.io.Reader) !PackHeader { const actual_signature = try reader.take(4); if (!mem.eql(u8, actual_signature, signature)) return error.InvalidHeader; const version = try reader.takeInt(u32, .big); @@ -1161,7 +1161,7 @@ const EntryHeader = union(Type) { }; } - fn read(format: Oid.Format, reader: *std.io.BufferedReader) !EntryHeader { + fn read(format: Oid.Format, reader: *std.io.Reader) !EntryHeader { const InitialByte = packed struct { len: u4, type: u3, has_next: bool }; const initial: InitialByte = @bitCast(try reader.takeByte()); const rest_len = if (initial.has_next) try readSizeVarInt(reader) else 0; @@ -1187,7 +1187,7 @@ const EntryHeader = union(Type) { } }; -fn readSizeVarInt(r: *std.io.BufferedReader) !u64 { +fn readSizeVarInt(r: *std.io.Reader) !u64 { const Byte = packed struct { value: u7, has_next: bool }; var b: Byte = @bitCast(try r.takeByte()); var value: u64 = b.value; @@ -1200,7 +1200,7 @@ fn readSizeVarInt(r: *std.io.BufferedReader) !u64 { return value; } -fn readOffsetVarInt(r: *std.io.BufferedReader) !u64 { +fn readOffsetVarInt(r: *std.io.Reader) !u64 { const Byte = packed struct { value: u7, has_next: bool }; var b: Byte = @bitCast(try r.takeByte()); var value: u64 = b.value; @@ -1219,7 +1219,7 @@ const IndexHeader = struct { const supported_version = 2; const size = 4 + 4 + @sizeOf([256]u32); - fn read(index_header: *IndexHeader, br: *std.io.BufferedReader) !void { + fn read(index_header: *IndexHeader, br: *std.io.Reader) !void { const sig = try br.take(4); if (!mem.eql(u8, sig, signature)) return error.InvalidHeader; const version = try br.takeInt(u32, .big); @@ -1493,7 +1493,7 @@ fn resolveDeltaChain( } /// Reads the complete contents of an object from `reader`. -fn readObjectRaw(gpa: Allocator, reader: *std.io.BufferedReader, size: u64) ![]u8 { +fn readObjectRaw(gpa: Allocator, reader: *std.io.Reader, size: u64) ![]u8 { const alloc_size = std.math.cast(usize, size) orelse return error.ObjectTooLarge; var decompress: zlib.Decompressor = .init(reader); var buffer: std.ArrayListUnmanaged(u8) = .empty; @@ -1505,7 +1505,7 @@ fn readObjectRaw(gpa: Allocator, reader: *std.io.BufferedReader, size: u64) ![]u /// The format of the delta data is documented in /// [pack-format](https://git-scm.com/docs/pack-format). 
-fn expandDelta(base_object: []const u8, delta_reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) !void { +fn expandDelta(base_object: []const u8, delta_reader: *std.io.Reader, writer: *std.io.BufferedWriter) !void { var base_offset: u32 = 0; while (true) { const inst: packed struct { value: u7, copy: bool } = @bitCast(delta_reader.takeByte() catch |e| switch (e) { diff --git a/src/Zcu.zig b/src/Zcu.zig index 10fec614d1..5508d6533d 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -2828,7 +2828,7 @@ pub fn loadZirCache(gpa: Allocator, cache_file: std.fs.File) !Zir { }; } -pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *std.io.BufferedReader) !Zir { +pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *std.io.Reader) !Zir { var instructions: std.MultiArrayList(Zir.Inst) = .{}; errdefer instructions.deinit(gpa); @@ -2947,7 +2947,7 @@ pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir }; } -pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_br: *std.io.BufferedReader) !Zoir { +pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_br: *std.io.Reader) !Zoir { var zoir: Zoir = .{ .nodes = .empty, .extra = &.{}, diff --git a/src/arch/x86_64/Disassembler.zig b/src/arch/x86_64/Disassembler.zig index 88e5f4cd4d..856e3e850d 100644 --- a/src/arch/x86_64/Disassembler.zig +++ b/src/arch/x86_64/Disassembler.zig @@ -372,7 +372,7 @@ fn parseGpRegister(low_enc: u3, is_extended: bool, rex: Rex, bit_size: u64) Regi } fn parseImm(dis: *Disassembler, kind: Encoding.Op) !Immediate { - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(dis.code[dis.pos..]); defer dis.pos += br.seek; return switch (kind) { @@ -388,7 +388,7 @@ fn parseImm(dis: *Disassembler, kind: Encoding.Op) !Immediate { } fn parseOffset(dis: *Disassembler) !u64 { - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(dis.code[dis.pos..]); defer dis.pos += br.seek; return br.takeInt(u64, .little); @@ -464,8 +464,7 @@ fn parseSibByte(dis: *Disassembler) !Sib { } fn parseDisplacement(dis: *Disassembler, modrm: ModRm, sib: ?Sib) !i32 { - var br: std.io.BufferedReader = undefined; - br.initFixed(dis.code[dis.pos..]); + var br: std.io.Reader = .fixed(dis.code[dis.pos..]); defer dis.pos += br.seek; if (sib) |info| { if (info.base == 0b101 and modrm.mod == 0) { diff --git a/src/libs/glibc.zig b/src/libs/glibc.zig index bed242e8e6..f45d776776 100644 --- a/src/libs/glibc.zig +++ b/src/libs/glibc.zig @@ -793,8 +793,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye // twice, which causes a "duplicate symbol" assembler error. 
var versions_written = std.AutoArrayHashMap(Version, void).init(arena); - var inc_br: std.io.BufferedReader = undefined; - inc_br.initFixed(metadata.inclusions); + var inc_br: std.io.Reader = .fixed(metadata.inclusions); const fn_inclusions_len = try inc_br.takeInt(u16, .little); diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 76e0f5b5db..cb45cd59c0 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -142,8 +142,7 @@ const DebugInfo = struct { &abbrev_code_buf, debug_info.section.off(dwarf) + unit_ptr.off + unit_ptr.header_len + entry_ptr.off, ) != abbrev_code_buf.len) return error.InputOutput; - var abbrev_code_br: std.io.BufferedReader = undefined; - abbrev_code_br.initFixed(&abbrev_code_buf); + var abbrev_code_br: std.io.Reader = .fixed(&abbrev_code_buf); return @enumFromInt(abbrev_code_br.takeLeb128(@typeInfo(AbbrevCode).@"enum".tag_type) catch unreachable); } @@ -2757,7 +2756,7 @@ fn finishWipNavFuncInner( try dibw.writeLeb128(@intFromEnum(AbbrevCode.null)); } else { const abbrev_code_buf = wip_nav.debug_info.getWritten()[0..AbbrevCode.decl_bytes]; - var abbrev_code_br: std.io.BufferedReader = undefined; + var abbrev_code_br: std.io.Reader = undefined; abbrev_code_br.initFixed(abbrev_code_buf); const abbrev_code: AbbrevCode = @enumFromInt(abbrev_code_br.takeLeb128(@typeInfo(AbbrevCode).@"enum".tag_type) catch unreachable); std.leb.writeUnsignedFixed( diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 1c1b8e11f7..c5b77723d6 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -1192,18 +1192,17 @@ pub fn codeDecompressAlloc(self: *Object, elf_file: *Elf, atom_index: Atom.Index const atom_ptr = self.atom(atom_index).?; const shdr = atom_ptr.inputShdr(elf_file); const handle = elf_file.fileHandle(self.file_handle); - var br: std.io.BufferedReader = undefined; - br.initFixed(try self.preadShdrContentsAlloc(gpa, handle, atom_ptr.input_section_index)); - defer if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) gpa.free(br.storageBuffer()); + var r: std.io.Reader = .fixed(try self.preadShdrContentsAlloc(gpa, handle, atom_ptr.input_section_index)); + defer if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) gpa.free(r.storageBuffer()); if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) { - const chdr = (try br.takeStruct(elf.Elf64_Chdr)).*; + const chdr = (try r.takeStruct(elf.Elf64_Chdr)).*; switch (chdr.ch_type) { .ZLIB => { var bw: std.io.BufferedWriter = undefined; bw.initFixed(try gpa.alloc(u8, std.math.cast(usize, chdr.ch_size) orelse return error.Overflow)); errdefer gpa.free(bw.buffer); - try std.compress.zlib.decompress(&br, &bw); + try std.compress.zlib.decompress(&r, &bw); if (bw.end != bw.buffer.len) return error.InputOutput; return bw.buffer; }, @@ -1211,7 +1210,7 @@ pub fn codeDecompressAlloc(self: *Object, elf_file: *Elf, atom_index: Atom.Index } } - return br.storageBuffer(); + return r.storageBuffer(); } fn locals(self: *Object) []Symbol { diff --git a/src/link/Elf/eh_frame.zig b/src/link/Elf/eh_frame.zig index 60a7741f78..5d46b54d39 100644 --- a/src/link/Elf/eh_frame.zig +++ b/src/link/Elf/eh_frame.zig @@ -187,7 +187,7 @@ pub const Cie = struct { }; pub const Iterator = struct { - br: std.io.BufferedReader, + reader: std.io.Reader, pub const Record = struct { tag: enum { fde, cie }, @@ -196,18 +196,18 @@ pub const Iterator = struct { }; pub fn next(it: *Iterator) !?Record { - if (it.br.seek >= it.br.storageBuffer().len) return null; + if (it.reader.seek >= it.reader.storageBuffer().len) return null; - const size = try it.br.takeInt(u32, 
.little); + const size = try it.reader.takeInt(u32, .little); if (size == 0xFFFFFFFF) @panic("DWARF CFI is 32bit on macOS"); - const id = try it.br.takeInt(u32, .little); + const id = try it.reader.takeInt(u32, .little); const record: Record = .{ .tag = if (id == 0) .cie else .fde, - .offset = it.br.seek, + .offset = it.reader.seek, .size = size, }; - try it.br.discard(size); + try it.reader.discard(size); return record; } diff --git a/src/link/MachO/Dwarf.zig b/src/link/MachO/Dwarf.zig index 5ec20dec47..bc5fda2b9e 100644 --- a/src/link/MachO/Dwarf.zig +++ b/src/link/MachO/Dwarf.zig @@ -273,10 +273,9 @@ pub const InfoReader = struct { } pub fn readLeb128(p: *InfoReader, comptime Type: type) !Type { - var br: std.io.BufferedReader = undefined; - br.initFixed(p.bytes()[p.pos..]); - defer p.pos += br.seek; - return br.takeLeb128(Type); + var r: std.io.Reader = .fixed(p.bytes()[p.pos..]); + defer p.pos += r.seek; + return r.takeLeb128(Type); } pub fn seekTo(p: *InfoReader, off: u64) !void { @@ -331,10 +330,9 @@ pub const AbbrevReader = struct { } pub fn readLeb128(p: *AbbrevReader, comptime Type: type) !Type { - var br: std.io.BufferedReader = undefined; - br.initFixed(p.bytes()[p.pos..]); - defer p.pos += br.seek; - return br.takeLeb128(Type); + var r: std.io.Reader = .fixed(p.bytes()[p.pos..]); + defer p.pos += r.seek; + return r.takeLeb128(Type); } pub fn seekTo(p: *AbbrevReader, off: u64) !void { diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig index 97918fc4a7..5a0191bda6 100644 --- a/src/link/MachO/Dylib.zig +++ b/src/link/MachO/Dylib.zig @@ -167,7 +167,7 @@ pub fn addExport(self: *Dylib, allocator: Allocator, name: []const u8, flags: Ex fn parseTrieNode( self: *Dylib, - br: *std.io.BufferedReader, + br: *std.io.Reader, allocator: Allocator, arena: Allocator, prefix: []const u8, @@ -216,9 +216,8 @@ fn parseTrie(self: *Dylib, data: []const u8, macho_file: *MachO) !void { var arena = std.heap.ArenaAllocator.init(gpa); defer arena.deinit(); - var br: std.io.BufferedReader = undefined; - br.initFixed(data); - try self.parseTrieNode(&br, gpa, arena.allocator(), ""); + var r: std.io.Reader = .fixed(data); + try self.parseTrieNode(&r, gpa, arena.allocator(), ""); } fn parseTbd(self: *Dylib, macho_file: *MachO) !void { diff --git a/src/link/MachO/eh_frame.zig b/src/link/MachO/eh_frame.zig index 3993e9fc97..2a452604b0 100644 --- a/src/link/MachO/eh_frame.zig +++ b/src/link/MachO/eh_frame.zig @@ -12,34 +12,33 @@ pub const Cie = struct { const tracy = trace(@src()); defer tracy.end(); - var br: std.io.BufferedReader = undefined; - br.initFixed(cie.getData(macho_file)); + var r: std.io.Reader = .fixed(cie.getData(macho_file)); - try br.discard(9); - const aug = try br.takeSentinel(0); + try r.discard(9); + const aug = try r.takeSentinel(0); if (aug[0] != 'z') return; // TODO should we error out? 
- _ = try br.takeLeb128(u64); // code alignment factor - _ = try br.takeLeb128(u64); // data alignment factor - _ = try br.takeLeb128(u64); // return address register - _ = try br.takeLeb128(u64); // augmentation data length + _ = try r.takeLeb128(u64); // code alignment factor + _ = try r.takeLeb128(u64); // data alignment factor + _ = try r.takeLeb128(u64); // return address register + _ = try r.takeLeb128(u64); // augmentation data length for (aug[1..]) |ch| switch (ch) { 'R' => { - const enc = try br.takeByte(); + const enc = try r.takeByte(); if (enc != DW_EH_PE.pcrel | DW_EH_PE.absptr) { @panic("unexpected pointer encoding"); // TODO error } }, 'P' => { - const enc = try br.takeByte(); + const enc = try r.takeByte(); if (enc != DW_EH_PE.pcrel | DW_EH_PE.indirect | DW_EH_PE.sdata4) { @panic("unexpected personality pointer encoding"); // TODO error } - _ = try br.takeInt(u32, .little); // personality pointer + _ = try r.takeInt(u32, .little); // personality pointer }, 'L' => { - const enc = try br.takeByte(); + const enc = try r.takeByte(); switch (enc & DW_EH_PE.type_mask) { DW_EH_PE.sdata4 => cie.lsda_size = .p32, DW_EH_PE.absptr => cie.lsda_size = .p64, @@ -143,7 +142,7 @@ pub const Fde = struct { const object = fde.getObject(macho_file); const sect = object.sections.items(.header)[object.eh_frame_sect_index.?]; - var br: std.io.BufferedReader = undefined; + var br: std.io.Reader = undefined; br.initFixed(fde.getData(macho_file)); try br.discard(4); @@ -267,7 +266,7 @@ pub const Fde = struct { }; pub const Iterator = struct { - br: std.io.BufferedReader, + br: std.io.Reader, pub const Record = struct { tag: enum { fde, cie }, diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index b16a8bfd07..0a8c3b6c6f 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -2087,10 +2087,9 @@ pub const Expr = enum(u32) { pub const end = @intFromEnum(std.wasm.Opcode.end); pub fn slice(index: Expr, wasm: *const Wasm) [:end]const u8 { - var br: std.io.BufferedReader = undefined; - br.initFixed(wasm.string_bytes.items[@intFromEnum(index)..]); - Object.skipInit(&br) catch unreachable; - return br.storageBuffer()[0 .. br.seek - 1 :end]; + var r: std.io.Reader = .fixed(wasm.string_bytes.items[@intFromEnum(index)..]); + Object.skipInit(&r) catch unreachable; + return r.storageBuffer()[0 .. 
     }
 };
 
@@ -3038,7 +3037,6 @@ fn parseObject(wasm: *Wasm, obj: link.Input.Object) !void {
     const stat = try obj.file.stat();
     const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig;
 
-    var br: std.io.BufferedReader = undefined;
-    br.initFixed(try gpa.alloc(u8, size));
+    var br: std.io.Reader = .fixed(try gpa.alloc(u8, size));
     defer gpa.free(br.storageBuffer());
diff --git a/src/link/Wasm/Archive.zig b/src/link/Wasm/Archive.zig
index c1877bd166..0285b07d2d 100644
--- a/src/link/Wasm/Archive.zig
+++ b/src/link/Wasm/Archive.zig
@@ -167,10 +167,8 @@ pub fn parseObject(
     };
 
     const object_file_size = try header.parsedSize();
-    var br: std.io.BufferedReader = undefined;
-    br.initFixed(file_contents[object_offset + @sizeOf(Header) ..][0..object_file_size]);
-
-    return Object.parse(wasm, &br, path, object_name, host_name, scratch_space, must_link, gc_sections);
+    var r: std.io.Reader = .fixed(file_contents[object_offset + @sizeOf(Header) ..][0..object_file_size]);
+    return Object.parse(wasm, &r, path, object_name, host_name, scratch_space, must_link, gc_sections);
 }
 
 const Archive = @This();
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index a630453548..57a86ef8bd 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -252,7 +252,7 @@ pub const ScratchSpace = struct {
 
 pub fn parse(
     wasm: *Wasm,
-    br: *std.io.BufferedReader,
+    br: *std.io.Reader,
     path: Path,
     archive_member_name: ?[]const u8,
     host_name: Wasm.OptionalString,
@@ -1402,7 +1402,7 @@ pub fn parse(
 /// Based on the "features" custom section, parses it into a list of
 /// features that tell the linker what features were enabled and may be mandatory
 /// to be able to link.
-fn parseFeatures(wasm: *Wasm, br: *std.io.BufferedReader, path: Path) error{ OutOfMemory, LinkFailure }!Wasm.Feature.Set {
+fn parseFeatures(wasm: *Wasm, br: *std.io.Reader, path: Path) error{ OutOfMemory, LinkFailure }!Wasm.Feature.Set {
     const gpa = wasm.base.comp.gpa;
     const diags = &wasm.base.comp.link_diags;
     const features_len = try br.takeLeb128(u32);
@@ -1430,7 +1430,7 @@ fn parseFeatures(wasm: *Wasm, br: *std.io.BufferedReader, path: Path) error{ OutOfMemory, LinkFailure }!Wasm.Feature.Set {
     return .fromString(try wasm.internString(@ptrCast(feature_buffer)));
 }
 
-fn readLimits(br: *std.io.BufferedReader) std.io.Reader.Error!std.wasm.Limits {
+fn readLimits(br: *std.io.Reader) std.io.Reader.Error!std.wasm.Limits {
     const flags: std.wasm.Limits.Flags = @bitCast(try br.takeByte());
     const min = try br.takeLeb128(u32);
     const max = if (flags.has_max) try br.takeLeb128(u32) else 0;
@@ -1441,13 +1441,13 @@ fn readLimits(br: *std.io.BufferedReader) std.io.Reader.Error!std.wasm.Limits {
     };
 }
 
-fn readInit(wasm: *Wasm, br: *std.io.BufferedReader) std.io.Reader.Error!Wasm.Expr {
+fn readInit(wasm: *Wasm, br: *std.io.Reader) std.io.Reader.Error!Wasm.Expr {
     const start = br.seek;
     try skipInit(br); // one after the end opcode
    return wasm.addExpr(br.storageBuffer()[start..br.seek]);
 }
 
-pub fn skipInit(br: *std.io.BufferedReader) std.io.Reader.Error!void {
+pub fn skipInit(br: *std.io.Reader) std.io.Reader.Error!void {
     switch (try br.takeEnum(std.wasm.Opcode, .little)) {
         .i32_const => _ = try br.takeLeb128(i32),
         .i64_const => _ = try br.takeLeb128(i64),
diff --git a/src/link/riscv.zig b/src/link/riscv.zig
index 665a56ad17..29bfdb3bde 100644
--- a/src/link/riscv.zig
+++ b/src/link/riscv.zig
@@ -13,9 +13,8 @@ pub fn writeSetSubUleb(comptime op: enum { set, sub }, addend: i64, bw: *std.io.
switch (op) { .set => try overwriteUleb(@intCast(addend), bw), .sub => { - var br: std.io.BufferedReader = undefined; - br.initFixed(try bw.writableArray(1)); - const old_value = try br.takeLeb128(u64); + var r: std.io.Reader = .fixed(try bw.writableArray(1)); + const old_value = try r.takeLeb128(u64); try overwriteUleb(old_value -% @as(u64, @intCast(addend)), bw); }, }