diff --git a/lib/std/compress/flate/bit_reader.zig b/lib/std/compress/flate/bit_reader.zig index 1e41f081c1..a68fe096ca 100644 --- a/lib/std/compress/flate/bit_reader.zig +++ b/lib/std/compress/flate/bit_reader.zig @@ -2,17 +2,14 @@ const std = @import("std"); const assert = std.debug.assert; const testing = std.testing; -pub fn bitReader(comptime T: type, reader: anytype) BitReader(T, @TypeOf(reader)) { - return BitReader(T, @TypeOf(reader)).init(reader); -} - -pub fn BitReader64(comptime ReaderType: type) type { - return BitReader(u64, ReaderType); -} - -pub fn BitReader32(comptime ReaderType: type) type { - return BitReader(u32, ReaderType); -} +pub const Flags = packed struct(u3) { + /// dont advance internal buffer, just get bits, leave them in buffer + peek: bool = false, + /// assume that there is no need to fill, fill should be called before + buffered: bool = false, + /// bit reverse read bits + reverse: bool = false, +}; /// Bit reader used during inflate (decompression). Has internal buffer of 64 /// bits which shifts right after bits are consumed. Uses forward_reader to fill @@ -23,14 +20,14 @@ pub fn BitReader32(comptime ReaderType: type) type { /// fill buffer from forward_reader by calling fill in advance and readF with /// buffered flag set. 
/// -pub fn BitReader(comptime T: type, comptime ReaderType: type) type { +pub fn BitReader(comptime T: type) type { assert(T == u32 or T == u64); const t_bytes: usize = @sizeOf(T); const Tshift = if (T == u64) u6 else u5; return struct { // Underlying reader used for filling internal bits buffer - forward_reader: ReaderType = undefined, + forward_reader: *std.io.BufferedReader, // Internal buffer of 64 bits bits: T = 0, // Number of bits in the buffer @@ -38,10 +35,8 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type { const Self = @This(); - pub const Error = ReaderType.Error || error{EndOfStream}; - - pub fn init(rdr: ReaderType) Self { - var self = Self{ .forward_reader = rdr }; + pub fn init(forward_reader: *std.io.BufferedReader) Self { + var self = Self{ .forward_reader = forward_reader }; self.fill(1) catch {}; return self; } @@ -55,7 +50,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type { /// bits to decode. So `nice` is not hard limit, it will just try to have /// that number of bits available. If end of forward stream is reached /// it may be some extra zero bits in buffer. - pub inline fn fill(self: *Self, nice: u6) !void { + pub fn fill(self: *Self, nice: u6) !void { if (self.nbits >= nice and nice != 0) { return; // We have enough bits } @@ -86,31 +81,29 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type { // First read from internal bits buffer. var n: usize = 0; while (self.nbits > 0 and n < buf.len) { - buf[n] = try self.readF(u8, flag.buffered); + buf[n] = try self.readF(u8, .{ .buffered = true }); n += 1; } // Then use forward reader for all other bytes. 
try self.forward_reader.readNoEof(buf[n..]); } - pub const flag = struct { - pub const peek: u3 = 0b001; // dont advance internal buffer, just get bits, leave them in buffer - pub const buffered: u3 = 0b010; // assume that there is no need to fill, fill should be called before - pub const reverse: u3 = 0b100; // bit reverse read bits - }; - /// Alias for readF(U, 0). pub fn read(self: *Self, comptime U: type) !U { return self.readF(U, 0); } /// Alias for readF with flag.peak set. - pub inline fn peekF(self: *Self, comptime U: type, comptime how: u3) !U { - return self.readF(U, how | flag.peek); + pub inline fn peekF(self: *Self, comptime U: type, comptime how: Flags) !U { + return self.readF(U, .{ + .peek = true, + .buffered = how.buffered, + .reverse = how.reverse, + }); } /// Read with flags provided. - pub fn readF(self: *Self, comptime U: type, comptime how: u3) !U { + pub fn readF(self: *Self, comptime U: type, comptime how: Flags) !U { if (U == T) { assert(how == 0); assert(self.alignBits() == 0); @@ -129,34 +122,35 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type { try self.shift(n); // advance buffer for n return u; }, - (flag.peek) => { // no shift, leave bits in the buffer + .{ .peek = true } => { // no shift, leave bits in the buffer try self.fill(n); return @truncate(self.bits); }, - flag.buffered => { // no fill, assume that buffer has enough bits + .{ .buffered = true } => { // no fill, assume that buffer has enough bits const u: U = @truncate(self.bits); try self.shift(n); return u; }, - (flag.reverse) => { // same as 0 with bit reverse + .{ .reverse = true } => { // same as 0 with bit reverse try self.fill(n); const u: U = @truncate(self.bits); try self.shift(n); return @bitReverse(u); }, - (flag.peek | flag.reverse) => { + .{ .peek = true, .reverse = true } => { try self.fill(n); return @bitReverse(@as(U, @truncate(self.bits))); }, - (flag.buffered | flag.reverse) => { + .{ .buffered = true, .reverse = true } => { const u: U = 
@truncate(self.bits); try self.shift(n); return @bitReverse(u); }, - (flag.peek | flag.buffered) => { + .{ .peek = true, .buffered = true }, + => { return @truncate(self.bits); }, - (flag.peek | flag.buffered | flag.reverse) => { + .{ .peek = true, .buffered = true, .reverse = true } => { return @bitReverse(@as(U, @truncate(self.bits))); }, } @@ -169,7 +163,7 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type { 0 => { try self.fill(n); }, - flag.buffered => {}, + .{ .buffered = true } => {}, else => unreachable, } const mask: u16 = (@as(u16, 1) << n) - 1; @@ -226,24 +220,24 @@ pub fn BitReader(comptime T: type, comptime ReaderType: type) type { /// 11000111 pub fn readFixedCode(self: *Self) !u16 { try self.fill(7 + 2); - const code7 = try self.readF(u7, flag.buffered | flag.reverse); + const code7 = try self.readF(u7, .{ .buffered = true, .reverse = true }); if (code7 <= 0b0010_111) { // 7 bits, 256-279, codes 0000_000 - 0010_111 return @as(u16, code7) + 256; } else if (code7 <= 0b1011_111) { // 8 bits, 0-143, codes 0011_0000 through 1011_1111 - return (@as(u16, code7) << 1) + @as(u16, try self.readF(u1, flag.buffered)) - 0b0011_0000; + return (@as(u16, code7) << 1) + @as(u16, try self.readF(u1, .{ .buffered = true })) - 0b0011_0000; } else if (code7 <= 0b1100_011) { // 8 bit, 280-287, codes 1100_0000 - 1100_0111 - return (@as(u16, code7 - 0b1100000) << 1) + try self.readF(u1, flag.buffered) + 280; + return (@as(u16, code7 - 0b1100000) << 1) + try self.readF(u1, .{ .buffered = true }) + 280; } else { // 9 bit, 144-255, codes 1_1001_0000 - 1_1111_1111 - return (@as(u16, code7 - 0b1100_100) << 2) + @as(u16, try self.readF(u2, flag.buffered | flag.reverse)) + 144; + return (@as(u16, code7 - 0b1100_100) << 2) + @as(u16, try self.readF(u2, .{ .buffered = true, .reverse = true })) + 144; } } }; } test "readF" { - var fbs = std.io.fixedBufferStream(&[_]u8{ 0xf3, 0x48, 0xcd, 0xc9, 0x00, 0x00 }); - var br = bitReader(u64, fbs.reader()); - const F = 
BitReader64(@TypeOf(fbs.reader())).flag; + var input: std.io.BufferedReader = undefined; + input.initFixed(&[_]u8{ 0xf3, 0x48, 0xcd, 0xc9, 0x00, 0x00 }); + var br: BitReader(u64) = .init(&input); try testing.expectEqual(@as(u8, 48), br.nbits); try testing.expectEqual(@as(u64, 0xc9cd48f3), br.bits); @@ -253,8 +247,8 @@ test "readF" { try testing.expectEqual(@as(u8, 48 - 3), br.nbits); try testing.expectEqual(@as(u3, 5), br.alignBits()); - try testing.expect(try br.readF(u8, F.peek) == 0b0001_1110); - try testing.expect(try br.readF(u9, F.peek) == 0b1_0001_1110); + try testing.expect(try br.readF(u8, .{ .peek = true }) == 0b0001_1110); + try testing.expect(try br.readF(u9, .{ .peek = true }) == 0b1_0001_1110); try br.shift(9); try testing.expectEqual(@as(u8, 36), br.nbits); try testing.expectEqual(@as(u3, 4), br.alignBits()); @@ -283,15 +277,15 @@ test "read block type 1 data" { 0x0c, 0x01, 0x02, 0x03, // 0xaa, 0xbb, 0xcc, 0xdd, }; - var fbs = std.io.fixedBufferStream(&data); - var br = bitReader(T, fbs.reader()); - const F = BitReader(T, @TypeOf(fbs.reader())).flag; + var fbs: std.io.BufferedReader = undefined; + fbs.initFixed(&data); + var br: BitReader(T) = .init(&fbs); try testing.expectEqual(@as(u1, 1), try br.readF(u1, 0)); // bfinal try testing.expectEqual(@as(u2, 1), try br.readF(u2, 0)); // block_type for ("Hello world\n") |c| { - try testing.expectEqual(@as(u8, c), try br.readF(u8, F.reverse) - 0x30); + try testing.expectEqual(@as(u8, c), try br.readF(u8, .{ .reverse = true }) - 0x30); } try testing.expectEqual(@as(u7, 0), try br.readF(u7, 0)); // end of block br.alignToByte(); @@ -306,8 +300,9 @@ test "shift/fill" { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, }; - var fbs = std.io.fixedBufferStream(&data); - var br = bitReader(u64, fbs.reader()); + var fbs: std.io.BufferedReader = undefined; + fbs.initFixed(&data); + var br: BitReader(u64) = .init(&fbs); try testing.expectEqual(@as(u64, 
0x08_07_06_05_04_03_02_01), br.bits); try br.shift(8); @@ -332,8 +327,9 @@ test "readAll" { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, }; - var fbs = std.io.fixedBufferStream(&data); - var br = bitReader(T, fbs.reader()); + var fbs: std.io.BufferedReader = undefined; + fbs.initFixed(&data); + var br: BitReader(T) = .init(&fbs); switch (T) { u64 => try testing.expectEqual(@as(u64, 0x08_07_06_05_04_03_02_01), br.bits), @@ -354,8 +350,9 @@ test "readFixedCode" { inline for ([_]type{ u64, u32 }) |T| { const fixed_codes = @import("huffman_encoder.zig").fixed_codes; - var fbs = std.io.fixedBufferStream(&fixed_codes); - var rdr = bitReader(T, fbs.reader()); + var fbs: std.io.BufferedReader = undefined; + fbs.initFixed(&fixed_codes); + var rdr: BitReader(T) = .init(&fbs); for (0..286) |c| { try testing.expectEqual(c, try rdr.readFixedCode()); @@ -369,8 +366,9 @@ test "u32 leaves no bits on u32 reads" { 0xff, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, }; - var fbs = std.io.fixedBufferStream(&data); - var br = bitReader(u32, fbs.reader()); + var fbs: std.io.BufferedReader = undefined; + fbs.initFixed(&data); + var br: BitReader(u32) = .init(&fbs); _ = try br.read(u3); try testing.expectEqual(29, br.nbits); @@ -396,8 +394,9 @@ test "u64 need fill after alignToByte" { }; // without fill - var fbs = std.io.fixedBufferStream(&data); - var br = bitReader(u64, fbs.reader()); + var fbs: std.io.BufferedReader = undefined; + fbs.initFixed(&data); + var br: BitReader(u64) = .init(&fbs); _ = try br.read(u23); try testing.expectEqual(41, br.nbits); br.alignToByte(); @@ -409,7 +408,7 @@ test "u64 need fill after alignToByte" { // fill after align ensures all bits filled fbs.reset(); - br = bitReader(u64, fbs.reader()); + br = .init(&fbs); _ = try br.read(u23); try testing.expectEqual(41, br.nbits); br.alignToByte(); diff --git a/lib/std/compress/lzma2.zig b/lib/std/compress/lzma2.zig index 
036a0d879b..f48c875ee8 100644 --- a/lib/std/compress/lzma2.zig +++ b/lib/std/compress/lzma2.zig @@ -3,11 +3,7 @@ const Allocator = std.mem.Allocator; pub const decode = @import("lzma2/decode.zig"); -pub fn decompress( - allocator: Allocator, - reader: anytype, - writer: anytype, -) !void { +pub fn decompress(allocator: Allocator, reader: *std.io.BufferedReader, writer: *std.io.BufferedWriter) !void { var decoder = try decode.Decoder.init(allocator); defer decoder.deinit(allocator); return decoder.decompress(allocator, reader, writer); @@ -19,11 +15,11 @@ test { 0x01, 0x00, 0x05, 0x48, 0x65, 0x6C, 0x6C, 0x6F, 0x0A, 0x02, 0x00, 0x06, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x21, 0x0A, 0x00, }; - var stream: std.io.FixedBufferStream = .{ .buffer = compressed }; - + var stream: std.io.BufferedReader = undefined; + stream.initFixed(&compressed); var decomp: std.io.AllocatingWriter = undefined; const decomp_bw = decomp.init(std.testing.allocator); defer decomp.deinit(); - try decompress(std.testing.allocator, stream.reader(), decomp_bw); + try decompress(std.testing.allocator, &stream, decomp_bw); try std.testing.expectEqualSlices(u8, expected, decomp.getWritten()); } diff --git a/lib/std/debug/Dwarf.zig b/lib/std/debug/Dwarf.zig index f5e4888599..4b25546eb2 100644 --- a/lib/std/debug/Dwarf.zig +++ b/lib/std/debug/Dwarf.zig @@ -2235,14 +2235,15 @@ pub const ElfModule = struct { const section_bytes = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size); sections[section_index.?] 
= if ((shdr.sh_flags & elf.SHF_COMPRESSED) > 0) blk: { - var section_stream: std.io.FixedBufferStream = .{ .buffer = section_bytes }; - const section_reader = section_stream.reader(); - const chdr = section_reader.readStruct(elf.Chdr) catch continue; + var section_reader: std.io.BufferedReader = undefined; + section_reader.initFixed(section_bytes); + const chdr = section_reader.takeStruct(elf.Chdr) catch continue; if (chdr.ch_type != .ZLIB) continue; + const ch_size = chdr.ch_size; - var zlib_stream = std.compress.zlib.decompressor(section_reader); + var zlib_stream = std.compress.zlib.decompressor(&section_reader); - const decompressed_section = try gpa.alloc(u8, chdr.ch_size); + const decompressed_section = try gpa.alloc(u8, ch_size); errdefer gpa.free(decompressed_section); const read = zlib_stream.reader().readAll(decompressed_section) catch continue; diff --git a/lib/std/fs/File.zig b/lib/std/fs/File.zig index 720bd9382a..5692d39ce7 100644 --- a/lib/std/fs/File.zig +++ b/lib/std/fs/File.zig @@ -1590,8 +1590,10 @@ pub fn reader(file: File) std.io.Reader { return .{ .context = handleToOpaque(file.handle), .vtable = .{ - .seekRead = reader_seekRead, + .posRead = reader_posRead, + .posReadVec = reader_posReadVec, .streamRead = reader_streamRead, + .streamReadVec = reader_streamReadVec, }, }; } @@ -1610,7 +1612,7 @@ pub fn writer(file: File) std.io.Writer { /// vectors through the underlying write calls as possible. 
const max_buffers_len = 16; -pub fn reader_seekRead( +pub fn reader_posRead( context: *anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Reader.Limit, @@ -1621,10 +1623,36 @@ pub fn reader_seekRead( return writer.writeFile(bw, file, .init(offset), len, &.{}, 0); } -pub fn reader_streamRead(context: *anyopaque, bw: *std.io.BufferedWriter, limit: std.io.Reader.Limit) anyerror!usize { +pub fn reader_posReadVec(context: *anyopaque, data: []const []u8, offset: u64) anyerror!std.io.Reader.Status { + const file = opaqueToHandle(context); + const n = try file.preadv(data, offset); + return .{ + .len = n, + .end = n == 0, + }; +} + +pub fn reader_streamRead( + context: *anyopaque, + bw: *std.io.BufferedWriter, + limit: std.io.Reader.Limit, +) anyerror!std.io.Reader.Status { const file = opaqueToHandle(context); const len: std.io.Writer.Len = if (limit.unwrap()) |l| .init(l) else .entire_file; - return writer.writeFile(bw, file, .none, len, &.{}, 0); + const n = try writer.writeFile(bw, file, .none, len, &.{}, 0); + return .{ + .len = n, + .end = n == 0, + }; +} + +pub fn reader_streamReadVec(context: *anyopaque, data: []const []u8) anyerror!std.io.Reader.Status { + const file = opaqueToHandle(context); + const n = try file.readv(data); + return .{ + .len = n, + .end = n == 0, + }; } pub fn writer_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize { @@ -1671,7 +1699,7 @@ pub fn writer_writeFile( context: *anyopaque, in_file: std.fs.File, in_offset: u64, - in_len: std.io.Writer.VTable.FileLen, + in_len: std.io.Writer.FileLen, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { @@ -1705,7 +1733,7 @@ fn writeFileUnseekable( out_fd: Handle, in_fd: Handle, in_offset: u64, - in_len: std.io.Writer.VTable.FileLen, + in_len: std.io.Writer.FileLen, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { diff --git a/lib/std/http/Client.zig b/lib/std/http/Client.zig index 664da5524c..815cad6d27 
100644 --- a/lib/std/http/Client.zig +++ b/lib/std/http/Client.zig @@ -1119,7 +1119,7 @@ pub const Request = struct { context: *anyopaque, file: std.fs.File, offset: u64, - len: std.io.Writer.VTable.FileLen, + len: std.io.Writer.FileLen, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { @@ -1159,7 +1159,7 @@ pub const Request = struct { context: *anyopaque, file: std.fs.File, offset: u64, - len: std.io.Writer.VTable.FileLen, + len: std.io.Writer.FileLen, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { diff --git a/lib/std/http/Server.zig b/lib/std/http/Server.zig index bec647739d..5d444427b3 100644 --- a/lib/std/http/Server.zig +++ b/lib/std/http/Server.zig @@ -881,7 +881,7 @@ pub const Response = struct { context: *anyopaque, file: std.fs.File, offset: u64, - len: std.io.Writer.VTable.FileLen, + len: std.io.Writer.FileLen, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { @@ -948,7 +948,7 @@ pub const Response = struct { context: *anyopaque, file: std.fs.File, offset: u64, - len: std.io.Writer.VTable.FileLen, + len: std.io.Writer.FileLen, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { diff --git a/lib/std/io.zig b/lib/std/io.zig index 02480598a5..fbb51cb5c8 100644 --- a/lib/std/io.zig +++ b/lib/std/io.zig @@ -118,7 +118,7 @@ fn null_writeFile( context: *anyopaque, file: std.fs.File, offset: u64, - len: Writer.VTable.FileLen, + len: Writer.FileLen, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { diff --git a/lib/std/io/AllocatingWriter.zig b/lib/std/io/AllocatingWriter.zig index cde8cefa84..8fd35bda4f 100644 --- a/lib/std/io/AllocatingWriter.zig +++ b/lib/std/io/AllocatingWriter.zig @@ -153,7 +153,7 @@ fn writeFile( context: *anyopaque, file: std.fs.File, offset: u64, - len: std.io.Writer.VTable.FileLen, + len: std.io.Writer.FileLen, headers_and_trailers_full: []const []const u8, headers_len_full: usize, ) 
anyerror!usize { diff --git a/lib/std/io/BufferedReader.zig b/lib/std/io/BufferedReader.zig index 0d0541b45f..7224aec495 100644 --- a/lib/std/io/BufferedReader.zig +++ b/lib/std/io/BufferedReader.zig @@ -25,7 +25,7 @@ pub fn initFixed(br: *BufferedReader, buffer: []const u8) void { .context = br, .vtable = &.{ .streamRead = null, - .seekRead = null, + .posRead = null, }, }, }; @@ -44,7 +44,9 @@ pub fn reader(br: *BufferedReader) Reader { .context = br, .vtable = &.{ .streamRead = passthru_streamRead, - .seekRead = passthru_seekRead, + .streamReadVec = passthru_streamReadVec, + .posRead = passthru_posRead, + .posReadVec = passthru_posReadVec, }, }; } @@ -65,14 +67,29 @@ fn passthru_streamRead(ctx: *anyopaque, bw: *BufferedWriter, limit: Reader.Limit return br.unbuffered_reader.streamRead(bw, limit); } -fn passthru_seekRead(ctx: *anyopaque, bw: *BufferedWriter, limit: Reader.Limit, off: u64) anyerror!Reader.Status { +fn passthru_streamReadVec(ctx: *anyopaque, data: []const []u8) anyerror!Reader.Status { + const br: *BufferedReader = @alignCast(@ptrCast(ctx)); + _ = br; + _ = data; + @panic("TODO"); +} + +fn passthru_posRead(ctx: *anyopaque, bw: *BufferedWriter, limit: Reader.Limit, off: u64) anyerror!Reader.Status { const br: *BufferedReader = @alignCast(@ptrCast(ctx)); const buffer = br.storage.buffer.items; if (off < buffer.len) { const send = buffer[off..limit.min(buffer.len)]; return bw.writeSplat(send, 1); } - return br.unbuffered_reader.seekRead(bw, limit, off - buffer.len); + return br.unbuffered_reader.posRead(bw, limit, off - buffer.len); +} + +fn passthru_posReadVec(ctx: *anyopaque, data: []const []u8, off: u64) anyerror!Reader.Status { + const br: *BufferedReader = @alignCast(@ptrCast(ctx)); + _ = br; + _ = data; + _ = off; + @panic("TODO"); } /// Returns the next `n` bytes from `unbuffered_reader`, filling the buffer as diff --git a/lib/std/io/BufferedWriter.zig b/lib/std/io/BufferedWriter.zig index aabc755073..5a22f8c0d1 100644 --- 
a/lib/std/io/BufferedWriter.zig +++ b/lib/std/io/BufferedWriter.zig @@ -10,30 +10,15 @@ const testing = std.testing; /// /// If this has capacity zero, the writer is unbuffered, and `flush` is a no-op. buffer: std.ArrayListUnmanaged(u8), -mode: union(enum) { - /// Return `error.NoSpaceLeft` if a write could not fit into the buffer. - fixed, - /// Underlying stream to send bytes to. - /// - /// A write will only be sent here if it could not fit into `buffer`, or if - /// it is a `writeFile`. - /// - /// `unbuffered_writer` may modify `buffer` if the number of bytes returned - /// equals number of bytes provided. This property is exploited by - /// `std.io.AllocatingWriter` for example. - writer: Writer, - /// If this is provided, `buffer` will grow superlinearly rather than - /// become full. - allocator: Allocator, -}, - -pub fn deinit(bw: *BufferedWriter) void { - switch (bw.mode) { - .allocator => |gpa| bw.buffer.deinit(gpa), - .fixed, .writer => {}, - } - bw.* = undefined; -} +/// Underlying stream to send bytes to. +/// +/// A write will only be sent here if it could not fit into `buffer`, or if it +/// is a `writeFile`. +/// +/// `unbuffered_writer` may modify `buffer` if the number of bytes returned +/// equals number of bytes provided. This property is exploited by +/// `std.io.AllocatingWriter` for example. +unbuffered_writer: Writer, /// Number of slices to store on the stack, when trying to send as many byte /// vectors through the underlying write calls as possible. @@ -72,23 +57,24 @@ pub fn initFixed(bw: *BufferedWriter, buffer: []u8) void { /// This function is available when using `initFixed`. pub fn getWritten(bw: *const BufferedWriter) []u8 { assert(bw.unbuffered_writer.vtable == &fixed_vtable); - return bw.buffer[0..bw.end]; + return bw.buffer.items; } /// This function is available when using `initFixed`. 
pub fn reset(bw: *BufferedWriter) void { assert(bw.unbuffered_writer.vtable == &fixed_vtable); - bw.end = 0; + bw.buffer.items.len = 0; } pub fn flush(bw: *BufferedWriter) anyerror!void { - const send_buffer = bw.buffer[0..bw.end]; + const list = &bw.buffer; + const send_buffer = list.items; try bw.unbuffered_writer.writeAll(send_buffer); - bw.end = 0; + list.items.len = 0; } pub fn unusedCapacitySlice(bw: *const BufferedWriter) []u8 { - return bw.buffer[bw.end..]; + return bw.buffer.unusedCapacitySlice(); } /// The `data` parameter is mutable because this function needs to mutate the @@ -116,11 +102,12 @@ pub fn writev(bw: *BufferedWriter, data: []const []const u8) anyerror!usize { fn passthru_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize { const bw: *BufferedWriter = @alignCast(@ptrCast(context)); - const buffer = bw.buffer; - const start_end = bw.end; + const list = &bw.buffer; + const buffer = list.allocatedSlice(); + const start_end = list.items.len; var buffers: [max_buffers_len][]const u8 = undefined; - var end = bw.end; + var end = start_end; for (data, 0..) 
|bytes, i| { const new_end = end + bytes.len; if (new_end <= buffer.len) { @@ -144,10 +131,10 @@ fn passthru_writeSplat(context: *anyopaque, data: []const []const u8, splat: usi @branchHint(.unlikely); const remainder = buffer[n..end]; std.mem.copyForwards(u8, buffer[0..remainder.len], remainder); - bw.end = remainder.len; + list.items.len = remainder.len; return end - start_end; } - bw.end = 0; + list.items.len = 0; return n - start_end; } const n = try bw.unbuffered_writer.writeSplat(send_buffers, 1); @@ -155,10 +142,10 @@ fn passthru_writeSplat(context: *anyopaque, data: []const []const u8, splat: usi @branchHint(.unlikely); const remainder = buffer[n..end]; std.mem.copyForwards(u8, buffer[0..remainder.len], remainder); - bw.end = remainder.len; + list.items.len = remainder.len; return end - start_end; } - bw.end = 0; + list.items.len = 0; return n - start_end; } @@ -168,7 +155,7 @@ fn passthru_writeSplat(context: *anyopaque, data: []const []const u8, splat: usi @branchHint(.unlikely); // It was added in the loop above; undo it here. 
end -= pattern.len; - bw.end = end; + list.items.len = end; return end - start_end; } @@ -176,7 +163,7 @@ fn passthru_writeSplat(context: *anyopaque, data: []const []const u8, splat: usi switch (pattern.len) { 0 => { - bw.end = end; + list.items.len = end; return end - start_end; }, 1 => { @@ -184,7 +171,7 @@ fn passthru_writeSplat(context: *anyopaque, data: []const []const u8, splat: usi if (new_end <= buffer.len) { @branchHint(.likely); @memset(buffer[end..new_end], pattern[0]); - bw.end = new_end; + list.items.len = new_end; return new_end - start_end; } buffers[0] = buffer[0..end]; @@ -194,10 +181,10 @@ fn passthru_writeSplat(context: *anyopaque, data: []const []const u8, splat: usi @branchHint(.unlikely); const remainder = buffer[n..end]; std.mem.copyForwards(u8, buffer[0..remainder.len], remainder); - bw.end = remainder.len; + list.items.len = remainder.len; return end - start_end; } - bw.end = 0; + list.items.len = 0; return n - start_end; }, else => { @@ -207,7 +194,7 @@ fn passthru_writeSplat(context: *anyopaque, data: []const []const u8, splat: usi while (end < new_end) : (end += pattern.len) { @memcpy(buffer[end..][0..pattern.len], pattern); } - bw.end = new_end; + list.items.len = new_end; return new_end - start_end; } buffers[0] = buffer[0..end]; @@ -217,10 +204,10 @@ fn passthru_writeSplat(context: *anyopaque, data: []const []const u8, splat: usi @branchHint(.unlikely); const remainder = buffer[n..end]; std.mem.copyForwards(u8, buffer[0..remainder.len], remainder); - bw.end = remainder.len; + list.items.len = remainder.len; return end - start_end; } - bw.end = 0; + list.items.len = 0; return n - start_end; }, } @@ -228,12 +215,14 @@ fn passthru_writeSplat(context: *anyopaque, data: []const []const u8, splat: usi fn fixed_writev(context: *anyopaque, data: []const []const u8) anyerror!usize { const bw: *BufferedWriter = @alignCast(@ptrCast(context)); + const list = &bw.buffer; // When this function is called it means the buffer got full, so it's time // 
to return an error. However, we still need to make sure all of the // available buffer has been used. const first = data[0]; - const dest = bw.buffer[bw.end..]; + const dest = list.unusedCapacitySlice(); @memcpy(dest, first[0..dest.len]); + list.items.len = list.capacity; return error.NoSpaceLeft; } @@ -242,26 +231,29 @@ fn fixed_writev(context: *anyopaque, data: []const []const u8) anyerror!usize { /// available buffer has been filled. fn fixed_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize { const bw: *BufferedWriter = @alignCast(@ptrCast(context)); + const list = &bw.buffer; for (data) |bytes| { - const dest = bw.buffer[bw.end..]; + const dest = list.unusedCapacitySlice(); if (dest.len == 0) return error.NoSpaceLeft; const len = @min(bytes.len, dest.len); @memcpy(dest[0..len], bytes[0..len]); - bw.end += len; + list.items.len += len; } const pattern = data[data.len - 1]; - const dest = bw.buffer[bw.end..]; + const dest = list.unusedCapacitySlice(); switch (pattern.len) { 0 => unreachable, 1 => @memset(dest, pattern[0]), else => for (0..splat - 1) |i| @memcpy(dest[i * pattern.len ..][0..pattern.len], pattern), } + list.items.len = list.capacity; return error.NoSpaceLeft; } pub fn write(bw: *BufferedWriter, bytes: []const u8) anyerror!usize { - const buffer = bw.buffer; - const end = bw.end; + const list = &bw.buffer; + const buffer = list.allocatedSlice(); + const end = list.items.len; const new_end = end + bytes.len; if (new_end > buffer.len) { var data: [2][]const u8 = .{ buffer[0..end], bytes }; @@ -270,14 +262,14 @@ pub fn write(bw: *BufferedWriter, bytes: []const u8) anyerror!usize { @branchHint(.unlikely); const remainder = buffer[n..end]; std.mem.copyForwards(u8, buffer[0..remainder.len], remainder); - bw.end = remainder.len; + list.items.len = remainder.len; return 0; } - bw.end = 0; + list.items.len = 0; return n - end; } @memcpy(buffer[end..new_end], bytes); - bw.end = new_end; + list.items.len = new_end; return 
bytes.len; } @@ -302,35 +294,29 @@ pub fn writeByte(bw: *BufferedWriter, byte: u8) anyerror!void { list.items.len = buffer.len + 1; return; } - switch (bw.mode) { - .fixed => return error.NoSpaceLeft, - .writer => |w| { - var buffers: [2][]const u8 = .{ buffer, &.{byte} }; - while (true) { - const n = try w.writev(&buffers); - if (n == 0) { - @branchHint(.unlikely); - continue; - } else if (n >= buffer.len) { - @branchHint(.likely); - if (n > buffer.len) { - @branchHint(.likely); - list.items.len = 0; - return; - } else { - buffer[0] = byte; - list.items.len = 1; - return; - } - } - const remainder = buffer[n..]; - std.mem.copyForwards(u8, buffer[0..remainder.len], remainder); - buffer[remainder.len] = byte; - list.items.len = remainder.len + 1; + var buffers: [2][]const u8 = .{ buffer, &.{byte} }; + while (true) { + const n = try bw.unbuffered_writer.writev(&buffers); + if (n == 0) { + @branchHint(.unlikely); + continue; + } else if (n >= buffer.len) { + @branchHint(.likely); + if (n > buffer.len) { + @branchHint(.likely); + list.items.len = 0; + return; + } else { + buffer[0] = byte; + list.items.len = 1; return; } - }, - .allocator => |gpa| try list.append(gpa, byte), + } + const remainder = buffer[n..]; + std.mem.copyForwards(u8, buffer[0..remainder.len], remainder); + buffer[remainder.len] = byte; + list.items.len = remainder.len + 1; + return; } } @@ -395,7 +381,7 @@ pub fn writeFile( bw: *BufferedWriter, file: std.fs.File, offset: u64, - len: Writer.VTable.FileLen, + len: Writer.FileLen, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { @@ -406,14 +392,15 @@ fn passthru_writeFile( context: *anyopaque, file: std.fs.File, offset: u64, - len: Writer.VTable.FileLen, + len: Writer.FileLen, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { const bw: *BufferedWriter = @alignCast(@ptrCast(context)); - const buffer = bw.buffer; + const list = &bw.buffer; + const buffer = list.allocatedSlice(); if 
(buffer.len == 0) return bw.unbuffered_writer.writeFile(file, offset, len, headers_and_trailers, headers_len); - const start_end = bw.end; + const start_end = list.items.len; const headers = headers_and_trailers[0..headers_len]; const trailers = headers_and_trailers[headers_len..]; var buffers: [max_buffers_len][]const u8 = undefined; @@ -443,10 +430,10 @@ fn passthru_writeFile( @branchHint(.unlikely); const remainder = buffer[n..end]; std.mem.copyForwards(u8, buffer[0..remainder.len], remainder); - bw.end = remainder.len; + list.items.len = remainder.len; return end - start_end; } - bw.end = 0; + list.items.len = 0; return n - start_end; } // Have not made it past the headers yet; must call `writev`. @@ -455,10 +442,10 @@ fn passthru_writeFile( @branchHint(.unlikely); const remainder = buffer[n..end]; std.mem.copyForwards(u8, buffer[0..remainder.len], remainder); - bw.end = remainder.len; + list.items.len = remainder.len; return end - start_end; } - bw.end = 0; + list.items.len = 0; return n - start_end; } // All headers written to buffer. @@ -473,10 +460,10 @@ fn passthru_writeFile( @branchHint(.unlikely); const remainder = buffer[n..end]; std.mem.copyForwards(u8, buffer[0..remainder.len], remainder); - bw.end = remainder.len; + list.items.len = remainder.len; return end - start_end; } - bw.end = 0; + list.items.len = 0; return n - start_end; } @@ -484,7 +471,7 @@ pub const WriteFileOptions = struct { offset: u64 = 0, /// If the size of the source file is known, it is likely that passing the /// size here will save one syscall. - len: Writer.VTable.FileLen = .entire_file, + len: Writer.FileLen = .entire_file, /// Headers and trailers must be passed together so that in case `len` is /// zero, they can be forwarded directly to `Writer.VTable.writev`. 
/// diff --git a/lib/std/io/CountingWriter.zig b/lib/std/io/CountingWriter.zig index fcc9fdfb62..f421b0d217 100644 --- a/lib/std/io/CountingWriter.zig +++ b/lib/std/io/CountingWriter.zig @@ -33,7 +33,7 @@ fn passthru_writeFile( context: *anyopaque, file: std.fs.File, offset: u64, - len: Writer.VTable.FileLen, + len: Writer.FileLen, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { diff --git a/lib/std/io/Reader.zig b/lib/std/io/Reader.zig index f7e3fde9ac..fbb124c0a3 100644 --- a/lib/std/io/Reader.zig +++ b/lib/std/io/Reader.zig @@ -19,10 +19,11 @@ pub const VTable = struct { /// /// If this is `null` it is equivalent to always returning /// `error.Unseekable`. - seekRead: ?*const fn (ctx: *anyopaque, bw: *std.io.BufferedWriter, limit: Limit, offset: u64) anyerror!Status, + posRead: ?*const fn (ctx: *anyopaque, bw: *std.io.BufferedWriter, limit: Limit, offset: u64) anyerror!Status, + posReadVec: ?*const fn (ctx: *anyopaque, data: []const []u8, offset: u64) anyerror!Status, /// Writes bytes from the internally tracked stream position to `bw`, or - /// returns `error.Unstreamable`, indicating `seekRead` should be used + /// returns `error.Unstreamable`, indicating `posRead` should be used /// instead. /// /// Returns the number of bytes written, which will be at minimum `0` and at @@ -37,9 +38,10 @@ pub const VTable = struct { /// If this is `null` it is equivalent to always returning /// `error.Unstreamable`. streamRead: ?*const fn (ctx: *anyopaque, bw: *std.io.BufferedWriter, limit: Limit) anyerror!Status, + streamReadVec: ?*const fn (ctx: *anyopaque, data: []const []u8) anyerror!Status, }; -pub const Len = @Type(.{ .signedness = .unsigned, .bits = @bitSizeOf(usize) - 1 }); +pub const Len = @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(usize) - 1 } }); pub const Status = packed struct(usize) { /// Number of bytes that were written to `writer`. 
@@ -56,7 +58,7 @@ pub const Limit = enum(usize) { /// Returns total number of bytes written to `w`. pub fn readAll(r: Reader, w: *std.io.BufferedWriter) anyerror!usize { if (r.vtable.pread != null) { - return seekReadAll(r, w) catch |err| switch (err) { + return posReadAll(r, w) catch |err| switch (err) { error.Unseekable => {}, else => return err, }; @@ -68,11 +70,11 @@ pub fn readAll(r: Reader, w: *std.io.BufferedWriter) anyerror!usize { /// /// May return `error.Unseekable`, indicating this function cannot be used to /// read from the reader. -pub fn seekReadAll(r: Reader, w: *std.io.BufferedWriter, start_offset: u64) anyerror!usize { - const vtable_seekRead = r.vtable.seekRead.?; +pub fn posReadAll(r: Reader, w: *std.io.BufferedWriter, start_offset: u64) anyerror!usize { + const vtable_posRead = r.vtable.posRead.?; var offset: u64 = start_offset; while (true) { - const status = try vtable_seekRead(r.context, w, .none, offset); + const status = try vtable_posRead(r.context, w, .none, offset); offset += status.len; if (status.end) return @intCast(offset - start_offset); } @@ -130,6 +132,10 @@ pub fn buffered(r: Reader, buffer: []u8) std.io.BufferedReader { }; } +pub fn unbuffered(r: Reader) std.io.BufferedReader { + return buffered(r, &.{}); +} + pub fn allocating(r: Reader, gpa: std.mem.Allocator) std.io.BufferedReader { return .{ .reader = r, @@ -140,10 +146,6 @@ pub fn allocating(r: Reader, gpa: std.mem.Allocator) std.io.BufferedReader { }; } -pub fn unbuffered(r: Reader) std.io.BufferedReader { - return buffered(r, &.{}); -} - test "when the backing reader provides one byte at a time" { const OneByteReader = struct { str: []const u8, diff --git a/lib/std/io/Writer.zig b/lib/std/io/Writer.zig index aed2f23036..e69ebad04b 100644 --- a/lib/std/io/Writer.zig +++ b/lib/std/io/Writer.zig @@ -80,7 +80,7 @@ pub fn writeFile( w: Writer, file: std.fs.File, offset: u64, - len: VTable.FileLen, + len: FileLen, headers_and_trailers: []const []const u8, headers_len: usize, 
) anyerror!usize { @@ -91,7 +91,7 @@ pub fn unimplemented_writeFile( context: *anyopaque, file: std.fs.File, offset: u64, - len: VTable.FileLen, + len: FileLen, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize { @@ -107,7 +107,7 @@ pub fn unimplemented_writeFile( pub fn buffered(w: Writer, buffer: []u8) std.io.BufferedWriter { return .{ .buffer = .initBuffer(buffer), - .mode = .{ .writer = w }, + .unbuffered_writer = w, }; } diff --git a/lib/std/net.zig b/lib/std/net.zig index 3f3aaeaee7..24a07db179 100644 --- a/lib/std/net.zig +++ b/lib/std/net.zig @@ -2011,7 +2011,7 @@ pub const Stream = struct { context: *anyopaque, in_file: std.fs.File, in_offset: u64, - in_len: std.io.Writer.VTable.FileLen, + in_len: std.io.Writer.FileLen, headers_and_trailers: []const []const u8, headers_len: usize, ) anyerror!usize {