std.io.BufferedWriter: don't use ArrayList for this

It's the wrong abstraction for this purpose, and using it this way is only a footgun.
This commit is contained in:
Andrew Kelley 2025-04-11 00:06:54 -07:00
parent 4ee2534566
commit 383afd19d7
7 changed files with 188 additions and 175 deletions

View File

@ -856,7 +856,7 @@ pub fn count(comptime fmt: []const u8, args: anytype) usize {
var buffer: [std.atomic.cache_line]u8 = undefined;
var bw = std.io.Writer.null.buffered(&buffer);
bw.print(fmt, args) catch unreachable;
return bw.bytes_written;
return bw.count;
}
pub const AllocPrintError = error{OutOfMemory};

View File

@ -27,19 +27,27 @@ const vtable: std.io.Writer.VTable = .{
};
/// Sets the `AllocatingWriter` to an empty state.
pub fn init(aw: *AllocatingWriter, allocator: std.mem.Allocator) *std.io.BufferedWriter {
pub fn init(aw: *AllocatingWriter, allocator: std.mem.Allocator) void {
initOwnedSlice(aw, allocator, &.{});
}
pub fn initCapacity(aw: *AllocatingWriter, allocator: std.mem.Allocator, capacity: usize) error{OutOfMemory}!void {
const initial_buffer = try allocator.alloc(u8, capacity);
initOwnedSlice(aw, allocator, initial_buffer);
}
pub fn initOwnedSlice(aw: *AllocatingWriter, allocator: std.mem.Allocator, slice: []u8) void {
aw.* = .{
.written = &.{},
.written = slice[0..0],
.allocator = allocator,
.buffered_writer = .{
.unbuffered_writer = .{
.context = aw,
.vtable = &vtable,
},
.buffer = &.{},
.buffer = slice,
},
};
return &aw.buffered_writer;
}
pub fn deinit(aw: *AllocatingWriter) void {
@ -118,7 +126,7 @@ pub fn clearRetainingCapacity(aw: *AllocatingWriter) void {
aw.written.len = 0;
}
fn writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
const aw: *AllocatingWriter = @alignCast(@ptrCast(context));
const start_len = aw.written.len;
const bw = &aw.buffered_writer;
@ -150,9 +158,9 @@ fn appendPatternAssumeCapacity(list: *std.ArrayListUnmanaged(u8), pattern: []con
}
fn writeFile(
context: *anyopaque,
context: ?*anyopaque,
file: std.fs.File,
offset: u64,
offset: std.io.Writer.Offset,
len: std.io.Writer.FileLen,
headers_and_trailers_full: []const []const u8,
headers_len_full: usize,
@ -168,13 +176,14 @@ fn writeFile(
break :b .{ headers_and_trailers_full[1..], headers_len_full - 1 };
} else .{ headers_and_trailers_full, headers_len_full };
const trailers = headers_and_trailers[headers_len..];
const pos = offset.toInt() orelse @panic("TODO treat file as stream");
if (len == .entire_file) {
var new_capacity: usize = list.capacity + std.atomic.cache_line;
for (headers_and_trailers) |bytes| new_capacity += bytes.len;
try list.ensureTotalCapacity(gpa, new_capacity);
for (headers_and_trailers[0..headers_len]) |bytes| list.appendSliceAssumeCapacity(bytes);
const dest = list.items.ptr[list.items.len..list.capacity];
const n = try file.pread(dest, offset);
const n = try file.pread(dest, pos);
if (n == 0) {
new_capacity = list.capacity;
for (trailers) |bytes| new_capacity += bytes.len;
@ -190,7 +199,7 @@ fn writeFile(
try list.ensureTotalCapacity(gpa, new_capacity);
for (headers_and_trailers[0..headers_len]) |bytes| list.appendSliceAssumeCapacity(bytes);
const dest = list.items.ptr[list.items.len..][0..len.int()];
const n = try file.pread(dest, offset);
const n = try file.pread(dest, pos);
list.items.len += n;
if (n < dest.len) {
return list.items.len - start_len;
@ -201,8 +210,9 @@ fn writeFile(
test AllocatingWriter {
var aw: AllocatingWriter = undefined;
const bw = aw.init(std.testing.allocator);
aw.init(std.testing.allocator);
defer aw.deinit();
const bw = &aw.buffered_writer;
const x: i32 = 42;
const y: i32 = 1234;

View File

@ -74,7 +74,7 @@ pub fn initFixed(br: *BufferedReader, buffer: []const u8) void {
br.* = .{
.seek = 0,
.storage = .{
.buffer = .fromOwnedSlice(@constCast(buffer)),
.buffer = @constCast(buffer),
.unbuffered_writer = .{
.context = undefined,
.vtable = &eof_writer,
@ -88,9 +88,10 @@ pub fn initFixed(br: *BufferedReader, buffer: []const u8) void {
}
pub fn storageBuffer(br: *BufferedReader) []u8 {
assert(br.storage.unbuffered_writer.vtable == &eof_writer);
const storage = &br.storage;
assert(storage.unbuffered_writer.vtable == &eof_writer);
assert(br.unbuffered_reader.vtable == &eof_reader);
return br.storage.buffer.allocatedSlice();
return storage.buffer;
}
/// Although `BufferedReader` can easily satisfy the `Reader` interface, it's
@ -108,7 +109,8 @@ pub fn reader(br: *BufferedReader) Reader {
fn passthru_read(ctx: ?*anyopaque, bw: *BufferedWriter, limit: Reader.Limit) anyerror!Reader.RwResult {
const br: *BufferedReader = @alignCast(@ptrCast(ctx));
const buffer = br.storage.buffer.items;
const storage = &br.storage;
const buffer = storage.buffer[0..storage.end];
const buffered = buffer[br.seek..];
const limited = buffered[0..limit.min(buffered.len)];
if (limited.len > 0) {
@ -135,7 +137,7 @@ pub fn seekBy(br: *BufferedReader, seek_by: i64) anyerror!void {
}
pub fn seekBackwardBy(br: *BufferedReader, seek_by: u64) anyerror!void {
if (seek_by > br.storage.buffer.items.len - br.seek) return error.Unseekable; // TODO
if (seek_by > br.storage.end - br.seek) return error.Unseekable; // TODO
br.seek += @abs(seek_by);
}
@ -178,10 +180,10 @@ pub fn peek(br: *BufferedReader, n: usize) anyerror![]u8 {
/// * `peek`
/// * `toss`
pub fn peekAll(br: *BufferedReader, n: usize) anyerror![]u8 {
const list = &br.storage.buffer;
assert(n <= list.capacity);
const storage = &br.storage;
assert(n <= storage.buffer.len);
try br.fill(n);
return list.items[br.seek..];
return storage.buffer[br.seek..storage.end];
}
/// Skips the next `n` bytes from the stream, advancing the seek position. This
@ -194,7 +196,7 @@ pub fn peekAll(br: *BufferedReader, n: usize) anyerror![]u8 {
/// * `discard`.
pub fn toss(br: *BufferedReader, n: usize) void {
br.seek += n;
assert(br.seek <= br.storage.buffer.items.len);
assert(br.seek <= br.storage.end);
}
/// Equivalent to `peek` + `toss`.
@ -245,22 +247,22 @@ pub fn discard(br: *BufferedReader, n: usize) anyerror!void {
/// * `toss`
/// * `discardUntilEnd`
pub fn discardUpTo(br: *BufferedReader, n: usize) anyerror!usize {
const list = &br.storage.buffer;
const storage = &br.storage;
var remaining = n;
while (remaining > 0) {
const proposed_seek = br.seek + remaining;
if (proposed_seek <= list.items.len) {
if (proposed_seek <= storage.end) {
br.seek = proposed_seek;
return;
}
remaining -= (list.items.len - br.seek);
list.items.len = 0;
remaining -= (storage.end - br.seek);
storage.end = 0;
br.seek = 0;
const result = try br.unbuffered_reader.read(&br.storage, .none);
const result = try br.unbuffered_reader.read(&storage, .none);
result.write_err catch unreachable;
try result.read_err;
assert(result.len == list.items.len);
if (remaining <= list.items.len) continue;
assert(result.len == storage.end);
if (remaining <= storage.end) continue;
if (result.end) return n - remaining;
}
}
@ -268,9 +270,9 @@ pub fn discardUpTo(br: *BufferedReader, n: usize) anyerror!usize {
/// Reads the stream until the end, ignoring all the data.
/// Returns the number of bytes discarded.
pub fn discardUntilEnd(br: *BufferedReader) anyerror!usize {
const list = &br.storage.buffer;
var total: usize = list.items.len;
list.items.len = 0;
const storage = &br.storage;
var total: usize = storage.end;
storage.end = 0;
total += try br.unbuffered_reader.discardUntilEnd();
return total;
}
@ -286,8 +288,8 @@ pub fn discardUntilEnd(br: *BufferedReader) anyerror!usize {
/// See also:
/// * `peek`
pub fn read(br: *BufferedReader, buffer: []u8) anyerror!void {
const list = &br.storage.buffer;
const in_buffer = list.items;
const storage = &br.storage;
const in_buffer = storage.buffer[0..storage.end];
const seek = br.seek;
const proposed_seek = seek + in_buffer.len;
if (proposed_seek <= in_buffer.len) {
@ -296,21 +298,21 @@ pub fn read(br: *BufferedReader, buffer: []u8) anyerror!void {
return;
}
@memcpy(buffer[0..in_buffer.len], in_buffer);
list.items.len = 0;
storage.end = 0;
br.seek = 0;
var i: usize = in_buffer.len;
while (true) {
const status = try br.unbuffered_reader.read(&br.storage, .none);
const next_i = i + list.items.len;
const status = try br.unbuffered_reader.read(storage, .none);
const next_i = i + storage.end;
if (next_i >= buffer.len) {
const remaining = buffer[i..];
@memcpy(remaining, list.items[0..remaining.len]);
@memcpy(remaining, storage.buffer[0..remaining.len]);
br.seek = remaining.len;
return;
}
if (status.end) return error.EndOfStream;
@memcpy(buffer[i..next_i], list.items);
list.items.len = 0;
@memcpy(buffer[i..next_i], storage.buffer[0..storage.end]);
storage.end = 0;
i = next_i;
}
}
@ -347,8 +349,8 @@ pub fn takeDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8
}
pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
const list = &br.storage.buffer;
const buffer = list.items;
const storage = &br.storage;
const buffer = storage.buffer[0..storage.end];
const seek = br.seek;
if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| {
@branchHint(.likely);
@ -357,15 +359,15 @@ pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8
const remainder = buffer[seek..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
var i = remainder.len;
list.items.len = i;
storage.end = i;
br.seek = 0;
while (i < list.capacity) {
const status = try br.unbuffered_reader.read(&br.storage, .none);
if (std.mem.indexOfScalarPos(u8, list.items, i, delimiter)) |end| {
return list.items[0 .. end + 1];
while (i < storage.buffer.len) {
const status = try br.unbuffered_reader.read(storage, .none);
if (std.mem.indexOfScalarPos(u8, storage.buffer[0..storage.end], i, delimiter)) |end| {
return storage.buffer[0 .. end + 1];
}
if (status.end) return error.EndOfStream;
i = list.items.len;
i = storage.end;
}
return error.StreamTooLong;
}
@ -392,8 +394,8 @@ pub fn takeDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8
}
pub fn peekDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
const list = &br.storage.buffer;
const buffer = list.items;
const storage = &br.storage;
const buffer = storage.buffer[0..storage.end];
const seek = br.seek;
if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| {
@branchHint(.likely);
@ -402,15 +404,15 @@ pub fn peekDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8
const remainder = buffer[seek..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
var i = remainder.len;
list.items.len = i;
storage.end = i;
br.seek = 0;
while (i < list.capacity) {
const status = try br.unbuffered_reader.read(&br.storage, .none);
if (std.mem.indexOfScalarPos(u8, list.items, i, delimiter)) |end| {
return list.items[0 .. end + 1];
while (i < storage.buffer.len) {
const status = try br.unbuffered_reader.read(storage, .none);
if (std.mem.indexOfScalarPos(u8, storage.buffer[0..storage.end], i, delimiter)) |end| {
return storage.buffer[0 .. end + 1];
}
if (status.end) return list.items;
i = list.items.len;
if (status.end) return storage.buffer[0..storage.end];
i = storage.end;
}
return error.StreamTooLong;
}
@ -490,9 +492,9 @@ pub fn discardDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror!vo
///
/// Asserts buffer capacity is at least `n`.
pub fn fill(br: *BufferedReader, n: usize) anyerror!void {
assert(n <= br.storage.buffer.capacity);
const list = &br.storage.buffer;
const buffer = list.items;
const storage = &br.storage;
assert(n <= storage.buffer.len);
const buffer = storage.buffer[0..storage.end];
const seek = br.seek;
if (seek + n <= buffer.len) {
@branchHint(.likely);
@ -500,18 +502,19 @@ pub fn fill(br: *BufferedReader, n: usize) anyerror!void {
}
const remainder = buffer[seek..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
storage.end = remainder.len;
br.seek = 0;
while (true) {
const status = try br.unbuffered_reader.read(&br.storage, .none);
if (n <= list.items.len) return;
const status = try br.unbuffered_reader.read(storage, .none);
if (n <= storage.end) return;
if (status.end) return error.EndOfStream;
}
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
pub fn takeByte(br: *BufferedReader) anyerror!u8 {
const buffer = br.storage.buffer.items;
const storage = &br.storage;
const buffer = storage.buffer[0..storage.end];
const seek = br.seek;
if (seek >= buffer.len) {
@branchHint(.unlikely);

View File

@ -6,10 +6,6 @@ const Writer = std.io.Writer;
const Allocator = std.mem.Allocator;
const testing = std.testing;
/// User-provided storage that must outlive this `BufferedWriter`.
///
/// If this has capacity zero, the writer is unbuffered, and `flush` is a no-op.
buffer: std.ArrayListUnmanaged(u8),
/// Underlying stream to send bytes to.
///
/// A write will only be sent here if it could not fit into `buffer`, or if it
@ -19,10 +15,14 @@ buffer: std.ArrayListUnmanaged(u8),
/// equals number of bytes provided. This property is exploited by
/// `std.io.AllocatingWriter` for example.
unbuffered_writer: Writer,
/// If this has length zero, the writer is unbuffered, and `flush` is a no-op.
buffer: []u8,
/// Marks the end of `buffer` - before this are buffered bytes, after this is
/// undefined.
end: usize = 0,
/// Tracks total number of bytes written to this `BufferedWriter`. This value
/// only increases. In the case of fixed mode, this value always equals
/// `buffer.items.len`.
bytes_written: usize = 0,
/// only increases. In the case of fixed mode, this value always equals `end`.
count: usize = 0,
/// Number of slices to store on the stack, when trying to send as many byte
/// vectors through the underlying write calls as possible.
@ -46,71 +46,72 @@ const fixed_vtable: Writer.VTable = .{
.writeFile = Writer.unimplemented_writeFile,
};
/// Replaces the `BufferedWriter` with a new one that writes to `buffer` and
/// returns `error.NoSpaceLeft` when it is full.
/// Replaces the `BufferedWriter` with one that writes to `buffer` and returns
/// `error.NoSpaceLeft` when it is full. `end` and `count` will always be
/// equal.
pub fn initFixed(bw: *BufferedWriter, buffer: []u8) void {
bw.* = .{
.unbuffered_writer = .{
.context = bw,
.vtable = &fixed_vtable,
},
.buffer = .initBuffer(buffer),
.buffer = buffer,
};
}
/// This function is available when using `initFixed`.
pub fn getWritten(bw: *const BufferedWriter) []u8 {
assert(bw.unbuffered_writer.vtable == &fixed_vtable);
return bw.buffer.items;
return bw.buffer[0..bw.end];
}
/// This function is available when using `initFixed`.
pub fn reset(bw: *BufferedWriter) void {
assert(bw.unbuffered_writer.vtable == &fixed_vtable);
bw.buffer.items.len = 0;
bw.end = 0;
bw.count = 0;
}
pub fn flush(bw: *BufferedWriter) anyerror!void {
const list = &bw.buffer;
const send_buffer = list.items;
const send_buffer = bw.buffer[0..bw.end];
var index: usize = 0;
while (index < send_buffer.len) index += try bw.unbuffered_writer.writev(&.{send_buffer[index..]});
list.items.len = 0;
bw.end = 0;
}
pub fn unusedCapacitySlice(bw: *const BufferedWriter) []u8 {
return bw.buffer.unusedCapacitySlice();
return bw.buffer[bw.end..];
}
/// Asserts the provided buffer has total capacity enough for `minimum_length`.
pub fn writableSlice(bw: *BufferedWriter, minimum_length: usize) anyerror![]u8 {
const list = &bw.buffer;
assert(list.capacity >= minimum_length);
const cap_slice = list.unusedCapacitySlice();
assert(bw.buffer.len >= minimum_length);
const cap_slice = bw.buffer[bw.end..];
if (cap_slice.len >= minimum_length) {
@branchHint(.likely);
return cap_slice;
}
const buffer = list.items;
const buffer = bw.buffer[0..bw.end];
const n = try bw.unbuffered_writer.write(buffer);
if (n == buffer.len) {
@branchHint(.likely);
list.items.len = 0;
return list.unusedCapacitySlice();
bw.end = 0;
return bw.buffer;
}
if (n > 0) {
const remainder = buffer[n..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
bw.end = remainder.len;
}
return list.unusedCapacitySlice();
return bw.buffer[bw.end..];
}
/// After calling `writableSlice`, this function tracks how many bytes were written to it.
pub fn advance(bw: *BufferedWriter, n: usize) void {
const list = &bw.buffer;
list.items.len += n;
assert(list.items.len <= list.capacity);
bw.bytes_written += n;
const new_end = bw.end + n;
assert(new_end <= bw.buffer.len);
bw.end = new_end;
bw.count += n;
}
/// The `data` parameter is mutable because this function needs to mutate the
@ -139,9 +140,8 @@ pub fn writev(bw: *BufferedWriter, data: []const []const u8) anyerror!usize {
fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
const bw: *BufferedWriter = @alignCast(@ptrCast(context));
const list = &bw.buffer;
const buffer = list.allocatedSlice();
const start_end = list.items.len;
const buffer = bw.buffer;
const start_end = bw.end;
var buffers: [max_buffers_len][]const u8 = undefined;
var end = start_end;
@ -153,7 +153,7 @@ fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: us
end = new_end;
continue;
}
if (end == 0) return track(&bw.bytes_written, try bw.unbuffered_writer.writeSplat(data, splat));
if (end == 0) return track(&bw.count, try bw.unbuffered_writer.writeSplat(data, splat));
buffers[0] = buffer[0..end];
const remaining_data = data[i..];
const remaining_buffers = buffers[1..];
@ -168,22 +168,22 @@ fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: us
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
return track(&bw.bytes_written, end - start_end);
bw.end = remainder.len;
return track(&bw.count, end - start_end);
}
list.items.len = 0;
return track(&bw.bytes_written, n - start_end);
bw.end = 0;
return track(&bw.count, n - start_end);
}
const n = try bw.unbuffered_writer.writeSplat(send_buffers, 1);
if (n < end) {
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
return track(&bw.bytes_written, end - start_end);
bw.end = remainder.len;
return track(&bw.count, end - start_end);
}
list.items.len = 0;
return track(&bw.bytes_written, n - start_end);
bw.end = 0;
return track(&bw.count, n - start_end);
}
const pattern = data[data.len - 1];
@ -192,24 +192,24 @@ fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: us
@branchHint(.unlikely);
// It was added in the loop above; undo it here.
end -= pattern.len;
list.items.len = end;
return track(&bw.bytes_written, end - start_end);
bw.end = end;
return track(&bw.count, end - start_end);
}
const remaining_splat = splat - 1;
switch (pattern.len) {
0 => {
list.items.len = end;
return track(&bw.bytes_written, end - start_end);
bw.end = end;
return track(&bw.count, end - start_end);
},
1 => {
const new_end = end + remaining_splat;
if (new_end <= buffer.len) {
@branchHint(.likely);
@memset(buffer[end..new_end], pattern[0]);
list.items.len = new_end;
return track(&bw.bytes_written, new_end - start_end);
bw.end = new_end;
return track(&bw.count, new_end - start_end);
}
buffers[0] = buffer[0..end];
buffers[1] = pattern;
@ -218,11 +218,11 @@ fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: us
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
return track(&bw.bytes_written, end - start_end);
bw.end = remainder.len;
return track(&bw.count, end - start_end);
}
list.items.len = 0;
return track(&bw.bytes_written, n - start_end);
bw.end = 0;
return track(&bw.count, n - start_end);
},
else => {
const new_end = end + pattern.len * remaining_splat;
@ -231,8 +231,8 @@ fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: us
while (end < new_end) : (end += pattern.len) {
@memcpy(buffer[end..][0..pattern.len], pattern);
}
list.items.len = new_end;
return track(&bw.bytes_written, new_end - start_end);
bw.end = new_end;
return track(&bw.count, new_end - start_end);
}
buffers[0] = buffer[0..end];
buffers[1] = pattern;
@ -241,17 +241,17 @@ fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: us
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
return track(&bw.bytes_written, end - start_end);
bw.end = remainder.len;
return track(&bw.count, end - start_end);
}
list.items.len = 0;
return track(&bw.bytes_written, n - start_end);
bw.end = 0;
return track(&bw.count, n - start_end);
},
}
}
fn track(bytes_written: *usize, n: usize) usize {
bytes_written.* += n;
fn track(count: *usize, n: usize) usize {
count.* += n;
return n;
}
@ -260,31 +260,29 @@ fn track(bytes_written: *usize, n: usize) usize {
/// available buffer has been filled.
fn fixed_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
const bw: *BufferedWriter = @alignCast(@ptrCast(context));
const list = &bw.buffer;
for (data) |bytes| {
const dest = list.unusedCapacitySlice();
const dest = bw.buffer[bw.end..];
if (dest.len == 0) return error.NoSpaceLeft;
const len = @min(bytes.len, dest.len);
@memcpy(dest[0..len], bytes[0..len]);
list.items.len += len;
bw.bytes_written = list.items.len;
bw.end += len;
bw.count = bw.end;
}
const pattern = data[data.len - 1];
const dest = list.unusedCapacitySlice();
const dest = bw.buffer[bw.end..];
switch (pattern.len) {
0 => unreachable,
1 => @memset(dest, pattern[0]),
else => for (0..splat - 1) |i| @memcpy(dest[i * pattern.len ..][0..pattern.len], pattern),
}
list.items.len = list.capacity;
bw.bytes_written = list.items.len;
bw.end = bw.buffer.len;
bw.count = bw.end;
return error.NoSpaceLeft;
}
pub fn write(bw: *BufferedWriter, bytes: []const u8) anyerror!usize {
const list = &bw.buffer;
const buffer = list.allocatedSlice();
const end = list.items.len;
const buffer = bw.buffer;
const end = bw.end;
const new_end = end + bytes.len;
if (new_end > buffer.len) {
var data: [2][]const u8 = .{ buffer[0..end], bytes };
@ -293,15 +291,15 @@ pub fn write(bw: *BufferedWriter, bytes: []const u8) anyerror!usize {
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
bw.end = remainder.len;
return 0;
}
list.items.len = 0;
return track(&bw.bytes_written, n - end);
bw.end = 0;
return track(&bw.count, n - end);
}
@memcpy(buffer[end..new_end], bytes);
list.items.len = new_end;
return track(&bw.bytes_written, bytes.len);
bw.end = new_end;
return track(&bw.count, bytes.len);
}
/// Calls `write` as many times as necessary such that all of `bytes` are
@ -316,13 +314,12 @@ pub fn print(bw: *BufferedWriter, comptime format: []const u8, args: anytype) an
}
pub fn writeByte(bw: *BufferedWriter, byte: u8) anyerror!void {
const list = &bw.buffer;
const buffer = list.items;
if (buffer.len < list.capacity) {
const buffer = bw.buffer[0..bw.end];
if (buffer.len < bw.buffer.len) {
@branchHint(.likely);
buffer.ptr[buffer.len] = byte;
list.items.len = buffer.len + 1;
bw.bytes_written += 1;
bw.end = buffer.len + 1;
bw.count += 1;
return;
}
var buffers: [2][]const u8 = .{ buffer, &.{byte} };
@ -332,23 +329,23 @@ pub fn writeByte(bw: *BufferedWriter, byte: u8) anyerror!void {
@branchHint(.unlikely);
continue;
}
bw.bytes_written += 1;
bw.count += 1;
if (n >= buffer.len) {
@branchHint(.likely);
if (n > buffer.len) {
@branchHint(.likely);
list.items.len = 0;
bw.end = 0;
return;
} else {
buffer[0] = byte;
list.items.len = 1;
bw.end = 1;
return;
}
}
const remainder = buffer[n..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
buffer[remainder.len] = byte;
list.items.len = remainder.len + 1;
bw.end = remainder.len + 1;
return;
}
}
@ -430,13 +427,12 @@ fn passthru_writeFile(
headers_len: usize,
) anyerror!usize {
const bw: *BufferedWriter = @alignCast(@ptrCast(context));
const list = &bw.buffer;
const buffer = list.allocatedSlice();
const buffer = bw.buffer;
if (buffer.len == 0) return track(
&bw.bytes_written,
&bw.count,
try bw.unbuffered_writer.writeFile(file, offset, len, headers_and_trailers, headers_len),
);
const start_end = list.items.len;
const start_end = bw.end;
const headers = headers_and_trailers[0..headers_len];
const trailers = headers_and_trailers[headers_len..];
var buffers: [max_buffers_len][]const u8 = undefined;
@ -466,11 +462,11 @@ fn passthru_writeFile(
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
return track(&bw.bytes_written, end - start_end);
bw.end = remainder.len;
return track(&bw.count, end - start_end);
}
list.items.len = 0;
return track(&bw.bytes_written, n - start_end);
bw.end = 0;
return track(&bw.count, n - start_end);
}
// Have not made it past the headers yet; must call `writev`.
const n = try bw.unbuffered_writer.writev(buffers[0 .. buffers_len + 1]);
@ -478,11 +474,11 @@ fn passthru_writeFile(
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
return track(&bw.bytes_written, end - start_end);
bw.end = remainder.len;
return track(&bw.count, end - start_end);
}
list.items.len = 0;
return track(&bw.bytes_written, n - start_end);
bw.end = 0;
return track(&bw.count, n - start_end);
}
// All headers written to buffer.
buffers[0] = buffer[0..end];
@ -496,11 +492,11 @@ fn passthru_writeFile(
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
return track(&bw.bytes_written, end - start_end);
bw.end = remainder.len;
return track(&bw.count, end - start_end);
}
list.items.len = 0;
return track(&bw.bytes_written, n - start_end);
bw.end = 0;
return track(&bw.count, n - start_end);
}
pub const WriteFileOptions = struct {

View File

@ -112,7 +112,7 @@ pub fn unimplemented_writeFile(
pub fn buffered(w: Writer, buffer: []u8) std.io.BufferedWriter {
return .{
.buffer = .initBuffer(buffer),
.buffer = buffer,
.unbuffered_writer = w,
};
}

View File

@ -420,7 +420,8 @@ test "write files" {
const root = "root";
var output: std.io.AllocatingWriter = undefined;
var wrt: Writer = .{ .underlying_writer = output.init(testing.allocator) };
output.init(testing.allocator);
var wrt: Writer = .{ .underlying_writer = &output.buffered_writer };
defer output.deinit();
try wrt.setRoot(root);
for (files) |file|
@ -456,7 +457,8 @@ test "write files" {
// without root
{
var output: std.io.AllocatingWriter = undefined;
var wrt: Writer = .{ .underlying_writer = output.init(testing.allocator) };
output.init(testing.allocator);
var wrt: Writer = .{ .underlying_writer = &output.buffered_writer };
defer output.deinit();
for (files) |file| {
var content = std.io.fixedBufferStream(file.content);

View File

@ -638,10 +638,12 @@ const Parser = struct {
const pointer = @typeInfo(T).pointer;
var size_hint = ZonGen.strLitSizeHint(self.ast, ast_node);
if (pointer.sentinel() != null) size_hint += 1;
const gpa = self.gpa;
var buf: std.ArrayListUnmanaged(u8) = try .initCapacity(self.gpa, size_hint);
defer buf.deinit(self.gpa);
switch (try ZonGen.parseStrLit(self.ast, ast_node, buf.writer(self.gpa))) {
var aw: std.io.AllocatingWriter = undefined;
try aw.initCapacity(gpa, size_hint);
defer aw.deinit();
switch (try ZonGen.parseStrLit(self.ast, ast_node, &aw.buffered_writer)) {
.success => {},
.failure => |err| {
const token = self.ast.nodeMainToken(ast_node);
@ -660,9 +662,9 @@ const Parser = struct {
}
if (pointer.sentinel() != null) {
return buf.toOwnedSliceSentinel(self.gpa, 0);
return aw.toOwnedSliceSentinel(gpa, 0);
} else {
return buf.toOwnedSlice(self.gpa);
return aw.toOwnedSlice(gpa);
}
}
@ -1064,6 +1066,7 @@ const Parser = struct {
name: []const u8,
) error{ OutOfMemory, ParseZon } {
@branchHint(.cold);
const gpa = self.gpa;
const token = if (field) |f| b: {
var buf: [2]Ast.Node.Index = undefined;
const struct_init = self.ast.fullStructInit(&buf, node.getAstNode(self.zoir)).?;
@ -1081,18 +1084,17 @@ const Parser = struct {
};
} else b: {
const msg = "supported: ";
var buf: std.ArrayListUnmanaged(u8) = try .initCapacity(self.gpa, 64);
defer buf.deinit(self.gpa);
const writer = buf.writer(self.gpa);
try writer.writeAll(msg);
var buf: std.ArrayListUnmanaged(u8) = try .initCapacity(gpa, 64);
defer buf.deinit(gpa);
try buf.appendSlice(gpa, msg);
inline for (info.fields, 0..) |field_info, i| {
if (i != 0) try writer.writeAll(", ");
try writer.print("'{p_}'", .{std.zig.fmtId(field_info.name)});
if (i != 0) try buf.appendSlice(gpa, ", ");
try buf.print(gpa, "'{p_}'", .{std.zig.fmtId(field_info.name)});
}
break :b .{
.token = token,
.offset = 0,
.msg = try buf.toOwnedSlice(self.gpa),
.msg = try buf.toOwnedSlice(gpa),
.owned = true,
};
};