ArrayListWriter

This commit is contained in:
Andrew Kelley 2025-02-15 01:00:42 -08:00
parent 00c6c836a6
commit c2fc6b0b6c
8 changed files with 164 additions and 86 deletions

View File

@ -1824,13 +1824,13 @@ pub fn validateUserInputDidItFail(b: *Build) bool {
return b.invalid_user_input;
}
fn allocPrintCmd(ally: Allocator, opt_cwd: ?[]const u8, argv: []const []const u8) error{OutOfMemory}![]u8 {
var buf = ArrayList(u8).init(ally);
if (opt_cwd) |cwd| try buf.writer().print("cd {s} && ", .{cwd});
fn allocPrintCmd(gpa: Allocator, opt_cwd: ?[]const u8, argv: []const []const u8) error{OutOfMemory}![]u8 {
var buf: std.ArrayListUnmanaged(u8) = .empty;
if (opt_cwd) |cwd| try buf.print(gpa, "cd {s} && ", .{cwd});
for (argv) |arg| {
try buf.writer().print("{s} ", .{arg});
try buf.print(gpa, "{s} ", .{arg});
}
return buf.toOwnedSlice();
return buf.toOwnedSlice(gpa);
}
fn printCmd(ally: Allocator, cwd: ?[]const u8, argv: []const []const u8) void {
@ -2766,11 +2766,10 @@ fn dumpBadDirnameHelp(
comptime msg: []const u8,
args: anytype,
) anyerror!void {
debug.lockStdErr();
var w = debug.lockStdErr2();
defer debug.unlockStdErr();
const stderr = io.getStdErr();
const w = stderr.writer();
try w.print(msg, args);
const tty_config = std.io.tty.detectConfig(stderr);
@ -2803,7 +2802,7 @@ pub fn dumpBadGetPathHelp(
src_builder: *Build,
asking_step: ?*Step,
) anyerror!void {
const w = stderr.writer();
var w = stderr.unbufferedWriter();
try w.print(
\\getPath() was called on a GeneratedFile that wasn't built yet.
\\ source package path: {s}

View File

@ -1001,6 +1001,15 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
return m.len;
}
/// Appends formatted output to the list. Only valid when `T == u8`.
/// Allocates through `gpa` as needed; the only possible failure is OOM.
pub fn print(self: *Self, gpa: Allocator, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
    comptime assert(T == u8);
    // Pre-reserve at least the literal format-string length as a cheap
    // lower-bound guess for the output size; the writer grows further if
    // the formatted result is longer.
    try self.ensureUnusedCapacity(gpa, fmt.len);
    var alw: std.io.ArrayListWriter = undefined;
    // `fromOwned` borrows this list's memory; `toOwned` must give it back
    // on every exit path, hence the defer.
    const bw = alw.fromOwned(gpa, self);
    defer self.* = alw.toOwned();
    // The array-list-backed writer can only fail on allocation, so any
    // writer error is mapped to OutOfMemory.
    bw.print(fmt, args) catch return error.OutOfMemory;
}
pub const FixedWriter = std.io.Writer(*Self, Allocator.Error, appendWriteFixed);
/// Initializes a Writer which will append to the list but will return

View File

@ -95,11 +95,9 @@ pub const StreamInterface = struct {
@panic("unimplemented");
}
/// Returns the number of bytes read, which may be less than the buffer
/// space provided, indicating end-of-stream.
/// The `iovecs` parameter is mutable in case this function needs to mutate
/// the fields in order to handle partial writes from the underlying layer.
pub fn writevAll(this: @This(), iovecs: []std.posix.iovec_const) WriteError!usize {
pub fn writevAll(this: @This(), iovecs: []std.posix.iovec_const) WriteError!void {
// This can be implemented in terms of writev, or specialized if desired.
_ = .{ this, iovecs };
@panic("unimplemented");

View File

@ -1669,7 +1669,8 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
if (!enabled) return;
const tty_config = io.tty.detectConfig(std.io.getStdErr());
const stderr = io.getStdErr().writer();
var stderr = lockStdErr2();
defer unlockStdErr();
const end = @min(t.index, size);
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print(
@ -1686,7 +1687,7 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
.index = frames.len,
.instruction_addresses = frames,
};
writeStackTrace(stack_trace, stderr, debug_info, tty_config) catch continue;
writeStackTrace(stack_trace, &stderr, debug_info, tty_config) catch continue;
}
if (t.index > end) {
stderr.print("{d} more traces not shown; consider increasing trace size\n", .{

View File

@ -1621,7 +1621,10 @@ const interface = struct {
var iovecs_buffer: [max_buffers_len]std.posix.iovec_const = undefined;
const iovecs = iovecs_buffer[0..@min(iovecs_buffer.len, data.len)];
for (iovecs, data[0..iovecs.len]) |*v, d| v.* = .{ .base = d.ptr, .len = d.len };
for (iovecs, data[0..iovecs.len]) |*v, d| v.* = .{
.base = if (d.len == 0) "" else d.ptr, // OS sadly checks ptr addr before length.
.len = d.len,
};
return std.posix.writev(file, iovecs);
}

View File

@ -289,67 +289,6 @@ pub fn GenericReader(
};
}
/// Generic, type-safe wrapper around the type-erased writer interface.
/// Each method forwards through `any()` and recovers the concrete error
/// set `WriteError` with `@errorCast`; `write` calls `writeFn` directly.
pub fn GenericWriter(
    comptime Context: type,
    comptime WriteError: type,
    comptime writeFn: fn (context: Context, bytes: []const u8) WriteError!usize,
) type {
    return struct {
        context: Context,

        const Self = @This();
        pub const Error = WriteError;

        /// Writes `bytes`, returning how many bytes were consumed
        /// (which may be fewer than `bytes.len`).
        pub inline fn write(self: Self, bytes: []const u8) Error!usize {
            return writeFn(self.context, bytes);
        }

        /// Writes all of `bytes`, looping over short writes.
        pub inline fn writeAll(self: Self, bytes: []const u8) Error!void {
            return @errorCast(self.any().writeAll(bytes));
        }

        /// Writes formatted output per `std.fmt` semantics.
        pub inline fn print(self: Self, comptime format: []const u8, args: anytype) Error!void {
            return @errorCast(self.any().print(format, args));
        }

        pub inline fn writeByte(self: Self, byte: u8) Error!void {
            return @errorCast(self.any().writeByte(byte));
        }

        pub inline fn writeByteNTimes(self: Self, byte: u8, n: usize) Error!void {
            return @errorCast(self.any().writeByteNTimes(byte, n));
        }

        pub inline fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) Error!void {
            return @errorCast(self.any().writeBytesNTimes(bytes, n));
        }

        /// Writes integer `value` with the given byte order.
        pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) Error!void {
            return @errorCast(self.any().writeInt(T, value, endian));
        }

        pub inline fn writeStruct(self: Self, value: anytype) Error!void {
            return @errorCast(self.any().writeStruct(value));
        }

        pub inline fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) Error!void {
            return @errorCast(self.any().writeStructEndian(value, endian));
        }

        /// Type-erases this writer. The pointer cast is sound because
        /// `typeErasedWriteFn` casts the context back to `*const Context`.
        pub inline fn any(self: *const Self) Writer {
            return .{
                .context = @ptrCast(&self.context),
                .writeFn = typeErasedWriteFn,
            };
        }

        // Adapter matching the type-erased signature; recovers the
        // concrete context and delegates to `writeFn`.
        fn typeErasedWriteFn(context: *const anyopaque, bytes: []const u8) anyerror!usize {
            const ptr: *const Context = @alignCast(@ptrCast(context));
            return writeFn(ptr.*, bytes);
        }
    };
}
/// Deprecated; consider switching to `AnyReader` or use `GenericReader`
/// to use previous API. To be removed after 0.14.0 is tagged.
pub const Reader = GenericReader;
@ -362,6 +301,7 @@ pub const AnyWriter = Writer;
pub const SeekableStream = @import("io/seekable_stream.zig").SeekableStream;
pub const BufferedWriter = @import("io/BufferedWriter.zig");
pub const ArrayListWriter = @import("io/ArrayListWriter.zig");
pub const BufferedReader = @import("io/buffered_reader.zig").BufferedReader;
pub const bufferedReader = @import("io/buffered_reader.zig").bufferedReader;
@ -844,6 +784,7 @@ test {
_ = Writer;
_ = CountingWriter;
_ = FixedBufferStream;
_ = ArrayListWriter;
_ = @import("io/bit_reader.zig");
_ = @import("io/bit_writer.zig");
_ = @import("io/buffered_atomic_file.zig");

View File

@ -0,0 +1,127 @@
//! The straightforward way to use `std.ArrayList` as the underlying writer
//! when using `std.io.BufferedWriter` is to populate the `std.io.Writer`
//! interface and then use an empty buffer. However, this means that every use
//! of `std.io.BufferedWriter` will go through the vtable, including for
//! functions such as `writeByte`. This API instead maintains
//! `std.io.BufferedWriter` state such that it writes to the unused capacity of
//! the array list, filling it up completely before making a call through the
//! vtable, causing a resize. Consequently, the same, optimized, non-generic
//! machine code that uses `std.io.BufferedWriter`, such as formatted printing,
//! is also used when the underlying writer is backed by `std.ArrayList`.
const std = @import("../std.zig");
const ArrayListWriter = @This();
const assert = std.debug.assert;

/// The committed bytes. Length excludes anything still staged in
/// `buffered_writer.buffer`, which aliases this allocation's unused capacity.
items: []u8,
/// Used to grow the borrowed allocation when capacity runs out.
allocator: std.mem.Allocator,
/// Writes into the unused capacity of `items`' allocation; its vtable
/// points back at this struct.
buffered_writer: std.io.BufferedWriter,
/// Replaces `array_list` with empty, taking ownership of the memory.
/// Replaces `array_list` with empty, taking ownership of the memory.
/// The returned `BufferedWriter` writes into the list's unused capacity,
/// only going through the vtable (and possibly resizing via `allocator`)
/// when that capacity is exhausted. Reclaim the memory with `toOwned`.
pub fn fromOwned(
    alw: *ArrayListWriter,
    allocator: std.mem.Allocator,
    array_list: *std.ArrayListUnmanaged(u8),
) *std.io.BufferedWriter {
    alw.* = .{
        // Must initialize the `items` field declared above (there is no
        // `allocated_slice` field); `toOwned` and `writev` read `alw.items`.
        .items = array_list.items,
        .allocator = allocator,
        .buffered_writer = .{
            .unbuffered_writer = .{
                .context = alw,
                .vtable = &.{
                    .writev = writev,
                    .writeFile = writeFile,
                },
            },
            // Borrow the region between `items.len` and `capacity`.
            .buffer = array_list.unusedCapacitySlice(),
        },
    };
    array_list.* = .empty;
    return &alw.buffered_writer;
}
/// Returns the memory back that was borrowed with `fromOwned`.
/// Returns the memory back that was borrowed with `fromOwned`, committing
/// the `end` bytes currently staged in the buffered writer into the list's
/// length. Invalidates `alw`.
pub fn toOwned(alw: *ArrayListWriter) std.ArrayListUnmanaged(u8) {
    const end = alw.buffered_writer.end;
    const result: std.ArrayListUnmanaged(u8) = .{
        .items = alw.items.ptr[0 .. alw.items.len + end],
        // Total capacity spans the committed items plus the entire borrowed
        // buffer (the buffer is the unused-capacity slice that begins at
        // `items.len`) — not merely what remains after `end`, which would
        // violate the `capacity >= items.len` invariant.
        .capacity = alw.items.len + alw.buffered_writer.buffer.len,
    };
    alw.* = undefined;
    return result;
}
/// `std.io.Writer` vtable implementation. Commits the bytes already staged
/// in the borrowed buffer (`data[0]`) into the list's length, appends the
/// remaining vectors (growing the allocation if needed), then re-borrows
/// the new unused capacity for the buffered writer.
fn writev(context: *anyopaque, data: []const []const u8) anyerror!usize {
    const alw: *ArrayListWriter = @alignCast(@ptrCast(context));
    const start_len = alw.items.len;
    const bw = &alw.buffered_writer;
    // The first vector must be the buffered region, which is contiguous
    // with the committed items.
    // NOTE(review): this assumption fails if the caller bypasses the
    // buffer for direct writes — confirm against BufferedWriter's
    // empty-buffer fast path.
    assert(data[0].ptr == alw.items.ptr + start_len);
    const bw_end = data[0].len;
    // Reconstitute the full array list: committed items plus staged bytes.
    // Capacity is the whole allocation — the items region plus the entire
    // borrowed buffer — preserving the `capacity >= items.len` invariant
    // that `ensureTotalCapacity`/`unusedCapacitySlice` rely on.
    var list: std.ArrayListUnmanaged(u8) = .{
        .items = alw.items.ptr[0 .. start_len + bw_end],
        .capacity = start_len + bw.buffer.len,
    };
    const rest = data[1..];
    var new_capacity: usize = list.capacity;
    for (rest) |bytes| new_capacity += bytes.len;
    // +1 guarantees the writer always gets a nonzero buffer afterwards.
    try list.ensureTotalCapacity(alw.allocator, new_capacity + 1);
    for (rest) |bytes| list.appendSliceAssumeCapacity(bytes);
    alw.items = list.items;
    bw.buffer = list.unusedCapacitySlice();
    return list.items.len - start_len;
}
/// `std.io.Writer` vtable implementation for `writeFile`: appends headers,
/// file contents read via `pread`, and trailers directly into the list.
/// Returns the number of bytes logically consumed, including any header
/// bytes that were already staged in the borrowed buffer. Trailers are
/// only appended once the requested file range has been fully read.
fn writeFile(
    context: *anyopaque,
    file: std.fs.File,
    offset: u64,
    len: std.io.Writer.VTable.FileLen,
    headers_and_trailers_full: []const []const u8,
    headers_len_full: usize,
) anyerror!usize {
    const alw: *ArrayListWriter = @alignCast(@ptrCast(context));
    const bw = &alw.buffered_writer;
    // There is no `array_list` field on this struct; reconstitute the list
    // from the borrowed pieces, exactly as `writev` does. Total capacity
    // spans the committed items plus the buffer lent to the writer.
    var list_state: std.ArrayListUnmanaged(u8) = .{
        .items = alw.items,
        .capacity = alw.items.len + bw.buffer.len,
    };
    const list = &list_state;
    // Keep the borrowed state consistent on every exit path, including
    // allocation-failure returns.
    defer {
        alw.items = list.items;
        bw.buffer = list.unusedCapacitySlice();
    }
    const start_len = list.items.len;
    const headers_and_trailers, const headers_len = if (headers_len_full >= 1) b: {
        // The first header is the writer's buffered bytes, which already
        // sit in our unused capacity directly after the committed items;
        // committing them is just a length bump.
        assert(headers_and_trailers_full[0].ptr == list.items.ptr + start_len);
        list.items.len += headers_and_trailers_full[0].len;
        break :b .{ headers_and_trailers_full[1..], headers_len_full - 1 };
    } else .{ headers_and_trailers_full, headers_len_full };
    const gpa = alw.allocator;
    const trailers = headers_and_trailers[headers_len..];
    if (len == .entire_file) {
        // Unknown file length: reserve a cache line of slack and read
        // until the capacity is filled or EOF.
        var new_capacity: usize = list.capacity + std.atomic.cache_line;
        for (headers_and_trailers) |bytes| new_capacity += bytes.len;
        try list.ensureTotalCapacity(gpa, new_capacity);
        for (headers_and_trailers[0..headers_len]) |bytes| list.appendSliceAssumeCapacity(bytes);
        const dest = list.items.ptr[list.items.len..list.capacity];
        const n = try file.pread(dest, offset);
        if (n == 0) {
            // EOF reached: append trailers and finish.
            new_capacity = list.capacity;
            for (trailers) |bytes| new_capacity += bytes.len;
            try list.ensureTotalCapacity(gpa, new_capacity);
            for (trailers) |bytes| list.appendSliceAssumeCapacity(bytes);
            return list.items.len - start_len;
        }
        // Not yet at EOF: report progress; caller will call again.
        list.items.len += n;
        return list.items.len - start_len;
    }
    // Known length: reserve exactly what headers, data, and trailers need.
    var new_capacity: usize = list.capacity + len.int();
    for (headers_and_trailers) |bytes| new_capacity += bytes.len;
    try list.ensureTotalCapacity(gpa, new_capacity);
    for (headers_and_trailers[0..headers_len]) |bytes| list.appendSliceAssumeCapacity(bytes);
    const dest = list.items.ptr[list.items.len..][0..len.int()];
    const n = try file.pread(dest, offset);
    list.items.len += n;
    if (n < dest.len) {
        // Short read: trailers wait until the rest of the range is sent.
        return list.items.len - start_len;
    }
    for (trailers) |bytes| list.appendSliceAssumeCapacity(bytes);
    return list.items.len - start_len;
}

View File

@ -19,23 +19,21 @@ end: usize = 0,
/// vectors through the underlying write calls as possible.
pub const max_buffers_len = 8;
const passthru_vtable: Writer.VTable = .{
.writev = passthru_writev,
.writeFile = passthru_writeFile,
};
pub fn writer(bw: *BufferedWriter) Writer {
return .{
.context = bw,
.vtable = &.{
.writev = passthru_writev,
.writeFile = passthru_writeFile,
},
};
}
const fixed_vtable: Writer.VTable = .{
.writev = fixed_writev,
.writeFile = fixed_writeFile,
};
pub fn writer(bw: *BufferedWriter) Writer {
return .{
.context = bw,
.vtable = &passthru_vtable,
};
}
/// Replaces the `BufferedWriter` with a new one that writes to `buffer` and
/// returns `error.NoSpaceLeft` when it is full.
pub fn initFixed(bw: *BufferedWriter, buffer: []u8) void {
@ -97,6 +95,7 @@ fn passthru_writev(context: *anyopaque, data: []const []const u8) anyerror!usize
end = new_end;
continue;
}
if (end == 0) return bw.unbuffered_writer.writev(data);
var buffers: [max_buffers_len][]const u8 = undefined;
buffers[0] = buffer[0..end];
const remaining_data = data[i..];
@ -365,6 +364,7 @@ fn passthru_writeFile(
) anyerror!usize {
const bw: *BufferedWriter = @alignCast(@ptrCast(context));
const buffer = bw.buffer;
if (buffer.len == 0) return bw.unbuffered_writer.writeFile(file, offset, len, headers_and_trailers, headers_len);
const start_end = bw.end;
const headers = headers_and_trailers[0..headers_len];
const trailers = headers_and_trailers[headers_len..];