update std.io.AllocatingWriter to new API

This commit is contained in:
Andrew Kelley 2025-06-17 03:22:16 -07:00
parent ef8d7aa251
commit a249cc1c7e
44 changed files with 181 additions and 237 deletions

View File

@ -444,9 +444,9 @@ fn parse(file_name: []const u8, source: []u8) Oom!Ast {
const err_loc = std.zig.findLineColumn(ast.source, err_offset);
rendered_err.clearRetainingCapacity();
{
var aw: std.io.AllocatingWriter = undefined;
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, &rendered_err);
defer rendered_err = aw.toArrayList();
ast.renderError(err, aw.fromArrayList(gpa, &rendered_err)) catch |e| switch (e) {
ast.renderError(err, &aw.interface) catch |e| switch (e) {
error.WriteFailed => return error.OutOfMemory,
};
}

View File

@ -733,9 +733,9 @@ fn render_docs(
}
}.render,
};
var aw: std.io.AllocatingWriter = undefined;
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, out);
defer out.* = aw.toArrayList();
render.render(parsed_doc, aw.fromArrayList(gpa, out)) catch |err| switch (err) {
render.render(parsed_doc, &aw.interface) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
}

View File

@ -1592,10 +1592,9 @@ const MachODumper = struct {
var ctx = ObjectContext{ .gpa = gpa, .data = bytes, .header = hdr };
try ctx.parse();
var aw: std.io.AllocatingWriter = undefined;
aw.init(gpa);
var aw: std.io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const bw = &aw.buffered_writer;
const bw = &aw.interface;
switch (check.kind) {
.headers => {
@ -1746,10 +1745,9 @@ const ElfDumper = struct {
});
}
var aw: std.io.AllocatingWriter = undefined;
aw.init(gpa);
var aw: std.io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const bw = &aw.buffered_writer;
const bw = &aw.interface;
switch (check.kind) {
.archive_symtab => if (ctx.symtab.len > 0) {
@ -1894,10 +1892,9 @@ const ElfDumper = struct {
else => {},
};
var aw: std.io.AllocatingWriter = undefined;
aw.init(gpa);
var aw: std.io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const bw = &aw.buffered_writer;
const bw = &aw.interface;
switch (check.kind) {
.headers => {
@ -2355,10 +2352,9 @@ const WasmDumper = struct {
if (!mem.eql(u8, buf[0..4], &std.wasm.magic)) return error.InvalidMagicByte;
if (!mem.eql(u8, buf[4..8], &std.wasm.version)) return error.UnsupportedWasmVersion;
var aw: std.io.AllocatingWriter = undefined;
aw.init(gpa);
var aw: std.io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const bw = &aw.buffered_writer;
const bw = &aw.interface;
parseAndDumpInner(step, check, &br, bw) catch |err| switch (err) {
error.EndOfStream => try bw.writeAll("\n<UnexpectedEndOfStream>"),

View File

@ -1969,14 +1969,13 @@ fn checkCompileErrors(compile: *Compile) !void {
const arena = compile.step.owner.allocator;
const actual_errors = ae: {
var aw: std.io.AllocatingWriter = undefined;
aw.init(arena);
var aw: std.io.Writer.Allocating = .init(arena);
defer aw.deinit();
try actual_eb.renderToWriter(.{
.ttyconf = .no_color,
.include_reference_trace = false,
.include_source_line = false,
}, &aw.buffered_writer);
}, &aw.interface);
break :ae try aw.toOwnedSlice();
};

View File

@ -196,10 +196,9 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
man.hash.addBytes(config_header.include_path);
man.hash.addOptionalBytes(config_header.include_guard_override);
var aw: std.io.AllocatingWriter = undefined;
aw.init(gpa);
var aw: std.io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const bw = &aw.buffered_writer;
const bw = &aw.interface;
const header_text = "This file was generated by ConfigHeader using the Zig Build System.";
const c_generated_line = "/* " ++ header_text ++ " */\n";
@ -330,13 +329,13 @@ fn render_autoconf_undef(
fn render_autoconf_at(
step: *Step,
contents: []const u8,
aw: *std.io.AllocatingWriter,
aw: *std.io.Writer.Allocating,
values: std.StringArrayHashMap(Value),
src_path: []const u8,
) !void {
const build = step.owner;
const allocator = build.allocator;
const bw = &aw.buffered_writer;
const bw = &aw.interface;
const used = allocator.alloc(bool, values.count()) catch @panic("OOM");
for (used) |*u| u.* = false;

View File

@ -905,7 +905,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
pub fn print(self: *Self, gpa: Allocator, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
comptime assert(T == u8);
try self.ensureUnusedCapacity(gpa, fmt.len);
var aw: std.io.AllocatingWriter = .fromArrayList(gpa, self);
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, self);
defer self.* = aw.toArrayList();
return aw.interface.print(fmt, args) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,

View File

@ -341,11 +341,10 @@ test "compress/decompress" {
fn testDecompress(comptime container: Container, compressed: []const u8, expected_plain: []const u8) !void {
var in: std.io.Reader = .fixed(compressed);
var out: std.io.AllocatingWriter = undefined;
out.init(testing.allocator);
var out: std.io.Writer.Allocating = .init(testing.allocator);
defer out.deinit();
try Decompress.pump(container, &in, &out.buffered_writer);
try Decompress.pump(container, &in, &out.interface);
try testing.expectEqualSlices(u8, expected_plain, out.items);
}

View File

@ -729,13 +729,12 @@ test "decompress" {
};
for (cases) |c| {
var fb: std.io.Reader = .fixed(c.in);
var aw: std.io.AllocatingWriter = undefined;
aw.init(testing.allocator);
var aw: std.io.Writer.Allocating = .init(testing.allocator);
defer aw.deinit();
var decompress: Decompress = .init(&fb, .raw);
var decompress_br = decompress.readable(&.{});
_ = try decompress_br.readRemaining(&aw.buffered_writer);
_ = try decompress_br.readRemaining(&aw.interface);
try testing.expectEqualStrings(c.out, aw.getWritten());
}
}
@ -789,13 +788,12 @@ test "gzip decompress" {
};
for (cases) |c| {
var fb: std.io.Reader = .fixed(c.in);
var aw: std.io.AllocatingWriter = undefined;
aw.init(testing.allocator);
var aw: std.io.Writer.Allocating = .init(testing.allocator);
defer aw.deinit();
var decompress: Decompress = .init(&fb, .gzip);
var decompress_br = decompress.readable(&.{});
_ = try decompress_br.readRemaining(&aw.buffered_writer);
_ = try decompress_br.readRemaining(&aw.interface);
try testing.expectEqualStrings(c.out, aw.getWritten());
}
}
@ -818,13 +816,12 @@ test "zlib decompress" {
};
for (cases) |c| {
var fb: std.io.Reader = .fixed(c.in);
var aw: std.io.AllocatingWriter = undefined;
aw.init(testing.allocator);
var aw: std.io.Writer.Allocating = .init(testing.allocator);
defer aw.deinit();
var decompress: Decompress = .init(&fb, .zlib);
var decompress_br = decompress.readable(&.{});
_ = try decompress_br.readRemaining(&aw.buffered_writer);
_ = try decompress_br.readRemaining(&aw.interface);
try testing.expectEqualStrings(c.out, aw.getWritten());
}
}
@ -879,18 +876,17 @@ test "fuzzing tests" {
inline for (cases, 0..) |c, case_no| {
var in: std.io.Reader = .fixed(@embedFile("testdata/fuzz/" ++ c.input ++ ".input"));
var aw: std.io.AllocatingWriter = undefined;
aw.init(testing.allocator);
var aw: std.io.Writer.Allocating = .init(testing.allocator);
defer aw.deinit();
errdefer std.debug.print("test case failed {}\n", .{case_no});
var decompress: Decompress = .init(&in, .raw);
var decompress_br = decompress.readable(&.{});
if (c.err) |expected_err| {
try testing.expectError(error.ReadFailed, decompress_br.readRemaining(&aw.buffered_writer));
try testing.expectError(error.ReadFailed, decompress_br.readRemaining(&aw.interface));
try testing.expectError(expected_err, decompress.read_err.?);
} else {
_ = try decompress_br.readRemaining(&aw.buffered_writer);
_ = try decompress_br.readRemaining(&aw.interface);
try testing.expectEqualStrings(c.out, aw.getWritten());
}
}
@ -901,13 +897,12 @@ test "bug 18966" {
const expect = @embedFile("testdata/fuzz/bug_18966.expect");
var in: std.io.Reader = .fixed(input);
var aw: std.io.AllocatingWriter = undefined;
aw.init(testing.allocator);
var aw: std.io.Writer.Allocating = .init(testing.allocator);
defer aw.deinit();
var decompress: Decompress = .init(&in, .gzip);
var decompress_br = decompress.readable(&.{});
_ = try decompress_br.readRemaining(&aw.buffered_writer);
_ = try decompress_br.readRemaining(&aw.interface);
try testing.expectEqualStrings(expect, aw.getWritten());
}

View File

@ -278,9 +278,8 @@ test decompress {
0x00, 0x06, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x21, 0x0A, 0x00,
};
var stream: std.io.Reader = .fixed(&compressed);
var decomp: std.io.AllocatingWriter = undefined;
const decomp_bw = decomp.init(std.testing.allocator);
var decomp: std.io.Writer.Allocating = .init(std.testing.allocator);
defer decomp.deinit();
try decompress(std.testing.allocator, &stream, decomp_bw);
try decompress(std.testing.allocator, &stream, &decomp.interface);
try std.testing.expectEqualSlices(u8, expected, decomp.getWritten());
}

View File

@ -304,11 +304,10 @@ pub fn dumpHexFallible(bw: *Writer, ttyconf: std.io.tty.Config, bytes: []const u
test dumpHexFallible {
const bytes: []const u8 = &.{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x01, 0x12, 0x13 };
var aw: std.io.AllocatingWriter = undefined;
var bw = aw.init(std.testing.allocator);
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
defer aw.deinit();
try dumpHexFallible(&bw, .no_color, bytes);
try dumpHexFallible(&aw.interface, .no_color, bytes);
const expected = try std.fmt.allocPrint(std.testing.allocator,
\\{x:0>[2]} 00 11 22 33 44 55 66 77 88 99 AA BB CC DD EE FF .."3DUfw........
\\{x:0>[2]} 01 12 13 ...

View File

@ -853,10 +853,9 @@ pub fn count(comptime fmt: []const u8, args: anytype) usize {
}
pub fn allocPrint(gpa: Allocator, comptime fmt: []const u8, args: anytype) Allocator.Error![]u8 {
var aw: std.io.AllocatingWriter = undefined;
try aw.initCapacity(gpa, fmt.len);
var aw = try std.io.Writer.Allocating.initCapacity(gpa, fmt.len);
defer aw.deinit();
aw.buffered_writer.print(fmt, args) catch |err| switch (err) {
aw.interface.print(fmt, args) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
return aw.toOwnedSlice();
@ -868,10 +867,9 @@ pub fn allocPrintSentinel(
args: anytype,
comptime sentinel: u8,
) Allocator.Error![:sentinel]u8 {
var aw: std.io.AllocatingWriter = undefined;
try aw.initCapacity(gpa, fmt.len);
var aw = try std.io.Writer.Allocating.initCapacity(gpa, fmt.len);
defer aw.deinit();
aw.buffered_writer.print(fmt, args) catch |err| switch (err) {
aw.interface.print(fmt, args) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
return aw.toOwnedSliceSentinel(sentinel);

View File

@ -42,9 +42,9 @@ test Value {
}
test Stringify {
var out: std.io.AllocatingWriter = undefined;
var out: std.io.Writer.Allocating = .init(testing.allocator);
var write_stream: Stringify = .{
.writer = out.init(testing.allocator),
.writer = &out.interface,
.options = .{ .whitespace = .indent_2 },
};
defer out.deinit();

View File

@ -576,8 +576,8 @@ pub fn value(v: anytype, options: Options, writer: *Writer) Error!void {
}
test value {
var out: std.io.AllocatingWriter = undefined;
const writer = out.init(std.testing.allocator);
var out: std.io.Writer.Allocating = .init(std.testing.allocator);
const writer = &out.interface;
defer out.deinit();
const T = struct { a: i32, b: []const u8 };
@ -616,8 +616,8 @@ test value {
///
/// Caller owns returned memory.
pub fn valueAlloc(gpa: Allocator, v: anytype, options: Options) error{OutOfMemory}![]u8 {
var aw: std.io.AllocatingWriter = undefined;
const writer = aw.init(gpa);
var aw: std.io.Writer.Allocating = .init(gpa);
const writer = &aw.interface;
defer aw.deinit();
value(v, options, writer) catch return error.OutOfMemory;
return aw.toOwnedSlice();

View File

@ -433,8 +433,8 @@ test "write files" {
{
const root = "root";
var output: std.io.AllocatingWriter = .init(testing.allocator);
var wrt: Writer = .{ .underlying_writer = &output.buffered_writer };
var output: std.io.Writer.Allocating = .init(testing.allocator);
var wrt: Writer = .{ .underlying_writer = &output.interface };
defer output.deinit();
try wrt.setRoot(root);
for (files) |file|
@ -469,9 +469,8 @@ test "write files" {
}
// without root
{
var output: std.io.AllocatingWriter = undefined;
output.init(testing.allocator);
var wrt: Writer = .{ .underlying_writer = &output.buffered_writer };
var output: std.io.Writer.Allocating = .init(testing.allocator);
var wrt: Writer = .{ .underlying_writer = &output.interface };
defer output.deinit();
for (files) |file| {
var content: std.io.Reader = .fixed(file.content);

View File

@ -204,10 +204,9 @@ pub fn parse(gpa: Allocator, source: [:0]const u8, mode: Mode) Allocator.Error!A
/// `gpa` is used for allocating the resulting formatted source code.
/// Caller owns the returned slice of bytes, allocated with `gpa`.
pub fn renderAlloc(tree: Ast, gpa: Allocator) error{OutOfMemory}![]u8 {
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.init(gpa);
var aw: std.io.Writer.Allocating = .init(gpa);
errdefer aw.deinit();
render(tree, gpa, bw, .{}) catch |err| switch (err) {
render(tree, gpa, &aw.interface, .{}) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
return aw.toOwnedSlice();

View File

@ -2142,14 +2142,13 @@ fn renderArrayInit(
const section_exprs = row_exprs[0..section_end];
var sub_expr_buffer: std.io.AllocatingWriter = undefined;
sub_expr_buffer.init(gpa);
var sub_expr_buffer: std.io.Writer.Allocating = .init(gpa);
defer sub_expr_buffer.deinit();
const sub_expr_buffer_starts = try gpa.alloc(usize, section_exprs.len + 1);
defer gpa.free(sub_expr_buffer_starts);
var auto_indenting_stream: AutoIndentingStream = .init(gpa, &sub_expr_buffer.buffered_writer, indent_delta);
var auto_indenting_stream: AutoIndentingStream = .init(gpa, &sub_expr_buffer.interface, indent_delta);
defer auto_indenting_stream.deinit();
var sub_render: Render = .{
.gpa = r.gpa,

View File

@ -11442,10 +11442,9 @@ fn parseStrLit(
) InnerError!void {
const raw_string = bytes[offset..];
const result = r: {
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(astgen.gpa, buf);
var aw: std.io.Writer.Allocating = .fromArrayList(astgen.gpa, buf);
defer buf.* = aw.toArrayList();
break :r std.zig.string_literal.parseWrite(bw, raw_string) catch |err| switch (err) {
break :r std.zig.string_literal.parseWrite(&aw.interface, raw_string) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
};
@ -13899,10 +13898,9 @@ fn lowerAstErrors(astgen: *AstGen) error{OutOfMemory}!void {
const tree = astgen.tree;
assert(tree.errors.len > 0);
var msg: std.io.AllocatingWriter = undefined;
msg.init(gpa);
var msg: std.io.Writer.Allocating = .init(gpa);
defer msg.deinit();
const msg_bw = &msg.buffered_writer;
const msg_bw = &msg.interface;
var notes: std.ArrayListUnmanaged(u32) = .empty;
defer notes.deinit(gpa);

View File

@ -788,8 +788,8 @@ pub const Wip = struct {
const ttyconf: std.io.tty.Config = .no_color;
var bundle_buf: std.io.AllocatingWriter = undefined;
const bundle_bw = bundle_buf.init(std.testing.allocator);
var bundle_buf: std.io.Writer.Allocating = .init(std.testing.allocator);
const bundle_bw = &bundle_buf.interface;
defer bundle_buf.deinit();
try bundle.renderToWriter(.{ .ttyconf = ttyconf }, bundle_bw);
@ -804,8 +804,8 @@ pub const Wip = struct {
};
defer copy.deinit(std.testing.allocator);
var copy_buf: std.io.AllocatingWriter = undefined;
const copy_bw = copy_buf.init(std.testing.allocator);
var copy_buf: std.io.Writer.Allocating = .init(std.testing.allocator);
const copy_bw = &copy_buf.interface;
defer copy_buf.deinit();
try copy.renderToWriter(.{ .ttyconf = ttyconf }, copy_bw);

View File

@ -467,10 +467,9 @@ fn appendIdentStr(zg: *ZonGen, ident_token: Ast.TokenIndex) error{ OutOfMemory,
const raw_string = zg.tree.tokenSlice(ident_token)[offset..];
try zg.string_bytes.ensureUnusedCapacity(gpa, raw_string.len);
const result = r: {
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(gpa, &zg.string_bytes);
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, &zg.string_bytes);
defer zg.string_bytes = aw.toArrayList();
break :r std.zig.string_literal.parseWrite(bw, raw_string) catch |err| switch (err) {
break :r std.zig.string_literal.parseWrite(&aw.interface, raw_string) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
};
@ -566,10 +565,9 @@ fn strLitAsString(zg: *ZonGen, str_node: Ast.Node.Index) error{ OutOfMemory, Bad
const size_hint = strLitSizeHint(zg.tree, str_node);
try string_bytes.ensureUnusedCapacity(gpa, size_hint);
const result = r: {
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(gpa, &zg.string_bytes);
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, &zg.string_bytes);
defer zg.string_bytes = aw.toArrayList();
break :r parseStrLit(zg.tree, str_node, bw) catch |err| switch (err) {
break :r parseStrLit(zg.tree, str_node, &aw.interface) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
};
@ -888,10 +886,9 @@ fn lowerAstErrors(zg: *ZonGen) Allocator.Error!void {
const tree = zg.tree;
assert(tree.errors.len > 0);
var msg: std.io.AllocatingWriter = undefined;
msg.init(gpa);
var msg: std.io.Writer.Allocating = .init(gpa);
defer msg.deinit();
const msg_bw = &msg.buffered_writer;
const msg_bw = &msg.interface;
var notes: std.ArrayListUnmanaged(Zoir.CompileError.Note) = .empty;
defer notes.deinit(gpa);

View File

@ -8619,16 +8619,7 @@ pub fn deinit(self: *Builder) void {
self.* = undefined;
}
pub fn setModuleAsm(self: *Builder, aw: *std.io.AllocatingWriter) *Writer {
self.module_asm.clearRetainingCapacity();
return self.appendModuleAsm(aw);
}
pub fn appendModuleAsm(self: *Builder, aw: *std.io.AllocatingWriter) *Writer {
return aw.fromArrayList(self.gpa, &self.module_asm);
}
pub fn finishModuleAsm(self: *Builder, aw: *std.io.AllocatingWriter) Allocator.Error!void {
pub fn finishModuleAsm(self: *Builder, aw: *std.io.Writer.Allocating) Allocator.Error!void {
self.module_asm = aw.toArrayList();
if (self.module_asm.getLastOrNull()) |last| if (last != '\n')
try self.module_asm.append(self.gpa, '\n');
@ -8938,8 +8929,8 @@ pub fn getIntrinsic(
const name = name: {
{
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(self.gpa, &self.strtab_string_bytes);
var aw: std.io.Writer.Allocating = .fromArrayList(self.gpa, &self.strtab_string_bytes);
const bw = &aw.interface;
defer self.strtab_string_bytes = aw.toArrayList();
bw.print("llvm.{s}", .{@tagName(id)}) catch return error.OutOfMemory;
for (overload) |ty| bw.print(".{fm}", .{ty.fmt(self)}) catch return error.OutOfMemory;

View File

@ -358,10 +358,9 @@ pub fn parseWrite(writer: *Writer, bytes: []const u8) Writer.Error!Result {
/// Higher level API. Does not return extra info about parse errors.
/// Caller owns returned memory.
pub fn parseAlloc(allocator: std.mem.Allocator, bytes: []const u8) ParseError![]u8 {
var aw: std.io.AllocatingWriter = undefined;
aw.init(allocator);
var aw: std.io.Writer.Allocating = .init(allocator);
defer aw.deinit();
const result = parseWrite(&aw.buffered_writer, bytes) catch |err| switch (err) {
const result = parseWrite(&aw.interface, bytes) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
switch (result) {

View File

@ -638,10 +638,9 @@ const Parser = struct {
if (pointer.sentinel() != null) size_hint += 1;
const gpa = self.gpa;
var aw: std.io.AllocatingWriter = undefined;
try aw.initCapacity(gpa, size_hint);
var aw = try std.io.Writer.Allocating.initCapacity(gpa, size_hint);
defer aw.deinit();
const parsed = ZonGen.parseStrLit(self.ast, ast_node, &aw.buffered_writer) catch |err| switch (err) {
const parsed = ZonGen.parseStrLit(self.ast, ast_node, &aw.interface) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
switch (parsed) {

View File

@ -1054,9 +1054,8 @@ fn expectSerializeEqual(
value: anytype,
options: SerializeOptions,
) !void {
var aw: std.io.AllocatingWriter = undefined;
aw.init(std.testing.allocator);
const bw = &aw.buffered_writer;
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
const bw = &aw.interface;
defer aw.deinit();
try serialize(value, options, bw);
@ -1157,9 +1156,8 @@ test "std.zon stringify whitespace, high level API" {
}
test "std.zon stringify whitespace, low level API" {
var aw: std.io.AllocatingWriter = undefined;
aw.init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.buffered_writer };
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.interface };
defer aw.deinit();
for ([2]bool{ true, false }) |whitespace| {
@ -1515,9 +1513,8 @@ test "std.zon stringify whitespace, low level API" {
}
test "std.zon stringify utf8 codepoints" {
var aw: std.io.AllocatingWriter = undefined;
aw.init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.buffered_writer };
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.interface };
defer aw.deinit();
// Printable ASCII
@ -1626,9 +1623,8 @@ test "std.zon stringify utf8 codepoints" {
}
test "std.zon stringify strings" {
var aw: std.io.AllocatingWriter = undefined;
aw.init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.buffered_writer };
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.interface };
defer aw.deinit();
// Minimal case
@ -1697,9 +1693,8 @@ test "std.zon stringify strings" {
}
test "std.zon stringify multiline strings" {
var aw: std.io.AllocatingWriter = undefined;
aw.init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.buffered_writer };
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.interface };
defer aw.deinit();
inline for (.{ true, false }) |whitespace| {
@ -1918,9 +1913,8 @@ test "std.zon stringify skip default fields" {
}
test "std.zon depth limits" {
var aw: std.io.AllocatingWriter = undefined;
aw.init(std.testing.allocator);
const bw = &aw.buffered_writer;
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
const bw = &aw.interface;
defer aw.deinit();
const Recurse = struct { r: []const @This() };
@ -2180,9 +2174,8 @@ test "std.zon stringify primitives" {
}
test "std.zon stringify ident" {
var aw: std.io.AllocatingWriter = undefined;
aw.init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.buffered_writer };
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.interface };
defer aw.deinit();
try expectSerializeEqual(".{ .a = 0 }", .{ .a = 0 }, .{});
@ -2228,9 +2221,8 @@ test "std.zon stringify ident" {
}
test "std.zon stringify as tuple" {
var aw: std.io.AllocatingWriter = undefined;
aw.init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.buffered_writer };
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.interface };
defer aw.deinit();
// Tuples
@ -2250,9 +2242,8 @@ test "std.zon stringify as tuple" {
}
test "std.zon stringify as float" {
var aw: std.io.AllocatingWriter = undefined;
aw.init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.buffered_writer };
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.interface };
defer aw.deinit();
// Comptime float
@ -2355,9 +2346,8 @@ test "std.zon pointers" {
}
test "std.zon tuple/struct field" {
var aw: std.io.AllocatingWriter = undefined;
aw.init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.buffered_writer };
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
var s: Serializer = .{ .writer = &aw.interface };
defer aw.deinit();
// Test on structs

View File

@ -857,8 +857,8 @@ pub const Session = struct {
upload_pack_uri.query = null;
upload_pack_uri.fragment = null;
var body: std.io.AllocatingWriter = undefined;
const body_writer = body.init(session.allocator);
var body: std.io.Writer.Allocating = .init(session.allocator);
const body_writer = &body.interface;
defer body.deinit();
try Packet.write(.{ .data = "command=ls-refs\n" }, body_writer);
if (session.supports_agent) {
@ -974,9 +974,9 @@ pub const Session = struct {
upload_pack_uri.query = null;
upload_pack_uri.fragment = null;
var body: std.io.AllocatingWriter = undefined;
const body_writer = body.init(session.allocator);
var body: std.io.Writer.Allocating = .init(session.allocator);
defer body.deinit();
const body_writer = &body.interface;
try Packet.write(.{ .data = "command=fetch\n" }, body_writer);
if (session.supports_agent) {
try Packet.write(.{ .data = agent_capability }, body_writer);

View File

@ -471,9 +471,8 @@ const Parse = struct {
offset: u32,
) InnerError!void {
const raw_string = bytes[offset..];
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(p.gpa, buf);
const result = std.zig.string_literal.parseWrite(bw, raw_string);
var aw: std.io.Writer.Allocating = .fromArrayList(p.gpa, buf);
const result = std.zig.string_literal.parseWrite(&aw.interface, raw_string);
buf.* = aw.toArrayList();
switch (result catch return error.OutOfMemory) {
.success => {},

View File

@ -3028,10 +3028,9 @@ pub fn createTypeName(
const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip) orelse return error.AnalysisFail);
const zir_tags = sema.code.instructions.items(.tag);
var aw: std.io.AllocatingWriter = undefined;
aw.init(gpa);
var aw: std.io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const bw = &aw.buffered_writer;
const bw = &aw.interface;
bw.print("{f}(", .{block.type_name_ctx.fmt(ip)}) catch return error.OutOfMemory;
var arg_i: usize = 0;
@ -5911,9 +5910,9 @@ fn zirCompileLog(
const zcu = pt.zcu;
const gpa = zcu.gpa;
var aw: std.io.AllocatingWriter = undefined;
var aw: std.io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const bw = &aw.buffered_writer;
const bw = &aw.interface;
const extra = sema.code.extraData(Zir.Inst.NodeMultiOp, extended.operand);
const src_node = extra.data.src_node;
@ -37369,13 +37368,12 @@ fn notePathToComptimeAllocPtr(sema: *Sema, msg: *Zcu.ErrorMsg, src: LazySrcLoc,
error.AnalysisFail => unreachable,
};
var second_path_aw: std.io.AllocatingWriter = undefined;
second_path_aw.init(arena);
var second_path_aw: std.io.Writer.Allocating = .init(arena);
defer second_path_aw.deinit();
const inter_name = try std.fmt.allocPrint(arena, "v{d}", .{intermediate_value_count});
const deriv_start = @import("print_value.zig").printPtrDerivation(
derivation,
&second_path_aw.buffered_writer,
&second_path_aw.interface,
pt,
.lvalue,
.{ .str = inter_name },

View File

@ -452,8 +452,8 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) InnerError!void {
.plan9 => |dbg_out| {
if (delta_pc <= 0) return; // only do this when the pc changes
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(emit.bin_file.comp.gpa, &dbg_out.dbg_line);
var aw: std.io.Writer.Allocating = .fromArrayList(emit.bin_file.comp.gpa, &dbg_out.dbg_line);
const bw = &aw.interface;
defer dbg_out.dbg_line = aw.toArrayList();
// increasing the line number

View File

@ -368,8 +368,8 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
.plan9 => |dbg_out| {
if (delta_pc <= 0) return; // only do this when the pc changes
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(self.bin_file.comp.gpa, &dbg_out.dbg_line);
var aw: std.io.Writer.Allocating = .fromArrayList(self.bin_file.comp.gpa, &dbg_out.dbg_line);
const bw = &aw.interface;
defer dbg_out.dbg_line = aw.toArrayList();
// increasing the line number

View File

@ -19,8 +19,8 @@ pub const Error = Lower.Error || error{
pub fn emitMir(emit: *Emit) Error!void {
const gpa = emit.bin_file.comp.gpa;
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(gpa, emit.code);
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, emit.code);
const bw = &aw.interface;
defer emit.code.* = aw.toArrayList();
log.debug("mir instruction len: {}", .{emit.lower.mir.instructions.len});

View File

@ -924,8 +924,8 @@ fn dbgAdvancePCAndLine(emit: *Emit, loc: Loc, pc: usize) Error!void {
.plan9 => |dbg_out| {
if (delta_pc <= 0) return; // only do this when the pc changes
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(emit.lower.bin_file.comp.gpa, &dbg_out.dbg_line);
var aw: std.io.Writer.Allocating = .fromArrayList(emit.lower.bin_file.comp.gpa, &dbg_out.dbg_line);
const bw = &aw.interface;
defer dbg_out.dbg_line = aw.toArrayList();
// increasing the line number

View File

@ -314,10 +314,9 @@ pub fn generateSymbol(
const tracy = trace(@src());
defer tracy.end();
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(pt.zcu.gpa, code);
var aw: std.io.Writer.Allocating = .fromArrayList(pt.zcu.gpa, code);
defer code.* = aw.toArrayList();
return generateSymbolInner(bin_file, pt, src_loc, val, bw, reloc_parent) catch |err| switch (err) {
return generateSymbolInner(bin_file, pt, src_loc, val, &aw.interface, reloc_parent) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
else => |e| return e,
};

View File

@ -694,8 +694,8 @@ pub const Function = struct {
/// It is not available when generating .h file.
pub const Object = struct {
dg: DeclGen,
code_header: std.io.AllocatingWriter,
code: std.io.AllocatingWriter,
code_header: std.io.Writer.Allocating,
code: std.io.Writer.Allocating,
indent_counter: usize,
const indent_width = 1;
@ -731,7 +731,7 @@ pub const DeclGen = struct {
pass: Pass,
is_naked_fn: bool,
expected_block: ?u32,
fwd_decl: std.io.AllocatingWriter,
fwd_decl: std.io.Writer.Allocating,
error_msg: ?*Zcu.ErrorMsg,
ctype_pool: CType.Pool,
scratch: std.ArrayListUnmanaged(u32),

View File

@ -747,13 +747,17 @@ pub const Object = struct {
}
fn genModuleLevelAssembly(object: *Object) Allocator.Error!void {
var aw: std.io.AllocatingWriter = undefined;
const bw = object.builder.setModuleAsm(&aw);
errdefer aw.deinit();
const b = &object.builder;
const gpa = b.gpa;
b.module_asm.clearRetainingCapacity();
for (object.pt.zcu.global_assembly.values()) |assembly| {
bw.print("{s}\n", .{assembly}) catch return error.OutOfMemory;
try b.module_asm.ensureUnusedCapacity(gpa, assembly.len + 1);
b.module_asm.appendSliceAssumeCapacity(assembly);
b.module_asm.appendAssumeCapacity('\n');
}
if (b.module_asm.getLastOrNull()) |last| {
if (last != '\n') try b.module_asm.append(gpa, '\n');
}
try object.builder.finishModuleAsm(&aw);
}
pub const EmitOptions = struct {
@ -2678,10 +2682,9 @@ pub const Object = struct {
}
fn allocTypeName(o: *Object, ty: Type) Allocator.Error![:0]const u8 {
var aw: std.io.AllocatingWriter = undefined;
aw.init(o.gpa);
var aw: std.io.Writer.Allocating = .init(o.gpa);
defer aw.deinit();
ty.print(&aw.buffered_writer, o.pt) catch return error.OutOfMemory;
ty.print(&aw.interface, o.pt) catch return error.OutOfMemory;
return aw.toOwnedSliceSentinel(0);
}

View File

@ -1260,10 +1260,9 @@ const NavGen = struct {
// Turn a Zig type's name into a cache reference.
fn resolveTypeName(self: *NavGen, ty: Type) Allocator.Error![]const u8 {
var aw: std.io.AllocatingWriter = undefined;
aw.init(self.gpa);
var aw: std.io.Writer.Allocating = .init(self.gpa);
defer aw.deinit();
ty.print(&aw.buffered_writer, self.pt) catch return error.OutOfMemory;
ty.print(&aw.interface, self.pt) catch return error.OutOfMemory;
return aw.toOwnedSlice();
}

View File

@ -142,7 +142,7 @@ pub fn run(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
try std.zig.printAstErrorsToStderr(gpa, tree, "<stdin>", color);
process.exit(2);
}
var aw: std.io.AllocatingWriter = .init(gpa);
var aw: std.io.Writer.Allocating = .init(gpa);
defer aw.deinit();
try tree.render(gpa, &aw.interface, .{});
const formatted = aw.getWritten();
@ -336,10 +336,9 @@ fn fmtPathFile(
try fmt.out_buffer.ensureTotalCapacity(gpa, source_code.len);
{
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(gpa, &fmt.out_buffer);
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, &fmt.out_buffer);
defer fmt.out_buffer = aw.toArrayList();
try tree.render(gpa, bw, .{});
try tree.render(gpa, &aw.interface, .{});
}
if (mem.eql(u8, fmt.out_buffer.items, source_code))
return;

View File

@ -400,8 +400,7 @@ fn findDef(
else => unreachable,
};
var override_path: std.io.AllocatingWriter = undefined;
override_path.init(gpa);
var override_path: std.io.Writer.Allocating = .init(gpa);
defer override_path.deinit();
const s = path.sep_str;
@ -410,9 +409,9 @@ fn findDef(
    // Try the architecture-specific path first.
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "{s}" ++ s ++ "{s}.def";
if (zig_lib_directory.path) |p| {
try override_path.buffered_writer.print("{s}" ++ s ++ fmt_path, .{ p, lib_path, lib_name });
try override_path.interface.print("{s}" ++ s ++ fmt_path, .{ p, lib_path, lib_name });
} else {
try override_path.buffered_writer.print(fmt_path, .{ lib_path, lib_name });
try override_path.interface.print(fmt_path, .{ lib_path, lib_name });
}
if (std.fs.cwd().access(override_path.getWritten(), .{})) |_| {
return override_path.toOwnedSlice();
@ -444,9 +443,9 @@ fn findDef(
override_path.clearRetainingCapacity();
const fmt_path = "libc" ++ s ++ "mingw" ++ s ++ "lib-common" ++ s ++ "{s}.def.in";
if (zig_lib_directory.path) |p| {
try override_path.buffered_writer.print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
try override_path.interface.print("{s}" ++ s ++ fmt_path, .{ p, lib_name });
} else {
try override_path.buffered_writer.print(fmt_path, .{lib_name});
try override_path.interface.print(fmt_path, .{lib_name});
}
if (std.fs.cwd().access(override_path.getWritten(), .{})) |_| {
return override_path.toOwnedSlice();

View File

@ -115,8 +115,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(comp.gpa);
defer c_source_files.deinit();
var override_path: std.io.AllocatingWriter = undefined;
override_path.init(comp.gpa);
var override_path: std.io.Writer.Allocating = .init(comp.gpa);
defer override_path.deinit();
const s = path.sep_str;
@ -141,19 +140,19 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
if (!is_arch_specific) {
// Look for an arch specific override.
override_path.clearRetainingCapacity();
try override_path.buffered_writer.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.s", .{
try override_path.interface.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.s", .{
dirname, arch_name, noextbasename,
});
if (source_table.contains(override_path.getWritten())) continue;
override_path.clearRetainingCapacity();
try override_path.buffered_writer.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.S", .{
try override_path.interface.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.S", .{
dirname, arch_name, noextbasename,
});
if (source_table.contains(override_path.getWritten())) continue;
override_path.clearRetainingCapacity();
try override_path.buffered_writer.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.c", .{
try override_path.interface.print("{s}" ++ s ++ "{s}" ++ s ++ "{s}.c", .{
dirname, arch_name, noextbasename,
});
if (source_table.contains(override_path.getWritten())) continue;

View File

@ -1446,10 +1446,10 @@ pub const WipNav = struct {
loc: u32,
cfa: Cfa.RegOff,
},
debug_frame: std.io.AllocatingWriter,
debug_info: std.io.AllocatingWriter,
debug_line: std.io.AllocatingWriter,
debug_loclists: std.io.AllocatingWriter,
debug_frame: std.io.Writer.Allocating,
debug_info: std.io.Writer.Allocating,
debug_line: std.io.Writer.Allocating,
debug_loclists: std.io.Writer.Allocating,
pending_lazy: PendingLazy,
pub fn init(wip_nav: *WipNav) void {
@ -4494,10 +4494,9 @@ fn refAbbrevCode(dwarf: *Dwarf, abbrev_code: AbbrevCode) UpdateError!@typeInfo(A
assert(abbrev_code != .null);
const entry: Entry.Index = @enumFromInt(@intFromEnum(abbrev_code));
if (dwarf.debug_abbrev.section.getUnit(DebugAbbrev.unit).getEntry(entry).len > 0) return @intFromEnum(abbrev_code);
var daaw: std.io.AllocatingWriter = undefined;
daaw.init(dwarf.gpa);
var daaw: std.io.Writer.Allocating = .init(dwarf.gpa);
defer daaw.deinit();
const dabw = &daaw.buffered_writer;
const dabw = &daaw.interface;
const abbrev = AbbrevCode.abbrevs.get(abbrev_code);
try dabw.writeLeb128(@intFromEnum(abbrev_code));
try dabw.writeLeb128(@intFromEnum(abbrev.tag));

View File

@ -304,11 +304,10 @@ pub fn writeAdhocSignature(
var hash: [hash_size]u8 = undefined;
if (self.requirements) |*req| {
var aw: std.io.AllocatingWriter = undefined;
aw.init(allocator);
var aw: std.io.Writer.Allocating = .init(allocator);
defer aw.deinit();
try req.write(&aw.buffered_writer);
try req.write(&aw.interface);
Sha256.hash(aw.getWritten(), &hash, .{});
self.code_directory.addSpecialHash(req.slotType(), hash);
@ -318,11 +317,10 @@ pub fn writeAdhocSignature(
}
if (self.entitlements) |*ents| {
var aw: std.io.AllocatingWriter = undefined;
aw.init(allocator);
var aw: std.io.Writer.Allocating = .init(allocator);
defer aw.deinit();
try ents.write(&aw.buffered_writer);
try ents.write(&aw.interface);
Sha256.hash(aw.getWritten(), &hash, .{});
self.code_directory.addSpecialHash(ents.slotType(), hash);

View File

@ -110,8 +110,8 @@ pub fn updateSize(rebase: *Rebase, macho_file: *MachO) !void {
fn finalize(rebase: *Rebase, gpa: Allocator) !void {
if (rebase.entries.items.len == 0) return;
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(gpa, &rebase.buffer);
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, &rebase.buffer);
const bw = &aw.interface;
defer rebase.buffer = aw.toArrayList();
log.debug("rebase opcodes", .{});

View File

@ -118,8 +118,8 @@ pub const Bind = struct {
fn finalize(bind: *Bind, gpa: Allocator, ctx: *MachO) !void {
if (bind.entries.items.len == 0) return;
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(gpa, &bind.buffer);
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, &bind.buffer);
const bw = &aw.interface;
defer bind.buffer = aw.toArrayList();
log.debug("bind opcodes", .{});
@ -359,8 +359,8 @@ pub const WeakBind = struct {
fn finalize(bind: *WeakBind, gpa: Allocator, ctx: *MachO) !void {
if (bind.entries.items.len == 0) return;
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(gpa, &bind.buffer);
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, &bind.buffer);
const bw = &aw.interface;
defer bind.buffer = aw.toArrayList();
log.debug("weak bind opcodes", .{});
@ -526,8 +526,8 @@ pub const LazyBind = struct {
fn finalize(bind: *LazyBind, gpa: Allocator, ctx: *MachO) !void {
try bind.offsets.ensureTotalCapacityPrecise(gpa, bind.entries.items.len);
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(gpa, &bind.buffer);
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, &bind.buffer);
const bw = &aw.interface;
defer bind.buffer = aw.toArrayList();
log.debug("lazy bind opcodes", .{});

View File

@ -337,7 +337,7 @@ fn putFn(self: *Plan9, nav_index: InternPool.Nav.Index, out: FnNavOutput) !void
};
try fn_map_res.value_ptr.functions.put(gpa, nav_index, out);
var aw: std.io.AllocatingWriter = .init(arena);
var aw: std.io.Writer.Allocating = .init(arena);
defer aw.deinit();
const w = &aw.interface;
@ -646,12 +646,11 @@ pub fn flush(
var iovecs_i: usize = 1;
var text_i: u64 = 0;
var linecountinfo_aw: std.io.AllocatingWriter = undefined;
linecountinfo_aw.init(gpa);
var linecountinfo_aw: std.io.Writer.Allocating = .init(gpa);
defer linecountinfo_aw.deinit();
// text
{
const linecountinfo_bw = &linecountinfo_aw.buffered_writer;
const linecountinfo_bw = &linecountinfo_aw.interface;
var linecount: i64 = -1;
var it_file = self.fn_nav_table.iterator();
while (it_file.next()) |fentry| {
@ -819,10 +818,9 @@ pub fn flush(
}
}
}
var syms_aw: std.io.AllocatingWriter = undefined;
syms_aw.init(gpa);
var syms_aw: std.io.Writer.Allocating = .init(gpa);
defer syms_aw.deinit();
try self.writeSyms(&syms_aw.buffered_writer);
try self.writeSyms(&syms_aw.interface);
const syms = syms_aw.getWritten();
assert(2 + self.atomCount() - self.externCount() == iovecs_i); // we didn't write all the decls
iovecs[iovecs_i] = .{ .base = syms.ptr, .len = syms.len };

View File

@ -203,11 +203,10 @@ pub fn flush(
// We need to export the list of error names somewhere so that we can pretty-print them in the
// executor. This is not really an important thing though, so we can just dump it in any old
// nonsemantic instruction. For now, just put it in OpSourceExtension with a special name.
var error_info: std.io.AllocatingWriter = undefined;
error_info.init(self.object.gpa);
var error_info: std.io.Writer.Allocating = .init(self.object.gpa);
defer error_info.deinit();
try error_info.buffered_writer.writeAll("zig_errors:");
try error_info.interface.writeAll("zig_errors:");
const ip = &self.base.comp.zcu.?.intern_pool;
for (ip.global_error_set.getNamesFromMainThread()) |name| {
// Errors can contain pretty much any character - to encode them in a string we must escape
@ -215,9 +214,9 @@ pub fn flush(
// name if it contains no strange characters is nice for debugging. URI encoding fits the bill.
// We're using : as separator, which is a reserved character.
try error_info.buffered_writer.writeByte(':');
try error_info.interface.writeByte(':');
try std.Uri.Component.percentEncode(
&error_info.buffered_writer,
&error_info.interface,
name.toSlice(ip),
struct {
fn isValidChar(c: u8) bool {

View File

@ -558,7 +558,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
var data_section_index: ?u32 = null;
assert(f.binary_bytes.items.len == 0);
var aw: std.io.AllocatingWriter = .fromArrayList(gpa, &f.binary_bytes);
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, &f.binary_bytes);
defer f.binary_bytes = aw.toArrayList();
const w = &aw.interface;
@ -1064,7 +1064,7 @@ const VirtualAddrs = struct {
fn emitNameSection(
wasm: *Wasm,
aw: *std.io.AllocatingWriter,
aw: *std.io.Writer.Allocating,
data_segment_groups: []const DataSegmentGroup,
) !void {
const f = &wasm.flush_buffer;
@ -1128,7 +1128,7 @@ fn emitNameSection(
}
}
fn emitFeaturesSection(aw: *std.io.AllocatingWriter, target: *const std.Target) Allocator.Error!void {
fn emitFeaturesSection(aw: *std.io.Writer.Allocating, target: *const std.Target) Allocator.Error!void {
const feature_count = target.cpu.features.count();
if (feature_count == 0) return;
@ -1155,7 +1155,7 @@ fn emitFeaturesSection(aw: *std.io.AllocatingWriter, target: *const std.Target)
assert(safety_count == 0);
}
fn emitBuildIdSection(aw: *std.io.AllocatingWriter, build_id: []const u8) !void {
fn emitBuildIdSection(aw: *std.io.Writer.Allocating, build_id: []const u8) !void {
const w = &aw.interface;
const header_offset = try reserveSectionHeader(w);
defer replaceSectionHeader(aw, header_offset, @intFromEnum(std.wasm.Section.custom));
@ -1169,7 +1169,7 @@ fn emitBuildIdSection(aw: *std.io.AllocatingWriter, build_id: []const u8) !void
try w.writeAll(build_id);
}
fn emitProducerSection(aw: *std.io.AllocatingWriter) !void {
fn emitProducerSection(aw: *std.io.Writer.Allocating) !void {
const w = &aw.interface;
const header_offset = try reserveSectionHeader(w);
defer replaceSectionHeader(aw, header_offset, @intFromEnum(std.wasm.Section.custom));
@ -1259,7 +1259,7 @@ fn reserveVecSectionHeader(w: *Writer) Writer.Error!u32 {
}
fn replaceVecSectionHeader(
aw: *std.io.AllocatingWriter,
aw: *std.io.Writer.Allocating,
offset: u32,
section: std.wasm.Section,
n_items: u32,
@ -1278,7 +1278,7 @@ fn reserveSectionHeader(w: *Writer) Writer.Error!u32 {
return @intCast(offset);
}
fn replaceSectionHeader(aw: *std.io.AllocatingWriter, offset: u32, section: u8) void {
fn replaceSectionHeader(aw: *std.io.Writer.Allocating, offset: u32, section: u8) void {
const header = aw.getWritten()[offset..][0..section_header_size];
header[0] = section;
std.leb.writeUnsignedFixed(5, header[1..6], @intCast(aw.interface.count - offset - section_header_size));
@ -1292,7 +1292,7 @@ fn reserveSizeHeader(w: *Writer) Writer.Error!u32 {
return @intCast(offset);
}
fn replaceSizeHeader(aw: *std.io.AllocatingWriter, offset: u32) void {
fn replaceSizeHeader(aw: *std.io.Writer.Allocating, offset: u32) void {
const header = aw.getWritten()[offset..][0..size_header_size];
std.leb.writeUnsignedFixed(5, header[0..5], @intCast(aw.interface.count - offset - size_header_size));
}
@ -1764,7 +1764,7 @@ fn emitInitTlsFunction(wasm: *const Wasm, w: *Writer) Writer.Error!void {
try w.writeByte(@intFromEnum(std.wasm.Opcode.end));
}
fn emitStartSection(aw: *std.io.AllocatingWriter, i: Wasm.OutputFunctionIndex) !void {
fn emitStartSection(aw: *std.io.Writer.Allocating, i: Wasm.OutputFunctionIndex) !void {
const header_offset = try reserveVecSectionHeader(&aw.interface);
defer replaceVecSectionHeader(aw, header_offset, .start, @intFromEnum(i));
}