update more of the std lib to new API

This commit is contained in:
Andrew Kelley 2025-02-16 23:14:10 -08:00
parent e60adb97d0
commit 716b4489be
16 changed files with 357 additions and 405 deletions

View File

@ -378,13 +378,16 @@ pub fn main() !void {
validateSystemLibraryOptions(builder);
const stdout_writer = io.getStdOut().writer();
var stdout_writer: std.io.BufferedWriter = .{
.buffer = &stdout_buffer,
.unbuffered_writer = std.io.getStdOut().writer(),
};
if (help_menu)
return usage(builder, stdout_writer);
return usage(builder, &stdout_writer);
if (steps_menu)
return steps(builder, stdout_writer);
return steps(builder, &stdout_writer);
var run: Run = .{
.max_rss = max_rss,
@ -696,24 +699,23 @@ fn runStepNames(
const ttyconf = run.ttyconf;
if (run.summary != .none) {
std.debug.lockStdErr();
var bw = std.debug.lockStdErr2();
defer std.debug.unlockStdErr();
const stderr = run.stderr;
const total_count = success_count + failure_count + pending_count + skipped_count;
ttyconf.setColor(stderr, .cyan) catch {};
stderr.writeAll("Build Summary:") catch {};
ttyconf.setColor(stderr, .reset) catch {};
stderr.writer().print(" {d}/{d} steps succeeded", .{ success_count, total_count }) catch {};
if (skipped_count > 0) stderr.writer().print("; {d} skipped", .{skipped_count}) catch {};
if (failure_count > 0) stderr.writer().print("; {d} failed", .{failure_count}) catch {};
ttyconf.setColor(&bw, .cyan) catch {};
bw.writeAll("Build Summary:") catch {};
ttyconf.setColor(&bw, .reset) catch {};
bw.print(" {d}/{d} steps succeeded", .{ success_count, total_count }) catch {};
if (skipped_count > 0) bw.print("; {d} skipped", .{skipped_count}) catch {};
if (failure_count > 0) bw.print("; {d} failed", .{failure_count}) catch {};
if (test_count > 0) stderr.writer().print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {};
if (test_skip_count > 0) stderr.writer().print("; {d} skipped", .{test_skip_count}) catch {};
if (test_fail_count > 0) stderr.writer().print("; {d} failed", .{test_fail_count}) catch {};
if (test_leak_count > 0) stderr.writer().print("; {d} leaked", .{test_leak_count}) catch {};
if (test_count > 0) bw.print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {};
if (test_skip_count > 0) bw.print("; {d} skipped", .{test_skip_count}) catch {};
if (test_fail_count > 0) bw.print("; {d} failed", .{test_fail_count}) catch {};
if (test_leak_count > 0) bw.print("; {d} leaked", .{test_leak_count}) catch {};
stderr.writeAll("\n") catch {};
bw.writeAll("\n") catch {};
// Print a fancy tree with build results.
var step_stack_copy = try step_stack.clone(gpa);
@ -722,7 +724,7 @@ fn runStepNames(
var print_node: PrintNode = .{ .parent = null };
if (step_names.len == 0) {
print_node.last = true;
printTreeStep(b, b.default_step, run, stderr, ttyconf, &print_node, &step_stack_copy) catch {};
printTreeStep(b, b.default_step, run, &bw, ttyconf, &print_node, &step_stack_copy) catch {};
} else {
const last_index = if (run.summary == .all) b.top_level_steps.count() else blk: {
var i: usize = step_names.len;
@ -741,7 +743,7 @@ fn runStepNames(
for (step_names, 0..) |step_name, i| {
const tls = b.top_level_steps.get(step_name).?;
print_node.last = i + 1 == last_index;
printTreeStep(b, &tls.step, run, stderr, ttyconf, &print_node, &step_stack_copy) catch {};
printTreeStep(b, &tls.step, run, &bw, ttyconf, &print_node, &step_stack_copy) catch {};
}
}
}
@ -775,7 +777,7 @@ const PrintNode = struct {
last: bool = false,
};
fn printPrefix(node: *PrintNode, stderr: File, ttyconf: std.io.tty.Config) !void {
fn printPrefix(node: *PrintNode, stderr: *std.io.BufferedWriter, ttyconf: std.io.tty.Config) !void {
const parent = node.parent orelse return;
if (parent.parent == null) return;
try printPrefix(parent, stderr, ttyconf);
@ -789,7 +791,7 @@ fn printPrefix(node: *PrintNode, stderr: File, ttyconf: std.io.tty.Config) !void
}
}
fn printChildNodePrefix(stderr: File, ttyconf: std.io.tty.Config) !void {
fn printChildNodePrefix(stderr: *std.io.BufferedWriter, ttyconf: std.io.tty.Config) !void {
try stderr.writeAll(switch (ttyconf) {
.no_color, .windows_api => "+- ",
.escape_codes => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ", //
@ -798,7 +800,7 @@ fn printChildNodePrefix(stderr: File, ttyconf: std.io.tty.Config) !void {
fn printStepStatus(
s: *Step,
stderr: File,
stderr: *std.io.BufferedWriter,
ttyconf: std.io.tty.Config,
run: *const Run,
) !void {
@ -820,10 +822,10 @@ fn printStepStatus(
try stderr.writeAll(" cached");
} else if (s.test_results.test_count > 0) {
const pass_count = s.test_results.passCount();
try stderr.writer().print(" {d} passed", .{pass_count});
try stderr.print(" {d} passed", .{pass_count});
if (s.test_results.skip_count > 0) {
try ttyconf.setColor(stderr, .yellow);
try stderr.writer().print(" {d} skipped", .{s.test_results.skip_count});
try stderr.print(" {d} skipped", .{s.test_results.skip_count});
}
} else {
try stderr.writeAll(" success");
@ -832,15 +834,15 @@ fn printStepStatus(
if (s.result_duration_ns) |ns| {
try ttyconf.setColor(stderr, .dim);
if (ns >= std.time.ns_per_min) {
try stderr.writer().print(" {d}m", .{ns / std.time.ns_per_min});
try stderr.print(" {d}m", .{ns / std.time.ns_per_min});
} else if (ns >= std.time.ns_per_s) {
try stderr.writer().print(" {d}s", .{ns / std.time.ns_per_s});
try stderr.print(" {d}s", .{ns / std.time.ns_per_s});
} else if (ns >= std.time.ns_per_ms) {
try stderr.writer().print(" {d}ms", .{ns / std.time.ns_per_ms});
try stderr.print(" {d}ms", .{ns / std.time.ns_per_ms});
} else if (ns >= std.time.ns_per_us) {
try stderr.writer().print(" {d}us", .{ns / std.time.ns_per_us});
try stderr.print(" {d}us", .{ns / std.time.ns_per_us});
} else {
try stderr.writer().print(" {d}ns", .{ns});
try stderr.print(" {d}ns", .{ns});
}
try ttyconf.setColor(stderr, .reset);
}
@ -848,13 +850,13 @@ fn printStepStatus(
const rss = s.result_peak_rss;
try ttyconf.setColor(stderr, .dim);
if (rss >= 1000_000_000) {
try stderr.writer().print(" MaxRSS:{d}G", .{rss / 1000_000_000});
try stderr.print(" MaxRSS:{d}G", .{rss / 1000_000_000});
} else if (rss >= 1000_000) {
try stderr.writer().print(" MaxRSS:{d}M", .{rss / 1000_000});
try stderr.print(" MaxRSS:{d}M", .{rss / 1000_000});
} else if (rss >= 1000) {
try stderr.writer().print(" MaxRSS:{d}K", .{rss / 1000});
try stderr.print(" MaxRSS:{d}K", .{rss / 1000});
} else {
try stderr.writer().print(" MaxRSS:{d}B", .{rss});
try stderr.print(" MaxRSS:{d}B", .{rss});
}
try ttyconf.setColor(stderr, .reset);
}
@ -866,7 +868,7 @@ fn printStepStatus(
if (skip == .skipped_oom) {
try stderr.writeAll(" (not enough memory)");
try ttyconf.setColor(stderr, .dim);
try stderr.writer().print(" upper bound of {d} exceeded runner limit ({d})", .{ s.max_rss, run.max_rss });
try stderr.print(" upper bound of {d} exceeded runner limit ({d})", .{ s.max_rss, run.max_rss });
try ttyconf.setColor(stderr, .yellow);
}
try stderr.writeAll("\n");
@ -878,23 +880,23 @@ fn printStepStatus(
fn printStepFailure(
s: *Step,
stderr: File,
stderr: *std.io.BufferedWriter,
ttyconf: std.io.tty.Config,
) !void {
if (s.result_error_bundle.errorMessageCount() > 0) {
try ttyconf.setColor(stderr, .red);
try stderr.writer().print(" {d} errors\n", .{
try stderr.print(" {d} errors\n", .{
s.result_error_bundle.errorMessageCount(),
});
try ttyconf.setColor(stderr, .reset);
} else if (!s.test_results.isSuccess()) {
try stderr.writer().print(" {d}/{d} passed", .{
try stderr.print(" {d}/{d} passed", .{
s.test_results.passCount(), s.test_results.test_count,
});
if (s.test_results.fail_count > 0) {
try stderr.writeAll(", ");
try ttyconf.setColor(stderr, .red);
try stderr.writer().print("{d} failed", .{
try stderr.print("{d} failed", .{
s.test_results.fail_count,
});
try ttyconf.setColor(stderr, .reset);
@ -902,7 +904,7 @@ fn printStepFailure(
if (s.test_results.skip_count > 0) {
try stderr.writeAll(", ");
try ttyconf.setColor(stderr, .yellow);
try stderr.writer().print("{d} skipped", .{
try stderr.print("{d} skipped", .{
s.test_results.skip_count,
});
try ttyconf.setColor(stderr, .reset);
@ -910,7 +912,7 @@ fn printStepFailure(
if (s.test_results.leak_count > 0) {
try stderr.writeAll(", ");
try ttyconf.setColor(stderr, .red);
try stderr.writer().print("{d} leaked", .{
try stderr.print("{d} leaked", .{
s.test_results.leak_count,
});
try ttyconf.setColor(stderr, .reset);
@ -932,7 +934,7 @@ fn printTreeStep(
b: *std.Build,
s: *Step,
run: *const Run,
stderr: File,
stderr: *std.io.BufferedWriter,
ttyconf: std.io.tty.Config,
parent_node: *PrintNode,
step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
@ -992,7 +994,7 @@ fn printTreeStep(
if (s.dependencies.items.len == 0) {
try stderr.writeAll(" (reused)\n");
} else {
try stderr.writer().print(" (+{d} more reused dependencies)\n", .{
try stderr.print(" (+{d} more reused dependencies)\n", .{
s.dependencies.items.len,
});
}
@ -1129,11 +1131,11 @@ fn workerMakeOneStep(
const show_stderr = s.result_stderr.len > 0;
if (show_error_msgs or show_compile_errors or show_stderr) {
std.debug.lockStdErr();
var bw = std.debug.lockStdErr2();
defer std.debug.unlockStdErr();
const gpa = b.allocator;
printErrorMessages(gpa, s, .{ .ttyconf = run.ttyconf }, run.stderr, run.prominent_compile_errors) catch {};
printErrorMessages(gpa, s, .{ .ttyconf = run.ttyconf }, &bw, run.prominent_compile_errors) catch {};
}
handle_result: {
@ -1190,7 +1192,7 @@ pub fn printErrorMessages(
gpa: Allocator,
failing_step: *Step,
options: std.zig.ErrorBundle.RenderOptions,
stderr: File,
stderr: *std.io.BufferedWriter,
prominent_compile_errors: bool,
) !void {
// Provide context for where these error messages are coming from by
@ -1209,7 +1211,7 @@ pub fn printErrorMessages(
var indent: usize = 0;
while (step_stack.pop()) |s| : (indent += 1) {
if (indent > 0) {
try stderr.writer().writeByteNTimes(' ', (indent - 1) * 3);
try stderr.splatByteAll(' ', (indent - 1) * 3);
try printChildNodePrefix(stderr, ttyconf);
}
@ -1231,7 +1233,7 @@ pub fn printErrorMessages(
}
if (!prominent_compile_errors and failing_step.result_error_bundle.errorMessageCount() > 0) {
try failing_step.result_error_bundle.renderToWriter(options, stderr.writer());
try failing_step.result_error_bundle.renderToWriter(options, stderr);
}
for (failing_step.result_error_msgs.items) |msg| {
@ -1243,27 +1245,29 @@ pub fn printErrorMessages(
}
}
fn steps(builder: *std.Build, out_stream: anytype) !void {
fn steps(builder: *std.Build, bw: *std.io.BufferedWriter) !void {
const allocator = builder.allocator;
for (builder.top_level_steps.values()) |top_level_step| {
const name = if (&top_level_step.step == builder.default_step)
try fmt.allocPrint(allocator, "{s} (default)", .{top_level_step.step.name})
else
top_level_step.step.name;
try out_stream.print(" {s:<28} {s}\n", .{ name, top_level_step.description });
try bw.print(" {s:<28} {s}\n", .{ name, top_level_step.description });
}
}
fn usage(b: *std.Build, out_stream: anytype) !void {
try out_stream.print(
var stdout_buffer: [256]u8 = undefined;
fn usage(b: *std.Build, bw: *std.io.BufferedWriter) !void {
try bw.print(
\\Usage: {s} build [steps] [options]
\\
\\Steps:
\\
, .{b.graph.zig_exe});
try steps(b, out_stream);
try steps(b, bw);
try out_stream.writeAll(
try bw.writeAll(
\\
\\General Options:
\\ -p, --prefix [path] Where to install files (default: zig-out)
@ -1319,25 +1323,25 @@ fn usage(b: *std.Build, out_stream: anytype) !void {
const arena = b.allocator;
if (b.available_options_list.items.len == 0) {
try out_stream.print(" (none)\n", .{});
try bw.print(" (none)\n", .{});
} else {
for (b.available_options_list.items) |option| {
const name = try fmt.allocPrint(arena, " -D{s}=[{s}]", .{
option.name,
@tagName(option.type_id),
});
try out_stream.print("{s:<30} {s}\n", .{ name, option.description });
try bw.print("{s:<30} {s}\n", .{ name, option.description });
if (option.enum_options) |enum_options| {
const padding = " " ** 33;
try out_stream.writeAll(padding ++ "Supported Values:\n");
try bw.writeAll(padding ++ "Supported Values:\n");
for (enum_options) |enum_option| {
try out_stream.print(padding ++ " {s}\n", .{enum_option});
try bw.print(padding ++ " {s}\n", .{enum_option});
}
}
}
}
try out_stream.writeAll(
try bw.writeAll(
\\
\\System Integration Options:
\\ --search-prefix [path] Add a path to look for binaries, libraries, headers
@ -1352,7 +1356,7 @@ fn usage(b: *std.Build, out_stream: anytype) !void {
\\
);
if (b.graph.system_library_options.entries.len == 0) {
try out_stream.writeAll(" (none) -\n");
try bw.writeAll(" (none) -\n");
} else {
for (b.graph.system_library_options.keys(), b.graph.system_library_options.values()) |k, v| {
const status = switch (v) {
@ -1360,11 +1364,11 @@ fn usage(b: *std.Build, out_stream: anytype) !void {
.declared_disabled => "no",
.user_enabled, .user_disabled => unreachable, // already emitted error
};
try out_stream.print(" {s:<43} {s}\n", .{ k, status });
try bw.print(" {s:<43} {s}\n", .{ k, status });
}
}
try out_stream.writeAll(
try bw.writeAll(
\\
\\Advanced Options:
\\ -freference-trace[=num] How many lines of reference trace should be shown per compile error

View File

@ -112,7 +112,6 @@ fn rebuildTestsWorkerRun(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog
fn rebuildTestsWorkerRunFallible(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) !void {
const gpa = run.step.owner.allocator;
const stderr = std.io.getStdErr();
const compile = run.producer.?;
const prog_node = parent_prog_node.start(compile.step.name, 0);
@ -125,9 +124,9 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, ttyconf: std.io.tty.Config, par
const show_stderr = compile.step.result_stderr.len > 0;
if (show_error_msgs or show_compile_errors or show_stderr) {
std.debug.lockStdErr();
var bw = std.debug.lockStdErr2();
defer std.debug.unlockStdErr();
build_runner.printErrorMessages(gpa, &compile.step, .{ .ttyconf = ttyconf }, stderr, false) catch {};
build_runner.printErrorMessages(gpa, &compile.step, .{ .ttyconf = ttyconf }, &bw, false) catch {};
}
const rebuilt_bin_path = result catch |err| switch (err) {
@ -152,10 +151,9 @@ fn fuzzWorkerRun(
run.rerunInFuzzMode(web_server, unit_test_index, prog_node) catch |err| switch (err) {
error.MakeFailed => {
const stderr = std.io.getStdErr();
std.debug.lockStdErr();
var bw = std.debug.lockStdErr2();
defer std.debug.unlockStdErr();
build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = ttyconf }, stderr, false) catch {};
build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = ttyconf }, &bw, false) catch {};
return;
},
else => {

View File

@ -1393,7 +1393,8 @@ const MachODumper = struct {
},
macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM => {
name_buf.clearRetainingCapacity();
try reader.readUntilDelimiterArrayList(&name_buf, 0, std.math.maxInt(u32));
if (true) @panic("TODO fix this");
//try reader.readUntilDelimiterArrayList(&name_buf, 0, std.math.maxInt(u32));
try name_buf.append(0);
},
macho.BIND_OPCODE_SET_ADDEND_SLEB => {
@ -2430,10 +2431,11 @@ const WasmDumper = struct {
return error.UnsupportedWasmVersion;
}
var output = std.ArrayList(u8).init(gpa);
var output: std.io.AllocatingWriter = undefined;
const bw = output.init(gpa);
defer output.deinit();
parseAndDumpInner(step, check, bytes, &fbs, &output) catch |err| switch (err) {
error.EndOfStream => try output.appendSlice("\n<UnexpectedEndOfStream>"),
parseAndDumpInner(step, check, bytes, &fbs, bw) catch |err| switch (err) {
error.EndOfStream => try bw.writeAll("\n<UnexpectedEndOfStream>"),
else => |e| return e,
};
return output.toOwnedSlice();
@ -2443,11 +2445,10 @@ const WasmDumper = struct {
step: *Step,
check: Check,
bytes: []const u8,
fbs: *std.io.FixedBufferStream([]const u8),
output: *std.ArrayList(u8),
fbs: *std.io.FixedBufferStream,
bw: *std.io.BufferedWriter,
) !void {
const reader = fbs.reader();
const writer = output.writer();
switch (check.kind) {
.headers => {
@ -2457,7 +2458,7 @@ const WasmDumper = struct {
};
const section_length = try std.leb.readUleb128(u32, reader);
try parseAndDumpSection(step, section, bytes[fbs.pos..][0..section_length], writer);
try parseAndDumpSection(step, section, bytes[fbs.pos..][0..section_length], bw);
fbs.pos += section_length;
} else |_| {} // reached end of stream
},

View File

@ -2040,17 +2040,17 @@ fn checkCompileErrors(compile: *Compile) !void {
.exact => |expect_lines| {
for (expect_lines) |expect_line| {
const actual_line = actual_line_it.next() orelse {
try expected_generated.appendSlice(expect_line);
try expected_generated.append('\n');
try expected_generated.appendSlice(arena, expect_line);
try expected_generated.append(arena, '\n');
continue;
};
if (matchCompileError(actual_line, expect_line)) {
try expected_generated.appendSlice(actual_line);
try expected_generated.append('\n');
try expected_generated.appendSlice(arena, actual_line);
try expected_generated.append(arena, '\n');
continue;
}
try expected_generated.appendSlice(expect_line);
try expected_generated.append('\n');
try expected_generated.appendSlice(arena, expect_line);
try expected_generated.append(arena, '\n');
}
if (mem.eql(u8, expected_generated.items, actual_errors)) return;

View File

@ -599,14 +599,14 @@ fn renderValueNasm(output: *std.ArrayList(u8), name: []const u8, value: Value) !
try output.appendSlice(if (b) " 1\n" else " 0\n");
},
.int => |i| {
try output.writer().print("%define {s} {d}\n", .{ name, i });
try output.print("%define {s} {d}\n", .{ name, i });
},
.ident => |ident| {
try output.writer().print("%define {s} {s}\n", .{ name, ident });
try output.print("%define {s} {s}\n", .{ name, ident });
},
.string => |string| {
// TODO: use nasm-specific escaping instead of zig string literals
try output.writer().print("%define {s} \"{}\"\n", .{ name, std.zig.fmtEscapes(string) });
try output.print("%define {s} \"{}\"\n", .{ name, std.zig.fmtEscapes(string) });
},
}
}
@ -707,7 +707,7 @@ fn expand_variables_cmake(
try result.append(if (b) '1' else '0');
},
.int => |i| {
try result.writer().print("{d}", .{i});
try result.print("{d}", .{i});
},
.ident, .string => |s| {
try result.appendSlice(s);
@ -764,7 +764,7 @@ fn expand_variables_cmake(
try result.append(if (b) '1' else '0');
},
.int => |i| {
try result.writer().print("{d}", .{i});
try result.print("{d}", .{i});
},
.ident, .string => |s| {
try result.appendSlice(s);

View File

@ -445,13 +445,14 @@ test remove_dot_segments {
/// 5.2.3. Merge Paths
fn merge_paths(base: Component, new: []u8, aux_buf: *[]u8) error{NoSpaceLeft}!Component {
var aux = std.io.fixedBufferStream(aux_buf.*);
var aux: std.io.BufferedWriter = undefined;
aux.initFixed(aux_buf.*);
if (!base.isEmpty()) {
try aux.writer().print("{path}", .{base});
aux.print("{fpath}", .{base}) catch |err| return @errorCast(err);
aux.pos = std.mem.lastIndexOfScalar(u8, aux.getWritten(), '/') orelse
return remove_dot_segments(new);
}
try aux.writer().print("/{s}", .{new});
aux.print("/{s}", .{new}) catch |err| return @errorCast(err);
const merged_path = remove_dot_segments(aux.getWritten());
aux_buf.* = aux_buf.*[merged_path.percent_encoded.len..];
return merged_path;

View File

@ -338,6 +338,12 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
@memcpy(self.items[old_len..][0..items.len], items);
}
/// Appends formatted output to the list by delegating to the unmanaged
/// implementation, passing the list's stored allocator.
/// Returns `error.OutOfMemory` if growing the list fails; on failure the
/// list's existing contents are preserved.
pub fn print(self: *Self, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
    var unmanaged = self.moveToUnmanaged();
    // Restore `self` on BOTH success and error paths. Reassigning only
    // after a successful `try` (as before) would leave `self` empty and
    // orphan the moved-out items when printing fails.
    defer self.* = unmanaged.toManaged(self.allocator);
    try unmanaged.print(self.allocator, fmt, args);
}
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
@ -902,7 +908,15 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
var aw: std.io.AllocatingWriter = undefined;
const bw = aw.fromArrayList(gpa, self);
defer self.* = aw.toArrayList();
bw.print(fmt, args) catch return error.OutOfMemory;
return @errorCast(bw.print(fmt, args));
}
/// Formats directly into the list's unused capacity without allocating.
/// Only valid for byte lists (asserts `T == u8` at comptime).
/// Asserts, via `catch unreachable`, that the formatted output fits in
/// `unusedCapacitySlice()` — exceeding capacity is a caller bug.
pub fn printAssumeCapacity(self: *Self, comptime fmt: []const u8, args: anytype) void {
    comptime assert(T == u8);
    // Fixed-buffer writer over the spare capacity (new std.io.BufferedWriter
    // API this commit migrates to).
    var bw: std.io.BufferedWriter = undefined;
    bw.initFixed(self.unusedCapacitySlice());
    bw.print(fmt, args) catch unreachable;
    // `bw.end` is taken here as the number of bytes written into the fixed
    // buffer — NOTE(review): confirm against the new BufferedWriter API.
    self.items.len += bw.end;
}
/// Append a value to the list `n` times.

View File

@ -421,9 +421,9 @@ pub const Request = struct {
try request.server.connection.stream.writeAll(h.items);
return;
}
h.fixedWriter().print("{s} {d} {s}\r\n", .{
h.printAssumeCapacity("{s} {d} {s}\r\n", .{
@tagName(options.version), @intFromEnum(options.status), phrase,
}) catch unreachable;
});
switch (options.version) {
.@"HTTP/1.0" => if (keep_alive) h.appendSliceAssumeCapacity("connection: keep-alive\r\n"),
@ -434,7 +434,7 @@ pub const Request = struct {
.none => {},
.chunked => h.appendSliceAssumeCapacity("transfer-encoding: chunked\r\n"),
} else {
h.fixedWriter().print("content-length: {d}\r\n", .{content.len}) catch unreachable;
h.printAssumeCapacity("content-length: {d}\r\n", .{content.len});
}
var chunk_header_buffer: [18]u8 = undefined;
@ -573,9 +573,9 @@ pub const Request = struct {
h.appendSliceAssumeCapacity("content-length: 0\r\n\r\n");
break :eb true;
} else eb: {
h.fixedWriter().print("{s} {d} {s}\r\n", .{
h.printAssumeCapacity("{s} {d} {s}\r\n", .{
@tagName(o.version), @intFromEnum(o.status), phrase,
}) catch unreachable;
});
switch (o.version) {
.@"HTTP/1.0" => if (keep_alive) h.appendSliceAssumeCapacity("connection: keep-alive\r\n"),
@ -586,7 +586,7 @@ pub const Request = struct {
.chunked => h.appendSliceAssumeCapacity("transfer-encoding: chunked\r\n"),
.none => {},
} else if (options.content_length) |len| {
h.fixedWriter().print("content-length: {d}\r\n", .{len}) catch unreachable;
h.printAssumeCapacity("content-length: {d}\r\n", .{len});
} else {
h.appendSliceAssumeCapacity("transfer-encoding: chunked\r\n");
}
@ -889,12 +889,34 @@ pub const Response = struct {
/// when the end of stream occurs by calling `end`.
pub fn write(r: *Response, bytes: []const u8) WriteError!usize {
switch (r.transfer_encoding) {
.content_length, .none => return write_cl(r, bytes),
.chunked => return write_chunked(r, bytes),
.content_length, .none => return @errorCast(cl_writeSplat(r, &.{bytes}, 1)),
.chunked => return @errorCast(chunked_writeSplat(r, &.{bytes}, 1)),
}
}
fn write_cl(context: *const anyopaque, bytes: []const u8) WriteError!usize {
/// `writeSplat` vtable adapter for the content-length/none transfer
/// encodings. Currently ignores the `splat` repetition count and forwards
/// only the first slice to `cl_write`; see the TODO below.
/// Returns the number of bytes consumed from `data[0]`.
fn cl_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
    _ = splat;
    return cl_write(context, data[0]); // TODO: try to send all the data
}
/// `writeFile` vtable adapter for the content-length/none transfer
/// encodings. Sending a file through this writer is not implemented yet:
/// every argument is discarded and `error.Unimplemented` is returned.
fn cl_writeFile(
    context: *anyopaque,
    file: std.fs.File,
    offset: u64,
    len: std.io.Writer.VTable.FileLen,
    headers_and_trailers: []const []const u8,
    headers_len: usize,
) anyerror!usize {
    // Discard all parameters in one statement; this stub exists only to
    // satisfy the std.io.Writer vtable shape.
    _ = .{ context, file, offset, len, headers_and_trailers, headers_len };
    return error.Unimplemented;
}
fn cl_write(context: *anyopaque, bytes: []const u8) WriteError!usize {
const r: *Response = @constCast(@alignCast(@ptrCast(context)));
var trash: u64 = std.math.maxInt(u64);
@ -944,7 +966,29 @@ pub const Response = struct {
return bytes.len;
}
fn write_chunked(context: *const anyopaque, bytes: []const u8) WriteError!usize {
/// `writeSplat` vtable adapter for the chunked transfer encoding.
/// Currently ignores the `splat` repetition count and forwards only the
/// first slice to `chunked_write`; see the TODO below.
/// Returns the number of bytes consumed from `data[0]`.
fn chunked_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
    _ = splat;
    return chunked_write(context, data[0]); // TODO: try to send all the data
}
/// `writeFile` vtable adapter for the chunked transfer encoding.
/// Sending a file through this writer is not implemented yet: every
/// argument is discarded and `error.Unimplemented` is returned.
fn chunked_writeFile(
    context: *anyopaque,
    file: std.fs.File,
    offset: u64,
    len: std.io.Writer.VTable.FileLen,
    headers_and_trailers: []const []const u8,
    headers_len: usize,
) anyerror!usize {
    // Discard all parameters in one statement; this stub exists only to
    // satisfy the std.io.Writer vtable shape.
    _ = .{ context, file, offset, len, headers_and_trailers, headers_len };
    return error.Unimplemented;
}
fn chunked_write(context: *anyopaque, bytes: []const u8) WriteError!usize {
const r: *Response = @constCast(@alignCast(@ptrCast(context)));
assert(r.transfer_encoding == .chunked);
@ -1115,11 +1159,17 @@ pub const Response = struct {
r.chunk_len = 0;
}
pub fn writer(r: *Response) std.io.AnyWriter {
pub fn writer(r: *Response) std.io.Writer {
return .{
.writeFn = switch (r.transfer_encoding) {
.none, .content_length => write_cl,
.chunked => write_chunked,
.vtable = switch (r.transfer_encoding) {
.none, .content_length => &.{
.writeSplat = cl_writeSplat,
.writeFile = cl_writeFile,
},
.chunked => &.{
.writeSplat = chunked_writeSplat,
.writeFile = chunked_writeFile,
},
},
.context = r,
};

View File

@ -123,19 +123,6 @@ pub fn GenericReader(
return @errorCast(self.any().readAllAlloc(allocator, max_size));
}
pub inline fn readUntilDelimiterArrayList(
self: Self,
array_list: *std.ArrayList(u8),
delimiter: u8,
max_size: usize,
) (NoEofError || Allocator.Error || error{StreamTooLong})!void {
return @errorCast(self.any().readUntilDelimiterArrayList(
array_list,
delimiter,
max_size,
));
}
pub inline fn readUntilDelimiterAlloc(
self: Self,
allocator: Allocator,

View File

@ -17,6 +17,8 @@ const assert = std.debug.assert;
/// returning a slice that includes both.
written: []u8,
allocator: std.mem.Allocator,
/// When using this API, it is not necessary to call
/// `std.io.BufferedWriter.flush`.
buffered_writer: std.io.BufferedWriter,
const vtable: std.io.Writer.VTable = .{

View File

@ -33,7 +33,7 @@ pub fn writer(bw: *BufferedWriter) Writer {
return .{
.context = bw,
.vtable = &.{
.write = passthru_writeSplat,
.writeSplat = passthru_writeSplat,
.writeFile = passthru_writeFile,
},
};
@ -1003,7 +1003,7 @@ pub fn printFloat(
'x' => {
var sub_bw: BufferedWriter = undefined;
sub_bw.initFixed(&buf);
sub_bw.printFloatHexadecimal(value, options) catch unreachable;
sub_bw.printFloatHexadecimal(value, options.precision) catch unreachable;
return alignBufferOptions(bw, sub_bw.getWritten(), options);
},
else => invalidFmtError(fmt, value),
@ -1103,7 +1103,7 @@ pub fn printFloatHexadecimal(bw: *BufferedWriter, value: anytype, opt_precision:
// Add trailing zeros if explicitly requested.
if (opt_precision) |precision| if (precision > 0) {
if (precision > trimmed.len)
try bw.writeByteNTimes('0', precision - trimmed.len);
try bw.splatByteAll('0', precision - trimmed.len);
};
try bw.writeAll("p");
try printIntOptions(bw, exponent - exponent_bias, 10, .lower, .{});

View File

@ -93,108 +93,14 @@ pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) anyer
return try array_list.toOwnedSlice();
}
/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
/// Replaces the `std.ArrayList` contents by reading from the stream until `delimiter` is found.
/// Does not include the delimiter in the result.
/// If the `std.ArrayList` length would exceed `max_size`, `error.StreamTooLong` is returned and the
/// `std.ArrayList` is populated with `max_size` bytes from the stream.
pub fn readUntilDelimiterArrayList(
self: Self,
array_list: *std.ArrayList(u8),
delimiter: u8,
max_size: usize,
) anyerror!void {
array_list.shrinkRetainingCapacity(0);
try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
}
/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
/// Allocates enough memory to read until `delimiter`. If the allocated
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterAlloc(
self: Self,
allocator: mem.Allocator,
delimiter: u8,
max_size: usize,
) anyerror![]u8 {
var array_list = std.ArrayList(u8).init(allocator);
defer array_list.deinit();
try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
return try array_list.toOwnedSlice();
}
/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
/// Reads from the stream until specified byte is found. If the buffer is not
/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
/// If end-of-stream is found, `error.EndOfStream` is returned.
/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
/// delimiter byte is written to the output buffer but is not included
/// in the returned slice.
pub fn readUntilDelimiter(self: Self, buf: []u8, delimiter: u8) anyerror![]u8 {
var fbs = std.io.fixedBufferStream(buf);
try self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len);
const output = fbs.getWritten();
buf[output.len] = delimiter; // emulating old behaviour
return output;
}
/// Deprecated: use `streamUntilDelimiter` with ArrayList's (or any other's) writer instead.
/// Allocates enough memory to read until `delimiter` or end-of-stream.
/// If the allocated memory would be greater than `max_size`, returns
/// `error.StreamTooLong`. If end-of-stream is found, returns the rest
/// of the stream. If this function is called again after that, returns
/// null.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterOrEofAlloc(
self: Self,
allocator: mem.Allocator,
delimiter: u8,
max_size: usize,
) anyerror!?[]u8 {
var array_list = std.ArrayList(u8).init(allocator);
defer array_list.deinit();
self.streamUntilDelimiter(array_list.writer(), delimiter, max_size) catch |err| switch (err) {
error.EndOfStream => if (array_list.items.len == 0) {
return null;
},
else => |e| return e,
};
return try array_list.toOwnedSlice();
}
/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
/// Reads from the stream until specified byte is found. If the buffer is not
/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
/// If end-of-stream is found, returns the rest of the stream. If this
/// function is called again after that, returns null.
/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
/// delimiter byte is written to the output buffer but is not included
/// in the returned slice.
pub fn readUntilDelimiterOrEof(self: Self, buf: []u8, delimiter: u8) anyerror!?[]u8 {
var fbs = std.io.fixedBufferStream(buf);
self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len) catch |err| switch (err) {
error.EndOfStream => if (fbs.getWritten().len == 0) {
return null;
},
else => |e| return e,
};
const output = fbs.getWritten();
buf[output.len] = delimiter; // emulating old behaviour
return output;
}
/// Appends to the `writer` contents by reading from the stream until `delimiter` is found.
/// Appends to `bw` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
/// If `optional_max_size` is not null and amount of written bytes exceeds `optional_max_size`,
/// returns `error.StreamTooLong` and finishes appending.
/// If `optional_max_size` is null, appending is unbounded.
pub fn streamUntilDelimiter(
self: Self,
writer: anytype,
bw: *std.io.BufferedWriter,
delimiter: u8,
optional_max_size: ?usize,
) anyerror!void {
@ -202,14 +108,14 @@ pub fn streamUntilDelimiter(
for (0..max_size) |_| {
const byte: u8 = try self.readByte();
if (byte == delimiter) return;
try writer.writeByte(byte);
try bw.writeByte(byte);
}
return error.StreamTooLong;
} else {
while (true) {
const byte: u8 = try self.readByte();
if (byte == delimiter) return;
try writer.writeByte(byte);
try bw.writeByte(byte);
}
// Cannot return `error.StreamTooLong` since there is no boundary.
}

View File

@ -2,187 +2,176 @@ const std = @import("std");
const assert = std.debug.assert;
const testing = std.testing;
/// Creates tar Writer which will write tar content to the `underlying_writer`.
/// Use setRoot to nest all following entries under a single root. If a file
/// doesn't fit into the POSIX header (name+prefix: 100+155 bytes), a GNU
/// extended header will be used for long names. Options enable setting the
/// file permission mode and mtime. The default is to use the current time
/// for mtime and 0o664 for the file mode.
pub fn writer(underlying_writer: anytype) Writer(@TypeOf(underlying_writer)) {
return .{ .underlying_writer = underlying_writer };
}
pub const Writer = struct {
const block_size = @sizeOf(Header);
const empty_block: [block_size]u8 = [_]u8{0} ** block_size;
pub fn Writer(comptime WriterType: type) type {
return struct {
const block_size = @sizeOf(Header);
const empty_block: [block_size]u8 = [_]u8{0} ** block_size;
/// Options for writing file/dir/link. If left empty 0o664 is used for
/// file mode and current time for mtime.
pub const Options = struct {
/// File system permission mode.
mode: u32 = 0,
/// File system modification time.
mtime: u64 = 0,
};
const Self = @This();
/// Options for writing file/dir/link. If left empty 0o664 is used for
/// file mode and current time for mtime.
pub const Options = struct {
/// File system permission mode.
mode: u32 = 0,
/// File system modification time.
mtime: u64 = 0,
};
const Self = @This();
underlying_writer: *std.io.BufferedWriter,
prefix: []const u8 = "",
mtime_now: u64 = 0,
underlying_writer: WriterType,
prefix: []const u8 = "",
mtime_now: u64 = 0,
/// Sets prefix for all other write* method paths.
pub fn setRoot(self: *Self, root: []const u8) !void {
if (root.len > 0)
try self.writeDir(root, .{});
/// Sets prefix for all other write* method paths.
pub fn setRoot(self: *Self, root: []const u8) !void {
if (root.len > 0)
try self.writeDir(root, .{});
self.prefix = root;
}
self.prefix = root;
/// Writes directory.
pub fn writeDir(self: *Self, sub_path: []const u8, opt: Options) !void {
try self.writeHeader(.directory, sub_path, "", 0, opt);
}
/// Writes file system file.
pub fn writeFile(self: *Self, sub_path: []const u8, file: std.fs.File) !void {
const stat = try file.stat();
const mtime: u64 = @intCast(@divFloor(stat.mtime, std.time.ns_per_s));
var header = Header{};
try self.setPath(&header, sub_path);
try header.setSize(stat.size);
try header.setMtime(mtime);
try header.write(self.underlying_writer);
try self.underlying_writer.writeFileAll(file, .{ .len = .init(stat.size) });
try self.writePadding(stat.size);
}
/// Writes file reading file content from `reader`. Number of bytes in
/// reader must be equal to `size`.
pub fn writeFileStream(self: *Self, sub_path: []const u8, size: usize, reader: anytype, opt: Options) !void {
try self.writeHeader(.regular, sub_path, "", @intCast(size), opt);
var counting_reader = std.io.countingReader(reader);
var fifo = std.fifo.LinearFifo(u8, .{ .Static = 4096 }).init();
try fifo.pump(counting_reader.reader(), self.underlying_writer);
if (counting_reader.bytes_read != size) return error.WrongReaderSize;
try self.writePadding(size);
}
/// Writes file using bytes buffer `content` for size and file content.
pub fn writeFileBytes(self: *Self, sub_path: []const u8, content: []const u8, opt: Options) !void {
try self.writeHeader(.regular, sub_path, "", @intCast(content.len), opt);
try self.underlying_writer.writeAll(content);
try self.writePadding(content.len);
}
/// Writes symlink.
pub fn writeLink(self: *Self, sub_path: []const u8, link_name: []const u8, opt: Options) !void {
try self.writeHeader(.symbolic_link, sub_path, link_name, 0, opt);
}
/// Writes fs.Dir.WalkerEntry. Uses `mtime` from file system entry and
/// default for entry mode .
pub fn writeEntry(self: *Self, entry: std.fs.Dir.Walker.Entry) !void {
switch (entry.kind) {
.directory => {
try self.writeDir(entry.path, .{ .mtime = try entryMtime(entry) });
},
.file => {
var file = try entry.dir.openFile(entry.basename, .{});
defer file.close();
try self.writeFile(entry.path, file);
},
.sym_link => {
var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
const link_name = try entry.dir.readLink(entry.basename, &link_name_buffer);
try self.writeLink(entry.path, link_name, .{ .mtime = try entryMtime(entry) });
},
else => {
return error.UnsupportedWalkerEntryKind;
},
}
}
/// Writes directory.
pub fn writeDir(self: *Self, sub_path: []const u8, opt: Options) !void {
try self.writeHeader(.directory, sub_path, "", 0, opt);
}
/// Writes file system file.
pub fn writeFile(self: *Self, sub_path: []const u8, file: std.fs.File) !void {
const stat = try file.stat();
const mtime: u64 = @intCast(@divFloor(stat.mtime, std.time.ns_per_s));
var header = Header{};
try self.setPath(&header, sub_path);
try header.setSize(stat.size);
try header.setMtime(mtime);
try header.write(self.underlying_writer);
try self.underlying_writer.writeFile(file);
try self.writePadding(stat.size);
}
/// Writes file reading file content from `reader`. Number of bytes in
/// reader must be equal to `size`.
pub fn writeFileStream(self: *Self, sub_path: []const u8, size: usize, reader: anytype, opt: Options) !void {
try self.writeHeader(.regular, sub_path, "", @intCast(size), opt);
var counting_reader = std.io.countingReader(reader);
var fifo = std.fifo.LinearFifo(u8, .{ .Static = 4096 }).init();
try fifo.pump(counting_reader.reader(), self.underlying_writer);
if (counting_reader.bytes_read != size) return error.WrongReaderSize;
try self.writePadding(size);
}
/// Writes file using bytes buffer `content` for size and file content.
pub fn writeFileBytes(self: *Self, sub_path: []const u8, content: []const u8, opt: Options) !void {
try self.writeHeader(.regular, sub_path, "", @intCast(content.len), opt);
try self.underlying_writer.writeAll(content);
try self.writePadding(content.len);
}
/// Writes symlink.
pub fn writeLink(self: *Self, sub_path: []const u8, link_name: []const u8, opt: Options) !void {
try self.writeHeader(.symbolic_link, sub_path, link_name, 0, opt);
}
/// Writes fs.Dir.WalkerEntry. Uses `mtime` from file system entry and
/// default for entry mode .
pub fn writeEntry(self: *Self, entry: std.fs.Dir.Walker.Entry) !void {
switch (entry.kind) {
.directory => {
try self.writeDir(entry.path, .{ .mtime = try entryMtime(entry) });
},
.file => {
var file = try entry.dir.openFile(entry.basename, .{});
defer file.close();
try self.writeFile(entry.path, file);
},
.sym_link => {
var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
const link_name = try entry.dir.readLink(entry.basename, &link_name_buffer);
try self.writeLink(entry.path, link_name, .{ .mtime = try entryMtime(entry) });
},
else => {
return error.UnsupportedWalkerEntryKind;
},
}
}
fn writeHeader(
self: *Self,
typeflag: Header.FileType,
sub_path: []const u8,
link_name: []const u8,
size: u64,
opt: Options,
) !void {
var header = Header.init(typeflag);
try self.setPath(&header, sub_path);
try header.setSize(size);
try header.setMtime(if (opt.mtime != 0) opt.mtime else self.mtimeNow());
if (opt.mode != 0)
try header.setMode(opt.mode);
if (typeflag == .symbolic_link)
header.setLinkname(link_name) catch |err| switch (err) {
error.NameTooLong => try self.writeExtendedHeader(.gnu_long_link, &.{link_name}),
else => return err,
};
try header.write(self.underlying_writer);
}
fn mtimeNow(self: *Self) u64 {
if (self.mtime_now == 0)
self.mtime_now = @intCast(std.time.timestamp());
return self.mtime_now;
}
fn entryMtime(entry: std.fs.Dir.Walker.Entry) !u64 {
const stat = try entry.dir.statFile(entry.basename);
return @intCast(@divFloor(stat.mtime, std.time.ns_per_s));
}
/// Writes path in posix header, if don't fit (in name+prefix; 100+155
/// bytes) writes it in gnu extended header.
fn setPath(self: *Self, header: *Header, sub_path: []const u8) !void {
header.setPath(self.prefix, sub_path) catch |err| switch (err) {
error.NameTooLong => {
// write extended header
const buffers: []const []const u8 = if (self.prefix.len == 0)
&.{sub_path}
else
&.{ self.prefix, "/", sub_path };
try self.writeExtendedHeader(.gnu_long_name, buffers);
},
fn writeHeader(
self: *Self,
typeflag: Header.FileType,
sub_path: []const u8,
link_name: []const u8,
size: u64,
opt: Options,
) !void {
var header = Header.init(typeflag);
try self.setPath(&header, sub_path);
try header.setSize(size);
try header.setMtime(if (opt.mtime != 0) opt.mtime else self.mtimeNow());
if (opt.mode != 0)
try header.setMode(opt.mode);
if (typeflag == .symbolic_link)
header.setLinkname(link_name) catch |err| switch (err) {
error.NameTooLong => try self.writeExtendedHeader(.gnu_long_link, &.{link_name}),
else => return err,
};
}
try header.write(self.underlying_writer);
}
/// Writes gnu extended header: gnu_long_name or gnu_long_link.
fn writeExtendedHeader(self: *Self, typeflag: Header.FileType, buffers: []const []const u8) !void {
var len: usize = 0;
for (buffers) |buf|
len += buf.len;
fn mtimeNow(self: *Self) u64 {
if (self.mtime_now == 0)
self.mtime_now = @intCast(std.time.timestamp());
return self.mtime_now;
}
var header = Header.init(typeflag);
try header.setSize(len);
try header.write(self.underlying_writer);
for (buffers) |buf|
try self.underlying_writer.writeAll(buf);
try self.writePadding(len);
}
fn entryMtime(entry: std.fs.Dir.Walker.Entry) !u64 {
const stat = try entry.dir.statFile(entry.basename);
return @intCast(@divFloor(stat.mtime, std.time.ns_per_s));
}
fn writePadding(self: *Self, bytes: u64) !void {
const pos: usize = @intCast(bytes % block_size);
if (pos == 0) return;
try self.underlying_writer.writeAll(empty_block[pos..]);
}
/// Writes path in posix header, if don't fit (in name+prefix; 100+155
/// bytes) writes it in gnu extended header.
fn setPath(self: *Self, header: *Header, sub_path: []const u8) !void {
header.setPath(self.prefix, sub_path) catch |err| switch (err) {
error.NameTooLong => {
// write extended header
const buffers: []const []const u8 = if (self.prefix.len == 0)
&.{sub_path}
else
&.{ self.prefix, "/", sub_path };
try self.writeExtendedHeader(.gnu_long_name, buffers);
},
else => return err,
};
}
/// Tar should finish with two zero blocks, but 'reasonable system must
/// not assume that such a block exists when reading an archive' (from
/// reference). In practice it is safe to skip this finish.
pub fn finish(self: *Self) !void {
try self.underlying_writer.writeAll(&empty_block);
try self.underlying_writer.writeAll(&empty_block);
}
};
}
/// Writes gnu extended header: gnu_long_name or gnu_long_link.
fn writeExtendedHeader(self: *Self, typeflag: Header.FileType, buffers: []const []const u8) !void {
var len: usize = 0;
for (buffers) |buf|
len += buf.len;
var header = Header.init(typeflag);
try header.setSize(len);
try header.write(self.underlying_writer);
for (buffers) |buf|
try self.underlying_writer.writeAll(buf);
try self.writePadding(len);
}
fn writePadding(self: *Self, bytes: u64) !void {
const pos: usize = @intCast(bytes % block_size);
if (pos == 0) return;
try self.underlying_writer.writeAll(empty_block[pos..]);
}
/// Tar should finish with two zero blocks, but 'reasonable system must
/// not assume that such a block exists when reading an archive' (from
/// reference). In practice it is safe to skip this finish.
pub fn finish(self: *Self) !void {
try self.underlying_writer.writeAll(&empty_block);
try self.underlying_writer.writeAll(&empty_block);
}
};
/// A struct that is exactly 512 bytes and matches tar file format. This is
/// intended to be used for outputting tar files; for parsing there is
@ -431,14 +420,14 @@ test "write files" {
{
const root = "root";
var output = std.ArrayList(u8).init(testing.allocator);
var output: std.io.AllocatingWriter = undefined;
var wrt: Writer = .{ .underlying_writer = output.init(testing.allocator) };
defer output.deinit();
var wrt = writer(output.writer());
try wrt.setRoot(root);
for (files) |file|
try wrt.writeFileBytes(file.path, file.content, .{});
var input = std.io.fixedBufferStream(output.items);
var input: std.io.FixedBufferStream = .{ .buffer = output.getWritten() };
var iter = std.tar.iterator(
input.reader(),
.{ .file_name_buffer = &file_name_buffer, .link_name_buffer = &link_name_buffer },
@ -467,15 +456,15 @@ test "write files" {
}
// without root
{
var output = std.ArrayList(u8).init(testing.allocator);
var output: std.io.AllocatingWriter = undefined;
var wrt: Writer = .{ .underlying_writer = output.init(testing.allocator) };
defer output.deinit();
var wrt = writer(output.writer());
for (files) |file| {
var content = std.io.fixedBufferStream(file.content);
try wrt.writeFileStream(file.path, file.content.len, content.reader(), .{});
}
var input = std.io.fixedBufferStream(output.items);
var input: std.io.FixedBufferStream = .{ .buffer = output.getWritten() };
var iter = std.tar.iterator(
input.reader(),
.{ .file_name_buffer = &file_name_buffer, .link_name_buffer = &link_name_buffer },

View File

@ -635,7 +635,7 @@ pub fn parseTargetQueryOrReportFatalError(
var help_text = std.ArrayList(u8).init(allocator);
defer help_text.deinit();
for (diags.arch.?.allCpuModels()) |cpu| {
help_text.writer().print(" {s}\n", .{cpu.name}) catch break :help;
help_text.print(" {s}\n", .{cpu.name}) catch break :help;
}
std.log.info("available CPUs for architecture '{s}':\n{s}", .{
@tagName(diags.arch.?), help_text.items,
@ -648,7 +648,7 @@ pub fn parseTargetQueryOrReportFatalError(
var help_text = std.ArrayList(u8).init(allocator);
defer help_text.deinit();
for (diags.arch.?.allFeaturesList()) |feature| {
help_text.writer().print(" {s}: {s}\n", .{ feature.name, feature.description }) catch break :help;
help_text.print(" {s}: {s}\n", .{ feature.name, feature.description }) catch break :help;
}
std.log.info("available CPU features for architecture '{s}':\n{s}", .{
@tagName(diags.arch.?), help_text.items,
@ -661,7 +661,7 @@ pub fn parseTargetQueryOrReportFatalError(
var help_text = std.ArrayList(u8).init(allocator);
defer help_text.deinit();
inline for (@typeInfo(std.Target.ObjectFormat).@"enum".fields) |field| {
help_text.writer().print(" {s}\n", .{field.name}) catch break :help;
help_text.print(" {s}\n", .{field.name}) catch break :help;
}
std.log.info("available object formats:\n{s}", .{help_text.items});
}
@ -672,7 +672,7 @@ pub fn parseTargetQueryOrReportFatalError(
var help_text = std.ArrayList(u8).init(allocator);
defer help_text.deinit();
inline for (@typeInfo(std.Target.Cpu.Arch).@"enum".fields) |field| {
help_text.writer().print(" {s}\n", .{field.name}) catch break :help;
help_text.print(" {s}\n", .{field.name}) catch break :help;
}
std.log.info("available architectures:\n{s} native\n", .{help_text.items});
}

View File

@ -194,11 +194,11 @@ fn renderErrorMessageToWriter(
) anyerror!void {
const ttyconf = options.ttyconf;
var counting_writer: std.io.CountingWriter = .{ .child_writer = bw.writer() };
const counting_bw = counting_writer.unbufferedWriter();
var counting_bw = counting_writer.unbufferedWriter();
const err_msg = eb.getErrorMessage(err_msg_index);
if (err_msg.src_loc != .none) {
const src = eb.extraData(SourceLocation, @intFromEnum(err_msg.src_loc));
try counting_bw.writeByteNTimes(' ', indent);
try counting_bw.splatByteAll(' ', indent);
try ttyconf.setColor(bw, .bold);
try counting_bw.print("{s}:{d}:{d}: ", .{
eb.nullTerminatedString(src.data.src_path),
@ -210,7 +210,7 @@ fn renderErrorMessageToWriter(
try counting_bw.writeAll(": ");
// This is the length of the part before the error message:
// e.g. "file.zig:4:5: error: "
const prefix_len: usize = @intCast(counting_bw.context.bytes_written);
const prefix_len: usize = @intCast(counting_writer.bytes_written);
try ttyconf.setColor(bw, .reset);
try ttyconf.setColor(bw, .bold);
if (err_msg.count == 1) {
@ -233,11 +233,11 @@ fn renderErrorMessageToWriter(
const before_caret = src.data.span_main - src.data.span_start;
// -1 since span.main includes the caret
const after_caret = src.data.span_end -| src.data.span_main -| 1;
try bw.writeByteNTimes(' ', src.data.column - before_caret);
try bw.splatByteAll(' ', src.data.column - before_caret);
try ttyconf.setColor(bw, .green);
try bw.writeByteNTimes('~', before_caret);
try bw.splatByteAll('~', before_caret);
try bw.writeByte('^');
try bw.writeByteNTimes('~', after_caret);
try bw.splatByteAll('~', after_caret);
try bw.writeByte('\n');
try ttyconf.setColor(bw, .reset);
}
@ -277,7 +277,7 @@ fn renderErrorMessageToWriter(
}
} else {
try ttyconf.setColor(bw, color);
try bw.writeByteNTimes(' ', indent);
try bw.splatByteAll(' ', indent);
try bw.writeAll(kind);
try bw.writeAll(": ");
try ttyconf.setColor(bw, .reset);
@ -306,7 +306,7 @@ fn writeMsg(eb: ErrorBundle, err_msg: ErrorMessage, bw: *std.io.BufferedWriter,
try bw.writeAll(line);
if (lines.index == null) break;
try bw.writeByte('\n');
try bw.writeByteNTimes(' ', indent);
try bw.splatByteAll(' ', indent);
}
}

View File

@ -370,7 +370,7 @@ fn findNativeIncludeDirWindows(
for (installs) |install| {
result_buf.shrinkAndFree(0);
try result_buf.writer().print("{s}\\Include\\{s}\\ucrt", .{ install.path, install.version });
try result_buf.print("{s}\\Include\\{s}\\ucrt", .{ install.path, install.version });
var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) {
error.FileNotFound,
@ -417,7 +417,7 @@ fn findNativeCrtDirWindows(
for (installs) |install| {
result_buf.shrinkAndFree(0);
try result_buf.writer().print("{s}\\Lib\\{s}\\ucrt\\{s}", .{ install.path, install.version, arch_sub_dir });
try result_buf.print("{s}\\Lib\\{s}\\ucrt\\{s}", .{ install.path, install.version, arch_sub_dir });
var dir = fs.cwd().openDir(result_buf.items, .{}) catch |err| switch (err) {
error.FileNotFound,