build_runner: port to new std.io.BufferedWriter API

Jacob Young, 2025-04-14 10:07:16 -04:00 (committed by Andrew Kelley)
parent 1164d5ece5
commit a21e7ab64f
46 changed files with 820 additions and 847 deletions
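
The heart of the port: instead of hand-assembling a std.io.BufferedWriter over std.io.getStdOut().writer(), the build runner now asks the file handle for a buffered writer, with the caller supplying the buffer storage. A minimal sketch of the new pattern as it appears in the hunks below (the final flush call is an assumption; this commit only shows construction and print):

    const std = @import("std");

    var stdio_buffer: [256]u8 = undefined;

    fn greet() !void {
        // New API: obtain a writer from the File, buffered through caller-owned storage.
        var stdout_bw = std.fs.File.stdout().writer().buffered(&stdio_buffer);
        try stdout_bw.print("hello {s}\n", .{"build runner"});
        try stdout_bw.flush(); // assumed to be required; not shown in this diff
    }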

========================================

@@ -279,7 +279,7 @@ pub fn build(b: *std.Build) !void {
         const ancestor_ver = try std.SemanticVersion.parse(tagged_ancestor);
         if (zig_version.order(ancestor_ver) != .gt) {
-            std.debug.print("Zig version '{}' must be greater than tagged ancestor '{}'\n", .{ zig_version, ancestor_ver });
+            std.debug.print("Zig version '{f}' must be greater than tagged ancestor '{f}'\n", .{ zig_version, ancestor_ver });
             std.process.exit(1);
         }
@@ -304,7 +304,7 @@ pub fn build(b: *std.Build) !void {
     if (enable_llvm) {
         const cmake_cfg = if (static_llvm) null else blk: {
             if (findConfigH(b, config_h_path_option)) |config_h_path| {
-                const file_contents = fs.cwd().readFileAlloc(b.allocator, config_h_path, max_config_h_bytes) catch unreachable;
+                const file_contents = fs.cwd().readFileAlloc(config_h_path, b.allocator, .limited(max_config_h_bytes)) catch unreachable;
                 break :blk parseConfigH(b, file_contents);
             } else {
                 std.log.warn("config.h could not be located automatically. Consider providing it explicitly via \"-Dconfig_h\"", .{});
@@ -912,7 +912,7 @@ fn addCxxKnownPath(
         return error.RequiredLibraryNotFound;
     const path_padded = run: {
-        var args = std.ArrayList([]const u8).init(b.allocator);
+        var args: std.ArrayList([]const u8) = .init(b.allocator);
         try args.append(ctx.cxx_compiler);
         var it = std.mem.tokenizeAny(u8, ctx.cxx_compiler_arg1, &std.ascii.whitespace);
         while (it.next()) |arg| try args.append(arg);
@@ -1418,7 +1418,7 @@ fn generateLangRef(b: *std.Build) std.Build.LazyPath {
     });
     var dir = b.build_root.handle.openDir("doc/langref", .{ .iterate = true }) catch |err| {
-        std.debug.panic("unable to open '{}doc/langref' directory: {s}", .{
+        std.debug.panic("unable to open '{f}doc/langref' directory: {s}", .{
             b.build_root, @errorName(err),
         });
     };
@@ -1439,7 +1439,7 @@ fn generateLangRef(b: *std.Build) std.Build.LazyPath {
             // in a temporary directory
             "--cache-root", b.cache_root.path orelse ".",
         });
-        cmd.addArgs(&.{ "--zig-lib-dir", b.fmt("{}", .{b.graph.zig_lib_directory}) });
+        cmd.addArgs(&.{ "--zig-lib-dir", b.fmt("{f}", .{b.graph.zig_lib_directory}) });
         cmd.addArgs(&.{"-i"});
        cmd.addFileArg(b.path(b.fmt("doc/langref/{s}", .{entry.name})));
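
Note the new readFileAlloc argument order in the second hunk above: sub-path first, then allocator, then a size cap wrapped in .limited(...). A hedged sketch under those assumptions (the type that .limited constructs is not shown in this diff; the file name here is illustrative):

    const std = @import("std");

    fn readConfigH(gpa: std.mem.Allocator) ![]u8 {
        // Old order: readFileAlloc(gpa, "config.h", max_bytes)
        // New order, with the byte cap wrapped in .limited(...):
        return std.fs.cwd().readFileAlloc("config.h", gpa, .limited(16 * 1024));
    }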

========================================

@@ -330,7 +330,7 @@ pub fn main() !void {
         }
     }
-    const stderr = std.io.getStdErr();
+    const stderr: std.fs.File = .stderr();
     const ttyconf = get_tty_conf(color, stderr);
     switch (ttyconf) {
         .no_color => try graph.env_map.put("NO_COLOR", "1"),
@@ -365,7 +365,7 @@ pub fn main() !void {
         .data = buffer.items,
         .flags = .{ .exclusive = true },
     }) catch |err| {
-        fatal("unable to write configuration results to '{}{s}': {s}", .{
+        fatal("unable to write configuration results to '{f}{s}': {s}", .{
             local_cache_directory, tmp_sub_path, @errorName(err),
         });
     };
@@ -378,16 +378,11 @@ pub fn main() !void {
     validateSystemLibraryOptions(builder);

-    var stdout_writer: std.io.BufferedWriter = .{
-        .buffer = &stdout_buffer,
-        .unbuffered_writer = std.io.getStdOut().writer(),
-    };
-
-    if (help_menu)
-        return usage(builder, &stdout_writer);
-
-    if (steps_menu)
-        return steps(builder, &stdout_writer);
+    {
+        var stdout_bw = std.fs.File.stdout().writer().buffered(&stdio_buffer);
+        if (help_menu) return usage(builder, &stdout_bw);
+        if (steps_menu) return steps(builder, &stdout_bw);
+    }

     var run: Run = .{
         .max_rss = max_rss,
@@ -699,7 +694,7 @@ fn runStepNames(
     const ttyconf = run.ttyconf;
     if (run.summary != .none) {
-        var bw = std.debug.lockStdErr2();
+        var bw = std.debug.lockStdErr2(&stdio_buffer);
         defer std.debug.unlockStdErr();

         const total_count = success_count + failure_count + pending_count + skipped_count;
@@ -1131,7 +1126,7 @@ fn workerMakeOneStep(
     const show_stderr = s.result_stderr.len > 0;
     if (show_error_msgs or show_compile_errors or show_stderr) {
-        var bw = std.debug.lockStdErr2();
+        var bw = std.debug.lockStdErr2(&stdio_buffer);
         defer std.debug.unlockStdErr();

         const gpa = b.allocator;
@@ -1256,7 +1251,7 @@ fn steps(builder: *std.Build, bw: *std.io.BufferedWriter) !void {
     }
 }

-var stdout_buffer: [256]u8 = undefined;
+var stdio_buffer: [256]u8 = undefined;

 fn usage(b: *std.Build, bw: *std.io.BufferedWriter) !void {
     try bw.print(
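
Stdout and stderr now share the single global stdio_buffer declared in the last hunk, and std.debug.lockStdErr2 takes that buffer and hands back a buffered writer for locked stderr output. A sketch of the stderr-side pattern from the summary hunks above (the exact return type of lockStdErr2 is not shown in this diff, so treating it as a value with a print method is an assumption):

    var stdio_buffer: [256]u8 = undefined;

    fn printSummary() void {
        // Locks stderr and buffers writes through the caller's storage.
        var bw = std.debug.lockStdErr2(&stdio_buffer);
        defer std.debug.unlockStdErr();
        bw.print("Build Summary: ...\n", .{}) catch {};
    }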

========================================

@@ -284,7 +284,7 @@ pub fn create(
         .h_dir = undefined,
         .dest_dir = graph.env_map.get("DESTDIR"),
         .install_tls = .{
-            .step = Step.init(.{
+            .step = .init(.{
                 .id = TopLevelStep.base_id,
                 .name = "install",
                 .owner = b,
@@ -292,7 +292,7 @@ pub fn create(
             .description = "Copy build artifacts to prefix path",
         },
         .uninstall_tls = .{
-            .step = Step.init(.{
+            .step = .init(.{
                 .id = TopLevelStep.base_id,
                 .name = "uninstall",
                 .owner = b,
@@ -342,7 +342,7 @@ fn createChildOnly(
         .graph = parent.graph,
         .allocator = allocator,
         .install_tls = .{
-            .step = Step.init(.{
+            .step = .init(.{
                 .id = TopLevelStep.base_id,
                 .name = "install",
                 .owner = child,
@@ -350,7 +350,7 @@ fn createChildOnly(
             .description = "Copy build artifacts to prefix path",
         },
         .uninstall_tls = .{
-            .step = Step.init(.{
+            .step = .init(.{
                 .id = TopLevelStep.base_id,
                 .name = "uninstall",
                 .owner = child,
@@ -1525,7 +1525,7 @@ pub fn option(b: *Build, comptime T: type, name_raw: []const u8, description_raw
 pub fn step(b: *Build, name: []const u8, description: []const u8) *Step {
     const step_info = b.allocator.create(TopLevelStep) catch @panic("OOM");
     step_info.* = .{
-        .step = Step.init(.{
+        .step = .init(.{
             .id = TopLevelStep.base_id,
             .name = name,
             .owner = b,
@@ -1745,7 +1745,7 @@ pub fn addUserInputOption(b: *Build, name_raw: []const u8, value_raw: []const u8
             return true;
         },
         .lazy_path, .lazy_path_list => {
-            log.warn("the lazy path value type isn't added from the CLI, but somehow '{s}' is a .{}", .{ name, std.zig.fmtId(@tagName(gop.value_ptr.value)) });
+            log.warn("the lazy path value type isn't added from the CLI, but somehow '{s}' is a .{f}", .{ name, std.zig.fmtId(@tagName(gop.value_ptr.value)) });
             return true;
         },
     }
@@ -2059,7 +2059,7 @@ pub fn runAllowFail(
     try Step.handleVerbose2(b, null, child.env_map, argv);
     try child.spawn();

-    const stdout = child.stdout.?.reader().readAllAlloc(b.allocator, max_output_size) catch {
+    const stdout = child.stdout.?.readToEndAlloc(b.allocator, .limited(max_output_size)) catch {
         return error.ReadFailure;
     };
     errdefer b.allocator.free(stdout);
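
The recurring Step.init(.{...}) to .init(.{...}) rewrite throughout this commit is Zig's decl-literal syntax: when the result type of an initializer is already known, .init(...) resolves to that type's init declaration, so the type name can be dropped. A self-contained illustration with hypothetical types:

    const Gadget = struct {
        id: u32,
        pub fn init(id: u32) Gadget {
            return .{ .id = id };
        }
    };

    const Machine = struct {
        gadget: Gadget,
    };

    // The field type is known to be Gadget, so `.init` means Gadget.init.
    const machine: Machine = .{ .gadget = .init(7) };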

========================================

@@ -333,7 +333,7 @@ pub const Manifest = struct {
     pub const Diagnostic = union(enum) {
         none,
         manifest_create: fs.File.OpenError,
-        manifest_read: fs.File.ReadError,
+        manifest_read: anyerror,
         manifest_lock: fs.File.LockError,
         manifest_seek: fs.File.SeekError,
         file_open: FileOp,
@@ -1062,7 +1062,7 @@ pub const Manifest = struct {
     fn addDepFileMaybePost(self: *Manifest, dir: fs.Dir, dep_file_basename: []const u8) !void {
         const gpa = self.cache.gpa;
-        const dep_file_contents = try dir.readFileAlloc(gpa, dep_file_basename, manifest_file_size_max);
+        const dep_file_contents = try dir.readFileAlloc(dep_file_basename, gpa, .limited(manifest_file_size_max));
         defer gpa.free(dep_file_contents);

         var error_buf: std.ArrayListUnmanaged(u8) = .empty;

========================================

@@ -57,15 +57,13 @@ pub fn closeAndFree(self: *Directory, gpa: Allocator) void {
 pub fn format(
     self: Directory,
+    bw: *std.io.BufferedWriter,
     comptime fmt_string: []const u8,
-    options: fmt.FormatOptions,
-    writer: anytype,
 ) !void {
-    _ = options;
     if (fmt_string.len != 0) fmt.invalidFmtError(fmt_string, self);
     if (self.path) |p| {
-        try writer.writeAll(p);
-        try writer.writeAll(fs.path.sep_str);
+        try bw.writeAll(p);
+        try bw.writeAll(fs.path.sep_str);
     }
 }

========================================

@@ -142,9 +142,8 @@ pub fn toStringZ(p: Path, allocator: Allocator) Allocator.Error![:0]u8 {
 pub fn format(
     self: Path,
+    bw: *std.io.BufferedWriter,
     comptime fmt_string: []const u8,
-    options: std.fmt.FormatOptions,
-    writer: anytype,
 ) !void {
     if (fmt_string.len == 1) {
         // Quote-escape the string.
@@ -155,33 +154,33 @@ pub fn format(
             else => @compileError("unsupported format string: " ++ fmt_string),
         };
         if (self.root_dir.path) |p| {
-            try stringEscape(p, f, options, writer);
-            if (self.sub_path.len > 0) try stringEscape(fs.path.sep_str, f, options, writer);
+            try stringEscape(p, bw, f);
+            if (self.sub_path.len > 0) try stringEscape(fs.path.sep_str, bw, f);
         }
         if (self.sub_path.len > 0) {
-            try stringEscape(self.sub_path, f, options, writer);
+            try stringEscape(self.sub_path, bw, f);
         }
         return;
     }
     if (fmt_string.len > 0)
         std.fmt.invalidFmtError(fmt_string, self);
     if (std.fs.path.isAbsolute(self.sub_path)) {
-        try writer.writeAll(self.sub_path);
+        try bw.writeAll(self.sub_path);
         return;
     }
     if (self.root_dir.path) |p| {
-        try writer.writeAll(p);
+        try bw.writeAll(p);
         if (self.sub_path.len > 0) {
-            try writer.writeAll(fs.path.sep_str);
-            try writer.writeAll(self.sub_path);
+            try bw.writeAll(fs.path.sep_str);
+            try bw.writeAll(self.sub_path);
         }
         return;
     }
     if (self.sub_path.len > 0) {
-        try writer.writeAll(self.sub_path);
+        try bw.writeAll(self.sub_path);
         return;
     }
-    try writer.writeByte('.');
+    try bw.writeByte('.');
 }

 pub fn eql(self: Path, other: Path) bool {
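
These two files show the shape of the new formatting protocol: format takes a concrete *std.io.BufferedWriter instead of options: std.fmt.FormatOptions plus an anytype writer, and call sites select the method with the {f} specifier seen throughout the commit. A sketch of a custom formatter under the new signature (Point is hypothetical; the exact requirements are only as inferred from the two files above):

    const std = @import("std");

    const Point = struct {
        x: i32,
        y: i32,

        pub fn format(
            self: Point,
            bw: *std.io.BufferedWriter,
            comptime fmt_string: []const u8,
        ) !void {
            if (fmt_string.len != 0) std.fmt.invalidFmtError(fmt_string, self);
            try bw.print("({d},{d})", .{ self.x, self.y });
        }
    };

    // A caller would then write: try bw.print("p = {f}\n", .{p});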

========================================

@@ -169,8 +169,8 @@ fn serveFile(
     // The desired API is actually sendfile, which will require enhancing std.http.Server.
     // We load the file with every request so that the user can make changes to the file
     // and refresh the HTML page without restarting this server.
-    const file_contents = ws.zig_lib_directory.handle.readFileAlloc(gpa, name, 10 * 1024 * 1024) catch |err| {
-        log.err("failed to read '{}{s}': {s}", .{ ws.zig_lib_directory, name, @errorName(err) });
+    const file_contents = ws.zig_lib_directory.handle.readFileAlloc(name, gpa, .limited(10 * 1024 * 1024)) catch |err| {
+        log.err("failed to read '{f}{s}': {s}", .{ ws.zig_lib_directory, name, @errorName(err) });
         return error.AlreadyReported;
     };
     defer gpa.free(file_contents);
@@ -206,7 +206,7 @@ fn serveWasm(
     });
     // std.http.Server does not have a sendfile API yet.
     const bin_path = try wasm_base_path.join(arena, bin_name);
-    const file_contents = try bin_path.root_dir.handle.readFileAlloc(gpa, bin_path.sub_path, 10 * 1024 * 1024);
+    const file_contents = try bin_path.root_dir.handle.readFileAlloc(bin_path.sub_path, gpa, .limited(10 * 1024 * 1024));
     defer gpa.free(file_contents);
     try request.respond(file_contents, .{
         .extra_headers = &.{
@@ -251,10 +251,10 @@ fn buildWasmBinary(
         "-fsingle-threaded", //
         "--dep", "Walk", //
         "--dep", "html_render", //
-        try std.fmt.allocPrint(arena, "-Mroot={}", .{main_src_path}), //
-        try std.fmt.allocPrint(arena, "-MWalk={}", .{walk_src_path}), //
+        try std.fmt.allocPrint(arena, "-Mroot={f}", .{main_src_path}), //
+        try std.fmt.allocPrint(arena, "-MWalk={f}", .{walk_src_path}), //
         "--dep", "Walk", //
-        try std.fmt.allocPrint(arena, "-Mhtml_render={}", .{html_render_src_path}), //
+        try std.fmt.allocPrint(arena, "-Mhtml_render={f}", .{html_render_src_path}), //
         "--listen=-",
     });
@@ -280,13 +280,10 @@ fn buildWasmBinary(
     const stdout = poller.fifo(.stdout);

     poll: while (true) {
-        while (stdout.readableLength() < @sizeOf(Header)) {
-            if (!(try poller.poll())) break :poll;
-        }
-        const header = stdout.reader().readStruct(Header) catch unreachable;
-        while (stdout.readableLength() < header.bytes_len) {
-            if (!(try poller.poll())) break :poll;
-        }
+        while (stdout.readableLength() < @sizeOf(Header)) if (!try poller.poll()) break :poll;
+        var header: Header = undefined;
+        assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(Header));
+        while (stdout.readableLength() < header.bytes_len) if (!try poller.poll()) break :poll;
         const body = stdout.readableSliceOfLen(header.bytes_len);

         switch (header.tag) {
@@ -527,7 +524,7 @@ fn serveSourcesTar(ws: *WebServer, request: *std.http.Server.Request) !void {
     for (deduped_paths) |joined_path| {
         var file = joined_path.root_dir.handle.openFile(joined_path.sub_path, .{}) catch |err| {
-            log.err("failed to open {}: {s}", .{ joined_path, @errorName(err) });
+            log.err("failed to open {f}: {s}", .{ joined_path, @errorName(err) });
             continue;
         };
         defer file.close();
@@ -605,7 +602,7 @@ fn prepareTables(
     const rebuilt_exe_path = run_step.rebuilt_executable.?;
     var debug_info = std.debug.Info.load(gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| {
-        log.err("step '{s}': failed to load debug information for '{}': {s}", .{
+        log.err("step '{s}': failed to load debug information for '{f}': {s}", .{
             run_step.step.name, rebuilt_exe_path, @errorName(err),
         });
         return error.AlreadyReported;
@@ -617,7 +614,7 @@ fn prepareTables(
         .sub_path = "v/" ++ std.fmt.hex(coverage_id),
     };
     var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| {
-        log.err("step '{s}': failed to load coverage file '{}': {s}", .{
+        log.err("step '{s}': failed to load coverage file '{f}': {s}", .{
             run_step.step.name, coverage_file_path, @errorName(err),
         });
         return error.AlreadyReported;
@@ -625,7 +622,7 @@ fn prepareTables(
     defer coverage_file.close();

     const file_size = coverage_file.getEndPos() catch |err| {
-        log.err("unable to check len of coverage file '{}': {s}", .{ coverage_file_path, @errorName(err) });
+        log.err("unable to check len of coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) });
         return error.AlreadyReported;
     };
@@ -637,7 +634,7 @@ fn prepareTables(
         coverage_file.handle,
         0,
     ) catch |err| {
-        log.err("failed to map coverage file '{}': {s}", .{ coverage_file_path, @errorName(err) });
+        log.err("failed to map coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) });
         return error.AlreadyReported;
     };
     gop.value_ptr.mapped_memory = mapped_memory;
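
This file and Step.zig below both replace stdout.reader().readStruct(Header) with an explicit copy out of the poller fifo: wait until @sizeOf(Header) bytes are buffered, memcpy them into a stack Header via std.mem.asBytes, then wait for the body the header describes. A consolidated sketch of that framing loop, assuming Header, poller, stdout, and assert are as in the hunks above:

    poll: while (true) {
        // Block until a whole header is buffered, then copy it out of the fifo.
        while (stdout.readableLength() < @sizeOf(Header)) if (!try poller.poll()) break :poll;
        var header: Header = undefined;
        assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(Header));
        // Then block until the message body is fully buffered.
        while (stdout.readableLength() < header.bytes_len) if (!try poller.poll()) break :poll;
        const body = stdout.readableSliceOfLen(header.bytes_len);
        _ = body; // dispatch on header.tag here
    }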

========================================

@@ -516,13 +516,10 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
     const stdout = zp.poller.fifo(.stdout);

     poll: while (true) {
-        while (stdout.readableLength() < @sizeOf(Header)) {
-            if (!(try zp.poller.poll())) break :poll;
-        }
-        const header = stdout.reader().readStruct(Header) catch unreachable;
-        while (stdout.readableLength() < header.bytes_len) {
-            if (!(try zp.poller.poll())) break :poll;
-        }
+        while (stdout.readableLength() < @sizeOf(Header)) if (!try zp.poller.poll()) break :poll;
+        var header: Header = undefined;
+        assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(Header));
+        while (stdout.readableLength() < header.bytes_len) if (!try zp.poller.poll()) break :poll;
         const body = stdout.readableSliceOfLen(header.bytes_len);

         switch (header.tag) {

========================================

@@ -28,7 +28,7 @@ pub fn create(
 ) *CheckFile {
     const check_file = owner.allocator.create(CheckFile) catch @panic("OOM");
     check_file.* = .{
-        .step = Step.init(.{
+        .step = .init(.{
             .id = base_id,
             .name = "CheckFile",
             .owner = owner,
@@ -53,7 +53,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
     try step.singleUnchangingWatchInput(check_file.source);
     const src_path = check_file.source.getPath2(b, step);
-    const contents = fs.cwd().readFileAlloc(b.allocator, src_path, check_file.max_bytes) catch |err| {
+    const contents = fs.cwd().readFileAlloc(src_path, b.allocator, .limited(check_file.max_bytes)) catch |err| {
         return step.fail("unable to read '{s}': {s}", .{
             src_path, @errorName(err),
         });

(File diff suppressed because it is too large.)

========================================

@@ -409,7 +409,7 @@ pub fn create(owner: *std.Build, options: Options) *Compile {
         .linkage = options.linkage,
         .kind = options.kind,
         .name = name,
-        .step = Step.init(.{
+        .step = .init(.{
             .id = base_id,
             .name = step_name,
             .owner = owner,
@@ -1542,7 +1542,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
     if (compile.kind == .lib and compile.linkage != null and compile.linkage.? == .dynamic) {
         if (compile.version) |version| {
             try zig_args.append("--version");
-            try zig_args.append(b.fmt("{}", .{version}));
+            try zig_args.append(b.fmt("{f}", .{version}));
         }

         if (compile.rootModuleTarget().os.tag.isDarwin()) {
@@ -1704,7 +1704,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
     const opt_zig_lib_dir = if (compile.zig_lib_dir) |dir|
         dir.getPath2(b, step)
     else if (b.graph.zig_lib_directory.path) |_|
-        b.fmt("{}", .{b.graph.zig_lib_directory})
+        b.fmt("{f}", .{b.graph.zig_lib_directory})
     else
         null;
@@ -1830,7 +1830,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
     // Update generated files
     if (maybe_output_dir) |output_dir| {
         if (compile.emit_directory) |lp| {
-            lp.path = b.fmt("{}", .{output_dir});
+            lp.path = b.fmt("{f}", .{output_dir});
         }

         // zig fmt: off
@@ -1970,13 +1970,13 @@ fn checkCompileErrors(compile: *Compile) !void {
     const actual_errors = ae: {
         var aw: std.io.AllocatingWriter = undefined;
-        const bw = aw.init(arena);
+        aw.init(arena);
         defer aw.deinit();
         try actual_eb.renderToWriter(.{
             .ttyconf = .no_color,
             .include_reference_trace = false,
             .include_source_line = false,
-        }, bw);
+        }, &aw.buffered_writer);
         break :ae try aw.toOwnedSlice();
     };
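
checkCompileErrors shows the new std.io.AllocatingWriter lifecycle: initialize it in place over an allocator, hand out its buffered_writer field, and collect the accumulated bytes at the end. A hedged sketch of that pattern; every method name is taken from the hunks in this file and in ConfigHeader below, but the full API surface is not shown in this commit:

    var aw: std.io.AllocatingWriter = undefined;
    aw.init(arena);
    defer aw.deinit();
    const bw = &aw.buffered_writer;

    try bw.writeAll("some rendered text\n");

    // Either borrow the bytes (ConfigHeader uses getWritten)...
    const rendered = aw.getWritten();
    _ = rendered;
    // ...or take ownership, as this file does:
    // const owned = try aw.toOwnedSlice();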

========================================

@@ -87,7 +87,7 @@ pub fn create(owner: *std.Build, options: Options) *ConfigHeader {
         owner.fmt("configure {s} header to {s}", .{ @tagName(options.style), include_path });

     config_header.* = .{
-        .step = Step.init(.{
+        .step = .init(.{
             .id = base_id,
             .name = name,
             .owner = owner,
@@ -95,7 +95,7 @@ pub fn create(owner: *std.Build, options: Options) *ConfigHeader {
             .first_ret_addr = options.first_ret_addr orelse @returnAddress(),
         }),
         .style = options.style,
-        .values = std.StringArrayHashMap(Value).init(owner.allocator),
+        .values = .init(owner.allocator),
         .max_bytes = options.max_bytes,
         .include_path = include_path,
@@ -195,8 +195,10 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
     man.hash.addBytes(config_header.include_path);
     man.hash.addOptionalBytes(config_header.include_guard_override);

-    var output = std.ArrayList(u8).init(gpa);
-    defer output.deinit();
+    var aw: std.io.AllocatingWriter = undefined;
+    aw.init(gpa);
+    defer aw.deinit();
+    const bw = &aw.buffered_writer;

     const header_text = "This file was generated by ConfigHeader using the Zig Build System.";
     const c_generated_line = "/* " ++ header_text ++ " */\n";
@@ -204,40 +206,41 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
     switch (config_header.style) {
         .autoconf_undef, .autoconf, .autoconf_at => |file_source| {
-            try output.appendSlice(c_generated_line);
+            try bw.writeAll(c_generated_line);
             const src_path = file_source.getPath2(b, step);
-            const contents = std.fs.cwd().readFileAlloc(arena, src_path, config_header.max_bytes) catch |err| {
+            const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| {
                 return step.fail("unable to read autoconf input file '{s}': {s}", .{
                     src_path, @errorName(err),
                 });
             };
             switch (config_header.style) {
-                .autoconf_undef, .autoconf => try render_autoconf_undef(step, contents, &output, config_header.values, src_path),
-                .autoconf_at => try render_autoconf_at(step, contents, &output, config_header.values, src_path),
+                .autoconf_undef, .autoconf => try render_autoconf_undef(step, contents, bw, config_header.values, src_path),
+                .autoconf_at => try render_autoconf_at(step, contents, &aw, config_header.values, src_path),
                 else => unreachable,
             }
         },
         .cmake => |file_source| {
-            try output.appendSlice(c_generated_line);
+            try bw.writeAll(c_generated_line);
             const src_path = file_source.getPath2(b, step);
-            const contents = std.fs.cwd().readFileAlloc(arena, src_path, config_header.max_bytes) catch |err| {
+            const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| {
                 return step.fail("unable to read cmake input file '{s}': {s}", .{
                     src_path, @errorName(err),
                 });
             };
-            try render_cmake(step, contents, &output, config_header.values, src_path);
+            try render_cmake(step, contents, bw, config_header.values, src_path);
         },
         .blank => {
-            try output.appendSlice(c_generated_line);
-            try render_blank(&output, config_header.values, config_header.include_path, config_header.include_guard_override);
+            try bw.writeAll(c_generated_line);
+            try render_blank(gpa, bw, config_header.values, config_header.include_path, config_header.include_guard_override);
        },
         .nasm => {
-            try output.appendSlice(asm_generated_line);
-            try render_nasm(&output, config_header.values);
+            try bw.writeAll(asm_generated_line);
+            try render_nasm(bw, config_header.values);
         },
     }

-    man.hash.addBytes(output.items);
+    const output = aw.getWritten();
+    man.hash.addBytes(output);

     if (try step.cacheHit(&man)) {
         const digest = man.final();
@@ -256,13 +259,13 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
     const sub_path_dirname = std.fs.path.dirname(sub_path).?;

     b.cache_root.handle.makePath(sub_path_dirname) catch |err| {
-        return step.fail("unable to make path '{}{s}': {s}", .{
+        return step.fail("unable to make path '{f}{s}': {s}", .{
             b.cache_root, sub_path_dirname, @errorName(err),
         });
     };

-    b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = output.items }) catch |err| {
-        return step.fail("unable to write file '{}{s}': {s}", .{
+    b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = output }) catch |err| {
+        return step.fail("unable to write file '{f}{s}': {s}", .{
             b.cache_root, sub_path, @errorName(err),
         });
     };
@@ -274,7 +277,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
 fn render_autoconf_undef(
     step: *Step,
     contents: []const u8,
-    output: *std.ArrayList(u8),
+    bw: *std.io.BufferedWriter,
     values: std.StringArrayHashMap(Value),
     src_path: []const u8,
 ) !void {
@@ -289,15 +292,15 @@ fn render_autoconf_undef(
     var line_it = std.mem.splitScalar(u8, contents, '\n');
     while (line_it.next()) |line| : (line_index += 1) {
         if (!std.mem.startsWith(u8, line, "#")) {
-            try output.appendSlice(line);
-            try output.appendSlice("\n");
+            try bw.writeAll(line);
+            try bw.writeByte('\n');
             continue;
         }
         var it = std.mem.tokenizeAny(u8, line[1..], " \t\r");
         const undef = it.next().?;
         if (!std.mem.eql(u8, undef, "undef")) {
-            try output.appendSlice(line);
-            try output.appendSlice("\n");
+            try bw.writeAll(line);
+            try bw.writeByte('\n');
             continue;
         }
         const name = it.next().?;
@@ -309,7 +312,7 @@ fn render_autoconf_undef(
             continue;
         };
         is_used.set(index);
-        try renderValueC(output, name, values.values()[index]);
+        try renderValueC(bw, name, values.values()[index]);
     }

     var unused_value_it = is_used.iterator(.{ .kind = .unset });
@@ -326,12 +329,13 @@ fn render_autoconf_undef(
 fn render_autoconf_at(
     step: *Step,
     contents: []const u8,
-    output: *std.ArrayList(u8),
+    aw: *std.io.AllocatingWriter,
     values: std.StringArrayHashMap(Value),
     src_path: []const u8,
 ) !void {
     const build = step.owner;
     const allocator = build.allocator;
+    const bw = &aw.buffered_writer;

     const used = allocator.alloc(bool, values.count()) catch @panic("OOM");
     for (used) |*u| u.* = false;
@@ -343,11 +347,11 @@ fn render_autoconf_at(
     while (line_it.next()) |line| : (line_index += 1) {
         const last_line = line_it.index == line_it.buffer.len;

-        const old_len = output.items.len;
-        expand_variables_autoconf_at(output, line, values, used) catch |err| switch (err) {
+        const old_len = aw.getWritten().len;
+        expand_variables_autoconf_at(bw, line, values, used) catch |err| switch (err) {
             error.MissingValue => {
-                const name = output.items[old_len..];
-                defer output.shrinkRetainingCapacity(old_len);
+                const name = aw.getWritten()[old_len..];
+                defer aw.shrinkRetainingCapacity(old_len);
                 try step.addError("{s}:{d}: error: unspecified config header value: '{s}'", .{
                     src_path, line_index + 1, name,
                 });
@@ -362,9 +366,7 @@ fn render_autoconf_at(
                 continue;
             },
         };
-        if (!last_line) {
-            try output.append('\n');
-        }
+        if (!last_line) try bw.writeByte('\n');
     }

     for (values.unmanaged.entries.slice().items(.key), used) |name, u| {
@@ -374,15 +376,13 @@ fn render_autoconf_at(
         }
     }

-    if (any_errors) {
-        return error.MakeFailed;
-    }
+    if (any_errors) return error.MakeFailed;
 }

 fn render_cmake(
     step: *Step,
     contents: []const u8,
-    output: *std.ArrayList(u8),
+    bw: *std.io.BufferedWriter,
     values: std.StringArrayHashMap(Value),
     src_path: []const u8,
 ) !void {
@@ -417,10 +417,8 @@ fn render_cmake(
         defer allocator.free(line);

         if (!std.mem.startsWith(u8, line, "#")) {
-            try output.appendSlice(line);
-            if (!last_line) {
-                try output.appendSlice("\n");
-            }
+            try bw.writeAll(line);
+            if (!last_line) try bw.writeByte('\n');
             continue;
         }
         var it = std.mem.tokenizeAny(u8, line[1..], " \t\r");
@@ -428,10 +426,8 @@ fn render_cmake(
         if (!std.mem.eql(u8, cmakedefine, "cmakedefine") and
             !std.mem.eql(u8, cmakedefine, "cmakedefine01"))
         {
-            try output.appendSlice(line);
-            if (!last_line) {
-                try output.appendSlice("\n");
-            }
+            try bw.writeAll(line);
+            if (!last_line) try bw.writeByte('\n');
             continue;
         }
@@ -502,7 +498,7 @@ fn render_cmake(
             value = Value{ .ident = it.rest() };
         }

-        try renderValueC(output, name, value);
+        try renderValueC(bw, name, value);
     }

     if (any_errors) {
@@ -511,13 +507,14 @@ fn render_cmake(
 }

 fn render_blank(
-    output: *std.ArrayList(u8),
+    gpa: std.mem.Allocator,
+    bw: *std.io.BufferedWriter,
     defines: std.StringArrayHashMap(Value),
     include_path: []const u8,
     include_guard_override: ?[]const u8,
 ) !void {
     const include_guard_name = include_guard_override orelse blk: {
-        const name = try output.allocator.dupe(u8, include_path);
+        const name = try gpa.dupe(u8, include_path);
         for (name) |*byte| {
             switch (byte.*) {
                 'a'...'z' => byte.* = byte.* - 'a' + 'A',
@@ -527,92 +524,53 @@ fn render_blank(
             }
         }
         break :blk name;
     };
+    defer if (include_guard_override == null) gpa.free(include_guard_name);

-    try output.appendSlice("#ifndef ");
-    try output.appendSlice(include_guard_name);
-    try output.appendSlice("\n#define ");
-    try output.appendSlice(include_guard_name);
-    try output.appendSlice("\n");
+    try bw.print(
+        \\#ifndef {[0]s}
+        \\#define {[0]s}
+        \\
+    , .{include_guard_name});

     const values = defines.values();
-    for (defines.keys(), 0..) |name, i| {
-        try renderValueC(output, name, values[i]);
-    }
+    for (defines.keys(), 0..) |name, i| try renderValueC(bw, name, values[i]);

-    try output.appendSlice("#endif /* ");
-    try output.appendSlice(include_guard_name);
-    try output.appendSlice(" */\n");
+    try bw.print(
+        \\#endif /* {s} */
+        \\
+    , .{include_guard_name});
 }

-fn render_nasm(output: *std.ArrayList(u8), defines: std.StringArrayHashMap(Value)) !void {
-    const values = defines.values();
-    for (defines.keys(), 0..) |name, i| {
-        try renderValueNasm(output, name, values[i]);
-    }
+fn render_nasm(bw: *std.io.BufferedWriter, defines: std.StringArrayHashMap(Value)) !void {
+    for (defines.keys(), defines.values()) |name, value| try renderValueNasm(bw, name, value);
 }

-fn renderValueC(output: *std.ArrayList(u8), name: []const u8, value: Value) !void {
+fn renderValueC(bw: *std.io.BufferedWriter, name: []const u8, value: Value) !void {
     switch (value) {
-        .undef => {
-            try output.appendSlice("/* #undef ");
-            try output.appendSlice(name);
-            try output.appendSlice(" */\n");
-        },
-        .defined => {
-            try output.appendSlice("#define ");
-            try output.appendSlice(name);
-            try output.appendSlice("\n");
-        },
-        .boolean => |b| {
-            try output.appendSlice("#define ");
-            try output.appendSlice(name);
-            try output.appendSlice(if (b) " 1\n" else " 0\n");
-        },
-        .int => |i| {
-            try output.print("#define {s} {d}\n", .{ name, i });
-        },
-        .ident => |ident| {
-            try output.print("#define {s} {s}\n", .{ name, ident });
-        },
-        .string => |string| {
-            // TODO: use C-specific escaping instead of zig string literals
-            try output.print("#define {s} \"{}\"\n", .{ name, std.zig.fmtEscapes(string) });
-        },
+        .undef => try bw.print("/* #undef {s} */\n", .{name}),
+        .defined => try bw.print("#define {s}\n", .{name}),
+        .boolean => |b| try bw.print("#define {s} {c}\n", .{ name, @as(u8, '0') + @intFromBool(b) }),
+        .int => |i| try bw.print("#define {s} {d}\n", .{ name, i }),
+        .ident => |ident| try bw.print("#define {s} {s}\n", .{ name, ident }),
+        // TODO: use C-specific escaping instead of zig string literals
+        .string => |string| try bw.print("#define {s} \"{f}\"\n", .{ name, std.zig.fmtEscapes(string) }),
     }
 }

-fn renderValueNasm(output: *std.ArrayList(u8), name: []const u8, value: Value) !void {
+fn renderValueNasm(bw: *std.io.BufferedWriter, name: []const u8, value: Value) !void {
     switch (value) {
-        .undef => {
-            try output.appendSlice("; %undef ");
-            try output.appendSlice(name);
-            try output.appendSlice("\n");
-        },
-        .defined => {
-            try output.appendSlice("%define ");
-            try output.appendSlice(name);
-            try output.appendSlice("\n");
-        },
-        .boolean => |b| {
-            try output.appendSlice("%define ");
-            try output.appendSlice(name);
-            try output.appendSlice(if (b) " 1\n" else " 0\n");
-        },
-        .int => |i| {
-            try output.print("%define {s} {d}\n", .{ name, i });
-        },
-        .ident => |ident| {
-            try output.print("%define {s} {s}\n", .{ name, ident });
-        },
-        .string => |string| {
-            // TODO: use nasm-specific escaping instead of zig string literals
-            try output.print("%define {s} \"{}\"\n", .{ name, std.zig.fmtEscapes(string) });
-        },
+        .undef => try bw.print("; %undef {s}\n", .{name}),
+        .defined => try bw.print("%define {s}\n", .{name}),
+        .boolean => |b| try bw.print("%define {s} {c}\n", .{ name, @as(u8, '0') + @intFromBool(b) }),
+        .int => |i| try bw.print("%define {s} {d}\n", .{ name, i }),
+        .ident => |ident| try bw.print("%define {s} {s}\n", .{ name, ident }),
+        // TODO: use nasm-specific escaping instead of zig string literals
+        .string => |string| try bw.print("%define {s} \"{f}\"\n", .{ name, std.zig.fmtEscapes(string) }),
     }
 }

 fn expand_variables_autoconf_at(
-    output: *std.ArrayList(u8),
+    bw: *std.io.BufferedWriter,
     contents: []const u8,
     values: std.StringArrayHashMap(Value),
     used: []bool,
@@ -637,23 +595,17 @@ fn expand_variables_autoconf_at(
                 const key = contents[curr + 1 .. close_pos];
                 const index = values.getIndex(key) orelse {
                     // Report the missing key to the caller.
-                    try output.appendSlice(key);
+                    try bw.writeAll(key);
                     return error.MissingValue;
                 };
                 const value = values.unmanaged.entries.slice().items(.value)[index];
                 used[index] = true;

-                try output.appendSlice(contents[source_offset..curr]);
+                try bw.writeAll(contents[source_offset..curr]);
                 switch (value) {
                     .undef, .defined => {},
-                    .boolean => |b| {
-                        try output.append(if (b) '1' else '0');
-                    },
-                    .int => |i| {
-                        try output.writer().print("{d}", .{i});
-                    },
-                    .ident, .string => |s| {
-                        try output.appendSlice(s);
-                    },
+                    .boolean => |b| try bw.writeByte(@as(u8, '0') + @intFromBool(b)),
+                    .int => |i| try bw.print("{d}", .{i}),
+                    .ident, .string => |s| try bw.writeAll(s),
                 }

                 curr = close_pos;
@@ -661,7 +613,7 @@ fn expand_variables_autoconf_at(
         }
     }

-    try output.appendSlice(contents[source_offset..]);
+    try bw.writeAll(contents[source_offset..]);
 }

 fn expand_variables_cmake(
@@ -669,7 +621,7 @@ fn expand_variables_cmake(
     contents: []const u8,
     values: std.StringArrayHashMap(Value),
 ) ![]const u8 {
-    var result = std.ArrayList(u8).init(allocator);
+    var result: std.ArrayList(u8) = .init(allocator);
     errdefer result.deinit();

     const valid_varname_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789/_.+-";
@@ -681,7 +633,7 @@ fn expand_variables_cmake(
         source: usize,
         target: usize,
     };
-    var var_stack = std.ArrayList(Position).init(allocator);
+    var var_stack: std.ArrayList(Position) = .init(allocator);
     defer var_stack.deinit();

     loop: while (curr < contents.len) : (curr += 1) {
         switch (contents[curr]) {
@@ -801,7 +753,7 @@ fn testReplaceVariablesAutoconfAt(
     expected: []const u8,
     values: std.StringArrayHashMap(Value),
 ) !void {
-    var output = std.ArrayList(u8).init(allocator);
+    var output: std.ArrayList(u8) = .init(allocator);
     defer output.deinit();

     const used = try allocator.alloc(bool, values.count());
@@ -828,7 +780,7 @@ fn testReplaceVariablesCMake(
 test "expand_variables_autoconf_at simple cases" {
     const allocator = std.testing.allocator;

-    var values = std.StringArrayHashMap(Value).init(allocator);
+    var values: std.StringArrayHashMap(Value) = .init(allocator);
     defer values.deinit();

     // empty strings are preserved
@@ -924,7 +876,7 @@ test "expand_variables_autoconf_at simple cases" {
 test "expand_variables_autoconf_at edge cases" {
     const allocator = std.testing.allocator;

-    var values = std.StringArrayHashMap(Value).init(allocator);
+    var values: std.StringArrayHashMap(Value) = .init(allocator);
     defer values.deinit();

     // @-vars resolved only when they wrap valid characters, otherwise considered literals
@@ -940,7 +892,7 @@ test "expand_variables_autoconf_at edge cases" {
 test "expand_variables_cmake simple cases" {
     const allocator = std.testing.allocator;

-    var values = std.StringArrayHashMap(Value).init(allocator);
+    var values: std.StringArrayHashMap(Value) = .init(allocator);
     defer values.deinit();

     try values.putNoClobber("undef", .undef);
@@ -1028,7 +980,7 @@ test "expand_variables_cmake simple cases" {
 test "expand_variables_cmake edge cases" {
     const allocator = std.testing.allocator;

-    var values = std.StringArrayHashMap(Value).init(allocator);
+    var values: std.StringArrayHashMap(Value) = .init(allocator);
     defer values.deinit();

     // special symbols
@@ -1089,7 +1041,7 @@ test "expand_variables_cmake edge cases" {
 test "expand_variables_cmake escaped characters" {
     const allocator = std.testing.allocator;

-    var values = std.StringArrayHashMap(Value).init(allocator);
+    var values: std.StringArrayHashMap(Value) = .init(allocator);
     defer values.deinit();

     try values.putNoClobber("string", Value{ .string = "text" });

========================================

@@ -12,7 +12,7 @@ pub fn create(owner: *std.Build, error_msg: []const u8) *Fail {
     const fail = owner.allocator.create(Fail) catch @panic("OOM");
     fail.* = .{
-        .step = Step.init(.{
+        .step = .init(.{
             .id = base_id,
             .name = "fail",
             .owner = owner,

========================================

@@ -23,7 +23,7 @@ pub fn create(owner: *std.Build, options: Options) *Fmt {
     const fmt = owner.allocator.create(Fmt) catch @panic("OOM");
     const name = if (options.check) "zig fmt --check" else "zig fmt";
     fmt.* = .{
-        .step = Step.init(.{
+        .step = .init(.{
             .id = base_id,
             .name = name,
             .owner = owner,

========================================

@@ -63,7 +63,7 @@ pub fn create(owner: *std.Build, artifact: *Step.Compile, options: Options) *Ins
         .override => |o| o,
     };
     install_artifact.* = .{
-        .step = Step.init(.{
+        .step = .init(.{
             .id = base_id,
             .name = owner.fmt("install {s}", .{artifact.name}),
             .owner = owner,
@@ -164,7 +164,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
     const full_h_prefix = b.getInstallPath(h_dir, dir.dest_rel_path);
     var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| {
-        return step.fail("unable to open source directory '{}': {s}", .{
+        return step.fail("unable to open source directory '{f}': {s}", .{
             src_dir_path, @errorName(err),
         });
     };

========================================

@@ -43,7 +43,7 @@ pub const Options = struct {
 pub fn create(owner: *std.Build, options: Options) *InstallDir {
     const install_dir = owner.allocator.create(InstallDir) catch @panic("OOM");
     install_dir.* = .{
-        .step = Step.init(.{
+        .step = .init(.{
             .id = base_id,
             .name = owner.fmt("install {s}/", .{options.source_dir.getDisplayName()}),
             .owner = owner,
@@ -65,7 +65,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
     const src_dir_path = install_dir.options.source_dir.getPath3(b, step);
     const need_derived_inputs = try step.addDirectoryWatchInput(install_dir.options.source_dir);
     var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| {
-        return step.fail("unable to open source directory '{}': {s}", .{
+        return step.fail("unable to open source directory '{f}': {s}", .{
             src_dir_path, @errorName(err),
         });
     };

========================================

@@ -21,7 +21,7 @@ pub fn create(
     assert(dest_rel_path.len != 0);
     const install_file = owner.allocator.create(InstallFile) catch @panic("OOM");
     install_file.* = .{
-        .step = Step.init(.{
+        .step = .init(.{
             .id = base_id,
             .name = owner.fmt("install {s} to {s}", .{ source.getDisplayName(), dest_rel_path }),
             .owner = owner,

========================================

@@ -111,8 +111,8 @@ pub fn create(
     options: Options,
 ) *ObjCopy {
     const objcopy = owner.allocator.create(ObjCopy) catch @panic("OOM");
-    objcopy.* = ObjCopy{
-        .step = Step.init(.{
+    objcopy.* = .{
+        .step = .init(.{
             .id = base_id,
             .name = owner.fmt("objcopy {s}", .{input_file.getDisplayName()}),
             .owner = owner,

========================================

@@ -19,7 +19,7 @@ encountered_types: std.StringHashMapUnmanaged(void),
 pub fn create(owner: *std.Build) *Options {
     const options = owner.allocator.create(Options) catch @panic("OOM");
     options.* = .{
-        .step = Step.init(.{
+        .step = .init(.{
             .id = base_id,
             .name = "options",
             .owner = owner,
@@ -79,15 +79,15 @@ fn printType(
                     std.zig.fmtId(some), std.zig.fmtEscapes(value),
                 });
             } else {
-                try out.print(gpa, "\"{}\",", .{std.zig.fmtEscapes(value)});
+                try out.print(gpa, "\"{f}\",", .{std.zig.fmtEscapes(value)});
             }
             return out.appendSlice(gpa, "\n");
         },
         [:0]const u8 => {
             if (name) |some| {
-                try out.print(gpa, "pub const {}: [:0]const u8 = \"{}\";", .{ std.zig.fmtId(some), std.zig.fmtEscapes(value) });
+                try out.print(gpa, "pub const {f}: [:0]const u8 = \"{f}\";", .{ std.zig.fmtId(some), std.zig.fmtEscapes(value) });
             } else {
-                try out.print(gpa, "\"{}\",", .{std.zig.fmtEscapes(value)});
+                try out.print(gpa, "\"{f}\",", .{std.zig.fmtEscapes(value)});
             }
             return out.appendSlice(gpa, "\n");
         },
@@ -97,7 +97,7 @@ fn printType(
             }
             if (value) |payload| {
-                try out.print(gpa, "\"{}\"", .{std.zig.fmtEscapes(payload)});
+                try out.print(gpa, "\"{f}\"", .{std.zig.fmtEscapes(payload)});
             } else {
                 try out.appendSlice(gpa, "null");
             }
@@ -115,7 +115,7 @@ fn printType(
             }
             if (value) |payload| {
-                try out.print(gpa, "\"{}\"", .{std.zig.fmtEscapes(payload)});
+                try out.print(gpa, "\"{f}\"", .{std.zig.fmtEscapes(payload)});
             } else {
                 try out.appendSlice(gpa, "null");
             }
@@ -129,7 +129,7 @@ fn printType(
         },
         std.SemanticVersion => {
             if (name) |some| {
-                try out.print(gpa, "pub const {}: @import(\"std\").SemanticVersion = ", .{std.zig.fmtId(some)});
+                try out.print(gpa, "pub const {f}: @import(\"std\").SemanticVersion = ", .{std.zig.fmtId(some)});
             }
             try out.appendSlice(gpa, ".{\n");
@@ -142,11 +142,11 @@ fn printType(
             if (value.pre) |some| {
                 try out.appendNTimes(gpa, ' ', indent);
-                try out.print(gpa, " .pre = \"{}\",\n", .{std.zig.fmtEscapes(some)});
+                try out.print(gpa, " .pre = \"{f}\",\n", .{std.zig.fmtEscapes(some)});
             }
             if (value.build) |some| {
                 try out.appendNTimes(gpa, ' ', indent);
-                try out.print(gpa, " .build = \"{}\",\n", .{std.zig.fmtEscapes(some)});
+                try out.print(gpa, " .build = \"{f}\",\n", .{std.zig.fmtEscapes(some)});
             }
             if (name != null) {
@@ -233,7 +233,7 @@ fn printType(
         .null,
         => {
             if (name) |some| {
-                try out.print(gpa, "pub const {}: {s} = {any};\n", .{ std.zig.fmtId(some), @typeName(T), value });
+                try out.print(gpa, "pub const {f}: {s} = {any};\n", .{ std.zig.fmtId(some), @typeName(T), value });
             } else {
                 try out.print(gpa, "{any},\n", .{value});
             }
@@ -243,7 +243,7 @@ fn printType(
             try printEnum(options, out, T, info, indent);

             if (name) |some| {
-                try out.print(gpa, "pub const {}: {} = .{p_};\n", .{
+                try out.print(gpa, "pub const {f}: {f} = .{fp_};\n", .{
                     std.zig.fmtId(some),
                     std.zig.fmtId(@typeName(T)),
                     std.zig.fmtId(@tagName(value)),
@@ -255,7 +255,7 @@ fn printType(
             try printStruct(options, out, T, info, indent);

             if (name) |some| {
-                try out.print(gpa, "pub const {}: {} = ", .{
+                try out.print(gpa, "pub const {f}: {f} = ", .{
                     std.zig.fmtId(some),
                     std.zig.fmtId(@typeName(T)),
                 });
@@ -291,7 +291,7 @@ fn printEnum(
     if (gop.found_existing) return;

     try out.appendNTimes(gpa, ' ', indent);
-    try out.print(gpa, "pub const {} = enum ({s}) {{\n", .{ std.zig.fmtId(@typeName(T)), @typeName(val.tag_type) });
+    try out.print(gpa, "pub const {f} = enum ({s}) {{\n", .{ std.zig.fmtId(@typeName(T)), @typeName(val.tag_type) });

     inline for (val.fields) |field| {
         try out.appendNTimes(gpa, ' ', indent);
@@ -464,7 +464,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
         error.FileNotFound => {
             const sub_dirname = fs.path.dirname(sub_path).?;
             b.cache_root.handle.makePath(sub_dirname) catch |e| {
-                return step.fail("unable to make path '{}{s}': {s}", .{
+                return step.fail("unable to make path '{f}{s}': {s}", .{
                     b.cache_root, sub_dirname, @errorName(e),
                 });
             };
@@ -476,13 +476,13 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
             const tmp_sub_path_dirname = fs.path.dirname(tmp_sub_path).?;
             b.cache_root.handle.makePath(tmp_sub_path_dirname) catch |err| {
-                return step.fail("unable to make temporary directory '{}{s}': {s}", .{
+                return step.fail("unable to make temporary directory '{f}{s}': {s}", .{
                     b.cache_root, tmp_sub_path_dirname, @errorName(err),
                 });
             };
             b.cache_root.handle.writeFile(.{ .sub_path = tmp_sub_path, .data = options.contents.items }) catch |err| {
-                return step.fail("unable to write options to '{}{s}': {s}", .{
+                return step.fail("unable to write options to '{f}{s}': {s}", .{
                     b.cache_root, tmp_sub_path, @errorName(err),
                 });
             };
@@ -491,7 +491,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
                 error.PathAlreadyExists => {
                     // Other process beat us to it. Clean up the temp file.
                     b.cache_root.handle.deleteFile(tmp_sub_path) catch |e| {
-                        try step.addError("warning: unable to delete temp file '{}{s}': {s}", .{
+                        try step.addError("warning: unable to delete temp file '{f}{s}': {s}", .{
                             b.cache_root, tmp_sub_path, @errorName(e),
                         });
                     };
@@ -499,7 +499,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
                     return;
                 },
                 else => {
-                    return step.fail("unable to rename options from '{}{s}' to '{}{s}': {s}", .{
+                    return step.fail("unable to rename options from '{f}{s}' to '{f}{s}': {s}", .{
                         b.cache_root, tmp_sub_path,
                         b.cache_root, sub_path,
                         @errorName(err),
@@ -507,7 +507,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
                 },
             };
         },
-        else => |e| return step.fail("unable to access options file '{}{s}': {s}", .{
+        else => |e| return step.fail("unable to access options file '{f}{s}': {s}", .{
             b.cache_root, sub_path, @errorName(e),
         }),
     }

========================================

@@ -12,7 +12,7 @@ doomed_path: LazyPath,
 pub fn create(owner: *std.Build, doomed_path: LazyPath) *RemoveDir {
     const remove_dir = owner.allocator.create(RemoveDir) catch @panic("OOM");
     remove_dir.* = .{
-        .step = Step.init(.{
+        .step = .init(.{
             .id = base_id,
             .name = owner.fmt("RemoveDir {s}", .{doomed_path.getDisplayName()}),
             .owner = owner,


@ -169,7 +169,7 @@ pub const Output = struct {
pub fn create(owner: *std.Build, name: []const u8) *Run { pub fn create(owner: *std.Build, name: []const u8) *Run {
const run = owner.allocator.create(Run) catch @panic("OOM"); const run = owner.allocator.create(Run) catch @panic("OOM");
run.* = .{ run.* = .{
.step = Step.init(.{ .step = .init(.{
.id = base_id, .id = base_id,
.name = name, .name = name,
.owner = owner, .owner = owner,
@ -832,7 +832,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
else => unreachable, else => unreachable,
}; };
b.cache_root.handle.makePath(output_sub_dir_path) catch |err| { b.cache_root.handle.makePath(output_sub_dir_path) catch |err| {
return step.fail("unable to make path '{}{s}': {s}", .{ return step.fail("unable to make path '{f}{s}': {s}", .{
b.cache_root, output_sub_dir_path, @errorName(err), b.cache_root, output_sub_dir_path, @errorName(err),
}); });
}; };
@ -864,7 +864,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
else => unreachable, else => unreachable,
}; };
b.cache_root.handle.makePath(output_sub_dir_path) catch |err| { b.cache_root.handle.makePath(output_sub_dir_path) catch |err| {
return step.fail("unable to make path '{}{s}': {s}", .{ return step.fail("unable to make path '{f}{s}': {s}", .{
b.cache_root, output_sub_dir_path, @errorName(err), b.cache_root, output_sub_dir_path, @errorName(err),
}); });
}; };
@ -903,21 +903,21 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |err| { b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |err| {
if (err == error.PathAlreadyExists) { if (err == error.PathAlreadyExists) {
b.cache_root.handle.deleteTree(o_sub_path) catch |del_err| { b.cache_root.handle.deleteTree(o_sub_path) catch |del_err| {
return step.fail("unable to remove dir '{}'{s}: {s}", .{ return step.fail("unable to remove dir '{f}'{s}: {s}", .{
b.cache_root, b.cache_root,
tmp_dir_path, tmp_dir_path,
@errorName(del_err), @errorName(del_err),
}); });
}; };
b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |retry_err| { b.cache_root.handle.rename(tmp_dir_path, o_sub_path) catch |retry_err| {
return step.fail("unable to rename dir '{}{s}' to '{}{s}': {s}", .{ return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {s}", .{
b.cache_root, tmp_dir_path, b.cache_root, tmp_dir_path,
b.cache_root, o_sub_path, b.cache_root, o_sub_path,
@errorName(retry_err), @errorName(retry_err),
}); });
}; };
} else { } else {
return step.fail("unable to rename dir '{}{s}' to '{}{s}': {s}", .{ return step.fail("unable to rename dir '{f}{s}' to '{f}{s}': {s}", .{
b.cache_root, tmp_dir_path, b.cache_root, tmp_dir_path,
b.cache_root, o_sub_path, b.cache_root, o_sub_path,
@errorName(err), @errorName(err),
@ -964,7 +964,7 @@ pub fn rerunInFuzzMode(
.artifact => |pa| { .artifact => |pa| {
const artifact = pa.artifact; const artifact = pa.artifact;
const file_path: []const u8 = p: { const file_path: []const u8 = p: {
if (artifact == run.producer.?) break :p b.fmt("{}", .{run.rebuilt_executable.?}); if (artifact == run.producer.?) break :p b.fmt("{f}", .{run.rebuilt_executable.?});
break :p artifact.installed_path orelse artifact.generated_bin.?.path.?; break :p artifact.installed_path orelse artifact.generated_bin.?.path.?;
}; };
try argv_list.append(arena, b.fmt("{s}{s}", .{ try argv_list.append(arena, b.fmt("{s}{s}", .{
@ -1013,20 +1013,16 @@ fn populateGeneratedPaths(
fn formatTerm( fn formatTerm(
term: ?std.process.Child.Term, term: ?std.process.Child.Term,
bw: *std.io.BufferedWriter,
comptime fmt: []const u8, comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void { ) !void {
_ = fmt; _ = fmt;
_ = options;
if (term) |t| switch (t) { if (term) |t| switch (t) {
.Exited => |code| try writer.print("exited with code {}", .{code}), .Exited => |code| try bw.print("exited with code {}", .{code}),
.Signal => |sig| try writer.print("terminated with signal {}", .{sig}), .Signal => |sig| try bw.print("terminated with signal {}", .{sig}),
.Stopped => |sig| try writer.print("stopped with signal {}", .{sig}), .Stopped => |sig| try bw.print("stopped with signal {}", .{sig}),
.Unknown => |code| try writer.print("terminated for unknown reason with code {}", .{code}), .Unknown => |code| try bw.print("terminated for unknown reason with code {}", .{code}),
} else { } else try bw.writeAll("exited with any code");
try writer.writeAll("exited with any code");
}
} }
fn fmtTerm(term: ?std.process.Child.Term) std.fmt.Formatter(formatTerm) { fn fmtTerm(term: ?std.process.Child.Term) std.fmt.Formatter(formatTerm) {
return .{ .data = term }; return .{ .data = term };
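
For context, `fmtTerm` wraps an optional `Term` in `std.fmt.Formatter(formatTerm)`, whose `format` method (see the `Formatter` hunk later in this diff) forwards `(data, bw, fmt)` to the three-argument function above. A call-site sketch, mirroring the `step.fail` usages below:

    // Prints e.g. "exited with code 1" through the BufferedWriter.
    return step.fail("the following command {f}:\n{s}", .{
        fmtTerm(result.term),
        try Step.allocPrintCmd(arena, cwd, final_argv),
    });
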
@ -1262,12 +1258,12 @@ fn runCommand(
const sub_path = b.pathJoin(&output_components); const sub_path = b.pathJoin(&output_components);
const sub_path_dirname = fs.path.dirname(sub_path).?; const sub_path_dirname = fs.path.dirname(sub_path).?;
b.cache_root.handle.makePath(sub_path_dirname) catch |err| { b.cache_root.handle.makePath(sub_path_dirname) catch |err| {
return step.fail("unable to make path '{}{s}': {s}", .{ return step.fail("unable to make path '{f}{s}': {s}", .{
b.cache_root, sub_path_dirname, @errorName(err), b.cache_root, sub_path_dirname, @errorName(err),
}); });
}; };
b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = stream.bytes.? }) catch |err| { b.cache_root.handle.writeFile(.{ .sub_path = sub_path, .data = stream.bytes.? }) catch |err| {
return step.fail("unable to write file '{}{s}': {s}", .{ return step.fail("unable to write file '{f}{s}': {s}", .{
b.cache_root, sub_path, @errorName(err), b.cache_root, sub_path, @errorName(err),
}); });
}; };
@ -1346,7 +1342,7 @@ fn runCommand(
}, },
.expect_term => |expected_term| { .expect_term => |expected_term| {
if (!termMatches(expected_term, result.term)) { if (!termMatches(expected_term, result.term)) {
return step.fail("the following command {} (expected {}):\n{s}", .{ return step.fail("the following command {f} (expected {f}):\n{s}", .{
fmtTerm(result.term), fmtTerm(result.term),
fmtTerm(expected_term), fmtTerm(expected_term),
try Step.allocPrintCmd(arena, cwd, final_argv), try Step.allocPrintCmd(arena, cwd, final_argv),
@ -1366,7 +1362,7 @@ fn runCommand(
}; };
const expected_term: std.process.Child.Term = .{ .Exited = 0 }; const expected_term: std.process.Child.Term = .{ .Exited = 0 };
if (!termMatches(expected_term, result.term)) { if (!termMatches(expected_term, result.term)) {
return step.fail("{s}the following command {} (expected {}):\n{s}", .{ return step.fail("{s}the following command {f} (expected {f}):\n{s}", .{
prefix, prefix,
fmtTerm(result.term), fmtTerm(result.term),
fmtTerm(expected_term), fmtTerm(expected_term),
@ -1535,13 +1531,10 @@ fn evalZigTest(
defer if (sub_prog_node) |n| n.end(); defer if (sub_prog_node) |n| n.end();
const any_write_failed = first_write_failed or poll: while (true) { const any_write_failed = first_write_failed or poll: while (true) {
while (stdout.readableLength() < @sizeOf(Header)) { while (stdout.readableLength() < @sizeOf(Header)) if (!try poller.poll()) break :poll false;
if (!(try poller.poll())) break :poll false; var header: Header = undefined;
} assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(Header));
const header = stdout.reader().readStruct(Header) catch unreachable; while (stdout.readableLength() < header.bytes_len) if (!try poller.poll()) break :poll false;
while (stdout.readableLength() < header.bytes_len) {
if (!(try poller.poll())) break :poll false;
}
const body = stdout.readableSliceOfLen(header.bytes_len); const body = stdout.readableSliceOfLen(header.bytes_len);
switch (header.tag) { switch (header.tag) {
@ -1797,10 +1790,10 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
stdout_bytes = try poller.fifo(.stdout).toOwnedSlice(); stdout_bytes = try poller.fifo(.stdout).toOwnedSlice();
stderr_bytes = try poller.fifo(.stderr).toOwnedSlice(); stderr_bytes = try poller.fifo(.stderr).toOwnedSlice();
} else { } else {
stdout_bytes = try stdout.reader().readAllAlloc(arena, run.max_stdio_size); stdout_bytes = try stdout.reader().readAlloc(arena, run.max_stdio_size);
} }
} else if (child.stderr) |stderr| { } else if (child.stderr) |stderr| {
stderr_bytes = try stderr.reader().readAllAlloc(arena, run.max_stdio_size); stderr_bytes = try stderr.reader().readAlloc(arena, run.max_stdio_size);
} }
if (stderr_bytes) |bytes| if (bytes.len > 0) { if (stderr_bytes) |bytes| if (bytes.len > 0) {


@ -31,7 +31,7 @@ pub fn create(owner: *std.Build, options: Options) *TranslateC {
const translate_c = owner.allocator.create(TranslateC) catch @panic("OOM"); const translate_c = owner.allocator.create(TranslateC) catch @panic("OOM");
const source = options.root_source_file.dupe(owner); const source = options.root_source_file.dupe(owner);
translate_c.* = .{ translate_c.* = .{
.step = Step.init(.{ .step = .init(.{
.id = base_id, .id = base_id,
.name = "translate-c", .name = "translate-c",
.owner = owner, .owner = owner,


@ -27,7 +27,7 @@ pub const Contents = union(enum) {
pub fn create(owner: *std.Build) *UpdateSourceFiles { pub fn create(owner: *std.Build) *UpdateSourceFiles {
const usf = owner.allocator.create(UpdateSourceFiles) catch @panic("OOM"); const usf = owner.allocator.create(UpdateSourceFiles) catch @panic("OOM");
usf.* = .{ usf.* = .{
.step = Step.init(.{ .step = .init(.{
.id = base_id, .id = base_id,
.name = "UpdateSourceFiles", .name = "UpdateSourceFiles",
.owner = owner, .owner = owner,
@ -76,7 +76,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
for (usf.output_source_files.items) |output_source_file| { for (usf.output_source_files.items) |output_source_file| {
if (fs.path.dirname(output_source_file.sub_path)) |dirname| { if (fs.path.dirname(output_source_file.sub_path)) |dirname| {
b.build_root.handle.makePath(dirname) catch |err| { b.build_root.handle.makePath(dirname) catch |err| {
return step.fail("unable to make path '{}{s}': {s}", .{ return step.fail("unable to make path '{f}{s}': {s}", .{
b.build_root, dirname, @errorName(err), b.build_root, dirname, @errorName(err),
}); });
}; };
@ -84,7 +84,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
switch (output_source_file.contents) { switch (output_source_file.contents) {
.bytes => |bytes| { .bytes => |bytes| {
b.build_root.handle.writeFile(.{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| { b.build_root.handle.writeFile(.{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| {
return step.fail("unable to write file '{}{s}': {s}", .{ return step.fail("unable to write file '{f}{s}': {s}", .{
b.build_root, output_source_file.sub_path, @errorName(err), b.build_root, output_source_file.sub_path, @errorName(err),
}); });
}; };
@ -101,7 +101,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
output_source_file.sub_path, output_source_file.sub_path,
.{}, .{},
) catch |err| { ) catch |err| {
return step.fail("unable to update file from '{s}' to '{}{s}': {s}", .{ return step.fail("unable to update file from '{s}' to '{f}{s}': {s}", .{
source_path, b.build_root, output_source_file.sub_path, @errorName(err), source_path, b.build_root, output_source_file.sub_path, @errorName(err),
}); });
}; };


@ -67,7 +67,7 @@ pub const Contents = union(enum) {
pub fn create(owner: *std.Build) *WriteFile { pub fn create(owner: *std.Build) *WriteFile {
const write_file = owner.allocator.create(WriteFile) catch @panic("OOM"); const write_file = owner.allocator.create(WriteFile) catch @panic("OOM");
write_file.* = .{ write_file.* = .{
.step = Step.init(.{ .step = .init(.{
.id = base_id, .id = base_id,
.name = "WriteFile", .name = "WriteFile",
.owner = owner, .owner = owner,
@ -217,7 +217,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const src_dir_path = dir.source.getPath3(b, step); const src_dir_path = dir.source.getPath3(b, step);
var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| { var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| {
return step.fail("unable to open source directory '{}': {s}", .{ return step.fail("unable to open source directory '{f}': {s}", .{
src_dir_path, @errorName(err), src_dir_path, @errorName(err),
}); });
}; };
@ -258,7 +258,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
write_file.generated_directory.path = try b.cache_root.join(arena, &.{ "o", &digest }); write_file.generated_directory.path = try b.cache_root.join(arena, &.{ "o", &digest });
var cache_dir = b.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| { var cache_dir = b.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| {
return step.fail("unable to make path '{}{s}': {s}", .{ return step.fail("unable to make path '{f}{s}': {s}", .{
b.cache_root, cache_path, @errorName(err), b.cache_root, cache_path, @errorName(err),
}); });
}; };
@ -269,7 +269,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
for (write_file.files.items) |file| { for (write_file.files.items) |file| {
if (fs.path.dirname(file.sub_path)) |dirname| { if (fs.path.dirname(file.sub_path)) |dirname| {
cache_dir.makePath(dirname) catch |err| { cache_dir.makePath(dirname) catch |err| {
return step.fail("unable to make path '{}{s}{c}{s}': {s}", .{ return step.fail("unable to make path '{f}{s}{c}{s}': {s}", .{
b.cache_root, cache_path, fs.path.sep, dirname, @errorName(err), b.cache_root, cache_path, fs.path.sep, dirname, @errorName(err),
}); });
}; };
@ -277,7 +277,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
switch (file.contents) { switch (file.contents) {
.bytes => |bytes| { .bytes => |bytes| {
cache_dir.writeFile(.{ .sub_path = file.sub_path, .data = bytes }) catch |err| { cache_dir.writeFile(.{ .sub_path = file.sub_path, .data = bytes }) catch |err| {
return step.fail("unable to write file '{}{s}{c}{s}': {s}", .{ return step.fail("unable to write file '{f}{s}{c}{s}': {s}", .{
b.cache_root, cache_path, fs.path.sep, file.sub_path, @errorName(err), b.cache_root, cache_path, fs.path.sep, file.sub_path, @errorName(err),
}); });
}; };
@ -291,7 +291,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
file.sub_path, file.sub_path,
.{}, .{},
) catch |err| { ) catch |err| {
return step.fail("unable to update file from '{s}' to '{}{s}{c}{s}': {s}", .{ return step.fail("unable to update file from '{s}' to '{f}{s}{c}{s}': {s}", .{
source_path, source_path,
b.cache_root, b.cache_root,
cache_path, cache_path,
@ -315,7 +315,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
if (dest_dirname.len != 0) { if (dest_dirname.len != 0) {
cache_dir.makePath(dest_dirname) catch |err| { cache_dir.makePath(dest_dirname) catch |err| {
return step.fail("unable to make path '{}{s}{c}{s}': {s}", .{ return step.fail("unable to make path '{f}{s}{c}{s}': {s}", .{
b.cache_root, cache_path, fs.path.sep, dest_dirname, @errorName(err), b.cache_root, cache_path, fs.path.sep, dest_dirname, @errorName(err),
}); });
}; };
@ -338,7 +338,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
dest_path, dest_path,
.{}, .{},
) catch |err| { ) catch |err| {
return step.fail("unable to update file from '{}' to '{}{s}{c}{s}': {s}", .{ return step.fail("unable to update file from '{f}' to '{f}{s}{c}{s}': {s}", .{
src_entry_path, b.cache_root, cache_path, fs.path.sep, dest_path, @errorName(err), src_entry_path, b.cache_root, cache_path, fs.path.sep, dest_path, @errorName(err),
}); });
}; };


@ -211,7 +211,7 @@ const Os = switch (builtin.os.tag) {
.ADD = true, .ADD = true,
.ONLYDIR = true, .ONLYDIR = true,
}, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| { }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| {
fatal("unable to watch {}: {s}", .{ path, @errorName(err) }); fatal("unable to watch {f}: {s}", .{ path, @errorName(err) });
}; };
} }
break :rs &dh_gop.value_ptr.reaction_set; break :rs &dh_gop.value_ptr.reaction_set;
@ -265,7 +265,7 @@ const Os = switch (builtin.os.tag) {
.ONLYDIR = true, .ONLYDIR = true,
}, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| switch (err) { }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| switch (err) {
error.FileNotFound => {}, // Expected, harmless. error.FileNotFound => {}, // Expected, harmless.
else => |e| std.log.warn("unable to unwatch '{}': {s}", .{ path, @errorName(e) }), else => |e| std.log.warn("unable to unwatch '{f}': {s}", .{ path, @errorName(e) }),
}; };
w.dir_table.swapRemoveAt(i); w.dir_table.swapRemoveAt(i);


@ -152,15 +152,13 @@ fn parseNum(text: []const u8) error{ InvalidVersion, Overflow }!usize {
pub fn format( pub fn format(
self: Version, self: Version,
bw: *std.io.BufferedWriter,
comptime fmt: []const u8, comptime fmt: []const u8,
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void { ) !void {
_ = options;
if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self); if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self);
try std.fmt.format(out_stream, "{d}.{d}.{d}", .{ self.major, self.minor, self.patch }); try bw.print("{d}.{d}.{d}", .{ self.major, self.minor, self.patch });
if (self.pre) |pre| try std.fmt.format(out_stream, "-{s}", .{pre}); if (self.pre) |pre| try bw.print("-{s}", .{pre});
if (self.build) |build| try std.fmt.format(out_stream, "+{s}", .{build}); if (self.build) |build| try bw.print("+{s}", .{build});
} }
const expect = std.testing.expect; const expect = std.testing.expect;


@ -423,7 +423,7 @@ pub fn zigTriple(self: Query, gpa: Allocator) Allocator.Error![]u8 {
try formatVersion(v, gpa, &result); try formatVersion(v, gpa, &result);
}, },
.windows => |v| { .windows => |v| {
try result.print(gpa, "{s}", .{v}); try result.print(gpa, "{d}", .{v});
}, },
} }
} }
@ -437,7 +437,7 @@ pub fn zigTriple(self: Query, gpa: Allocator) Allocator.Error![]u8 {
.windows => |v| { .windows => |v| {
// This is counting on a custom format() function defined on `WindowsVersion` // This is counting on a custom format() function defined on `WindowsVersion`
// to add a prefix '.' and make there be a total of three dots. // to add a prefix '.' and make there be a total of three dots.
try result.print(gpa, "..{s}", .{v}); try result.print(gpa, "..{d}", .{v});
}, },
} }
} }


@ -38,8 +38,6 @@ pub fn LinearFifo(
count: usize, count: usize,
const Self = @This(); const Self = @This();
pub const Reader = std.io.Reader(*Self, error{}, readFn);
pub const Writer = std.io.Writer(*Self, error{OutOfMemory}, appendWrite);
// Type of Self argument for slice operations. // Type of Self argument for slice operations.
// If buffer is inline (Static) then we need to ensure we haven't // If buffer is inline (Static) then we need to ensure we haven't
@ -236,8 +234,31 @@ pub fn LinearFifo(
return self.read(dest); return self.read(dest);
} }
pub fn reader(self: *Self) Reader { pub fn reader(self: *Self) std.io.Reader {
return .{ .context = self }; return .{
.context = self,
.vtable = &.{
.read = &reader_read,
.readv = &reader_readv,
},
};
}
fn reader_read(
ctx: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) anyerror!std.io.Reader.Status {
const fifo: *Self = @alignCast(@ptrCast(ctx));
_ = fifo;
_ = bw;
_ = limit;
@panic("TODO");
}
fn reader_readv(ctx: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status {
const fifo: *Self = @alignCast(@ptrCast(ctx));
_ = fifo;
_ = data;
@panic("TODO");
} }
/// Returns number of items available in fifo /// Returns number of items available in fifo
@ -326,8 +347,38 @@ pub fn LinearFifo(
return bytes.len; return bytes.len;
} }
pub fn writer(self: *Self) Writer { pub fn writer(fifo: *Self) std.io.Writer {
return .{ .context = self }; return .{
.context = fifo,
.vtable = &.{
.writeSplat = writer_writeSplat,
.writeFile = writer_writeFile,
},
};
}
fn writer_writeSplat(ctx: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
const fifo: *Self = @alignCast(@ptrCast(ctx));
_ = fifo;
_ = data;
_ = splat;
@panic("TODO");
}
fn writer_writeFile(
ctx: ?*anyopaque,
file: std.fs.File,
offset: std.io.Writer.Offset,
limit: std.io.Writer.Limit,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!usize {
const fifo: *Self = @alignCast(@ptrCast(ctx));
_ = fifo;
_ = file;
_ = offset;
_ = limit;
_ = headers_and_trailers;
_ = headers_len;
@panic("TODO");
} }
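
Both vtable callbacks above are left as `@panic("TODO")` stubs. A plausible `writer_writeSplat` body, assuming `splat` means the final slice of `data` is logically repeated `splat` times (my reading of the new `Writer` contract, not confirmed by this diff; `fifo.write` is the existing byte-fifo append):

    fn writer_writeSplat(ctx: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
        const fifo: *Self = @alignCast(@ptrCast(ctx));
        if (data.len == 0) return 0;
        var written: usize = 0;
        // Every slice except the last is written exactly once.
        for (data[0 .. data.len - 1]) |bytes| {
            try fifo.write(bytes);
            written += bytes.len;
        }
        // The final slice is repeated `splat` times.
        const last = data[data.len - 1];
        for (0..splat) |_| {
            try fifo.write(last);
            written += last.len;
        }
        return written;
    }
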
/// Make `count` items available before the current read location /// Make `count` items available before the current read location


@ -451,12 +451,10 @@ fn SliceEscape(comptime case: Case) type {
return struct { return struct {
pub fn format( pub fn format(
bytes: []const u8, bytes: []const u8,
bw: *std.io.BufferedWriter,
comptime fmt: []const u8, comptime fmt: []const u8,
options: std.fmt.Options,
writer: anytype,
) !void { ) !void {
_ = fmt; _ = fmt;
_ = options;
var buf: [4]u8 = undefined; var buf: [4]u8 = undefined;
buf[0] = '\\'; buf[0] = '\\';
@ -464,11 +462,11 @@ fn SliceEscape(comptime case: Case) type {
for (bytes) |c| { for (bytes) |c| {
if (std.ascii.isPrint(c)) { if (std.ascii.isPrint(c)) {
try writer.writeByte(c); try bw.writeByte(c);
} else { } else {
buf[2] = charset[c >> 4]; buf[2] = charset[c >> 4];
buf[3] = charset[c & 15]; buf[3] = charset[c & 15];
try writer.writeAll(&buf); try bw.writeAll(&buf);
} }
} }
} }
@ -535,11 +533,10 @@ pub fn Formatter(comptime formatFn: anytype) type {
data: Data, data: Data,
pub fn format( pub fn format(
self: @This(), self: @This(),
comptime fmt: []const u8,
options: std.fmt.Options,
writer: *std.io.BufferedWriter, writer: *std.io.BufferedWriter,
comptime fmt: []const u8,
) anyerror!void { ) anyerror!void {
try formatFn(self.data, fmt, options, writer); try formatFn(self.data, writer, fmt);
} }
}; };
} }


@ -1979,10 +1979,45 @@ pub fn readFileAlloc(
/// * `error.FileTooBig` is returned. /// * `error.FileTooBig` is returned.
limit: std.io.Reader.Limit, limit: std.io.Reader.Limit,
) (File.OpenError || File.ReadAllocError)![]u8 { ) (File.OpenError || File.ReadAllocError)![]u8 {
var buffer: std.ArrayListUnmanaged(u8) = .empty; return dir.readFileAllocOptions(file_path, gpa, limit, null, .of(u8), null);
}
/// Reads all the bytes from the named file. On success, caller owns returned
/// buffer.
pub fn readFileAllocOptions(
dir: Dir,
/// On Windows, should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// On WASI, should be encoded as valid UTF-8.
/// On other platforms, an opaque sequence of bytes with no particular encoding.
file_path: []const u8,
/// Used to allocate the result.
gpa: mem.Allocator,
/// If exceeded:
/// * The array list's length is increased by exactly one byte past `limit`.
/// * The file seek position is advanced by exactly one byte past `limit`.
/// * `error.FileTooBig` is returned.
limit: std.io.Reader.Limit,
/// If specified, the initial buffer size is calculated using this value,
/// otherwise the effective file size is used instead.
size_hint: ?usize,
comptime alignment: std.mem.Alignment,
comptime optional_sentinel: ?u8,
) (File.OpenError || File.ReadAllocError)!(if (optional_sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
var buffer: std.ArrayListAlignedUnmanaged(u8, alignment) = .empty;
defer buffer.deinit(gpa); defer buffer.deinit(gpa);
try readFileIntoArrayList(dir, file_path, gpa, limit, null, &buffer); try readFileIntoArrayList(
return buffer.toOwnedSlice(gpa); dir,
file_path,
gpa,
limit,
if (size_hint) |sh| sh +| 1 else null,
alignment,
&buffer,
);
return if (optional_sentinel) |sentinel|
buffer.toOwnedSliceSentinel(gpa, sentinel)
else
buffer.toOwnedSlice(gpa);
} }
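
A call-site sketch for the widened API; the path, limit, and sentinel are illustrative (compare the test-harness call to `readFileAllocOptions` near the end of this diff):

    // Read at most 10 MiB at natural u8 alignment, appending a 0 sentinel.
    const source = try dir.readFileAllocOptions(
        "src/main.zig",
        gpa,
        .limited(10 * 1024 * 1024),
        null, // no size hint: fall back to the effective file size
        .of(u8),
        0, // sentinel byte
    );
    defer gpa.free(source);
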
/// Reads all the bytes from the named file, appending them into the provided /// Reads all the bytes from the named file, appending them into the provided
@ -2004,7 +2039,7 @@ pub fn readFileIntoArrayList(
/// otherwise the effective file size is used instead. /// otherwise the effective file size is used instead.
size_hint: ?usize, size_hint: ?usize,
comptime alignment: ?std.mem.Alignment, comptime alignment: ?std.mem.Alignment,
list: *std.ArrayListAligned(u8, alignment), list: *std.ArrayListAlignedUnmanaged(u8, alignment),
) (File.OpenError || File.ReadAllocError)!void { ) (File.OpenError || File.ReadAllocError)!void {
var file = try dir.openFile(file_path, .{}); var file = try dir.openFile(file_path, .{});
defer file.close(); defer file.close();


@ -1169,7 +1169,7 @@ pub fn readIntoArrayList(
gpa: Allocator, gpa: Allocator,
limit: std.io.Reader.Limit, limit: std.io.Reader.Limit,
comptime alignment: ?std.mem.Alignment, comptime alignment: ?std.mem.Alignment,
list: *std.ArrayListAligned(u8, alignment), list: *std.ArrayListAlignedUnmanaged(u8, alignment),
) ReadAllocError!void { ) ReadAllocError!void {
var remaining = limit; var remaining = limit;
while (true) { while (true) {
@ -1676,7 +1676,7 @@ fn streamReadVec(context: ?*anyopaque, data: []const []u8) anyerror!std.io.Reade
return .{ .len = @intCast(n), .end = n == 0 }; return .{ .len = @intCast(n), .end = n == 0 };
} }
fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize { pub fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
const handle = opaqueToHandle(context); const handle = opaqueToHandle(context);
var splat_buffer: [256]u8 = undefined; var splat_buffer: [256]u8 = undefined;
if (is_windows) { if (is_windows) {
@ -1716,7 +1716,7 @@ fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anye
return std.posix.writev(handle, iovecs[0..len]); return std.posix.writev(handle, iovecs[0..len]);
} }
fn writeFile( pub fn writeFile(
context: ?*anyopaque, context: ?*anyopaque,
in_file: std.fs.File, in_file: std.fs.File,
in_offset: std.io.Writer.Offset, in_offset: std.io.Writer.Offset,
@ -1727,8 +1727,8 @@ fn writeFile(
const out_fd = opaqueToHandle(context); const out_fd = opaqueToHandle(context);
const in_fd = in_file.handle; const in_fd = in_file.handle;
const len_int = switch (in_limit) { const len_int = switch (in_limit) {
.zero => return writeSplat(context, headers_and_trailers, 1), .nothing => return writeSplat(context, headers_and_trailers, 1),
.none => 0, .unlimited => 0,
else => in_limit.toInt().?, else => in_limit.toInt().?,
}; };
if (native_os == .linux) sf: { if (native_os == .linux) sf: {


@ -593,8 +593,46 @@ pub const Request = struct {
HttpHeadersOversize, HttpHeadersOversize,
}; };
fn contentLengthReader_read(
ctx: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) anyerror!std.io.Reader.Status {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = bw;
_ = limit;
@panic("TODO");
}
fn contentLengthReader_readv(ctx: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = data;
@panic("TODO");
}
fn chunkedReader_read(
ctx: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) anyerror!std.io.Reader.Status {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = bw;
_ = limit;
@panic("TODO");
}
fn chunkedReader_readv(ctx: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status {
const request: *Request = @alignCast(@ptrCast(ctx));
_ = request;
_ = data;
@panic("TODO");
}
fn read_cl(context: *const anyopaque, buffer: []u8) ReadError!usize { fn read_cl(context: *const anyopaque, buffer: []u8) ReadError!usize {
const request: *Request = @constCast(@alignCast(@ptrCast(context))); const request: *Request = @alignCast(@ptrCast(context));
const s = request.server; const s = request.server;
const remaining_content_length = &request.reader_state.remaining_content_length; const remaining_content_length = &request.reader_state.remaining_content_length;
@ -622,7 +660,7 @@ pub const Request = struct {
} }
fn read_chunked(context: *const anyopaque, buffer: []u8) ReadError!usize { fn read_chunked(context: *const anyopaque, buffer: []u8) ReadError!usize {
const request: *Request = @constCast(@alignCast(@ptrCast(context))); const request: *Request = @alignCast(@ptrCast(context));
const s = request.server; const s = request.server;
const cp = &request.reader_state.chunk_parser; const cp = &request.reader_state.chunk_parser;
@ -724,7 +762,7 @@ pub const Request = struct {
/// request's expect field to `null`. /// request's expect field to `null`.
/// ///
/// Asserts that this function is only called once. /// Asserts that this function is only called once.
pub fn reader(request: *Request) ReaderError!std.io.AnyReader { pub fn reader(request: *Request) ReaderError!std.io.Reader {
const s = request.server; const s = request.server;
assert(s.state == .received_head); assert(s.state == .received_head);
s.state = .receiving_body; s.state = .receiving_body;
@ -747,8 +785,11 @@ pub const Request = struct {
.chunked => { .chunked => {
request.reader_state = .{ .chunk_parser = http.ChunkParser.init }; request.reader_state = .{ .chunk_parser = http.ChunkParser.init };
return .{ return .{
.readFn = read_chunked,
.context = request, .context = request,
.vtable = &.{
.read = &chunkedReader_read,
.readv = &chunkedReader_readv,
},
}; };
}, },
.none => { .none => {
@ -756,8 +797,11 @@ pub const Request = struct {
.remaining_content_length = request.head.content_length orelse 0, .remaining_content_length = request.head.content_length orelse 0,
}; };
return .{ return .{
.readFn = read_cl,
.context = request, .context = request,
.vtable = &.{
.read = &contentLengthReader_read,
.readv = &contentLengthReader_readv,
},
}; };
}, },
} }
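
With the vtable in place, callers now receive a plain `std.io.Reader` instead of `std.io.AnyReader`. A consumption sketch; the allocator and size cap are illustrative:

    const body_reader = try request.reader();
    const body = try body_reader.readAlloc(gpa, 8 * 1024 * 1024);
    defer gpa.free(body);
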
@ -779,7 +823,7 @@ pub const Request = struct {
if (keep_alive and request.head.keep_alive) switch (s.state) { if (keep_alive and request.head.keep_alive) switch (s.state) {
.received_head => { .received_head => {
const r = request.reader() catch return false; const r = request.reader() catch return false;
_ = r.discard() catch return false; _ = r.discardUntilEnd() catch return false;
assert(s.state == .ready); assert(s.state == .ready);
return true; return true;
}, },
@ -868,30 +912,30 @@ pub const Response = struct {
} }
} }
fn cl_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize { fn cl_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
_ = splat; _ = splat;
return cl_write(context, data[0]); // TODO: try to send all the data return cl_write(context, data[0]); // TODO: try to send all the data
} }
fn cl_writeFile( fn cl_writeFile(
context: *anyopaque, context: ?*anyopaque,
file: std.fs.File, file: std.fs.File,
offset: u64, offset: std.io.Writer.Offset,
len: std.io.Writer.FileLen, limit: std.io.Writer.Limit,
headers_and_trailers: []const []const u8, headers_and_trailers: []const []const u8,
headers_len: usize, headers_len: usize,
) anyerror!usize { ) anyerror!usize {
_ = context; _ = context;
_ = file; _ = file;
_ = offset; _ = offset;
_ = len; _ = limit;
_ = headers_and_trailers; _ = headers_and_trailers;
_ = headers_len; _ = headers_len;
return error.Unimplemented; return error.Unimplemented;
} }
fn cl_write(context: *anyopaque, bytes: []const u8) anyerror!usize { fn cl_write(context: ?*anyopaque, bytes: []const u8) anyerror!usize {
const r: *Response = @constCast(@alignCast(@ptrCast(context))); const r: *Response = @alignCast(@ptrCast(context));
var trash: u64 = std.math.maxInt(u64); var trash: u64 = std.math.maxInt(u64);
const len = switch (r.transfer_encoding) { const len = switch (r.transfer_encoding) {
@ -935,30 +979,30 @@ pub const Response = struct {
return bytes.len; return bytes.len;
} }
fn chunked_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize { fn chunked_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
_ = splat; _ = splat;
return chunked_write(context, data[0]); // TODO: try to send all the data return chunked_write(context, data[0]); // TODO: try to send all the data
} }
fn chunked_writeFile( fn chunked_writeFile(
context: *anyopaque, context: ?*anyopaque,
file: std.fs.File, file: std.fs.File,
offset: u64, offset: std.io.Writer.Offset,
len: std.io.Writer.FileLen, limit: std.io.Writer.Limit,
headers_and_trailers: []const []const u8, headers_and_trailers: []const []const u8,
headers_len: usize, headers_len: usize,
) anyerror!usize { ) anyerror!usize {
_ = context; _ = context;
_ = file; _ = file;
_ = offset; _ = offset;
_ = len; _ = limit;
_ = headers_and_trailers; _ = headers_and_trailers;
_ = headers_len; _ = headers_len;
return error.Unimplemented; // TODO lower to a call to writeFile on the output return error.Unimplemented; // TODO lower to a call to writeFile on the output
} }
fn chunked_write(context: *anyopaque, bytes: []const u8) anyerror!usize { fn chunked_write(context: ?*anyopaque, bytes: []const u8) anyerror!usize {
const r: *Response = @constCast(@alignCast(@ptrCast(context))); const r: *Response = @alignCast(@ptrCast(context));
assert(r.transfer_encoding == .chunked); assert(r.transfer_encoding == .chunked);
if (r.elide_body) if (r.elide_body)


@ -57,8 +57,8 @@ pub fn init(
ws.* = .{ ws.* = .{
.key = key, .key = key,
.recv_fifo = std.fifo.LinearFifo(u8, .Slice).init(recv_buffer), .recv_fifo = .init(recv_buffer),
.reader = try request.reader(), .reader = undefined,
.response = request.respondStreaming(.{ .response = request.respondStreaming(.{
.send_buffer = send_buffer, .send_buffer = send_buffer,
.respond_options = .{ .respond_options = .{
@ -74,6 +74,7 @@ pub fn init(
.request = request, .request = request,
.outstanding_len = 0, .outstanding_len = 0,
}; };
ws.reader.init(try request.reader(), &.{});
return true; return true;
} }


@ -28,12 +28,12 @@ const vtable: std.io.Writer.VTable = .{
/// Sets the `AllocatingWriter` to an empty state. /// Sets the `AllocatingWriter` to an empty state.
pub fn init(aw: *AllocatingWriter, allocator: std.mem.Allocator) void { pub fn init(aw: *AllocatingWriter, allocator: std.mem.Allocator) void {
initOwnedSlice(aw, allocator, &.{}); aw.initOwnedSlice(allocator, &.{});
} }
pub fn initCapacity(aw: *AllocatingWriter, allocator: std.mem.Allocator, capacity: usize) error{OutOfMemory}!void { pub fn initCapacity(aw: *AllocatingWriter, allocator: std.mem.Allocator, capacity: usize) error{OutOfMemory}!void {
const initial_buffer = try allocator.alloc(u8, capacity); const initial_buffer = try allocator.alloc(u8, capacity);
initOwnedSlice(aw, allocator, initial_buffer); aw.initOwnedSlice(allocator, initial_buffer);
} }
pub fn initOwnedSlice(aw: *AllocatingWriter, allocator: std.mem.Allocator, slice: []u8) void { pub fn initOwnedSlice(aw: *AllocatingWriter, allocator: std.mem.Allocator, slice: []u8) void {
@ -119,11 +119,15 @@ pub fn getWritten(aw: *AllocatingWriter) []u8 {
return written; return written;
} }
pub fn clearRetainingCapacity(aw: *AllocatingWriter) void { pub fn shrinkRetainingCapacity(aw: *AllocatingWriter, new_len: usize) void {
const bw = &aw.buffered_writer; const bw = &aw.buffered_writer;
bw.buffer = aw.written.ptr[0 .. aw.written.len + bw.buffer.len]; bw.buffer = aw.written.ptr[new_len .. aw.written.len + bw.buffer.len];
bw.end = 0; bw.end = 0;
aw.written.len = 0; aw.written.len = new_len;
}
pub fn clearRetainingCapacity(aw: *AllocatingWriter) void {
aw.shrinkRetainingCapacity(0);
} }
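
A usage sketch for the generalized helper; buffer contents are illustrative:

    var aw: std.io.AllocatingWriter = undefined;
    aw.init(gpa);
    defer aw.deinit();

    try aw.buffered_writer.writeAll("hello, world");
    aw.shrinkRetainingCapacity(5); // keep "hello", recycle the rest of the buffer
    aw.clearRetainingCapacity(); // now equivalent to shrinkRetainingCapacity(0)
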
fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize { fn writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
@ -161,7 +165,7 @@ fn writeFile(
context: ?*anyopaque, context: ?*anyopaque,
file: std.fs.File, file: std.fs.File,
offset: std.io.Writer.Offset, offset: std.io.Writer.Offset,
len: std.io.Writer.FileLen, limit: std.io.Writer.Limit,
headers_and_trailers_full: []const []const u8, headers_and_trailers_full: []const []const u8,
headers_len_full: usize, headers_len_full: usize,
) anyerror!usize { ) anyerror!usize {
@ -177,7 +181,7 @@ fn writeFile(
} else .{ headers_and_trailers_full, headers_len_full }; } else .{ headers_and_trailers_full, headers_len_full };
const trailers = headers_and_trailers[headers_len..]; const trailers = headers_and_trailers[headers_len..];
const pos = offset.toInt() orelse @panic("TODO treat file as stream"); const pos = offset.toInt() orelse @panic("TODO treat file as stream");
if (len == .entire_file) { const limit_int = limit.toInt() orelse {
var new_capacity: usize = list.capacity + std.atomic.cache_line; var new_capacity: usize = list.capacity + std.atomic.cache_line;
for (headers_and_trailers) |bytes| new_capacity += bytes.len; for (headers_and_trailers) |bytes| new_capacity += bytes.len;
try list.ensureTotalCapacity(gpa, new_capacity); try list.ensureTotalCapacity(gpa, new_capacity);
@ -193,12 +197,12 @@ fn writeFile(
} }
list.items.len += n; list.items.len += n;
return list.items.len - start_len; return list.items.len - start_len;
} };
var new_capacity: usize = list.capacity + len.int(); var new_capacity: usize = list.capacity + limit_int;
for (headers_and_trailers) |bytes| new_capacity += bytes.len; for (headers_and_trailers) |bytes| new_capacity += bytes.len;
try list.ensureTotalCapacity(gpa, new_capacity); try list.ensureTotalCapacity(gpa, new_capacity);
for (headers_and_trailers[0..headers_len]) |bytes| list.appendSliceAssumeCapacity(bytes); for (headers_and_trailers[0..headers_len]) |bytes| list.appendSliceAssumeCapacity(bytes);
const dest = list.items.ptr[list.items.len..][0..len.int()]; const dest = list.items.ptr[list.items.len..][0..limit_int];
const n = try file.pread(dest, pos); const n = try file.pread(dest, pos);
list.items.len += n; list.items.len += n;
if (n < dest.len) { if (n < dest.len) {


@ -253,18 +253,17 @@ pub fn discardUpTo(br: *BufferedReader, n: usize) anyerror!usize {
const proposed_seek = br.seek + remaining; const proposed_seek = br.seek + remaining;
if (proposed_seek <= storage.end) { if (proposed_seek <= storage.end) {
br.seek = proposed_seek; br.seek = proposed_seek;
return; return n;
} }
remaining -= (storage.end - br.seek); remaining -= (storage.end - br.seek);
storage.end = 0; storage.end = 0;
br.seek = 0; br.seek = 0;
const result = try br.unbuffered_reader.read(&storage, .none); const result = try br.unbuffered_reader.read(storage, .unlimited);
result.write_err catch unreachable;
try result.read_err;
assert(result.len == storage.end); assert(result.len == storage.end);
if (remaining <= storage.end) continue; if (remaining <= storage.end) continue;
if (result.end) return n - remaining; if (result.end) return n - remaining;
} }
return n;
} }
/// Reads the stream until the end, ignoring all the data. /// Reads the stream until the end, ignoring all the data.
@ -302,7 +301,7 @@ pub fn read(br: *BufferedReader, buffer: []u8) anyerror!void {
br.seek = 0; br.seek = 0;
var i: usize = in_buffer.len; var i: usize = in_buffer.len;
while (true) { while (true) {
const status = try br.unbuffered_reader.read(storage, .none); const status = try br.unbuffered_reader.read(storage, .unlimited);
const next_i = i + storage.end; const next_i = i + storage.end;
if (next_i >= buffer.len) { if (next_i >= buffer.len) {
const remaining = buffer[i..]; const remaining = buffer[i..];
@ -389,7 +388,7 @@ pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8
/// * `peekDelimiterConclusive` /// * `peekDelimiterConclusive`
pub fn takeDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 { pub fn takeDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8 {
const result = try peekDelimiterConclusive(br, delimiter); const result = try peekDelimiterConclusive(br, delimiter);
toss(result.len); br.toss(result.len);
return result; return result;
} }
@ -407,7 +406,7 @@ pub fn peekDelimiterConclusive(br: *BufferedReader, delimiter: u8) anyerror![]u8
storage.end = i; storage.end = i;
br.seek = 0; br.seek = 0;
while (i < storage.buffer.len) { while (i < storage.buffer.len) {
const status = try br.unbuffered_reader.read(storage, .none); const status = try br.unbuffered_reader.read(storage, .unlimited);
if (std.mem.indexOfScalarPos(u8, storage.buffer[0..storage.end], i, delimiter)) |end| { if (std.mem.indexOfScalarPos(u8, storage.buffer[0..storage.end], i, delimiter)) |end| {
return storage.buffer[0 .. end + 1]; return storage.buffer[0 .. end + 1];
} }
@ -505,7 +504,7 @@ pub fn fill(br: *BufferedReader, n: usize) anyerror!void {
storage.end = remainder.len; storage.end = remainder.len;
br.seek = 0; br.seek = 0;
while (true) { while (true) {
const status = try br.unbuffered_reader.read(storage, .none); const status = try br.unbuffered_reader.read(storage, .unlimited);
if (n <= storage.end) return; if (n <= storage.end) return;
if (status.end) return error.EndOfStream; if (status.end) return error.EndOfStream;
} }
@ -589,7 +588,7 @@ fn takeMultipleOf7Leb128(br: *BufferedReader, comptime Result: type) anyerror!Re
const buffer: []const packed struct(u8) { bits: u7, more: bool } = @ptrCast(try br.peekAll(1)); const buffer: []const packed struct(u8) { bits: u7, more: bool } = @ptrCast(try br.peekAll(1));
for (buffer, 1..) |byte, len| { for (buffer, 1..) |byte, len| {
if (remaining_bits > 0) { if (remaining_bits > 0) {
result = @shlExact(@as(UnsignedResult, byte.bits), result_info.bits - 7) | @shrExact(result, 7); result = @shlExact(@as(UnsignedResult, byte.bits), result_info.bits - 7) | if (result_info.bits > 7) @shrExact(result, 7) else 0;
remaining_bits -= 7; remaining_bits -= 7;
} else if (fits) fits = switch (result_info.signedness) { } else if (fits) fits = switch (result_info.signedness) {
.signed => @as(i7, @bitCast(byte.bits)) == @as(i7, @truncate(@as(Result, @bitCast(result)) >> (result_info.bits - 1))), .signed => @as(i7, @bitCast(byte.bits)) == @as(i7, @truncate(@as(Result, @bitCast(result)) >> (result_info.bits - 1))),


@ -503,7 +503,7 @@ pub const WriteFileOptions = struct {
offset: Writer.Offset = .none, offset: Writer.Offset = .none,
/// If the size of the source file is known, it is likely that passing the /// If the size of the source file is known, it is likely that passing the
/// size here will save one syscall. /// size here will save one syscall.
limit: Writer.Limit = .none, limit: Writer.Limit = .unlimited,
/// Headers and trailers must be passed together so that in case `len` is /// Headers and trailers must be passed together so that in case `len` is
/// zero, they can be forwarded directly to `Writer.VTable.writev`. /// zero, they can be forwarded directly to `Writer.VTable.writev`.
/// ///
@ -518,8 +518,9 @@ pub const WriteFileOptions = struct {
pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOptions) anyerror!void { pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOptions) anyerror!void {
const headers_and_trailers = options.headers_and_trailers; const headers_and_trailers = options.headers_and_trailers;
const headers = headers_and_trailers[0..options.headers_len]; const headers = headers_and_trailers[0..options.headers_len];
if (options.limit == .zero) return writevAll(bw, headers_and_trailers); switch (options.limit) {
if (options.limit == .none) { .nothing => return writevAll(bw, headers_and_trailers),
.unlimited => {
// When reading the whole file, we cannot include the trailers in the // When reading the whole file, we cannot include the trailers in the
// call that reads from the file handle, because we have no way to // call that reads from the file handle, because we have no way to
// determine whether a partial write is past the end of the file or // determine whether a partial write is past the end of the file or
@ -527,7 +528,7 @@ pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOp
var i: usize = 0; var i: usize = 0;
var offset = options.offset; var offset = options.offset;
while (true) { while (true) {
var n = try writeFile(bw, file, offset, .entire_file, headers[i..], headers.len - i); var n = try writeFile(bw, file, offset, .unlimited, headers[i..], headers.len - i);
while (i < headers.len and n >= headers[i].len) { while (i < headers.len and n >= headers[i].len) {
n -= headers[i].len; n -= headers[i].len;
i += 1; i += 1;
@ -537,14 +538,15 @@ pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOp
continue; continue;
} }
if (n == 0) break; if (n == 0) break;
offset += n; offset = offset.advance(n);
} }
} else { },
else => {
var len = options.limit.toInt().?; var len = options.limit.toInt().?;
var i: usize = 0; var i: usize = 0;
var offset = options.offset; var offset = options.offset;
while (true) { while (true) {
var n = try writeFile(bw, file, offset, .init(len), headers_and_trailers[i..], headers.len - i); var n = try writeFile(bw, file, offset, .limited(len), headers_and_trailers[i..], headers.len - i);
while (i < headers.len and n >= headers[i].len) { while (i < headers.len and n >= headers[i].len) {
n -= headers[i].len; n -= headers[i].len;
i += 1; i += 1;
@ -564,9 +566,10 @@ pub fn writeFileAll(bw: *BufferedWriter, file: std.fs.File, options: WriteFileOp
headers_and_trailers[i] = headers_and_trailers[i][n..]; headers_and_trailers[i] = headers_and_trailers[i][n..];
return writevAll(bw, headers_and_trailers[i..]); return writevAll(bw, headers_and_trailers[i..]);
} }
offset += n; offset = offset.advance(n);
len -= n; len -= n;
} }
},
} }
} }
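
Call-site sketch with the renamed option, mirroring the tar writer change later in this diff; per the doc comment above, passing the known size likely saves a syscall:

    const stat = try file.stat();
    try bw.writeFileAll(file, .{ .limit = .limited(stat.size) });
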
@ -717,9 +720,9 @@ pub fn printValue(
} }
} }
try bw.writeByteCount('('); try bw.writeByte('(');
try printValue(bw, actual_fmt, options, @intFromEnum(value), max_depth); try printValue(bw, actual_fmt, options, @intFromEnum(value), max_depth);
try bw.writeByteCount(')'); try bw.writeByte(')');
}, },
.@"union" => |info| { .@"union" => |info| {
if (actual_fmt.len != 0) invalidFmtError(fmt, value); if (actual_fmt.len != 0) invalidFmtError(fmt, value);


@ -48,12 +48,12 @@ pub const Status = packed struct(usize) {
}; };
pub const Limit = enum(usize) { pub const Limit = enum(usize) {
zero = 0, nothing = 0,
none = std.math.maxInt(usize), unlimited = std.math.maxInt(usize),
_, _,
/// `std.math.maxInt(usize)` is interpreted to mean "no limit". /// `std.math.maxInt(usize)` is interpreted to mean `.unlimited`.
pub fn init(n: usize) Limit { pub fn limited(n: usize) Limit {
return @enumFromInt(n); return @enumFromInt(n);
} }
@ -66,7 +66,10 @@ pub const Limit = enum(usize) {
} }
pub fn toInt(l: Limit) ?usize { pub fn toInt(l: Limit) ?usize {
return if (l == .none) null else @intFromEnum(l); return switch (l) {
else => @intFromEnum(l),
.unlimited => null,
};
} }
/// Reduces a slice to account for the limit, leaving room for one extra /// Reduces a slice to account for the limit, leaving room for one extra
@ -84,7 +87,7 @@ pub const Limit = enum(usize) {
/// Return a new limit reduced by `amount` or return `null` indicating /// Return a new limit reduced by `amount` or return `null` indicating
/// limit would be exceeded. /// limit would be exceeded.
pub fn subtract(l: Limit, amount: usize) ?Limit { pub fn subtract(l: Limit, amount: usize) ?Limit {
if (l == .none) return .{ .next = .none }; if (l == .unlimited) return .unlimited;
if (amount > @intFromEnum(l)) return null; if (amount > @intFromEnum(l)) return null;
return @enumFromInt(@intFromEnum(l) - amount); return @enumFromInt(@intFromEnum(l) - amount);
} }
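
A small sketch of the renamed helpers, assuming this `Limit` is the one referenced as `std.io.Reader.Limit` elsewhere in the diff:

    const Limit = std.io.Reader.Limit;

    const l: Limit = .limited(4096);
    std.debug.assert(l.toInt().? == 4096);
    std.debug.assert(Limit.unlimited.toInt() == null);
    std.debug.assert(l.subtract(4096).? == .nothing);
    std.debug.assert(Limit.unlimited.subtract(1).? == .unlimited);
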
@ -103,7 +106,7 @@ pub fn readAll(r: Reader, w: *std.io.BufferedWriter) anyerror!usize {
const readFn = r.vtable.read; const readFn = r.vtable.read;
var offset: usize = 0; var offset: usize = 0;
while (true) { while (true) {
const status = try readFn(r.context, w, .none); const status = try readFn(r.context, w, .unlimited);
offset += status.len; offset += status.len;
if (status.end) return offset; if (status.end) return offset;
} }
@ -119,21 +122,21 @@ pub fn readAlloc(r: Reader, gpa: std.mem.Allocator, max_size: usize) anyerror![]
const readFn = r.vtable.read; const readFn = r.vtable.read;
var aw: std.io.AllocatingWriter = undefined; var aw: std.io.AllocatingWriter = undefined;
errdefer aw.deinit(); errdefer aw.deinit();
const bw = aw.init(gpa); aw.init(gpa);
var remaining = max_size; var remaining = max_size;
while (remaining > 0) { while (remaining > 0) {
const status = try readFn(r.context, bw, .init(remaining)); const status = try readFn(r.context, &aw.buffered_writer, .limited(remaining));
if (status.end) break; if (status.end) break;
remaining -= status.len; remaining -= status.len;
} }
return aw.toOwnedSlice(gpa); return aw.toOwnedSlice();
} }
/// Reads the stream until the end, ignoring all the data. /// Reads the stream until the end, ignoring all the data.
/// Returns the number of bytes discarded. /// Returns the number of bytes discarded.
pub fn discardUntilEnd(r: Reader) anyerror!usize { pub fn discardUntilEnd(r: Reader) anyerror!usize {
var bw = std.io.null_writer.unbuffered(); var bw = std.io.Writer.null.unbuffered();
return readAll(r, &bw); return r.readAll(&bw);
} }
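
Consumption sketch for the two helpers above; `r` is any `std.io.Reader` and the cap is illustrative:

    // Collect the whole stream, capped at 1 MiB.
    const bytes = try r.readAlloc(gpa, 1024 * 1024);
    defer gpa.free(bytes);

    // Or drain it without keeping the data:
    const n = try r.discardUntilEnd();
    std.log.debug("discarded {d} bytes", .{n});
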
test "readAlloc when the backing reader provides one byte at a time" { test "readAlloc when the backing reader provides one byte at a time" {


@ -60,6 +60,13 @@ pub const Offset = enum(u64) {
pub fn toInt(o: Offset) ?u64 { pub fn toInt(o: Offset) ?u64 {
return if (o == .none) null else @intFromEnum(o); return if (o == .none) null else @intFromEnum(o);
} }
pub fn advance(o: Offset, amount: u64) Offset {
return switch (o) {
.none => .none,
else => .init(@intFromEnum(o) + amount),
};
}
}; };
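
The new helper keeps `.none` (meaning "use the stream position") sticky while advancing explicit offsets; a sketch, assuming the `Offset.init` constructor used in `advance` above:

    var at: std.io.Writer.Offset = .init(0);
    at = at.advance(n); // next write targets byte n

    var cur: std.io.Writer.Offset = .none;
    cur = cur.advance(n); // still .none: the OS seek position already moved
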
pub fn writev(w: Writer, data: []const []const u8) anyerror!usize { pub fn writev(w: Writer, data: []const []const u8) anyerror!usize {
@ -106,7 +113,7 @@ pub fn buffered(w: Writer, buffer: []u8) std.io.BufferedWriter {
} }
pub fn unbuffered(w: Writer) std.io.BufferedWriter { pub fn unbuffered(w: Writer) std.io.BufferedWriter {
return buffered(w, &.{}); return w.buffered(&.{});
} }
/// A `Writer` that discards all data. /// A `Writer` that discards all data.


@ -1853,7 +1853,7 @@ pub const Stream = struct {
}, },
else => &.{ else => &.{
.writeSplat = posix_writeSplat, .writeSplat = posix_writeSplat,
.writeFile = std.fs.File.writer_writeFile, .writeFile = std.fs.File.writeFile,
}, },
}, },
}; };
@ -1960,7 +1960,7 @@ pub const Stream = struct {
return n; return n;
} }
fn posix_writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) anyerror!usize { fn posix_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
const sock_fd = opaqueToHandle(context); const sock_fd = opaqueToHandle(context);
comptime assert(native_os != .windows); comptime assert(native_os != .windows);
var splat_buffer: [256]u8 = undefined; var splat_buffer: [256]u8 = undefined;
@ -2029,7 +2029,7 @@ pub const Stream = struct {
const max_buffers_len = 8; const max_buffers_len = 8;
fn handleToOpaque(handle: Handle) *anyopaque { fn handleToOpaque(handle: Handle) ?*anyopaque {
return switch (@typeInfo(Handle)) { return switch (@typeInfo(Handle)) {
.pointer => @ptrCast(handle), .pointer => @ptrCast(handle),
.int => @ptrFromInt(@as(u32, @bitCast(handle))), .int => @ptrFromInt(@as(u32, @bitCast(handle))),
@ -2037,7 +2037,7 @@ pub const Stream = struct {
}; };
} }
fn opaqueToHandle(userdata: *anyopaque) Handle { fn opaqueToHandle(userdata: ?*anyopaque) Handle {
return switch (@typeInfo(Handle)) { return switch (@typeInfo(Handle)) {
.pointer => @ptrCast(userdata), .pointer => @ptrCast(userdata),
.int => @intCast(@intFromPtr(userdata)), .int => @intCast(@intFromPtr(userdata)),


@ -1004,13 +1004,17 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
fn writeIntFd(fd: i32, value: ErrInt) !void { fn writeIntFd(fd: i32, value: ErrInt) !void {
const file: File = .{ .handle = fd }; const file: File = .{ .handle = fd };
var bw = file.writer().unbuffered(); var buffer: [8]u8 = undefined;
bw.writeInt(u64, @intCast(value), .little) catch return error.SystemResources; std.mem.writeInt(u64, &buffer, @intCast(value), .little);
file.writeAll(&buffer) catch return error.SystemResources;
} }
fn readIntFd(fd: i32) !ErrInt { fn readIntFd(fd: i32) !ErrInt {
const file: File = .{ .handle = fd }; const file: File = .{ .handle = fd };
return @intCast(file.reader().readInt(u64, .little) catch return error.SystemResources); var buffer: [8]u8 = undefined;
const n = file.readAll(&buffer) catch return error.SystemResources;
if (n != buffer.len) return error.SystemResources;
return @intCast(std.mem.readInt(u64, &buffer, .little));
} }
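
Both helpers now move a fixed-width little-endian integer through a plain stack buffer instead of constructing stream adapters for a single 8-byte transfer. A round-trip sketch; the pipe fds are illustrative:

    try writeIntFd(err_pipe[1], @as(ErrInt, @intFromError(err)));
    const value = try readIntFd(err_pipe[0]);
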
const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8); const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8);


@ -44,7 +44,7 @@ pub fn writeFile(self: *Self, sub_path: []const u8, file: std.fs.File) !void {
try header.setMtime(mtime); try header.setMtime(mtime);
try header.write(self.underlying_writer); try header.write(self.underlying_writer);
try self.underlying_writer.writeFileAll(file, .{ .len = .init(stat.size) }); try self.underlying_writer.writeFileAll(file, .{ .limit = .limited(stat.size) });
try self.writePadding(stat.size); try self.writePadding(stat.size);
} }


@ -414,9 +414,8 @@ test fmtId {
/// Print the string as a Zig identifier, escaping it with `@""` syntax if needed. /// Print the string as a Zig identifier, escaping it with `@""` syntax if needed.
fn formatId( fn formatId(
bytes: []const u8, bytes: []const u8,
bw: *std.io.BufferedWriter,
comptime fmt: []const u8, comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: *std.io.BufferedWriter,
) !void { ) !void {
const allow_primitive, const allow_underscore = comptime parse_fmt: { const allow_primitive, const allow_underscore = comptime parse_fmt: {
var allow_primitive = false; var allow_primitive = false;
@ -442,11 +441,11 @@ fn formatId(
(allow_primitive or !std.zig.isPrimitive(bytes)) and (allow_primitive or !std.zig.isPrimitive(bytes)) and
(allow_underscore or !isUnderscore(bytes))) (allow_underscore or !isUnderscore(bytes)))
{ {
return writer.writeAll(bytes); return bw.writeAll(bytes);
} }
try writer.writeAll("@\""); try bw.writeAll("@\"");
try stringEscape(bytes, "", options, writer); try stringEscape(bytes, bw, "");
try writer.writeByte('"'); try bw.writeByte('"');
} }
/// Return a Formatter for Zig Escapes of a double quoted string. /// Return a Formatter for Zig Escapes of a double quoted string.
@ -473,11 +472,9 @@ test fmtEscapes {
/// Format `{'}` treats contents as a single-quoted string. /// Format `{'}` treats contents as a single-quoted string.
pub fn stringEscape( pub fn stringEscape(
bytes: []const u8, bytes: []const u8,
comptime f: []const u8,
options: std.fmt.FormatOptions,
bw: *std.io.BufferedWriter, bw: *std.io.BufferedWriter,
comptime f: []const u8,
) !void { ) !void {
_ = options;
for (bytes) |byte| switch (byte) { for (bytes) |byte| switch (byte) {
'\n' => try bw.writeAll("\\n"), '\n' => try bw.writeAll("\\n"),
'\r' => try bw.writeAll("\\r"), '\r' => try bw.writeAll("\\r"),


@ -190,7 +190,7 @@ fn renderErrorMessageToWriter(
) anyerror!void { ) anyerror!void {
const ttyconf = options.ttyconf; const ttyconf = options.ttyconf;
const err_msg = eb.getErrorMessage(err_msg_index); const err_msg = eb.getErrorMessage(err_msg_index);
const prefix_start = bw.bytes_written; const prefix_start = bw.count;
if (err_msg.src_loc != .none) { if (err_msg.src_loc != .none) {
const src = eb.extraData(SourceLocation, @intFromEnum(err_msg.src_loc)); const src = eb.extraData(SourceLocation, @intFromEnum(err_msg.src_loc));
try bw.splatByteAll(' ', indent); try bw.splatByteAll(' ', indent);
@ -205,7 +205,7 @@ fn renderErrorMessageToWriter(
try bw.writeAll(": "); try bw.writeAll(": ");
// This is the length of the part before the error message: // This is the length of the part before the error message:
// e.g. "file.zig:4:5: error: " // e.g. "file.zig:4:5: error: "
const prefix_len = bw.bytes_written - prefix_start; const prefix_len = bw.count - prefix_start;
try ttyconf.setColor(bw, .reset); try ttyconf.setColor(bw, .reset);
try ttyconf.setColor(bw, .bold); try ttyconf.setColor(bw, .bold);
if (err_msg.count == 1) { if (err_msg.count == 1) {


@ -378,7 +378,7 @@ fn addFromDirInner(
current_file.* = filename; current_file.* = filename;
const max_file_size = 10 * 1024 * 1024; const max_file_size = 10 * 1024 * 1024;
const src = try iterable_dir.readFileAllocOptions(ctx.arena, filename, max_file_size, null, .@"1", 0); const src = try iterable_dir.readFileAllocOptions(filename, ctx.arena, .limited(max_file_size), null, .@"1", 0);
// Parse the manifest // Parse the manifest
var manifest = try TestManifest.parse(ctx.arena, src); var manifest = try TestManifest.parse(ctx.arena, src);


@ -75,7 +75,7 @@ const CheckOutputCaching = struct {
pub fn init(owner: *std.Build, expect_caching: bool, output_paths: []const std.Build.LazyPath) *CheckOutputCaching { pub fn init(owner: *std.Build, expect_caching: bool, output_paths: []const std.Build.LazyPath) *CheckOutputCaching {
const check = owner.allocator.create(CheckOutputCaching) catch @panic("OOM"); const check = owner.allocator.create(CheckOutputCaching) catch @panic("OOM");
check.* = .{ check.* = .{
.step = std.Build.Step.init(.{ .step = .init(.{
.id = .custom, .id = .custom,
.name = "check output caching", .name = "check output caching",
.owner = owner, .owner = owner,
@ -112,7 +112,7 @@ const CheckPathEquality = struct {
pub fn init(owner: *std.Build, expected_equality: bool, output_paths: []const std.Build.LazyPath) *CheckPathEquality { pub fn init(owner: *std.Build, expected_equality: bool, output_paths: []const std.Build.LazyPath) *CheckPathEquality {
const check = owner.allocator.create(CheckPathEquality) catch @panic("OOM"); const check = owner.allocator.create(CheckPathEquality) catch @panic("OOM");
check.* = .{ check.* = .{
.step = std.Build.Step.init(.{ .step = .init(.{
.id = .custom, .id = .custom,
.name = "check output path equality", .name = "check output path equality",
.owner = owner, .owner = owner,


@ -2711,7 +2711,7 @@ pub fn addIncrementalTests(b: *std.Build, test_step: *Step) !void {
run.addArg(b.graph.zig_exe); run.addArg(b.graph.zig_exe);
run.addFileArg(b.path("test/incremental/").path(b, entry.path)); run.addFileArg(b.path("test/incremental/").path(b, entry.path));
run.addArgs(&.{ "--zig-lib-dir", b.fmt("{}", .{b.graph.zig_lib_directory}) }); run.addArgs(&.{ "--zig-lib-dir", b.fmt("{f}", .{b.graph.zig_lib_directory}) });
run.addCheck(.{ .expect_term = .{ .Exited = 0 } }); run.addCheck(.{ .expect_term = .{ .Exited = 0 } });