build_runner: port to std.io.BufferedReader API changes

Jacob Young 2025-04-20 23:20:56 -04:00 committed by Andrew Kelley
parent 7b0d826849
commit 6a0f2227e8
20 changed files with 302 additions and 356 deletions
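As a minimal sketch of the recurring pattern in the hunks below (API names taken from this diff, not a stable std interface): std.io.poll now hands out a *std.io.BufferedReader per stream instead of a FIFO, so the message-framing loops move from readableLength/read to bufferContents/takeStruct/take.

const std = @import("std");

// Sketch only: `poller` is whatever std.io.poll(...) returned; the reader
// methods used here are the ones this diff migrates to.
fn readOneMessage(poller: anytype) !void {
    const Header = std.zig.Server.Message.Header;
    const stdout_br = poller.reader(.stdout);
    // Block until a full header is buffered, or the child closes the pipe.
    while (stdout_br.bufferContents().len < @sizeOf(Header)) {
        if (!try poller.poll()) return;
    }
    // Cannot fail: the loop above guaranteed enough buffered bytes.
    const header = (stdout_br.takeStruct(Header) catch unreachable).*;
    while (stdout_br.bufferContents().len < header.bytes_len) {
        if (!try poller.poll()) return;
    }
    const body = stdout_br.take(header.bytes_len) catch unreachable;
    _ = body; // dispatch on header.tag, as in the real loops below
}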

View File

@ -100,8 +100,7 @@ fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
var sr = connection.stream.reader();
var rb: [0x4000]u8 = undefined;
var br: std.io.BufferedReader = undefined;
br.init(sr.interface(), &rb);
var br = sr.interface().buffered(&rb);
var sw = connection.stream.writer();
var wb: [0x4000]u8 = undefined;
@ -109,7 +108,6 @@ fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
var server: std.http.Server = .init(&br, &bw);
var web_socket: std.http.WebSocket = undefined;
var send_buffer: [0x4000]u8 = undefined;
var ws_recv_buffer: [0x4000]u8 align(4) = undefined;
while (server.state == .ready) {
var request = server.receiveHead() catch |err| switch (err) {
@ -119,7 +117,7 @@ fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
return;
},
};
if (web_socket.init(&request, &send_buffer, &ws_recv_buffer) catch |err| {
if (web_socket.init(&request, &ws_recv_buffer) catch |err| {
log.err("initializing web socket: {s}", .{@errorName(err)});
return;
}) {
@ -281,19 +279,16 @@ fn buildWasmBinary(
try sendMessage(child.stdin.?, .update);
try sendMessage(child.stdin.?, .exit);
const Header = std.zig.Server.Message.Header;
var result: ?Path = null;
var result_error_bundle = std.zig.ErrorBundle.empty;
const stdout = poller.fifo(.stdout);
const stdout_br = poller.reader(.stdout);
poll: while (true) {
while (stdout.readableLength() < @sizeOf(Header)) if (!try poller.poll()) break :poll;
var header: Header = undefined;
assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(Header));
while (stdout.readableLength() < header.bytes_len) if (!try poller.poll()) break :poll;
const body = stdout.readableSliceOfLen(header.bytes_len);
const Header = std.zig.Server.Message.Header;
while (stdout_br.bufferContents().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
const header = (stdout_br.takeStruct(Header) catch unreachable).*;
while (stdout_br.bufferContents().len < header.bytes_len) if (!try poller.poll()) break :poll;
const body = stdout_br.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
@ -330,15 +325,12 @@ fn buildWasmBinary(
},
else => {}, // ignore other messages
}
stdout.discard(body.len);
}
const stderr = poller.fifo(.stderr);
if (stderr.readableLength() > 0) {
const owned_stderr = try stderr.toOwnedSlice();
defer gpa.free(owned_stderr);
std.debug.print("{s}", .{owned_stderr});
const stderr_br = poller.reader(.stderr);
const stderr_contents = stderr_br.bufferContents();
if (stderr_contents.len > 0) {
std.debug.print("{s}", .{stderr_contents});
}
// Send EOF to stdin.
@ -484,9 +476,7 @@ fn serveSourcesTar(ws: *WebServer, request: *std.http.Server.Request) !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var send_buffer: [0x4000]u8 = undefined;
var response = request.respondStreaming(.{
.send_buffer = &send_buffer,
var response = try request.respondStreaming(.{
.respond_options = .{
.extra_headers = &.{
.{ .name = "content-type", .value = "application/x-tar" },
@ -538,7 +528,7 @@ fn serveSourcesTar(ws: *WebServer, request: *std.http.Server.Request) !void {
defer file.close();
archiver.prefix = joined_path.root_dir.path orelse try memoizedCwd(arena, &cwd_cache);
try archiver.writeFile(joined_path.sub_path, file);
try archiver.writeFile(joined_path.sub_path, file, try file.stat());
}
// intentionally omitting the pointless trailer

View File

@ -511,18 +511,15 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
try sendMessage(zp.child.stdin.?, .update);
if (!watch) try sendMessage(zp.child.stdin.?, .exit);
const Header = std.zig.Server.Message.Header;
var result: ?Path = null;
const stdout = zp.poller.fifo(.stdout);
const stdout_br = zp.poller.reader(.stdout);
poll: while (true) {
while (stdout.readableLength() < @sizeOf(Header)) if (!try zp.poller.poll()) break :poll;
var header: Header = undefined;
assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(Header));
while (stdout.readableLength() < header.bytes_len) if (!try zp.poller.poll()) break :poll;
const body = stdout.readableSliceOfLen(header.bytes_len);
const Header = std.zig.Server.Message.Header;
while (stdout_br.bufferContents().len < @sizeOf(Header)) if (!try zp.poller.poll()) break :poll;
const header = (stdout_br.takeStruct(Header) catch unreachable).*;
while (stdout_br.bufferContents().len < header.bytes_len) if (!try zp.poller.poll()) break :poll;
const body = stdout_br.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
@ -547,11 +544,8 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
.string_bytes = try arena.dupe(u8, string_bytes),
.extra = extra_array,
};
if (watch) {
// This message indicates the end of the update.
stdout.discard(body.len);
break;
}
// This message indicates the end of the update.
if (watch) break :poll;
},
.emit_digest => {
const EmitDigest = std.zig.Server.Message.EmitDigest;
@ -611,15 +605,14 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
},
else => {}, // ignore other messages
}
stdout.discard(body.len);
}
s.result_duration_ns = timer.read();
const stderr = zp.poller.fifo(.stderr);
if (stderr.readableLength() > 0) {
try s.result_error_msgs.append(arena, try stderr.toOwnedSlice());
const stderr_br = zp.poller.reader(.stderr);
const stderr_contents = stderr_br.bufferContents();
if (stderr_contents.len > 0) {
try s.result_error_msgs.append(arena, try arena.dupe(u8, stderr_contents));
}
return result;

View File

@ -1239,7 +1239,7 @@ const MachODumper = struct {
fn parseRebaseInfo(ctx: ObjectContext, data: []const u8, rebases: *std.ArrayList(u64)) !void {
var br: std.io.BufferedReader = undefined;
br.initFixed(data);
br.initFixed(@constCast(data));
var seg_id: ?u8 = null;
var offset: u64 = 0;
@ -1350,7 +1350,7 @@ const MachODumper = struct {
fn parseBindInfo(ctx: ObjectContext, data: []const u8, bindings: *std.ArrayList(Binding)) !void {
var br: std.io.BufferedReader = undefined;
br.initFixed(data);
br.initFixed(@constCast(data));
var seg_id: ?u8 = null;
var tag: Binding.Tag = .self;
@ -1448,7 +1448,7 @@ const MachODumper = struct {
var exports: std.ArrayList(Export) = .init(arena.allocator());
var br: std.io.BufferedReader = undefined;
br.initFixed(data);
br.initFixed(@constCast(data));
try parseTrieNode(arena.allocator(), &br, "", &exports);
mem.sort(Export, exports.items, {}, Export.lessThan);
@ -1706,7 +1706,7 @@ const ElfDumper = struct {
fn parseAndDumpArchive(step: *Step, check: Check, bytes: []const u8) ![]const u8 {
const gpa = step.owner.allocator;
var br: std.io.BufferedReader = undefined;
br.initFixed(bytes);
br.initFixed(@constCast(bytes));
if (!mem.eql(u8, try br.takeArray(elf.ARMAG.len), elf.ARMAG)) return error.InvalidArchiveMagicNumber;
@ -1781,7 +1781,7 @@ const ElfDumper = struct {
fn parseSymtab(ctx: *ArchiveContext, data: []const u8, ptr_width: enum { p32, p64 }) !void {
var br: std.io.BufferedReader = undefined;
br.initFixed(data);
br.initFixed(@constCast(data));
const num = switch (ptr_width) {
.p32 => try br.takeInt(u32, .big),
.p64 => try br.takeInt(u64, .big),
@ -1791,7 +1791,7 @@ const ElfDumper = struct {
.p64 => @sizeOf(u64),
};
try br.discard(num * ptr_size);
const strtab = try br.peekGreedy(0);
const strtab = br.bufferContents();
assert(ctx.symtab.len == 0);
ctx.symtab = try ctx.gpa.alloc(ArSymtabEntry, num);
@ -1852,7 +1852,7 @@ const ElfDumper = struct {
fn parseAndDumpObject(step: *Step, check: Check, bytes: []const u8) ![]const u8 {
const gpa = step.owner.allocator;
var br: std.io.BufferedReader = undefined;
br.initFixed(bytes);
br.initFixed(@constCast(bytes));
const hdr = try br.takeStruct(elf.Elf64_Ehdr);
if (!mem.eql(u8, hdr.e_ident[0..4], "\x7fELF")) return error.InvalidMagicNumber;
@ -2355,7 +2355,7 @@ const WasmDumper = struct {
fn parseAndDump(step: *Step, check: Check, bytes: []const u8) ![]const u8 {
const gpa = step.owner.allocator;
var br: std.io.BufferedReader = undefined;
br.initFixed(bytes);
br.initFixed(@constCast(bytes));
const buf = try br.takeArray(8);
if (!mem.eql(u8, buf[0..4], &std.wasm.magic)) return error.InvalidMagicByte;
@ -2402,7 +2402,7 @@ const WasmDumper = struct {
try bw.print(
\\Section {s}
\\size {d}
, .{ @tagName(section), br.storageBuffer().len });
, .{ @tagName(section), br.buffer.len });
switch (section) {
.type,
@ -2615,7 +2615,7 @@ const WasmDumper = struct {
/// https://webassembly.github.io/spec/core/appendix/custom.html
fn parseDumpNames(step: *Step, br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !void {
var subsection_br: std.io.BufferedReader = undefined;
while (br.seek < br.storageBuffer().len) {
while (br.seek < br.buffer.len) {
switch (try parseDumpType(step, std.wasm.NameSubsection, br, bw)) {
// The module name subsection ... consists of a single name
// that is assigned to the module itself.
@ -2626,7 +2626,7 @@ const WasmDumper = struct {
\\name {s}
\\
, .{name});
if (subsection_br.seek != subsection_br.storageBuffer().len) return error.BadSubsectionSize;
if (subsection_br.seek != subsection_br.buffer.len) return error.BadSubsectionSize;
},
// The function name subsection ... consists of a name map
@ -2647,7 +2647,7 @@ const WasmDumper = struct {
\\
, .{ index, name });
}
if (subsection_br.seek != subsection_br.storageBuffer().len) return error.BadSubsectionSize;
if (subsection_br.seek != subsection_br.buffer.len) return error.BadSubsectionSize;
},
// The local name subsection ... consists of an indirect name
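The dumpers above all share one fixed-buffer pattern; a small sketch, assuming the initFixed semantics implied by this diff (it takes a mutable slice even for read-only parsing, hence the @constCast at each call site):

const std = @import("std");

fn hasElfMagic(bytes: []const u8) !bool {
    var br: std.io.BufferedReader = undefined;
    // The reader never writes through the slice; the cast only satisfies
    // the []u8 parameter type.
    br.initFixed(@constCast(bytes));
    return std.mem.eql(u8, try br.takeArray(4), "\x7fELF");
}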

View File

@ -1517,11 +1517,6 @@ fn evalZigTest(
break :failed false;
};
const Header = std.zig.Server.Message.Header;
const stdout = poller.fifo(.stdout);
const stderr = poller.fifo(.stderr);
var fail_count: u32 = 0;
var skip_count: u32 = 0;
var leak_count: u32 = 0;
@ -1534,13 +1529,14 @@ fn evalZigTest(
var sub_prog_node: ?std.Progress.Node = null;
defer if (sub_prog_node) |n| n.end();
const stdout_br = poller.reader(.stdout);
const stderr_br = poller.reader(.stderr);
const any_write_failed = first_write_failed or poll: while (true) {
while (stdout.readableLength() < @sizeOf(Header)) if (!try poller.poll()) break :poll false;
var header: Header = undefined;
assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(Header));
while (stdout.readableLength() < header.bytes_len) if (!try poller.poll()) break :poll false;
const body = stdout.readableSliceOfLen(header.bytes_len);
const Header = std.zig.Server.Message.Header;
while (stdout_br.bufferContents().len < @sizeOf(Header)) if (!try poller.poll()) break :poll false;
const header = (stdout_br.takeStruct(Header) catch unreachable).*;
while (stdout_br.bufferContents().len < header.bytes_len) if (!try poller.poll()) break :poll false;
const body = stdout_br.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
@ -1597,9 +1593,9 @@ fn evalZigTest(
if (tr_hdr.flags.fail or tr_hdr.flags.leak or tr_hdr.flags.log_err_count > 0) {
const name = std.mem.sliceTo(md.string_bytes[md.names[tr_hdr.index]..], 0);
const orig_msg = stderr.readableSlice(0);
defer stderr.discard(orig_msg.len);
const msg = std.mem.trim(u8, orig_msg, "\n");
const stderr_contents = stderr_br.bufferContents();
stderr_br.toss(stderr_contents.len);
const msg = std.mem.trim(u8, stderr_contents, "\n");
const label = if (tr_hdr.flags.fail)
"failed"
else if (tr_hdr.flags.leak)
@ -1650,8 +1646,6 @@ fn evalZigTest(
},
else => {}, // ignore other messages
}
stdout.discard(body.len);
};
if (any_write_failed) {
@ -1660,9 +1654,9 @@ fn evalZigTest(
while (try poller.poll()) {}
}
if (stderr.readableLength() > 0) {
const msg = std.mem.trim(u8, try stderr.toOwnedSlice(), "\n");
if (msg.len > 0) run.step.result_stderr = msg;
const stderr_contents = std.mem.trim(u8, stderr_br.bufferContents(), "\n");
if (stderr_contents.len > 0) {
run.step.result_stderr = try arena.dupe(u8, stderr_contents);
}
// Send EOF to stdin.
@ -1776,7 +1770,7 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
var stdout_bytes: ?[]const u8 = null;
var stderr_bytes: ?[]const u8 = null;
run.stdio_limit = .limited(run.stdio_limit.min(run.max_stdio_size));
run.stdio_limit = run.stdio_limit.min(.limited(run.max_stdio_size));
if (child.stdout) |stdout| {
if (child.stderr) |stderr| {
var poller = std.io.poll(arena, enum { stdout, stderr }, .{
@ -1787,15 +1781,15 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
while (try poller.poll()) {
if (run.stdio_limit.toInt()) |limit| {
if (poller.fifo(.stdout).count > limit)
if (poller.reader(.stdout).bufferContents().len > limit)
return error.StdoutStreamTooLong;
if (poller.fifo(.stderr).count > limit)
if (poller.reader(.stderr).bufferContents().len > limit)
return error.StderrStreamTooLong;
}
}
stdout_bytes = try poller.fifo(.stdout).toOwnedSlice();
stderr_bytes = try poller.fifo(.stderr).toOwnedSlice();
stdout_bytes = poller.reader(.stdout).bufferContents();
stderr_bytes = poller.reader(.stderr).bufferContents();
} else {
stdout_bytes = try stdout.readToEndAlloc(arena, run.stdio_limit);
}

View File

@ -150,27 +150,26 @@ pub fn fmtJoin(paths: []const []const u8) std.fmt.Formatter(formatJoin) {
return .{ .data = paths };
}
fn formatJoin(paths: []const []const u8, comptime fmt: []const u8, options: std.fmt.FormatOptions, w: anytype) !void {
fn formatJoin(paths: []const []const u8, bw: *std.io.BufferedWriter, comptime fmt: []const u8) !void {
_ = fmt;
_ = options;
const first_path_idx = for (paths, 0..) |p, idx| {
if (p.len != 0) break idx;
} else return;
try w.writeAll(paths[first_path_idx]); // first component
try bw.writeAll(paths[first_path_idx]); // first component
var prev_path = paths[first_path_idx];
for (paths[first_path_idx + 1 ..]) |this_path| {
if (this_path.len == 0) continue; // skip empty components
const prev_sep = isSep(prev_path[prev_path.len - 1]);
const this_sep = isSep(this_path[0]);
if (!prev_sep and !this_sep) {
try w.writeByte(sep);
try bw.writeByte(sep);
}
if (prev_sep and this_sep) {
try w.writeAll(this_path[1..]); // skip redundant separator
try bw.writeAll(this_path[1..]); // skip redundant separator
} else {
try w.writeAll(this_path);
try bw.writeAll(this_path);
}
prev_path = this_path;
}

View File

@ -9,7 +9,7 @@ const native_endian = builtin.cpu.arch.endian();
key: []const u8,
request: *std.http.Server.Request,
recv_fifo: std.fifo.LinearFifo(u8, .Slice),
reader: *std.io.BufferedReader,
reader: std.io.BufferedReader,
response: std.http.Server.Response,
/// Number of bytes that have been peeked but not discarded yet.
outstanding_len: usize,
@ -20,7 +20,6 @@ pub const InitError = error{WebSocketUpgradeMissingKey} ||
pub fn init(
ws: *WebSocket,
request: *std.http.Server.Request,
send_buffer: []u8,
recv_buffer: []align(4) u8,
) InitError!bool {
switch (request.head.version) {
@ -58,9 +57,8 @@ pub fn init(
ws.* = .{
.key = key,
.recv_fifo = .init(recv_buffer),
.reader = undefined,
.response = request.respondStreaming(.{
.send_buffer = send_buffer,
.reader = (try request.reader()).unbuffered(),
.response = try request.respondStreaming(.{
.respond_options = .{
.status = .switching_protocols,
.extra_headers = &.{
@ -74,7 +72,6 @@ pub fn init(
.request = request,
.outstanding_len = 0,
};
ws.reader.init(try request.reader(), &.{});
return true;
}
@ -239,9 +236,8 @@ pub fn writeMessagev(ws: *WebSocket, message: []const std.posix.iovec_const, opc
},
};
const response = &ws.response;
try response.writeAll(header);
for (message) |iovec|
try response.writeAll(iovec.base[0..iovec.len]);
try response.flush();
var bw = ws.response.writer().unbuffered();
try bw.writeAll(header);
for (message) |iovec| try bw.writeAll(iovec.base[0..iovec.len]);
try bw.flush();
}

View File

@ -53,7 +53,12 @@ pub fn poll(
const enum_fields = @typeInfo(StreamEnum).@"enum".fields;
var result: Poller(StreamEnum) = .{
.gpa = gpa,
.readers = undefined,
.readers = @splat(.{
.unbuffered_reader = .failing,
.buffer = &.{},
.end = 0,
.seek = 0,
}),
.poll_fds = undefined,
.windows = if (is_windows) .{
.first_read_done = false,
@ -70,12 +75,6 @@ pub fn poll(
};
inline for (enum_fields, 0..) |field, i| {
result.readers[i] = .{
.unbuffered_reader = .failing,
.buffer = &.{},
.end = 0,
.seek = 0,
};
if (is_windows) {
result.windows.active.handles_buf[i] = @field(files, field.name).handle;
} else {

View File

@ -51,6 +51,23 @@ pub fn readVec(br: *BufferedReader, data: []const []u8) Reader.Error!usize {
return passthruReadVec(br, data);
}
pub fn readVecAll(br: *BufferedReader, data: [][]u8) Reader.Error!void {
var index: usize = 0;
var truncate: usize = 0;
while (index < data.len) {
{
const untruncated = data[index];
data[index] = untruncated[truncate..];
defer data[index] = untruncated;
truncate += try br.readVec(data[index..]);
}
while (index < data.len and truncate >= data[index].len) {
truncate -= data[index].len;
index += 1;
}
}
}
pub fn read(br: *BufferedReader, bw: *BufferedWriter, limit: Reader.Limit) Reader.RwError!usize {
return passthruRead(br, bw, limit);
}
@ -58,8 +75,8 @@ pub fn read(br: *BufferedReader, bw: *BufferedWriter, limit: Reader.Limit) Reade
/// "Pump" data from the reader to the writer.
pub fn readAll(br: *BufferedReader, bw: *BufferedWriter, limit: Reader.Limit) Reader.RwError!void {
var remaining = limit;
while (true) {
const n = try passthruRead(br, bw, remaining);
while (remaining.nonzero()) {
const n = try br.read(bw, remaining);
remaining = remaining.subtract(n).?;
}
}
@ -68,7 +85,7 @@ fn passthruRead(ctx: ?*anyopaque, bw: *BufferedWriter, limit: Reader.Limit) Read
const br: *BufferedReader = @alignCast(@ptrCast(ctx));
const buffer = br.buffer[0..br.end];
const buffered = buffer[br.seek..];
const limited = buffered[0..limit.min(buffered.len)];
const limited = buffered[0..limit.minInt(buffered.len)];
if (limited.len > 0) {
const n = try bw.write(limited);
br.seek += n;
@ -93,7 +110,7 @@ fn passthruReadVec(ctx: ?*anyopaque, data: []const []u8) Reader.Error!usize {
vecs[0] = buf[copy_len..];
const vecs_len: usize = @min(vecs.len, data.len - i);
var vec_data_len: usize = vecs[0].len;
for (&vecs[1..vecs_len], data[i + 1 ..][0 .. vecs_len - 1]) |*v, d| {
for (vecs[1..vecs_len], data[i + 1 ..][0 .. vecs_len - 1]) |*v, d| {
vec_data_len += d.len;
v.* = d;
}
@ -149,7 +166,6 @@ pub fn seekForwardBy(br: *BufferedReader, seek_by: u64) !void {
/// * `peek`
/// * `toss`
pub fn peek(br: *BufferedReader, n: usize) Reader.Error![]u8 {
assert(n <= br.buffer.len);
try br.fill(n);
return br.buffer[br.seek..][0..n];
}
@ -169,7 +185,6 @@ pub fn peek(br: *BufferedReader, n: usize) Reader.Error![]u8 {
/// * `peek`
/// * `toss`
pub fn peekGreedy(br: *BufferedReader, n: usize) Reader.Error![]u8 {
assert(n <= br.buffer.len);
try br.fill(n);
return br.buffer[br.seek..br.end];
}
@ -448,10 +463,12 @@ fn peekDelimiterInclusiveUnlessEnd(br: *BufferedReader, delimiter: u8) Delimiter
@branchHint(.likely);
return buffer[seek .. end + 1];
}
const remainder = buffer[seek..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
br.end = remainder.len;
br.seek = 0;
if (seek > 0) {
const remainder = buffer[seek..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
br.end = remainder.len;
br.seek = 0;
}
while (br.end < br.buffer.len) {
const n = try br.unbuffered_reader.readVec(&.{br.buffer[br.end..]});
const prev_end = br.end;
@ -550,10 +567,12 @@ pub fn fill(br: *BufferedReader, n: usize) Reader.Error!void {
@branchHint(.likely);
return;
}
const remainder = buffer[seek..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
br.end = remainder.len;
br.seek = 0;
if (seek > 0) {
const remainder = buffer[seek..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
br.end = remainder.len;
br.seek = 0;
}
while (true) {
br.end += try br.unbuffered_reader.readVec(&.{br.buffer[br.end..]});
if (n <= br.end) return;
@ -665,15 +684,35 @@ pub fn writableSliceGreedyAlloc(
allocator: Allocator,
min_len: usize,
) error{OutOfMemory}![]u8 {
_ = br;
_ = allocator;
_ = min_len;
@panic("TODO");
{
const unused = br.buffer[br.end..];
if (unused.len >= min_len) return unused;
}
const seek = br.seek;
if (seek > 0) {
const buffer = br.buffer[0..br.end];
const remainder = buffer[seek..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
br.end = remainder.len;
br.seek = 0;
}
{
var list: std.ArrayListUnmanaged(u8) = .{
.items = br.buffer[0..br.end],
.capacity = br.buffer.len,
};
defer br.buffer = list.allocatedSlice();
try list.ensureUnusedCapacity(allocator, min_len);
}
const unused = br.buffer[br.end..];
assert(unused.len >= min_len);
return unused;
}
/// After writing directly into the unused capacity of `buffer`, this function
/// updates `end` so that users of `BufferedReader` can receive the data.
pub fn advanceBufferEnd(br: *BufferedReader, n: usize) void {
assert(n <= br.buffer.len - br.end);
br.end += n;
}
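A hypothetical usage of the readVecAll added above: the call returns only once every destination slice is completely filled, with the truncate offset carrying partial progress from one readVec call to the next.

const std = @import("std");

fn readFrame(br: *std.io.BufferedReader) !void {
    var header: [8]u8 = undefined;
    var payload: [256]u8 = undefined;
    var vecs = [_][]u8{ &header, &payload };
    // Internally loops over readVec, which may stop mid-slice.
    try br.readVecAll(&vecs);
}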

View File

@ -141,16 +141,19 @@ pub fn advance(bw: *BufferedWriter, n: usize) void {
/// The `data` parameter is mutable because this function needs to mutate the
/// fields in order to handle partial writes from `Writer.VTable.writeVec`.
pub fn writeVecAll(bw: *BufferedWriter, data: [][]const u8) Writer.Error!void {
var i: usize = 0;
while (true) {
var n = try passthruWriteSplat(bw, data[i..], 1);
const len = data[i].len;
while (n >= len) {
n -= len;
i += 1;
if (i >= data.len) return;
var index: usize = 0;
var truncate: usize = 0;
while (index < data.len) {
{
const untruncated = data[index];
data[index] = untruncated[truncate..];
defer data[index] = untruncated;
truncate += try bw.writeVec(data[index..]);
}
while (index < data.len and truncate >= data[index].len) {
truncate -= data[index].len;
index += 1;
}
data[i] = data[i][n..];
}
}
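The doc comment above explains why data is [][]const u8 rather than []const []const u8; a hypothetical caller, for illustration:

const std = @import("std");

fn writeFrame(bw: *std.io.BufferedWriter, header: []const u8, body: []const u8) !void {
    // The array is mutable because writeVecAll shrinks data[index] in place
    // while retrying partial writes, restoring it afterwards.
    var vecs = [_][]const u8{ header, body };
    try bw.writeVecAll(&vecs);
}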

View File

@ -94,7 +94,7 @@ pub const Limit = enum(usize) {
}
pub fn slice(l: Limit, s: []u8) []u8 {
return s[0..min(l, s.len)];
return s[0..l.minInt(s.len)];
}
pub fn toInt(l: Limit) ?usize {

View File

@ -348,16 +348,15 @@ pub const RunResult = struct {
stderr: []u8,
};
fn writeFifoDataToArrayList(allocator: Allocator, list: *std.ArrayListUnmanaged(u8), fifo: *std.io.PollFifo) !void {
if (fifo.head != 0) fifo.realign();
fn writeBufferedReaderToArrayList(allocator: Allocator, list: *std.ArrayListUnmanaged(u8), br: *std.io.BufferedReader) !void {
assert(br.seek == 0);
if (list.capacity == 0) {
list.* = .{
.items = fifo.buf[0..fifo.count],
.capacity = fifo.buf.len,
.items = br.bufferContents(),
.capacity = br.buffer.len,
};
fifo.* = std.io.PollFifo.init(fifo.allocator);
} else {
try list.appendSlice(allocator, fifo.buf[0..fifo.count]);
try list.appendSlice(allocator, br.bufferContents());
}
}
@ -384,14 +383,14 @@ pub fn collectOutput(
defer poller.deinit();
while (try poller.poll()) {
if (poller.fifo(.stdout).count > max_output_bytes)
if (poller.reader(.stdout).bufferContents().len > max_output_bytes)
return error.StdoutStreamTooLong;
if (poller.fifo(.stderr).count > max_output_bytes)
if (poller.reader(.stderr).bufferContents().len > max_output_bytes)
return error.StderrStreamTooLong;
}
try writeFifoDataToArrayList(allocator, stdout, poller.fifo(.stdout));
try writeFifoDataToArrayList(allocator, stderr, poller.fifo(.stderr));
try writeBufferedReaderToArrayList(allocator, stdout, poller.reader(.stdout));
try writeBufferedReaderToArrayList(allocator, stderr, poller.reader(.stderr));
}
pub const RunError = posix.GetCwdError || posix.ReadError || SpawnError || posix.PollError || error{

View File

@ -522,7 +522,7 @@ pub fn parseStrLit(
tree: Ast,
node: Ast.Node.Index,
writer: *std.io.BufferedWriter,
) error{OutOfMemory}!std.zig.string_literal.Result {
) std.io.Writer.Error!std.zig.string_literal.Result {
switch (tree.nodeTag(node)) {
.string_literal => {
const token = tree.nodeMainToken(node);

View File

@ -72,9 +72,9 @@ pub fn writeInst(
}
pub fn dump(air: Air, pt: Zcu.PerThread, liveness: ?Air.Liveness) void {
var bw = std.debug.lockStdErr2(&.{});
defer std.debug.unlockStdErr();
air.write(&bw, pt, liveness);
const stderr_bw = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
air.write(stderr_bw, pt, liveness);
}
pub fn dumpInst(air: Air, inst: Air.Inst.Index, pt: Zcu.PerThread, liveness: ?Air.Liveness) void {

View File

@ -1881,14 +1881,12 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (options.verbose_llvm_cpu_features) {
if (options.root_mod.resolved_target.llvm_cpu_features) |cf| print: {
var stderr = std.debug.lockStdErr2(&.{});
defer std.debug.unlockStdErr();
nosuspend {
stderr.print("compilation: {s}\n", .{options.root_name}) catch break :print;
stderr.print(" target: {s}\n", .{try target.zigTriple(arena)}) catch break :print;
stderr.print(" cpu: {s}\n", .{target.cpu.model.name}) catch break :print;
stderr.print(" features: {s}\n", .{cf}) catch {};
}
const stderr_bw = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
stderr_bw.print("compilation: {s}\n", .{options.root_name}) catch break :print;
stderr_bw.print(" target: {s}\n", .{try target.zigTriple(arena)}) catch break :print;
stderr_bw.print(" cpu: {s}\n", .{target.cpu.model.name}) catch break :print;
stderr_bw.print(" features: {s}\n", .{cf}) catch {};
}
}

View File

@ -11263,8 +11263,8 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
fn dumpAllFallible(ip: *const InternPool) anyerror!void {
var buffer: [4096]u8 = undefined;
var bw = std.debug.lockStdErr2(&buffer);
defer std.debug.unlockStdErr();
const stderr_bw = std.debug.lockStderrWriter(&buffer);
defer std.debug.unlockStderrWriter();
for (ip.locals, 0..) |*local, tid| {
const items = local.shared.items.view();
for (
@ -11273,12 +11273,12 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
0..,
) |tag, data, index| {
const i = Index.Unwrapped.wrap(.{ .tid = @enumFromInt(tid), .index = @intCast(index) }, ip);
try bw.print("${d} = {s}(", .{ i, @tagName(tag) });
try stderr_bw.print("${d} = {s}(", .{ i, @tagName(tag) });
switch (tag) {
.removed => {},
.simple_type => try bw.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(@intFromEnum(i))))}),
.simple_value => try bw.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(@intFromEnum(i))))}),
.simple_type => try stderr_bw.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(@intFromEnum(i))))}),
.simple_value => try stderr_bw.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(@intFromEnum(i))))}),
.type_int_signed,
.type_int_unsigned,
@ -11351,17 +11351,16 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
.func_coerced,
.union_value,
.memoized_call,
=> try bw.print("{d}", .{data}),
=> try stderr_bw.print("{d}", .{data}),
.opt_null,
.type_slice,
.only_possible_value,
=> try bw.print("${d}", .{data}),
=> try stderr_bw.print("${d}", .{data}),
}
try bw.writeAll(")\n");
try stderr_bw.writeAll(")\n");
}
}
try bw.flush();
}
pub fn dumpGenericInstances(ip: *const InternPool, allocator: Allocator) void {
@ -11396,8 +11395,8 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
}
var buffer: [4096]u8 = undefined;
var bw = std.debug.lockStdErr2(&buffer);
defer std.debug.unlockStdErr();
const stderr_bw = std.debug.lockStderrWriter(&buffer);
defer std.debug.unlockStderrWriter();
const SortContext = struct {
values: []std.ArrayListUnmanaged(Index),
@ -11410,23 +11409,21 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
var it = instances.iterator();
while (it.next()) |entry| {
const generic_fn_owner_nav = ip.getNav(ip.funcDeclInfo(entry.key_ptr.*).owner_nav);
try bw.print("{f} ({}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
try stderr_bw.print("{f} ({}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
for (entry.value_ptr.items) |index| {
const unwrapped_index = index.unwrap(ip);
const func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), unwrapped_index.getData(ip));
const owner_nav = ip.getNav(func.owner_nav);
try bw.print(" {f}: (", .{owner_nav.name.fmt(ip)});
try stderr_bw.print(" {f}: (", .{owner_nav.name.fmt(ip)});
for (func.comptime_args.get(ip)) |arg| {
if (arg != .none) {
const key = ip.indexToKey(arg);
try bw.print(" {} ", .{key});
try stderr_bw.print(" {} ", .{key});
}
}
try bw.writeAll(")\n");
try stderr_bw.writeAll(")\n");
}
}
try bw.flush();
}
pub fn getNav(ip: *const InternPool, index: Nav.Index) Nav {

View File

@ -1043,12 +1043,12 @@ pub const File = struct {
if (stat.size > std.math.maxInt(u32))
return error.FileTooBig;
const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
const source = try gpa.allocSentinel(u8, @intCast(stat.size), 0);
errdefer gpa.free(source);
const amt = try f.readAll(source);
if (amt != stat.size)
return error.UnexpectedEndOfFile;
var fr = f.reader();
var br = fr.interface().unbuffered();
try br.readSlice(source);
// Here we do not modify stat fields because this function is the one
// used for error reporting. We need to keep the stat fields stale so that
@ -2845,34 +2845,21 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.F
undefined;
defer if (data_has_safety_tag) gpa.free(safety_buffer);
const data_ptr = if (data_has_safety_tag)
@as([*]u8, @ptrCast(safety_buffer.ptr))
else
@as([*]u8, @ptrCast(zir.instructions.items(.data).ptr));
var iovecs = [_]std.posix.iovec{
.{
.base = @as([*]u8, @ptrCast(zir.instructions.items(.tag).ptr)),
.len = header.instructions_len,
},
.{
.base = data_ptr,
.len = header.instructions_len * 8,
},
.{
.base = zir.string_bytes.ptr,
.len = header.string_bytes_len,
},
.{
.base = @as([*]u8, @ptrCast(zir.extra.ptr)),
.len = header.extra_len * 4,
},
var vecs = [_][]u8{
@ptrCast(zir.instructions.items(.tag)),
if (data_has_safety_tag)
@ptrCast(safety_buffer)
else
zir.instructions.items(.data),
zir.string_bytes,
@ptrCast(zir.extra),
};
var cache_fr = cache_file.reader();
var cache_br = cache_fr.interface().unbuffered();
cache_br.readVecAll(&vecs) catch |err| switch (err) {
error.ReadFailed => if (cache_fr.err) |_| unreachable else |e| return e,
error.EndOfStream => return error.UnexpectedFileSize,
};
const amt_read = try cache_file.readvAll(&iovecs);
const amt_expected = zir.instructions.len * 9 +
zir.string_bytes.len +
zir.extra.len * 4;
if (amt_read != amt_expected) return error.UnexpectedFileSize;
if (data_has_safety_tag) {
const tags = zir.instructions.items(.tag);
for (zir.instructions.items(.data), 0..) |*data, i| {
@ -2895,14 +2882,6 @@ pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.S
undefined;
defer if (data_has_safety_tag) gpa.free(safety_buffer);
const data_ptr: [*]const u8 = if (data_has_safety_tag)
if (zir.instructions.len == 0)
undefined
else
@ptrCast(safety_buffer.ptr)
else
@ptrCast(zir.instructions.items(.data).ptr);
if (data_has_safety_tag) {
// The `Data` union has a safety tag but in the file format we store it without.
for (zir.instructions.items(.data), 0..) |*data, i| {
@ -2920,29 +2899,21 @@ pub fn saveZirCache(gpa: Allocator, cache_file: std.fs.File, stat: std.fs.File.S
.stat_inode = stat.inode,
.stat_mtime = stat.mtime,
};
var iovecs: [5]std.posix.iovec_const = .{
.{
.base = @ptrCast(&header),
.len = @sizeOf(Zir.Header),
},
.{
.base = @ptrCast(zir.instructions.items(.tag).ptr),
.len = zir.instructions.len,
},
.{
.base = data_ptr,
.len = zir.instructions.len * 8,
},
.{
.base = zir.string_bytes.ptr,
.len = zir.string_bytes.len,
},
.{
.base = @ptrCast(zir.extra.ptr),
.len = zir.extra.len * 4,
},
var vecs = [_][]const u8{
@ptrCast((&header)[0..1]),
@ptrCast(zir.instructions.items(.tag)),
if (data_has_safety_tag)
@ptrCast(safety_buffer)
else
@ptrCast(zir.instructions.items(.data)),
zir.string_bytes,
@ptrCast(zir.extra),
};
var cache_fw = cache_file.writer();
var cache_bw = cache_fw.interface().unbuffered();
cache_bw.writeVecAll(&vecs) catch |err| switch (err) {
error.WriteFailed => if (cache_fw.err) |_| unreachable else |e| return e,
};
try cache_file.writevAll(&iovecs);
}
pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir) std.fs.File.WriteError!void {
@ -2958,45 +2929,22 @@ pub fn saveZoirCache(cache_file: std.fs.File, stat: std.fs.File.Stat, zoir: Zoir
.stat_inode = stat.inode,
.stat_mtime = stat.mtime,
};
var iovecs: [9]std.posix.iovec_const = .{
.{
.base = @ptrCast(&header),
.len = @sizeOf(Zoir.Header),
},
.{
.base = @ptrCast(zoir.nodes.items(.tag)),
.len = zoir.nodes.len * @sizeOf(Zoir.Node.Repr.Tag),
},
.{
.base = @ptrCast(zoir.nodes.items(.data)),
.len = zoir.nodes.len * 4,
},
.{
.base = @ptrCast(zoir.nodes.items(.ast_node)),
.len = zoir.nodes.len * 4,
},
.{
.base = @ptrCast(zoir.extra),
.len = zoir.extra.len * 4,
},
.{
.base = @ptrCast(zoir.limbs),
.len = zoir.limbs.len * @sizeOf(std.math.big.Limb),
},
.{
.base = zoir.string_bytes.ptr,
.len = zoir.string_bytes.len,
},
.{
.base = @ptrCast(zoir.compile_errors),
.len = zoir.compile_errors.len * @sizeOf(Zoir.CompileError),
},
.{
.base = @ptrCast(zoir.error_notes),
.len = zoir.error_notes.len * @sizeOf(Zoir.CompileError.Note),
},
var vecs = [_][]const u8{
@ptrCast((&header)[0..1]),
@ptrCast(zoir.nodes.items(.tag)),
@ptrCast(zoir.nodes.items(.data)),
@ptrCast(zoir.nodes.items(.ast_node)),
@ptrCast(zoir.extra),
@ptrCast(zoir.limbs),
zoir.string_bytes,
@ptrCast(zoir.compile_errors),
@ptrCast(zoir.error_notes),
};
var cache_fw = cache_file.writer();
var cache_bw = cache_fw.interface().unbuffered();
cache_bw.writeVecAll(&vecs) catch |err| switch (err) {
error.WriteFailed => if (cache_fw.err) |_| unreachable else |e| return e,
};
try cache_file.writevAll(&iovecs);
}
pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_file: std.fs.File) !Zoir {
@ -3025,49 +2973,22 @@ pub fn loadZoirCacheBody(gpa: Allocator, header: Zoir.Header, cache_file: std.fs
zoir.compile_errors = try gpa.alloc(Zoir.CompileError, header.compile_errors_len);
zoir.error_notes = try gpa.alloc(Zoir.CompileError.Note, header.error_notes_len);
var iovecs: [8]std.posix.iovec = .{
.{
.base = @ptrCast(zoir.nodes.items(.tag)),
.len = header.nodes_len * @sizeOf(Zoir.Node.Repr.Tag),
},
.{
.base = @ptrCast(zoir.nodes.items(.data)),
.len = header.nodes_len * 4,
},
.{
.base = @ptrCast(zoir.nodes.items(.ast_node)),
.len = header.nodes_len * 4,
},
.{
.base = @ptrCast(zoir.extra),
.len = header.extra_len * 4,
},
.{
.base = @ptrCast(zoir.limbs),
.len = header.limbs_len * @sizeOf(std.math.big.Limb),
},
.{
.base = zoir.string_bytes.ptr,
.len = header.string_bytes_len,
},
.{
.base = @ptrCast(zoir.compile_errors),
.len = header.compile_errors_len * @sizeOf(Zoir.CompileError),
},
.{
.base = @ptrCast(zoir.error_notes),
.len = header.error_notes_len * @sizeOf(Zoir.CompileError.Note),
},
var vecs = [_][]u8{
@ptrCast(zoir.nodes.items(.tag)),
@ptrCast(zoir.nodes.items(.data)),
@ptrCast(zoir.nodes.items(.ast_node)),
@ptrCast(zoir.extra),
@ptrCast(zoir.limbs),
zoir.string_bytes,
@ptrCast(zoir.compile_errors),
@ptrCast(zoir.error_notes),
};
const bytes_expected = expected: {
var n: usize = 0;
for (iovecs) |v| n += v.len;
break :expected n;
var cache_fr = cache_file.reader();
var cache_br = cache_fr.interface().unbuffered();
cache_br.readVecAll(&vecs) catch |err| switch (err) {
error.ReadFailed => if (cache_fr.err) |_| unreachable else |e| return e,
error.EndOfStream => return error.UnexpectedFileSize,
};
const bytes_read = try cache_file.readvAll(&iovecs);
if (bytes_read != bytes_expected) return error.UnexpectedFileSize;
return zoir;
}

View File

@ -249,11 +249,14 @@ pub fn updateFile(
if (stat.size > std.math.maxInt(u32))
return error.FileTooBig;
const source = try gpa.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
const source = try gpa.allocSentinel(u8, @intCast(stat.size), 0);
defer if (file.source == null) gpa.free(source);
const amt = try source_file.readAll(source);
if (amt != stat.size)
return error.UnexpectedEndOfFile;
var source_fr = source_file.reader();
var source_br = source_fr.interface().unbuffered();
source_br.readSlice(source) catch |err| switch (err) {
error.ReadFailed => if (source_fr.err) |_| unreachable else |e| return e,
error.EndOfStream => return error.UnexpectedEndOfFile,
};
file.source = source;
@ -340,13 +343,17 @@ fn loadZirZoirCache(
.zon => Zoir.Header,
};
var buffer: [@sizeOf(Header)]u8 = undefined;
var cache_fr = cache_file.reader();
var cache_br = cache_fr.interface().buffered(&buffer);
// First we read the header to determine the lengths of arrays.
const header = cache_file.reader().readStruct(Header) catch |err| switch (err) {
const header = (cache_br.takeStruct(Header) catch |err| switch (err) {
// This can happen if Zig bails out of this function between creating
// the cached file and writing it.
error.EndOfStream => return .invalid,
else => |e| return e,
};
}).*;
const unchanged_metadata =
stat.size == header.stat_size and
@ -2433,8 +2440,12 @@ fn updateEmbedFileInner(
const old_len = strings.mutate.len;
errdefer strings.shrinkRetainingCapacity(old_len);
const bytes = (try strings.addManyAsSlice(size_plus_one))[0];
const actual_read = try file.readAll(bytes[0..size]);
if (actual_read != size) return error.UnexpectedEof;
var fr = file.reader();
var br = fr.interface().unbuffered();
br.readSlice(bytes[0..size]) catch |err| switch (err) {
error.ReadFailed => if (fr.err) |_| unreachable else |e| return e,
error.EndOfStream => return error.UnexpectedEof,
};
bytes[size] = 0;
break :str try ip.getOrPutTrailingString(gpa, tid, @intCast(bytes.len), .maybe_embedded_nulls);
};

View File

@ -80,18 +80,19 @@ fn dumpStatusReport() !void {
var fba = std.heap.FixedBufferAllocator.init(&crash_heap);
const allocator = fba.allocator();
var stderr = std.fs.File.stderr().writer().unbuffered();
var stderr_fw = std.fs.File.stderr().writer();
var stderr_bw = stderr_fw.interface().unbuffered();
const block: *Sema.Block = anal.block;
const zcu = anal.sema.pt.zcu;
const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu) orelse {
const file = zcu.fileByIndex(block.src_base_inst.resolveFile(&zcu.intern_pool));
try stderr.print("Analyzing lost instruction in file '{f}'. This should not happen!\n\n", .{file.path.fmt(zcu.comp)});
try stderr_bw.print("Analyzing lost instruction in file '{f}'. This should not happen!\n\n", .{file.path.fmt(zcu.comp)});
return;
};
try stderr.writeAll("Analyzing ");
try stderr.print("Analyzing '{f}'\n", .{file.path.fmt(zcu.comp)});
try stderr_bw.writeAll("Analyzing ");
try stderr_bw.print("Analyzing '{f}'\n", .{file.path.fmt(zcu.comp)});
print_zir.renderInstructionContext(
allocator,
@ -100,12 +101,12 @@ fn dumpStatusReport() !void {
file,
src_base_node,
6, // indent
&stderr,
&stderr_bw,
) catch |err| switch (err) {
error.OutOfMemory => try stderr.writeAll(" <out of memory dumping zir>\n"),
error.OutOfMemory => try stderr_bw.writeAll(" <out of memory dumping zir>\n"),
else => |e| return e,
};
try stderr.print(
try stderr_bw.print(
\\ For full context, use the command
\\ zig ast-check -t {f}
\\
@ -116,30 +117,30 @@ fn dumpStatusReport() !void {
while (parent) |curr| {
fba.reset();
const cur_block_file = zcu.fileByIndex(curr.block.src_base_inst.resolveFile(&zcu.intern_pool));
try stderr.print(" in {f}\n", .{cur_block_file.path.fmt(zcu.comp)});
try stderr_bw.print(" in {f}\n", .{cur_block_file.path.fmt(zcu.comp)});
_, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu) orelse {
try stderr.writeAll(" > [lost instruction; this should not happen]\n");
try stderr_bw.writeAll(" > [lost instruction; this should not happen]\n");
parent = curr.parent;
continue;
};
try stderr.writeAll(" > ");
try stderr_bw.writeAll(" > ");
print_zir.renderSingleInstruction(
allocator,
curr.body[curr.body_index],
cur_block_file,
cur_block_src_base_node,
6, // indent
&stderr,
&stderr_bw,
) catch |err| switch (err) {
error.OutOfMemory => try stderr.writeAll(" <out of memory dumping zir>\n"),
error.OutOfMemory => try stderr_bw.writeAll(" <out of memory dumping zir>\n"),
else => |e| return e,
};
try stderr.writeAll("\n");
try stderr_bw.writeAll("\n");
parent = curr.parent;
}
try stderr.writeByte('\n');
try stderr_bw.writeByte('\n');
}
var crash_heap: [16 * 4096]u8 = undefined;
@ -268,8 +269,9 @@ const StackContext = union(enum) {
debug.dumpCurrentStackTrace(ct.ret_addr);
},
.exception => |context| {
var stderr = std.fs.File.stderr().writer().unbuffered();
debug.dumpStackTraceFromBase(context, &stderr);
var stderr_fw = std.fs.File.stderr().writer();
var stderr_bw = stderr_fw.interface().unbuffered();
debug.dumpStackTraceFromBase(context, &stderr_bw);
},
.not_supported => {
std.fs.File.stderr().writeAll("Stack trace not supported on this platform.\n") catch {};
@ -379,19 +381,20 @@ const PanicSwitch = struct {
state.recover_stage = .release_mutex;
var stderr = std.fs.File.stderr().writer().unbuffered();
var stderr_fw = std.fs.File.stderr().writer();
var stderr_bw = stderr_fw.interface().unbuffered();
if (builtin.single_threaded) {
stderr.print("panic: ", .{}) catch goTo(releaseMutex, .{state});
stderr_bw.print("panic: ", .{}) catch goTo(releaseMutex, .{state});
} else {
const current_thread_id = std.Thread.getCurrentId();
stderr.print("thread {} panic: ", .{current_thread_id}) catch goTo(releaseMutex, .{state});
stderr_bw.print("thread {} panic: ", .{current_thread_id}) catch goTo(releaseMutex, .{state});
}
stderr.print("{s}\n", .{msg}) catch goTo(releaseMutex, .{state});
stderr_bw.print("{s}\n", .{msg}) catch goTo(releaseMutex, .{state});
state.recover_stage = .report_stack;
dumpStatusReport() catch |err| {
stderr.print("\nIntercepted error.{} while dumping current state. Continuing...\n", .{err}) catch {};
stderr_bw.print("\nIntercepted error.{} while dumping current state. Continuing...\n", .{err}) catch {};
};
goTo(reportStack, .{state});
@ -406,8 +409,9 @@ const PanicSwitch = struct {
recover(state, trace, stack, msg);
state.recover_stage = .release_mutex;
var stderr = std.fs.File.stderr().writer().unbuffered();
stderr.writeAll("\nOriginal Error:\n") catch {};
var stderr_fw = std.fs.File.stderr().writer();
var stderr_bw = stderr_fw.interface().unbuffered();
stderr_bw.writeAll("\nOriginal Error:\n") catch {};
goTo(reportStack, .{state});
}
@ -477,8 +481,9 @@ const PanicSwitch = struct {
recover(state, trace, stack, msg);
state.recover_stage = .silent_abort;
var stderr = std.fs.File.stderr().writer().unbuffered();
stderr.writeAll("Aborting...\n") catch {};
var stderr_fw = std.fs.File.stderr().writer();
var stderr_bw = stderr_fw.interface().unbuffered();
stderr_bw.writeAll("Aborting...\n") catch {};
goTo(abort, .{});
}
@ -505,10 +510,11 @@ const PanicSwitch = struct {
// lower the verbosity, and restore it at the end if we don't panic.
state.recover_verbosity = .message_only;
var stderr = std.fs.File.stderr().writer().unbuffered();
stderr.writeAll("\nPanicked during a panic: ") catch {};
stderr.writeAll(msg) catch {};
stderr.writeAll("\nInner panic stack:\n") catch {};
var stderr_fw = std.fs.File.stderr().writer();
var stderr_bw = stderr_fw.interface().unbuffered();
stderr_bw.writeAll("\nPanicked during a panic: ") catch {};
stderr_bw.writeAll(msg) catch {};
stderr_bw.writeAll("\nInner panic stack:\n") catch {};
if (trace) |t| {
debug.dumpStackTrace(t.*);
}
@ -519,10 +525,11 @@ const PanicSwitch = struct {
.message_only => {
state.recover_verbosity = .silent;
var stderr = std.fs.File.stderr().writer().unbuffered();
stderr.writeAll("\nPanicked while dumping inner panic stack: ") catch {};
stderr.writeAll(msg) catch {};
stderr.writeByte('\n') catch {};
var stderr_fw = std.fs.File.stderr().writer();
var stderr_bw = stderr_fw.interface().unbuffered();
stderr_bw.writeAll("\nPanicked while dumping inner panic stack: ") catch {};
stderr_bw.writeAll(msg) catch {};
stderr_bw.writeByte('\n') catch {};
// If we succeed, restore all the way to dumping the stack.
state.recover_verbosity = .message_and_stack;
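Every call site in this file repeats the same two-step stderr construction; as a sketch (assumed API from this diff: the file writer must be a named variable so the unbuffered adaptor's pointer into it stays valid):

const std = @import("std");

fn printPanicLine(msg: []const u8) void {
    var stderr_fw = std.fs.File.stderr().writer();
    var stderr_bw = stderr_fw.interface().unbuffered();
    // Panic paths swallow write errors rather than recursing.
    stderr_bw.writeAll(msg) catch {};
    stderr_bw.writeByte('\n') catch {};
}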

View File

@ -47,7 +47,7 @@ pub fn print(
level: u8,
pt: Zcu.PerThread,
opt_sema: ?*Sema,
) std.io.Writer.Error!void {
) (std.io.Writer.Error || Zcu.CompileError)!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ip.indexToKey(val.toIntern())) {
@ -190,7 +190,7 @@ fn printAggregate(
level: u8,
pt: Zcu.PerThread,
opt_sema: ?*Sema,
) std.io.Writer.Error!void {
) (std.io.Writer.Error || Zcu.CompileError)!void {
if (level == 0) {
if (is_ref) try bw.writeByte('&');
return bw.writeAll(".{ ... }");
@ -276,7 +276,7 @@ fn printPtr(
level: u8,
pt: Zcu.PerThread,
opt_sema: ?*Sema,
) std.io.Writer.Error!void {
) (std.io.Writer.Error || Zcu.CompileError)!void {
const ptr = switch (pt.zcu.intern_pool.indexToKey(ptr_val.toIntern())) {
.undef => return bw.writeAll("undefined"),
.ptr => |ptr| ptr,
@ -336,7 +336,7 @@ pub fn printPtrDerivation(
/// The maximum recursion depth. We can never recurse infinitely here, but the depth can be arbitrary,
/// so at this depth we just write "..." to prevent stack overflow.
ptr_depth: u8,
) std.io.Writer.Error!Value.PointerDeriveStep {
) (std.io.Writer.Error || Zcu.CompileError)!Value.PointerDeriveStep {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;

View File

@ -176,7 +176,7 @@ const Writer = struct {
}
} = .{},
const Error = std.io.Writer.Error;
const Error = std.io.Writer.Error || std.mem.Allocator.Error;
fn writeInstToStream(
self: *Writer,