Merge remote-tracking branch 'origin/master' into wrangle-writer-buffering

Andrew Kelley 2025-07-24 19:12:44 -07:00
commit 8c4482ed78
16 changed files with 433 additions and 320 deletions

View File

@ -282,13 +282,15 @@ fn buildWasmBinary(
var result: ?Path = null;
var result_error_bundle = std.zig.ErrorBundle.empty;
const stdout_br = poller.reader(.stdout);
const stdout = poller.reader(.stdout);
poll: while (true) {
const Header = std.zig.Server.Message.Header;
while (stdout_br.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
const header = (stdout_br.takeStruct(Header) catch unreachable).*;
while (stdout_br.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
const body = stdout_br.take(header.bytes_len) catch unreachable;
while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
const header = stdout.takeStruct(Header, .little) catch unreachable;
while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
@ -327,8 +329,7 @@ fn buildWasmBinary(
}
}
const stderr_br = poller.reader(.stderr);
const stderr_contents = stderr_br.buffered();
const stderr_contents = try poller.toOwnedSlice(.stderr);
if (stderr_contents.len > 0) {
std.debug.print("{s}", .{stderr_contents});
}
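Because the removed and added lines are interleaved above, here is a minimal sketch (not part of the commit) of the read loop as it looks after this change. The poller shape and method names are taken from the hunks in this diff; `readServerMessages`, its `poller: anytype` parameter, and the assumption that the stream enum has `.stdout` and `.stderr` members are illustrative only.

const std = @import("std");

fn readServerMessages(poller: anytype) !void {
    const Header = std.zig.Server.Message.Header;
    const stdout = poller.reader(.stdout);
    poll: while (true) {
        // Wait until a full header is buffered, then decode it little-endian.
        while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
        const header = stdout.takeStruct(Header, .little) catch unreachable;
        // Wait until the full body is buffered, then consume it from the buffer.
        while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
        const body = stdout.take(header.bytes_len) catch unreachable;
        _ = body; // dispatch on header.tag here
    }
    // Ownership of any buffered stderr output is transferred to the caller,
    // who is responsible for freeing it with the poller's allocator.
    const stderr_contents = try poller.toOwnedSlice(.stderr);
    if (stderr_contents.len > 0) std.debug.print("{s}", .{stderr_contents});
}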

View File

@ -286,7 +286,7 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
}
/// For debugging purposes, prints identifying information about this Step.
pub fn dump(step: *Step, w: *std.io.Writer, tty_config: std.io.tty.Config) void {
pub fn dump(step: *Step, w: *std.Io.Writer, tty_config: std.Io.tty.Config) void {
const debug_info = std.debug.getSelfDebugInfo() catch |err| {
w.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{
@errorName(err),
@ -359,7 +359,7 @@ pub fn addError(step: *Step, comptime fmt: []const u8, args: anytype) error{OutO
pub const ZigProcess = struct {
child: std.process.Child,
poller: std.io.Poller(StreamEnum),
poller: std.Io.Poller(StreamEnum),
progress_ipc_fd: if (std.Progress.have_ipc) ?std.posix.fd_t else void,
pub const StreamEnum = enum { stdout, stderr };
@ -428,7 +428,7 @@ pub fn evalZigProcess(
const zp = try gpa.create(ZigProcess);
zp.* = .{
.child = child,
.poller = std.io.poll(gpa, ZigProcess.StreamEnum, .{
.poller = std.Io.poll(gpa, ZigProcess.StreamEnum, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
}),
@ -511,12 +511,14 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
var result: ?Path = null;
const stdout = zp.poller.reader(.stdout);
poll: while (true) {
const Header = std.zig.Server.Message.Header;
while (stdout.buffered().len < @sizeOf(Header)) if (!try zp.poller.poll()) break :poll;
const header = (stdout.takeStruct(Header) catch unreachable).*;
const header = stdout.takeStruct(Header, .little) catch unreachable;
while (stdout.buffered().len < header.bytes_len) if (!try zp.poller.poll()) break :poll;
const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
@ -606,8 +608,7 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
s.result_duration_ns = timer.read();
const stderr = zp.poller.reader(.stderr);
const stderr_contents = stderr.buffered();
const stderr_contents = try zp.poller.toOwnedSlice(.stderr);
if (stderr_contents.len > 0) {
try s.result_error_msgs.append(arena, try arena.dupe(u8, stderr_contents));
}
@ -726,7 +727,7 @@ pub fn allocPrintCmd2(
argv: []const []const u8,
) Allocator.Error![]u8 {
const shell = struct {
fn escape(writer: *std.io.Writer, string: []const u8, is_argv0: bool) !void {
fn escape(writer: *std.Io.Writer, string: []const u8, is_argv0: bool) !void {
for (string) |c| {
if (switch (c) {
else => true,
@ -760,9 +761,9 @@ pub fn allocPrintCmd2(
}
};
var aw: std.io.Writer.Allocating = .init(arena);
var aw: std.Io.Writer.Allocating = .init(arena);
const writer = &aw.writer;
if (opt_cwd) |cwd| try writer.print(arena, "cd {s} && ", .{cwd});
if (opt_cwd) |cwd| writer.print("cd {s} && ", .{cwd}) catch return error.OutOfMemory;
if (opt_env) |env| {
const process_env_map = std.process.getEnvMap(arena) catch std.process.EnvMap.init(arena);
var it = env.iterator();
@ -772,17 +773,17 @@ pub fn allocPrintCmd2(
if (process_env_map.get(key)) |process_value| {
if (std.mem.eql(u8, value, process_value)) continue;
}
try writer.print(arena, "{s}=", .{key});
try shell.escape(writer, value, false);
try writer.writeByte(arena, ' ');
writer.print("{s}=", .{key}) catch return error.OutOfMemory;
shell.escape(writer, value, false) catch return error.OutOfMemory;
writer.writeByte(' ') catch return error.OutOfMemory;
}
}
try shell.escape(writer, argv[0], true);
shell.escape(writer, argv[0], true) catch return error.OutOfMemory;
for (argv[1..]) |arg| {
try writer.writeByte(arena, ' ');
try shell.escape(writer, arg, false);
writer.writeByte(' ') catch return error.OutOfMemory;
shell.escape(writer, arg, false) catch return error.OutOfMemory;
}
return aw.getWritten();
return aw.toOwnedSlice();
}
/// Prefer `cacheHitAndWatch` unless you already added watch inputs

View File

@ -1545,7 +1545,7 @@ fn evalZigTest(
const any_write_failed = first_write_failed or poll: while (true) {
const Header = std.zig.Server.Message.Header;
while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll false;
const header = (stdout.takeStruct(Header, .little) catch unreachable).*;
const header = stdout.takeStruct(Header, .little) catch unreachable;
while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll false;
const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
@ -1808,21 +1808,23 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
}
}
stdout_bytes = poller.reader(.stdout).buffered();
stderr_bytes = poller.reader(.stderr).buffered();
stdout_bytes = try poller.toOwnedSlice(.stdout);
stderr_bytes = try poller.toOwnedSlice(.stderr);
} else {
var fr = stdout.readerStreaming();
stdout_bytes = fr.interface().allocRemaining(arena, run.stdio_limit) catch |err| switch (err) {
var small_buffer: [1]u8 = undefined;
var stdout_reader = stdout.readerStreaming(&small_buffer);
stdout_bytes = stdout_reader.interface.allocRemaining(arena, run.stdio_limit) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReadFailed => return fr.err.?,
error.ReadFailed => return stdout_reader.err.?,
error.StreamTooLong => return error.StdoutStreamTooLong,
};
}
} else if (child.stderr) |stderr| {
var fr = stderr.readerStreaming();
stderr_bytes = fr.interface().allocRemaining(arena, run.stdio_limit) catch |err| switch (err) {
var small_buffer: [1]u8 = undefined;
var stderr_reader = stderr.readerStreaming(&small_buffer);
stderr_bytes = stderr_reader.interface.allocRemaining(arena, run.stdio_limit) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReadFailed => return fr.err.?,
error.ReadFailed => return stderr_reader.err.?,
error.StreamTooLong => return error.StderrStreamTooLong,
};
}

View File

@ -92,12 +92,7 @@ pub fn poll(
const enum_fields = @typeInfo(StreamEnum).@"enum".fields;
var result: Poller(StreamEnum) = .{
.gpa = gpa,
.readers = @splat(.{
.unbuffered_reader = .failing,
.buffer = &.{},
.end = 0,
.seek = 0,
}),
.readers = @splat(.failing),
.poll_fds = undefined,
.windows = if (is_windows) .{
.first_read_done = false,
@ -186,21 +181,40 @@ pub fn Poller(comptime StreamEnum: type) type {
}
}
pub inline fn reader(self: *Self, comptime which: StreamEnum) *Reader {
pub fn reader(self: *Self, which: StreamEnum) *Reader {
return &self.readers[@intFromEnum(which)];
}
pub fn toOwnedSlice(self: *Self, which: StreamEnum) error{OutOfMemory}![]u8 {
const gpa = self.gpa;
const r = reader(self, which);
if (r.seek == 0) {
const new = try gpa.realloc(r.buffer, r.end);
r.buffer = &.{};
r.end = 0;
return new;
}
const new = try gpa.dupe(u8, r.buffered());
gpa.free(r.buffer);
r.buffer = &.{};
r.seek = 0;
r.end = 0;
return new;
}
fn pollWindows(self: *Self, nanoseconds: ?u64) !bool {
const bump_amt = 512;
const gpa = self.gpa;
if (!self.windows.first_read_done) {
var already_read_data = false;
for (0..enum_fields.len) |i| {
const handle = self.windows.active.handles_buf[i];
switch (try windowsAsyncReadToFifoAndQueueSmallRead(
gpa,
handle,
&self.windows.overlapped[i],
&self.fifos[i],
&self.readers[i],
&self.windows.small_bufs[i],
bump_amt,
)) {
@ -247,7 +261,7 @@ pub fn Poller(comptime StreamEnum: type) type {
const handle = self.windows.active.handles_buf[active_idx];
const overlapped = &self.windows.overlapped[stream_idx];
const stream_fifo = &self.fifos[stream_idx];
const stream_reader = &self.readers[stream_idx];
const small_buf = &self.windows.small_bufs[stream_idx];
const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
@ -258,12 +272,16 @@ pub fn Poller(comptime StreamEnum: type) type {
},
.aborted => unreachable,
};
try stream_fifo.write(small_buf[0..num_bytes_read]);
const buf = small_buf[0..num_bytes_read];
const dest = try writableSliceGreedyAlloc(stream_reader, gpa, buf.len);
@memcpy(dest[0..buf.len], buf);
advanceBufferEnd(stream_reader, buf.len);
switch (try windowsAsyncReadToFifoAndQueueSmallRead(
gpa,
handle,
overlapped,
stream_fifo,
stream_reader,
small_buf,
bump_amt,
)) {
@ -298,18 +316,18 @@ pub fn Poller(comptime StreamEnum: type) type {
}
var keep_polling = false;
inline for (&self.poll_fds, &self.readers) |*poll_fd, *r| {
for (&self.poll_fds, &self.readers) |*poll_fd, *r| {
// Try reading whatever is available before checking the error
// conditions.
// It's still possible to read after a POLL.HUP is received, so always
// check if there's some data waiting to be read first.
if (poll_fd.revents & posix.POLL.IN != 0) {
const buf = try r.writableSliceGreedyAlloc(gpa, bump_amt);
const buf = try writableSliceGreedyAlloc(r, gpa, bump_amt);
const amt = posix.read(poll_fd.fd, buf) catch |err| switch (err) {
error.BrokenPipe => 0, // Handle the same as EOF.
else => |e| return e,
};
r.advanceBufferEnd(amt);
advanceBufferEnd(r, amt);
if (amt == 0) {
// Remove the fd when the EOF condition is met.
poll_fd.fd = -1;
@ -325,146 +343,181 @@ pub fn Poller(comptime StreamEnum: type) type {
}
return keep_polling;
}
};
}
/// The `ReadFile` documentation states that `lpNumberOfBytesRead` does not have a meaningful
/// result when using overlapped I/O, but also that it cannot be `null` on Windows 7. For
/// compatibility, we point it to this dummy variable, which we never otherwise access.
/// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
var win_dummy_bytes_read: u32 = undefined;
/// Read as much data as possible from `handle` with `overlapped`, and write it to the FIFO. Before
/// returning, queue a read into `small_buf` so that `WaitForMultipleObjects` returns when more data
/// is available. `handle` must have no pending asynchronous operation.
fn windowsAsyncReadToFifoAndQueueSmallRead(
handle: windows.HANDLE,
overlapped: *windows.OVERLAPPED,
r: *Reader,
small_buf: *[128]u8,
bump_amt: usize,
) !enum { empty, populated, closed_populated, closed } {
var read_any_data = false;
while (true) {
const fifo_read_pending = while (true) {
const buf = try r.writableWithSize(bump_amt);
const buf_len = math.cast(u32, buf.len) orelse math.maxInt(u32);
if (0 == windows.kernel32.ReadFile(
handle,
buf.ptr,
buf_len,
&win_dummy_bytes_read,
overlapped,
)) switch (windows.GetLastError()) {
.IO_PENDING => break true,
.BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
else => |err| return windows.unexpectedError(err),
};
const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
.success => |n| n,
.closed => return if (read_any_data) .closed_populated else .closed,
.aborted => unreachable,
};
read_any_data = true;
r.update(num_bytes_read);
if (num_bytes_read == buf_len) {
// We filled the buffer, so there's probably more data available.
continue;
} else {
// We didn't fill the buffer, so assume we're out of data.
// There is no pending read.
break false;
/// Returns a slice into the unused capacity of `buffer` with at least
/// `min_len` bytes, extending `buffer` by resizing it with `gpa` as necessary.
///
/// After calling this function, typically the caller will follow up with a
/// call to `advanceBufferEnd` to report the actual number of bytes buffered.
fn writableSliceGreedyAlloc(r: *Reader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 {
{
const unused = r.buffer[r.end..];
if (unused.len >= min_len) return unused;
}
};
if (fifo_read_pending) cancel_read: {
// Cancel the pending read into the FIFO.
_ = windows.kernel32.CancelIo(handle);
// We have to wait for the handle to be signalled, i.e. for the cancellation to complete.
switch (windows.kernel32.WaitForSingleObject(handle, windows.INFINITE)) {
windows.WAIT_OBJECT_0 => {},
windows.WAIT_FAILED => return windows.unexpectedError(windows.GetLastError()),
else => unreachable,
if (r.seek > 0) r.rebase();
{
var list: std.ArrayListUnmanaged(u8) = .{
.items = r.buffer[0..r.end],
.capacity = r.buffer.len,
};
defer r.buffer = list.allocatedSlice();
try list.ensureUnusedCapacity(allocator, min_len);
}
// If it completed before we canceled, make sure to tell the FIFO!
const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, true)) {
.success => |n| n,
.closed => return if (read_any_data) .closed_populated else .closed,
.aborted => break :cancel_read,
};
read_any_data = true;
r.update(num_bytes_read);
const unused = r.buffer[r.end..];
assert(unused.len >= min_len);
return unused;
}
// Try to queue the 1-byte read.
if (0 == windows.kernel32.ReadFile(
handle,
small_buf,
small_buf.len,
&win_dummy_bytes_read,
overlapped,
)) switch (windows.GetLastError()) {
.IO_PENDING => {
// 1-byte read pending as intended
return if (read_any_data) .populated else .empty;
},
.BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
else => |err| return windows.unexpectedError(err),
};
/// After writing directly into the unused capacity of `buffer`, this function
/// updates `end` so that users of `Reader` can receive the data.
fn advanceBufferEnd(r: *Reader, n: usize) void {
assert(n <= r.buffer.len - r.end);
r.end += n;
}
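As the doc comment above describes, `writableSliceGreedyAlloc` and `advanceBufferEnd` are meant to be used as a pair. Below is a hedged sketch of that pattern, modeled on the posix poll path earlier in this file; `fillFromFd` and its parameters are illustrative and assume the file-scope `Reader`, `Allocator`, and `posix` aliases already used in this diff.

fn fillFromFd(r: *Reader, gpa: Allocator, fd: posix.fd_t) !usize {
    // Grow the reader's buffer until at least 512 unused bytes are available.
    const dest = try writableSliceGreedyAlloc(r, gpa, 512);
    // Read directly into the unused capacity.
    const amt = try posix.read(fd, dest);
    // Publish the newly written bytes so r.buffered() can see them.
    advanceBufferEnd(r, amt);
    return amt;
}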
// We got data back this time. Write it to the FIFO and run the main loop again.
const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
.success => |n| n,
.closed => return if (read_any_data) .closed_populated else .closed,
.aborted => unreachable,
};
try r.write(small_buf[0..num_bytes_read]);
read_any_data = true;
}
}
/// The `ReadFile` documentation states that `lpNumberOfBytesRead` does not have a meaningful
/// result when using overlapped I/O, but also that it cannot be `null` on Windows 7. For
/// compatibility, we point it to this dummy variable, which we never otherwise access.
/// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
var win_dummy_bytes_read: u32 = undefined;
/// Simple wrapper around `GetOverlappedResult` to determine the result of a `ReadFile` operation.
/// If `!allow_aborted`, then `aborted` is never returned (`OPERATION_ABORTED` is considered unexpected).
///
/// The `ReadFile` documentation states that the number of bytes read by an overlapped `ReadFile` must be determined using `GetOverlappedResult`, even if the
/// operation immediately returns data:
/// "Use NULL for [lpNumberOfBytesRead] if this is an asynchronous operation to avoid potentially
/// erroneous results."
/// "If `hFile` was opened with `FILE_FLAG_OVERLAPPED`, the following conditions are in effect: [...]
/// The lpNumberOfBytesRead parameter should be set to NULL. Use the GetOverlappedResult function to
/// get the actual number of bytes read."
/// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
fn windowsGetReadResult(
handle: windows.HANDLE,
overlapped: *windows.OVERLAPPED,
allow_aborted: bool,
) !union(enum) {
success: u32,
closed,
aborted,
} {
var num_bytes_read: u32 = undefined;
if (0 == windows.kernel32.GetOverlappedResult(
handle,
overlapped,
&num_bytes_read,
0,
)) switch (windows.GetLastError()) {
.BROKEN_PIPE => return .closed,
.OPERATION_ABORTED => |err| if (allow_aborted) {
return .aborted;
} else {
return windows.unexpectedError(err);
},
else => |err| return windows.unexpectedError(err),
/// Read as much data as possible from `handle` with `overlapped`, and write it to the FIFO. Before
/// returning, queue a read into `small_buf` so that `WaitForMultipleObjects` returns when more data
/// is available. `handle` must have no pending asynchronous operation.
fn windowsAsyncReadToFifoAndQueueSmallRead(
gpa: Allocator,
handle: windows.HANDLE,
overlapped: *windows.OVERLAPPED,
r: *Reader,
small_buf: *[128]u8,
bump_amt: usize,
) !enum { empty, populated, closed_populated, closed } {
var read_any_data = false;
while (true) {
const fifo_read_pending = while (true) {
const buf = try writableSliceGreedyAlloc(r, gpa, bump_amt);
const buf_len = math.cast(u32, buf.len) orelse math.maxInt(u32);
if (0 == windows.kernel32.ReadFile(
handle,
buf.ptr,
buf_len,
&win_dummy_bytes_read,
overlapped,
)) switch (windows.GetLastError()) {
.IO_PENDING => break true,
.BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
else => |err| return windows.unexpectedError(err),
};
const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
.success => |n| n,
.closed => return if (read_any_data) .closed_populated else .closed,
.aborted => unreachable,
};
read_any_data = true;
advanceBufferEnd(r, num_bytes_read);
if (num_bytes_read == buf_len) {
// We filled the buffer, so there's probably more data available.
continue;
} else {
// We didn't fill the buffer, so assume we're out of data.
// There is no pending read.
break false;
}
};
if (fifo_read_pending) cancel_read: {
// Cancel the pending read into the FIFO.
_ = windows.kernel32.CancelIo(handle);
// We have to wait for the handle to be signalled, i.e. for the cancellation to complete.
switch (windows.kernel32.WaitForSingleObject(handle, windows.INFINITE)) {
windows.WAIT_OBJECT_0 => {},
windows.WAIT_FAILED => return windows.unexpectedError(windows.GetLastError()),
else => unreachable,
}
// If it completed before we canceled, make sure to tell the FIFO!
const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, true)) {
.success => |n| n,
.closed => return if (read_any_data) .closed_populated else .closed,
.aborted => break :cancel_read,
};
read_any_data = true;
advanceBufferEnd(r, num_bytes_read);
}
// Try to queue the 1-byte read.
if (0 == windows.kernel32.ReadFile(
handle,
small_buf,
small_buf.len,
&win_dummy_bytes_read,
overlapped,
)) switch (windows.GetLastError()) {
.IO_PENDING => {
// 1-byte read pending as intended
return if (read_any_data) .populated else .empty;
},
.BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
else => |err| return windows.unexpectedError(err),
};
// We got data back this time. Write it to the FIFO and run the main loop again.
const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
.success => |n| n,
.closed => return if (read_any_data) .closed_populated else .closed,
.aborted => unreachable,
};
const buf = small_buf[0..num_bytes_read];
const dest = try writableSliceGreedyAlloc(r, gpa, buf.len);
@memcpy(dest[0..buf.len], buf);
advanceBufferEnd(r, buf.len);
read_any_data = true;
}
}
/// Simple wrapper around `GetOverlappedResult` to determine the result of a `ReadFile` operation.
/// If `!allow_aborted`, then `aborted` is never returned (`OPERATION_ABORTED` is considered unexpected).
///
/// The `ReadFile` documentation states that the number of bytes read by an overlapped `ReadFile` must be determined using `GetOverlappedResult`, even if the
/// operation immediately returns data:
/// "Use NULL for [lpNumberOfBytesRead] if this is an asynchronous operation to avoid potentially
/// erroneous results."
/// "If `hFile` was opened with `FILE_FLAG_OVERLAPPED`, the following conditions are in effect: [...]
/// The lpNumberOfBytesRead parameter should be set to NULL. Use the GetOverlappedResult function to
/// get the actual number of bytes read."
/// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
fn windowsGetReadResult(
handle: windows.HANDLE,
overlapped: *windows.OVERLAPPED,
allow_aborted: bool,
) !union(enum) {
success: u32,
closed,
aborted,
} {
var num_bytes_read: u32 = undefined;
if (0 == windows.kernel32.GetOverlappedResult(
handle,
overlapped,
&num_bytes_read,
0,
)) switch (windows.GetLastError()) {
.BROKEN_PIPE => return .closed,
.OPERATION_ABORTED => |err| if (allow_aborted) {
return .aborted;
} else {
return windows.unexpectedError(err);
},
else => |err| return windows.unexpectedError(err),
};
return .{ .success = num_bytes_read };
}
};
return .{ .success = num_bytes_read };
}
/// Given an enum, returns a struct with fields of that enum, each field

View File

@ -1241,37 +1241,6 @@ pub fn fillAlloc(r: *Reader, allocator: Allocator, n: usize) FillAllocError!void
return fill(r, n);
}
/// Returns a slice into the unused capacity of `buffer` with at least
/// `min_len` bytes, extending `buffer` by resizing it with `gpa` as necessary.
///
/// After calling this function, typically the caller will follow up with a
/// call to `advanceBufferEnd` to report the actual number of bytes buffered.
pub fn writableSliceGreedyAlloc(r: *Reader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 {
{
const unused = r.buffer[r.end..];
if (unused.len >= min_len) return unused;
}
if (r.seek > 0) rebase(r);
{
var list: ArrayList(u8) = .{
.items = r.buffer[0..r.end],
.capacity = r.buffer.len,
};
defer r.buffer = list.allocatedSlice();
try list.ensureUnusedCapacity(allocator, min_len);
}
const unused = r.buffer[r.end..];
assert(unused.len >= min_len);
return unused;
}
/// After writing directly into the unused capacity of `buffer`, this function
/// updates `end` so that users of `Reader` can receive the data.
pub fn advanceBufferEnd(r: *Reader, n: usize) void {
assert(n <= r.buffer.len - r.end);
r.end += n;
}
fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
const result_info = @typeInfo(Result).int;
comptime assert(result_info.bits % 7 == 0);

View File

@ -7147,7 +7147,7 @@ pub const dirent = switch (native_os) {
off: off_t,
reclen: c_ushort,
type: u8,
name: [256:0]u8,
name: [255:0]u8,
},
else => void,
};

View File

@ -192,10 +192,27 @@ pub const iovec_const = extern struct {
len: usize,
};
pub const ACCMODE = enum(u2) {
RDONLY = 0,
WRONLY = 1,
RDWR = 2,
pub const ACCMODE = switch (native_os) {
// POSIX has a note about the access mode values:
//
// In historical implementations the value of O_RDONLY is zero. Because of
// that, it is not possible to detect the presence of O_RDONLY and another
// option. Future implementations should encode O_RDONLY and O_WRONLY as
// bit flags so that: O_RDONLY | O_WRONLY == O_RDWR
//
// In practice SerenityOS is the only system supported by Zig that
// implements this suggestion.
// https://github.com/SerenityOS/serenity/blob/4adc51fdf6af7d50679c48b39362e062f5a3b2cb/Kernel/API/POSIX/fcntl.h#L28-L30
.serenity => enum(u2) {
RDONLY = 1,
WRONLY = 2,
RDWR = 3,
},
else => enum(u2) {
RDONLY = 0,
WRONLY = 1,
RDWR = 2,
},
};
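A small illustration (not part of the diff) of the note above: with the SerenityOS-style encoding the access modes compose as bit flags, which the historical 0/1/2 encoding cannot express. The `SerenityAccmode` name below is hypothetical.

const std = @import("std");

const SerenityAccmode = enum(u2) { RDONLY = 1, WRONLY = 2, RDWR = 3 };

test "serenity-style access modes compose as bit flags" {
    const rd = @intFromEnum(SerenityAccmode.RDONLY);
    const wr = @intFromEnum(SerenityAccmode.WRONLY);
    // O_RDONLY | O_WRONLY == O_RDWR holds for this encoding (1 | 2 == 3).
    try std.testing.expectEqual(@intFromEnum(SerenityAccmode.RDWR), rd | wr);
    // With the historical encoding, O_RDONLY == 0, so O_RDONLY | O_WRONLY
    // is just O_WRONLY and the combination cannot be detected.
}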
pub const TCSA = enum(c_uint) {

View File

@ -14,6 +14,7 @@ const assert = std.debug.assert;
const native_os = builtin.os.tag;
const Allocator = std.mem.Allocator;
const ChildProcess = @This();
const ArrayList = std.ArrayListUnmanaged;
pub const Id = switch (native_os) {
.windows => windows.HANDLE,
@ -348,18 +349,6 @@ pub const RunResult = struct {
stderr: []u8,
};
fn writeBufferedReaderToArrayList(allocator: Allocator, list: *std.ArrayListUnmanaged(u8), r: *std.Io.Reader) !void {
assert(r.seek == 0);
if (list.capacity == 0) {
list.* = .{
.items = r.buffered(),
.capacity = r.buffer.len,
};
} else {
try list.appendSlice(allocator, r.buffered());
}
}
/// Collect the output from the process's stdout and stderr. Will return once all output
/// has been collected. This does not mean that the process has ended. `wait` should still
/// be called to wait for and clean up the process.
@ -369,8 +358,8 @@ pub fn collectOutput(
child: ChildProcess,
/// Used for `stdout` and `stderr`.
allocator: Allocator,
stdout: *std.ArrayListUnmanaged(u8),
stderr: *std.ArrayListUnmanaged(u8),
stdout: *ArrayList(u8),
stderr: *ArrayList(u8),
max_output_bytes: usize,
) !void {
assert(child.stdout_behavior == .Pipe);
@ -382,15 +371,35 @@ pub fn collectOutput(
});
defer poller.deinit();
while (try poller.poll()) {
if (poller.reader(.stdout).bufferedLen() > max_output_bytes)
return error.StdoutStreamTooLong;
if (poller.reader(.stderr).bufferedLen() > max_output_bytes)
return error.StderrStreamTooLong;
const stdout_r = poller.reader(.stdout);
stdout_r.buffer = stdout.allocatedSlice();
stdout_r.seek = 0;
stdout_r.end = stdout.items.len;
const stderr_r = poller.reader(.stderr);
stderr_r.buffer = stderr.allocatedSlice();
stderr_r.seek = 0;
stderr_r.end = stderr.items.len;
defer {
stdout.* = .{
.items = stdout_r.buffer[0..stdout_r.end],
.capacity = stdout_r.buffer.len,
};
stderr.* = .{
.items = stderr_r.buffer[0..stderr_r.end],
.capacity = stderr_r.buffer.len,
};
stdout_r.buffer = &.{};
stderr_r.buffer = &.{};
}
try writeBufferedReaderToArrayList(allocator, stdout, poller.reader(.stdout));
try writeBufferedReaderToArrayList(allocator, stderr, poller.reader(.stderr));
while (try poller.poll()) {
if (stdout_r.bufferedLen() > max_output_bytes)
return error.StdoutStreamTooLong;
if (stderr_r.bufferedLen() > max_output_bytes)
return error.StderrStreamTooLong;
}
}
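A hedged usage sketch (not shown in the commit) of the updated `collectOutput` signature, mirroring the `run` implementation further down in this file; `captureOutput` and its arguments are illustrative and assume the file-scope `Allocator`, `ArrayList`, and `ChildProcess` aliases from this diff, plus a child spawned with `.Pipe` stdout and stderr behavior.

fn captureOutput(gpa: Allocator, child: *ChildProcess) ![2][]u8 {
    var stdout: ArrayList(u8) = .empty;
    defer stdout.deinit(gpa);
    var stderr: ArrayList(u8) = .empty;
    defer stderr.deinit(gpa);
    // Drains both pipes until they close; wait() must still be called afterwards.
    try child.collectOutput(gpa, &stdout, &stderr, 50 * 1024);
    return .{
        try stdout.toOwnedSlice(gpa),
        try stderr.toOwnedSlice(gpa),
    };
}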
pub const RunError = posix.GetCwdError || posix.ReadError || SpawnError || posix.PollError || error{
@ -420,10 +429,10 @@ pub fn run(args: struct {
child.expand_arg0 = args.expand_arg0;
child.progress_node = args.progress_node;
var stdout: std.ArrayListUnmanaged(u8) = .empty;
errdefer stdout.deinit(args.allocator);
var stderr: std.ArrayListUnmanaged(u8) = .empty;
errdefer stderr.deinit(args.allocator);
var stdout: ArrayList(u8) = .empty;
defer stdout.deinit(args.allocator);
var stderr: ArrayList(u8) = .empty;
defer stderr.deinit(args.allocator);
try child.spawn();
errdefer {
@ -431,7 +440,7 @@ pub fn run(args: struct {
}
try child.collectOutput(args.allocator, &stdout, &stderr, args.max_output_bytes);
return RunResult{
return .{
.stdout = try stdout.toOwnedSlice(args.allocator),
.stderr = try stderr.toOwnedSlice(args.allocator),
.term = try child.wait(),
@ -877,12 +886,12 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void {
var cmd_line_cache = WindowsCommandLineCache.init(self.allocator, self.argv);
defer cmd_line_cache.deinit();
var app_buf: std.ArrayListUnmanaged(u16) = .empty;
var app_buf: ArrayList(u16) = .empty;
defer app_buf.deinit(self.allocator);
try app_buf.appendSlice(self.allocator, app_name_w);
var dir_buf: std.ArrayListUnmanaged(u16) = .empty;
var dir_buf: ArrayList(u16) = .empty;
defer dir_buf.deinit(self.allocator);
if (cwd_path_w.len > 0) {
@ -1022,8 +1031,8 @@ const ErrInt = std.meta.Int(.unsigned, @sizeOf(anyerror) * 8);
/// Note: If the dir is the cwd, dir_buf should be empty (len = 0).
fn windowsCreateProcessPathExt(
allocator: mem.Allocator,
dir_buf: *std.ArrayListUnmanaged(u16),
app_buf: *std.ArrayListUnmanaged(u16),
dir_buf: *ArrayList(u16),
app_buf: *ArrayList(u16),
pathext: [:0]const u16,
cmd_line_cache: *WindowsCommandLineCache,
envp_ptr: ?[*]u16,
@ -1506,7 +1515,7 @@ const WindowsCommandLineCache = struct {
/// Returns the absolute path of `cmd.exe` within the Windows system directory.
/// The caller owns the returned slice.
fn windowsCmdExePath(allocator: mem.Allocator) error{ OutOfMemory, Unexpected }![:0]u16 {
var buf = try std.ArrayListUnmanaged(u16).initCapacity(allocator, 128);
var buf = try ArrayList(u16).initCapacity(allocator, 128);
errdefer buf.deinit(allocator);
while (true) {
const unused_slice = buf.unusedCapacitySlice();

View File

@ -6215,19 +6215,20 @@ fn spawnZigRc(
return comp.failWin32Resource(win32_resource, "unable to spawn {s} rc: {s}", .{ argv[0], @errorName(err) });
};
var poller = std.io.poll(comp.gpa, enum { stdout }, .{
var poller = std.Io.poll(comp.gpa, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
defer poller.deinit();
const stdout = poller.fifo(.stdout);
const stdout = poller.reader(.stdout);
poll: while (true) {
while (stdout.readableLength() < @sizeOf(std.zig.Server.Message.Header)) if (!try poller.poll()) break :poll;
var header: std.zig.Server.Message.Header = undefined;
assert(stdout.read(std.mem.asBytes(&header)) == @sizeOf(std.zig.Server.Message.Header));
while (stdout.readableLength() < header.bytes_len) if (!try poller.poll()) break :poll;
const body = stdout.readableSliceOfLen(header.bytes_len);
const MessageHeader = std.zig.Server.Message.Header;
while (stdout.buffered().len < @sizeOf(MessageHeader)) if (!try poller.poll()) break :poll;
const header = stdout.takeStruct(MessageHeader, .little) catch unreachable;
while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
// We expect exactly one ErrorBundle, and if any error_bundle header is
@ -6250,13 +6251,10 @@ fn spawnZigRc(
},
else => {}, // ignore other messages
}
stdout.discard(body.len);
}
// Just in case there's a failure that didn't send an ErrorBundle (e.g. an error return trace)
const stderr_reader = child.stderr.?.deprecatedReader();
const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
const stderr = poller.reader(.stderr);
const term = child.wait() catch |err| {
return comp.failWin32Resource(win32_resource, "unable to wait for {s} rc: {s}", .{ argv[0], @errorName(err) });
@ -6265,12 +6263,12 @@ fn spawnZigRc(
switch (term) {
.Exited => |code| {
if (code != 0) {
log.err("zig rc failed with stderr:\n{s}", .{stderr});
log.err("zig rc failed with stderr:\n{s}", .{stderr.buffered()});
return comp.failWin32Resource(win32_resource, "zig rc exited with code {d}", .{code});
}
},
else => {
log.err("zig rc terminated with stderr:\n{s}", .{stderr});
log.err("zig rc terminated with stderr:\n{s}", .{stderr.buffered()});
return comp.failWin32Resource(win32_resource, "zig rc terminated unexpectedly", .{});
},
}

View File

@ -1887,8 +1887,10 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.call_never_tail => cg.airCall(inst, .never_tail),
.call_never_inline => cg.airCall(inst, .never_inline),
.is_err => cg.airIsErr(inst, .i32_ne),
.is_non_err => cg.airIsErr(inst, .i32_eq),
.is_err => cg.airIsErr(inst, .i32_ne, .value),
.is_non_err => cg.airIsErr(inst, .i32_eq, .value),
.is_err_ptr => cg.airIsErr(inst, .i32_ne, .ptr),
.is_non_err_ptr => cg.airIsErr(inst, .i32_eq, .ptr),
.is_null => cg.airIsNull(inst, .i32_eq, .value),
.is_non_null => cg.airIsNull(inst, .i32_ne, .value),
@ -1971,8 +1973,6 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.runtime_nav_ptr => cg.airRuntimeNavPtr(inst),
.assembly,
.is_err_ptr,
.is_non_err_ptr,
.err_return_trace,
.set_err_return_trace,
@ -4106,7 +4106,7 @@ fn airSwitchDispatch(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
return cg.finishAir(inst, .none, &.{br.operand});
}
fn airIsErr(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode) InnerError!void {
fn airIsErr(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
const zcu = cg.pt.zcu;
const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try cg.resolveInst(un_op);
@ -4123,7 +4123,7 @@ fn airIsErr(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode) InnerEr
}
try cg.emitWValue(operand);
if (pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
if (op_kind == .ptr or pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
try cg.addMemArg(.i32_load16_u, .{
.offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, zcu))),
.alignment = @intCast(Type.anyerror.abiAlignment(zcu).toByteUnits().?),
@ -6463,9 +6463,6 @@ fn lowerTry(
operand_is_ptr: bool,
) InnerError!WValue {
const zcu = cg.pt.zcu;
if (operand_is_ptr) {
return cg.fail("TODO: lowerTry for pointers", .{});
}
const pl_ty = err_union_ty.errorUnionPayload(zcu);
const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(zcu);
@ -6476,7 +6473,7 @@ fn lowerTry(
// check if the error tag is set for the error union.
try cg.emitWValue(err_union);
if (pl_has_bits) {
if (pl_has_bits or operand_is_ptr) {
const err_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
try cg.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
@ -6498,12 +6495,12 @@ fn lowerTry(
}
// if we reach here it means error was not set, and we want the payload
if (!pl_has_bits) {
if (!pl_has_bits and !operand_is_ptr) {
return .none;
}
const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
if (isByRef(pl_ty, zcu, cg.target)) {
if (operand_is_ptr or isByRef(pl_ty, zcu, cg.target)) {
return buildPointerOffset(cg, err_union, pl_offset, .new);
}
const payload = try cg.load(err_union, pl_ty, pl_offset);

View File

@ -414,6 +414,8 @@ pub fn libcFullLinkFlags(target: *const std.Target) []const []const u8 {
.android, .androideabi, .ohos, .ohoseabi => &.{ "-lm", "-lc", "-ldl" },
else => &.{ "-lm", "-lpthread", "-lc", "-ldl", "-lrt", "-lutil" },
},
// On SerenityOS libc includes libm, libpthread, libdl, and libssp.
.serenity => &.{"-lc"},
else => &.{},
};
return result;

View File

@ -121,3 +121,82 @@ test "'return try' through conditional" {
comptime std.debug.assert(result == 123);
}
}
test "try ptr propagation const" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo0() !u32 {
return 0;
}
fn foo1() error{Bad}!u32 {
return 1;
}
fn foo2() anyerror!u32 {
return 2;
}
fn doTheTest() !void {
const res0: *const u32 = &(try foo0());
const res1: *const u32 = &(try foo1());
const res2: *const u32 = &(try foo2());
try expect(res0.* == 0);
try expect(res1.* == 1);
try expect(res2.* == 2);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "try ptr propagation mutate" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
fn foo0() !u32 {
return 0;
}
fn foo1() error{Bad}!u32 {
return 1;
}
fn foo2() anyerror!u32 {
return 2;
}
fn doTheTest() !void {
var f0 = foo0();
var f1 = foo1();
var f2 = foo2();
const res0: *u32 = &(try f0);
const res1: *u32 = &(try f1);
const res2: *u32 = &(try f2);
res0.* += 1;
res1.* += 1;
res2.* += 1;
try expect(f0 catch unreachable == 1);
try expect(f1 catch unreachable == 2);
try expect(f2 catch unreachable == 3);
try expect(res0.* == 1);
try expect(res1.* == 2);
try expect(res2.* == 3);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}

View File

@ -800,6 +800,8 @@ const TestManifestConfigDefaults = struct {
}
// Windows
defaults = defaults ++ "x86_64-windows" ++ ",";
// Wasm
defaults = defaults ++ "wasm32-wasi";
break :blk defaults;
};
} else if (std.mem.eql(u8, key, "output_mode")) {

View File

@ -1369,16 +1369,15 @@ const test_targets = blk: {
// WASI Targets
// TODO: lowerTry for pointers
//.{
// .target = .{
// .cpu_arch = .wasm32,
// .os_tag = .wasi,
// .abi = .none,
// },
// .use_llvm = false,
// .use_lld = false,
//},
.{
.target = .{
.cpu_arch = .wasm32,
.os_tag = .wasi,
.abi = .none,
},
.use_llvm = false,
.use_lld = false,
},
.{
.target = .{
.cpu_arch = .wasm32,

View File

@ -3,7 +3,6 @@ const builtin = @import("builtin");
const io = std.io;
const fs = std.fs;
const process = std.process;
const ChildProcess = std.process.Child;
const Progress = std.Progress;
const print = std.debug.print;
const mem = std.mem;

View File

@ -186,7 +186,7 @@ pub fn main() !void {
try child.spawn();
var poller = std.io.poll(arena, Eval.StreamEnum, .{
var poller = std.Io.poll(arena, Eval.StreamEnum, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
@ -247,19 +247,15 @@ const Eval = struct {
fn check(eval: *Eval, poller: *Poller, update: Case.Update, prog_node: std.Progress.Node) !void {
const arena = eval.arena;
const Header = std.zig.Server.Message.Header;
const stdout = poller.fifo(.stdout);
const stderr = poller.fifo(.stderr);
const stdout = poller.reader(.stdout);
const stderr = poller.reader(.stderr);
poll: while (true) {
while (stdout.readableLength() < @sizeOf(Header)) {
if (!(try poller.poll())) break :poll;
}
const header = stdout.reader().readStruct(Header) catch unreachable;
while (stdout.readableLength() < header.bytes_len) {
if (!(try poller.poll())) break :poll;
}
const body = stdout.readableSliceOfLen(header.bytes_len);
const Header = std.zig.Server.Message.Header;
while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
const header = stdout.takeStruct(Header, .little) catch unreachable;
while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.error_bundle => {
@ -277,8 +273,8 @@ const Eval = struct {
.string_bytes = try arena.dupe(u8, string_bytes),
.extra = extra_array,
};
if (stderr.readableLength() > 0) {
const stderr_data = try stderr.toOwnedSlice();
if (stderr.bufferedLen() > 0) {
const stderr_data = try poller.toOwnedSlice(.stderr);
if (eval.allow_stderr) {
std.log.info("error_bundle included stderr:\n{s}", .{stderr_data});
} else {
@ -289,15 +285,14 @@ const Eval = struct {
try eval.checkErrorOutcome(update, result_error_bundle);
}
// This message indicates the end of the update.
stdout.discard(body.len);
return;
},
.emit_digest => {
const EbpHdr = std.zig.Server.Message.EmitDigest;
const ebp_hdr = @as(*align(1) const EbpHdr, @ptrCast(body));
_ = ebp_hdr;
if (stderr.readableLength() > 0) {
const stderr_data = try stderr.toOwnedSlice();
if (stderr.bufferedLen() > 0) {
const stderr_data = try poller.toOwnedSlice(.stderr);
if (eval.allow_stderr) {
std.log.info("emit_digest included stderr:\n{s}", .{stderr_data});
} else {
@ -308,7 +303,6 @@ const Eval = struct {
if (eval.target.backend == .sema) {
try eval.checkSuccessOutcome(update, null, prog_node);
// This message indicates the end of the update.
stdout.discard(body.len);
}
const digest = body[@sizeOf(EbpHdr)..][0..Cache.bin_digest_len];
@ -323,21 +317,18 @@ const Eval = struct {
try eval.checkSuccessOutcome(update, bin_path, prog_node);
// This message indicates the end of the update.
stdout.discard(body.len);
},
else => {
// Ignore other messages.
stdout.discard(body.len);
},
}
}
if (stderr.readableLength() > 0) {
const stderr_data = try stderr.toOwnedSlice();
if (stderr.bufferedLen() > 0) {
if (eval.allow_stderr) {
std.log.info("update '{s}' included stderr:\n{s}", .{ update.name, stderr_data });
std.log.info("update '{s}' included stderr:\n{s}", .{ update.name, stderr.buffered() });
} else {
eval.fatal("update '{s}' failed:\n{s}", .{ update.name, stderr_data });
eval.fatal("update '{s}' failed:\n{s}", .{ update.name, stderr.buffered() });
}
}
@ -537,25 +528,19 @@ const Eval = struct {
fn end(eval: *Eval, poller: *Poller) !void {
requestExit(eval.child, eval);
const Header = std.zig.Server.Message.Header;
const stdout = poller.fifo(.stdout);
const stderr = poller.fifo(.stderr);
const stdout = poller.reader(.stdout);
const stderr = poller.reader(.stderr);
poll: while (true) {
while (stdout.readableLength() < @sizeOf(Header)) {
if (!(try poller.poll())) break :poll;
}
const header = stdout.reader().readStruct(Header) catch unreachable;
while (stdout.readableLength() < header.bytes_len) {
if (!(try poller.poll())) break :poll;
}
const body = stdout.readableSliceOfLen(header.bytes_len);
stdout.discard(body.len);
const Header = std.zig.Server.Message.Header;
while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
const header = stdout.takeStruct(Header, .little) catch unreachable;
while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
stdout.toss(header.bytes_len);
}
if (stderr.readableLength() > 0) {
const stderr_data = try stderr.toOwnedSlice();
eval.fatal("unexpected stderr:\n{s}", .{stderr_data});
if (stderr.bufferedLen() > 0) {
eval.fatal("unexpected stderr:\n{s}", .{stderr.buffered()});
}
}