Merge pull request #21518 from mlugg/incremental-ci
incr-check enhancements, and CI for incremental test cases

commit 008bb1f120
@@ -577,6 +577,10 @@ pub fn build(b: *std.Build) !void {
} else {
update_mingw_step.dependOn(&b.addFail("The -Dmingw-src=... option is required for this step").step);
}

const test_incremental_step = b.step("test-incremental", "Run the incremental compilation test cases");
try tests.addIncrementalTests(b, test_incremental_step);
test_step.dependOn(test_incremental_step);
}

fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {
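
(Not part of the diff: given the step registration above, these cases are presumably invoked directly with `zig build test-incremental`, and they also run as part of `zig build test` via the added dependency.)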
lib/std/io.zig (192 changed lines)
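
For context only (this snippet is not part of the diff): the `std.io.poll` API whose Windows implementation is reworked below is typically driven roughly as follows. The child command and the stream enum names here are illustrative assumptions, not taken from the commit.

    const std = @import("std");

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        // Spawn some child process whose output we want to collect.
        var child = std.process.Child.init(&.{ "zig", "version" }, gpa);
        child.stdout_behavior = .Pipe;
        child.stderr_behavior = .Pipe;
        try child.spawn();

        // `std.io.poll` multiplexes the pipes; on Windows this is the
        // overlapped-I/O path that the hunks below rework.
        var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
            .stdout = child.stdout.?,
            .stderr = child.stderr.?,
        });
        defer poller.deinit();

        // `poll` blocks until data arrives and returns false once all
        // streams have closed; data accumulates in per-stream FIFOs.
        while (try poller.poll()) {}

        std.debug.print("stdout: {s}\n", .{poller.fifo(.stdout).readableSlice(0)});
        _ = try child.wait();
    }
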
@@ -442,6 +442,7 @@ pub fn poll(
.overlapped = [1]windows.OVERLAPPED{
mem.zeroes(windows.OVERLAPPED),
} ** enum_fields.len,
.small_bufs = undefined,
.active = .{
.count = 0,
.handles_buf = undefined,
@@ -481,6 +482,7 @@ pub fn Poller(comptime StreamEnum: type) type {
windows: if (is_windows) struct {
first_read_done: bool,
overlapped: [enum_fields.len]windows.OVERLAPPED,
small_bufs: [enum_fields.len][128]u8,
active: struct {
count: math.IntFittingRange(0, enum_fields.len),
handles_buf: [enum_fields.len]windows.HANDLE,
@@ -534,24 +536,31 @@ pub fn Poller(comptime StreamEnum: type) type {
const bump_amt = 512;

if (!self.windows.first_read_done) {
// Windows Async IO requires an initial call to ReadFile before waiting on the handle
var already_read_data = false;
for (0..enum_fields.len) |i| {
const handle = self.windows.active.handles_buf[i];
switch (try windowsAsyncRead(
switch (try windowsAsyncReadToFifoAndQueueSmallRead(
handle,
&self.windows.overlapped[i],
&self.fifos[i],
&self.windows.small_bufs[i],
bump_amt,
)) {
.pending => {
.populated, .empty => |state| {
if (state == .populated) already_read_data = true;
self.windows.active.handles_buf[self.windows.active.count] = handle;
self.windows.active.stream_map[self.windows.active.count] = @as(StreamEnum, @enumFromInt(i));
self.windows.active.count += 1;
},
.closed => {}, // don't add to the wait_objects list
.closed_populated => {
// don't add to the wait_objects list, but we did already get data
already_read_data = true;
},
}
}
self.windows.first_read_done = true;
if (already_read_data) return true;
}

while (true) {
@@ -576,32 +585,35 @@ pub fn Poller(comptime StreamEnum: type) type {

const active_idx = status - windows.WAIT_OBJECT_0;

const handle = self.windows.active.handles_buf[active_idx];
const stream_idx = @intFromEnum(self.windows.active.stream_map[active_idx]);
var read_bytes: u32 = undefined;
if (0 == windows.kernel32.GetOverlappedResult(
handle,
&self.windows.overlapped[stream_idx],
&read_bytes,
0,
)) switch (windows.GetLastError()) {
.BROKEN_PIPE => {
const handle = self.windows.active.handles_buf[active_idx];

const overlapped = &self.windows.overlapped[stream_idx];
const stream_fifo = &self.fifos[stream_idx];
const small_buf = &self.windows.small_bufs[stream_idx];

const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
.success => |n| n,
.closed => {
self.windows.active.removeAt(active_idx);
continue;
},
else => |err| return windows.unexpectedError(err),
.aborted => unreachable,
};
try stream_fifo.write(small_buf[0..num_bytes_read]);

self.fifos[stream_idx].update(read_bytes);

switch (try windowsAsyncRead(
switch (try windowsAsyncReadToFifoAndQueueSmallRead(
handle,
&self.windows.overlapped[stream_idx],
&self.fifos[stream_idx],
overlapped,
stream_fifo,
small_buf,
bump_amt,
)) {
.pending => {},
.closed => self.windows.active.removeAt(active_idx),
.empty => {}, // irrelevant, we already got data from the small buffer
.populated => {},
.closed,
.closed_populated, // identical, since we already got data from the small buffer
=> self.windows.active.removeAt(active_idx),
}
return true;
}
@@ -654,25 +666,145 @@ pub fn Poller(comptime StreamEnum: type) type {
};
}

fn windowsAsyncRead(
/// The `ReadFile` documentation states that `lpNumberOfBytesRead` does not have a meaningful
/// result when using overlapped I/O, but also that it cannot be `null` on Windows 7. For
/// compatibility, we point it to this dummy variable, which we never otherwise access.
/// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
var win_dummy_bytes_read: u32 = undefined;

/// Read as much data as possible from `handle` with `overlapped`, and write it to the FIFO. Before
/// returning, queue a read into `small_buf` so that `WaitForMultipleObjects` returns when more data
/// is available. `handle` must have no pending asynchronous operation.
fn windowsAsyncReadToFifoAndQueueSmallRead(
handle: windows.HANDLE,
overlapped: *windows.OVERLAPPED,
fifo: *PollFifo,
small_buf: *[128]u8,
bump_amt: usize,
) !enum { pending, closed } {
) !enum { empty, populated, closed_populated, closed } {
var read_any_data = false;
while (true) {
const buf = try fifo.writableWithSize(bump_amt);
var read_bytes: u32 = undefined;
const read_result = windows.kernel32.ReadFile(handle, buf.ptr, math.cast(u32, buf.len) orelse math.maxInt(u32), &read_bytes, overlapped);
if (read_result == 0) return switch (windows.GetLastError()) {
.IO_PENDING => .pending,
.BROKEN_PIPE => .closed,
else => |err| windows.unexpectedError(err),
const fifo_read_pending = while (true) {
const buf = try fifo.writableWithSize(bump_amt);
const buf_len = math.cast(u32, buf.len) orelse math.maxInt(u32);

if (0 == windows.kernel32.ReadFile(
handle,
buf.ptr,
buf_len,
&win_dummy_bytes_read,
overlapped,
)) switch (windows.GetLastError()) {
.IO_PENDING => break true,
.BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
else => |err| return windows.unexpectedError(err),
};

const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
.success => |n| n,
.closed => return if (read_any_data) .closed_populated else .closed,
.aborted => unreachable,
};

read_any_data = true;
fifo.update(num_bytes_read);

if (num_bytes_read == buf_len) {
// We filled the buffer, so there's probably more data available.
continue;
} else {
// We didn't fill the buffer, so assume we're out of data.
// There is no pending read.
break false;
}
};
fifo.update(read_bytes);

if (fifo_read_pending) cancel_read: {
// Cancel the pending read into the FIFO.
_ = windows.kernel32.CancelIo(handle);

// We have to wait for the handle to be signalled, i.e. for the cancellation to complete.
switch (windows.kernel32.WaitForSingleObject(handle, windows.INFINITE)) {
windows.WAIT_OBJECT_0 => {},
windows.WAIT_FAILED => return windows.unexpectedError(windows.GetLastError()),
else => unreachable,
}

// If it completed before we canceled, make sure to tell the FIFO!
const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, true)) {
.success => |n| n,
.closed => return if (read_any_data) .closed_populated else .closed,
.aborted => break :cancel_read,
};
read_any_data = true;
fifo.update(num_bytes_read);
}

// Try to queue the 1-byte read.
if (0 == windows.kernel32.ReadFile(
handle,
small_buf,
small_buf.len,
&win_dummy_bytes_read,
overlapped,
)) switch (windows.GetLastError()) {
.IO_PENDING => {
// 1-byte read pending as intended
return if (read_any_data) .populated else .empty;
},
.BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed,
else => |err| return windows.unexpectedError(err),
};

// We got data back this time. Write it to the FIFO and run the main loop again.
const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) {
.success => |n| n,
.closed => return if (read_any_data) .closed_populated else .closed,
.aborted => unreachable,
};
try fifo.write(small_buf[0..num_bytes_read]);
read_any_data = true;
}
}

/// Simple wrapper around `GetOverlappedResult` to determine the result of a `ReadFile` operation.
/// If `!allow_aborted`, then `aborted` is never returned (`OPERATION_ABORTED` is considered unexpected).
///
/// The `ReadFile` documentation states that the number of bytes read by an overlapped `ReadFile` must be determined using `GetOverlappedResult`, even if the
/// operation immediately returns data:
/// "Use NULL for [lpNumberOfBytesRead] if this is an asynchronous operation to avoid potentially
/// erroneous results."
/// "If `hFile` was opened with `FILE_FLAG_OVERLAPPED`, the following conditions are in effect: [...]
/// The lpNumberOfBytesRead parameter should be set to NULL. Use the GetOverlappedResult function to
/// get the actual number of bytes read."
/// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile
fn windowsGetReadResult(
handle: windows.HANDLE,
overlapped: *windows.OVERLAPPED,
allow_aborted: bool,
) !union(enum) {
success: u32,
closed,
aborted,
} {
var num_bytes_read: u32 = undefined;
if (0 == windows.kernel32.GetOverlappedResult(
handle,
overlapped,
&num_bytes_read,
0,
)) switch (windows.GetLastError()) {
.BROKEN_PIPE => return .closed,
.OPERATION_ABORTED => |err| if (allow_aborted) {
return .aborted;
} else {
return windows.unexpectedError(err);
},
else => |err| return windows.unexpectedError(err),
};
return .{ .success = num_bytes_read };
}

/// Given an enum, returns a struct with fields of that enum, each field
/// representing an I/O stream for polling.
pub fn PollFiles(comptime StreamEnum: type) type {

@@ -1,5 +1,6 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version
#file=main.zig
const std = @import("std");

@@ -1,5 +1,6 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version
#file=main.zig
const std = @import("std");

@@ -1,5 +1,6 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version
#file=main.zig
pub fn main() void {}

@@ -1,5 +1,6 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version
#file=main.zig
const std = @import("std");

@@ -1,5 +1,6 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version
#file=main.zig
const std = @import("std");

@@ -1,5 +1,6 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version
#file=main.zig
const std = @import("std");

@@ -1,5 +1,6 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version
#file=main.zig
const MyEnum = enum(u8) {

@@ -1,5 +1,6 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version
#file=main.zig
const SomeType = u32;

@@ -1,5 +1,6 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version
#file=main.zig
const std = @import("std");
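
The hunks above show only the first few lines of each updated test case. Assembled from those fragments, a minimal case in this manifest format looks roughly like the following; this is a sketch, and real cases presumably also declare an expected outcome per update (the tool distinguishes compile-error, stdout, and exit-code outcomes), which these truncated hunks do not show:

    #target=x86_64-linux-selfhosted
    #target=x86_64-linux-cbe
    #target=x86_64-windows-cbe
    #update=initial version
    #file=main.zig
    pub fn main() void {}

Each `#target=` value is a normal target query plus a trailing backend name (`sema`, `selfhosted`, `llvm`, or `cbe`); `Case.parse` further down splits it at the last `-` and, for `cbe`, forces the C object format.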
@@ -1509,3 +1509,31 @@ pub fn addDebuggerTests(b: *std.Build, options: DebuggerContext.Options) ?*Step
});
return step;
}

pub fn addIncrementalTests(b: *std.Build, test_step: *Step) !void {
const incr_check = b.addExecutable(.{
.name = "incr-check",
.root_source_file = b.path("tools/incr-check.zig"),
.target = b.graph.host,
.optimize = .Debug,
});

var dir = try b.build_root.handle.openDir("test/incremental", .{ .iterate = true });
defer dir.close();

var it = try dir.walk(b.graph.arena);
while (try it.next()) |entry| {
if (entry.kind != .file) continue;

const run = b.addRunArtifact(incr_check);
run.setName(b.fmt("incr-check '{s}'", .{entry.basename}));

run.addArg(b.graph.zig_exe);
run.addFileArg(b.path("test/incremental/").path(b, entry.path));
run.addArgs(&.{ "--zig-lib-dir", b.fmt("{}", .{b.graph.zig_lib_directory}) });

run.addCheck(.{ .expect_term = .{ .Exited = 0 } });

test_step.dependOn(&run.step);
}
}

@@ -1,11 +1,12 @@
const std = @import("std");
const fatal = std.process.fatal;
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;

const usage = "usage: incr-check <zig binary path> <input file> [--zig-lib-dir lib] [--debug-zcu] [--debug-link] [--zig-cc-binary /path/to/zig]";
const usage = "usage: incr-check <zig binary path> <input file> [--zig-lib-dir lib] [--debug-zcu] [--debug-link] [--preserve-tmp] [--zig-cc-binary /path/to/zig]";

pub fn main() !void {
const fatal = std.process.fatal;

var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_instance.deinit();
const arena = arena_instance.allocator();
@@ -16,6 +17,7 @@ pub fn main() !void {
var opt_cc_zig: ?[]const u8 = null;
var debug_zcu = false;
var debug_link = false;
var preserve_tmp = false;

var arg_it = try std.process.argsWithAllocator(arena);
_ = arg_it.skip();
@@ -27,6 +29,8 @@ pub fn main() !void {
debug_zcu = true;
} else if (std.mem.eql(u8, arg, "--debug-link")) {
debug_link = true;
} else if (std.mem.eql(u8, arg, "--preserve-tmp")) {
preserve_tmp = true;
} else if (std.mem.eql(u8, arg, "--zig-cc-binary")) {
opt_cc_zig = arg_it.next() orelse fatal("expect arg after '--zig-cc-binary'\n{s}", .{usage});
} else {
@@ -48,15 +52,29 @@ pub fn main() !void {
const input_file_bytes = try std.fs.cwd().readFileAlloc(arena, input_file_name, std.math.maxInt(u32));
const case = try Case.parse(arena, input_file_bytes);

// Check now: if there are any targets using the `cbe` backend, we need the lib dir.
if (opt_lib_dir == null) {
for (case.targets) |target| {
if (target.backend == .cbe) {
fatal("'--zig-lib-dir' required when using backend 'cbe'", .{});
}
}
}

const prog_node = std.Progress.start(.{});
defer prog_node.end();

const rand_int = std.crypto.random.int(u64);
const tmp_dir_path = "tmp_" ++ std.fmt.hex(rand_int);
const tmp_dir = try std.fs.cwd().makeOpenPath(tmp_dir_path, .{});

const child_prog_node = prog_node.start("zig build-exe", 0);
defer child_prog_node.end();
var tmp_dir = try std.fs.cwd().makeOpenPath(tmp_dir_path, .{});
defer {
tmp_dir.close();
if (!preserve_tmp) {
std.fs.cwd().deleteTree(tmp_dir_path) catch |err| {
std.log.warn("failed to delete tree '{s}': {s}", .{ tmp_dir_path, @errorName(err) });
};
}
}

// Convert paths to be relative to the cwd of the subprocess.
const resolved_zig_exe = try std.fs.path.relative(arena, tmp_dir_path, zig_exe);
@@ -65,10 +83,21 @@ pub fn main() !void {
else
null;

const host = try std.zig.system.resolveTargetQuery(.{});

const debug_log_verbose = debug_zcu or debug_link;

for (case.targets) |target| {
std.log.scoped(.status).info("target: '{s}-{s}'", .{ target.query, @tagName(target.backend) });
const target_prog_node = node: {
var name_buf: [std.Progress.Node.max_name_len]u8 = undefined;
const name = std.fmt.bufPrint(&name_buf, "{s}-{s}", .{ target.query, @tagName(target.backend) }) catch &name_buf;
break :node prog_node.start(name, case.updates.len);
};
defer target_prog_node.end();

if (debug_log_verbose) {
std.log.scoped(.status).info("target: '{s}-{s}'", .{ target.query, @tagName(target.backend) });
}

var child_args: std.ArrayListUnmanaged([]const u8) = .empty;
try child_args.appendSlice(arena, &.{
@@ -81,7 +110,7 @@ pub fn main() !void {
"--cache-dir",
".local-cache",
"--global-cache-dir",
".global_cache",
".global-cache",
"--listen=-",
});
if (opt_resolved_lib_dir) |resolved_lib_dir| {
@@ -100,11 +129,14 @@ pub fn main() !void {
try child_args.appendSlice(arena, &.{ "--debug-log", "link", "--debug-log", "link_state", "--debug-log", "link_relocs" });
}

const zig_prog_node = target_prog_node.start("zig build-exe", 0);
defer zig_prog_node.end();

var child = std.process.Child.init(child_args.items, arena);
child.stdin_behavior = .Pipe;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Pipe;
child.progress_node = child_prog_node;
child.progress_node = zig_prog_node;
child.cwd_dir = tmp_dir;
child.cwd = tmp_dir_path;

@@ -121,7 +153,7 @@ pub fn main() !void {
"-target",
target.query,
"-I",
opt_resolved_lib_dir orelse fatal("'--zig-lib-dir' required when using backend 'cbe'", .{}),
opt_resolved_lib_dir.?, // verified earlier
"-o",
});
}
@@ -129,11 +161,13 @@ pub fn main() !void {
var eval: Eval = .{
.arena = arena,
.case = case,
.host = host,
.target = target,
.tmp_dir = tmp_dir,
.tmp_dir_path = tmp_dir_path,
.child = &child,
.allow_stderr = debug_log_verbose,
.preserve_tmp_on_fatal = preserve_tmp,
.cc_child_args = &cc_child_args,
};

@@ -146,7 +180,7 @@ pub fn main() !void {
defer poller.deinit();

for (case.updates) |update| {
var update_node = prog_node.start(update.name, 0);
var update_node = target_prog_node.start(update.name, 0);
defer update_node.end();

if (debug_log_verbose) {
@@ -160,18 +194,20 @@ pub fn main() !void {

try eval.end(&poller);

waitChild(&child);
waitChild(&child, &eval);
}
}

const Eval = struct {
arena: Allocator,
host: std.Target,
case: Case,
target: Case.Target,
tmp_dir: std.fs.Dir,
tmp_dir_path: []const u8,
child: *std.process.Child,
allow_stderr: bool,
preserve_tmp_on_fatal: bool,
/// When `target.backend == .cbe`, this contains the first few arguments to `zig cc` to build the generated binary.
/// The arguments `out.c in.c` must be appended before spawning the subprocess.
cc_child_args: *std.ArrayListUnmanaged([]const u8),
@@ -186,12 +222,12 @@ const Eval = struct {
.sub_path = full_contents.name,
.data = full_contents.bytes,
}) catch |err| {
fatal("failed to update '{s}': {s}", .{ full_contents.name, @errorName(err) });
eval.fatal("failed to update '{s}': {s}", .{ full_contents.name, @errorName(err) });
};
}
for (update.deletes) |doomed_name| {
eval.tmp_dir.deleteFile(doomed_name) catch |err| {
fatal("failed to delete '{s}': {s}", .{ doomed_name, @errorName(err) });
eval.fatal("failed to delete '{s}': {s}", .{ doomed_name, @errorName(err) });
};
}
}
@@ -233,7 +269,7 @@ const Eval = struct {
if (eval.allow_stderr) {
std.log.info("error_bundle included stderr:\n{s}", .{stderr_data});
} else {
fatal("error_bundle included unexpected stderr:\n{s}", .{stderr_data});
eval.fatal("error_bundle included unexpected stderr:\n{s}", .{stderr_data});
}
}
if (result_error_bundle.errorMessageCount() != 0) {
@@ -252,7 +288,7 @@ const Eval = struct {
if (eval.allow_stderr) {
std.log.info("emit_digest included stderr:\n{s}", .{stderr_data});
} else {
fatal("emit_digest included unexpected stderr:\n{s}", .{stderr_data});
eval.fatal("emit_digest included unexpected stderr:\n{s}", .{stderr_data});
}
}

@@ -268,14 +304,7 @@ const Eval = struct {
const name = std.fs.path.stem(std.fs.path.basename(eval.case.root_source_file));
const bin_name = try std.zig.binNameAlloc(arena, .{
.root_name = name,
.target = try std.zig.system.resolveTargetQuery(try std.Build.parseTargetQuery(.{
.arch_os_abi = eval.target.query,
.object_format = switch (eval.target.backend) {
.sema => unreachable,
.selfhosted, .llvm => null,
.cbe => "c",
},
})),
.target = eval.target.resolved,
.output_mode = .Exe,
});
const bin_path = try std.fs.path.join(arena, &.{ result_dir, bin_name });
@@ -296,16 +325,15 @@ const Eval = struct {
if (eval.allow_stderr) {
std.log.info("update '{s}' included stderr:\n{s}", .{ update.name, stderr_data });
} else {
fatal("update '{s}' failed:\n{s}", .{ update.name, stderr_data });
eval.fatal("update '{s}' failed:\n{s}", .{ update.name, stderr_data });
}
}

waitChild(eval.child);
fatal("update '{s}': compiler failed to send error_bundle or emit_bin_path", .{update.name});
waitChild(eval.child, eval);
eval.fatal("update '{s}': compiler failed to send error_bundle or emit_bin_path", .{update.name});
}

fn checkErrorOutcome(eval: *Eval, update: Case.Update, error_bundle: std.zig.ErrorBundle) !void {
_ = eval;
switch (update.outcome) {
.unknown => return,
.compile_errors => |expected_errors| {
@@ -317,7 +345,7 @@ const Eval = struct {
.stdout, .exit_code => {
const color: std.zig.Color = .auto;
error_bundle.renderToStdErr(color.renderOptions());
fatal("update '{s}': unexpected compile errors", .{update.name});
eval.fatal("update '{s}': unexpected compile errors", .{update.name});
},
}
}
@@ -325,7 +353,7 @@ const Eval = struct {
fn checkSuccessOutcome(eval: *Eval, update: Case.Update, opt_emitted_path: ?[]const u8, prog_node: std.Progress.Node) !void {
switch (update.outcome) {
.unknown => return,
.compile_errors => fatal("expected compile errors but compilation incorrectly succeeded", .{}),
.compile_errors => eval.fatal("expected compile errors but compilation incorrectly succeeded", .{}),
.stdout, .exit_code => {},
}
const emitted_path = opt_emitted_path orelse {
@@ -344,27 +372,73 @@ const Eval = struct {
},
};

var argv_buf: [2][]const u8 = undefined;
const argv: []const []const u8, const is_foreign: bool = switch (std.zig.system.getExternalExecutor(
eval.host,
&eval.target.resolved,
.{ .link_libc = eval.target.backend == .cbe },
)) {
.bad_dl, .bad_os_or_cpu => {
// This binary cannot be executed on this host.
if (eval.allow_stderr) {
std.log.warn("skipping execution because host '{s}' cannot execute binaries for foreign target '{s}'", .{
try eval.host.zigTriple(eval.arena),
try eval.target.resolved.zigTriple(eval.arena),
});
}
return;
},
.native, .rosetta => argv: {
argv_buf[0] = binary_path;
break :argv .{ argv_buf[0..1], false };
},
.qemu, .wine, .wasmtime, .darling => |executor_cmd| argv: {
argv_buf[0] = executor_cmd;
argv_buf[1] = binary_path;
break :argv .{ argv_buf[0..2], true };
},
};

const run_prog_node = prog_node.start("run generated executable", 0);
defer run_prog_node.end();

const result = std.process.Child.run(.{
.allocator = eval.arena,
.argv = &.{binary_path},
.argv = argv,
.cwd_dir = eval.tmp_dir,
.cwd = eval.tmp_dir_path,
}) catch |err| {
fatal("update '{s}': failed to run the generated executable '{s}': {s}", .{
if (is_foreign) {
// Chances are the foreign executor isn't available. Skip this evaluation.
if (eval.allow_stderr) {
std.log.warn("update '{s}': skipping execution of '{s}' via executor for foreign target '{s}': {s}", .{
update.name,
binary_path,
try eval.target.resolved.zigTriple(eval.arena),
@errorName(err),
});
}
return;
}
eval.fatal("update '{s}': failed to run the generated executable '{s}': {s}", .{
update.name, binary_path, @errorName(err),
});
};
if (result.stderr.len != 0) {

// Some executors (looking at you, Wine) like throwing some stderr in, just for fun.
// Therefore, we'll ignore stderr when using a foreign executor.
if (!is_foreign and result.stderr.len != 0) {
std.log.err("update '{s}': generated executable '{s}' had unexpected stderr:\n{s}", .{
update.name, binary_path, result.stderr,
});
}

switch (result.term) {
.Exited => |code| switch (update.outcome) {
.unknown, .compile_errors => unreachable,
.stdout => |expected_stdout| {
if (code != 0) {
fatal("update '{s}': generated executable '{s}' failed with code {d}", .{
eval.fatal("update '{s}': generated executable '{s}' failed with code {d}", .{
update.name, binary_path, code,
});
}
@@ -373,12 +447,13 @@ const Eval = struct {
.exit_code => |expected_code| try std.testing.expectEqual(expected_code, result.term.Exited),
},
.Signal, .Stopped, .Unknown => {
fatal("update '{s}': generated executable '{s}' terminated unexpectedly", .{
eval.fatal("update '{s}': generated executable '{s}' terminated unexpectedly", .{
update.name, binary_path,
});
},
}
if (result.stderr.len != 0) std.process.exit(1);

if (!is_foreign and result.stderr.len != 0) std.process.exit(1);
}

fn requestUpdate(eval: *Eval) !void {
@@ -390,7 +465,7 @@ const Eval = struct {
}

fn end(eval: *Eval, poller: *Poller) !void {
requestExit(eval.child);
requestExit(eval.child, eval);

const Header = std.zig.Server.Message.Header;
const stdout = poller.fifo(.stdout);
@@ -410,7 +485,7 @@ const Eval = struct {

if (stderr.readableLength() > 0) {
const stderr_data = try stderr.toOwnedSlice();
fatal("unexpected stderr:\n{s}", .{stderr_data});
eval.fatal("unexpected stderr:\n{s}", .{stderr_data});
}
}

@@ -430,7 +505,7 @@ const Eval = struct {
.cwd = eval.tmp_dir_path,
.progress_node = child_prog_node,
}) catch |err| {
fatal("update '{s}': failed to spawn zig cc for '{s}': {s}", .{
eval.fatal("update '{s}': failed to spawn zig cc for '{s}': {s}", .{
update.name, c_path, @errorName(err),
});
};
@@ -441,7 +516,7 @@ const Eval = struct {
update.name, result.stderr,
});
}
fatal("update '{s}': zig cc for '{s}' failed with code {d}", .{
eval.fatal("update '{s}': zig cc for '{s}' failed with code {d}", .{
update.name, c_path, code,
});
},
@@ -451,12 +526,22 @@ const Eval = struct {
update.name, result.stderr,
});
}
fatal("update '{s}': zig cc for '{s}' terminated unexpectedly", .{
eval.fatal("update '{s}': zig cc for '{s}' terminated unexpectedly", .{
update.name, c_path,
});
},
}
}

fn fatal(eval: *Eval, comptime fmt: []const u8, args: anytype) noreturn {
eval.tmp_dir.close();
if (!eval.preserve_tmp_on_fatal) {
std.fs.cwd().deleteTree(eval.tmp_dir_path) catch |err| {
std.log.warn("failed to delete tree '{s}': {s}", .{ eval.tmp_dir_path, @errorName(err) });
};
}
std.process.fatal(fmt, args);
}
};

const Case = struct {
@@ -466,6 +551,7 @@ const Case = struct {

const Target = struct {
query: []const u8,
resolved: std.Target,
backend: Backend,
const Backend = enum {
/// Run semantic analysis only. Runtime output will not be tested, but we still verify
@@ -511,6 +597,8 @@ const Case = struct {
};

fn parse(arena: Allocator, bytes: []const u8) !Case {
const fatal = std.process.fatal;

var targets: std.ArrayListUnmanaged(Target) = .empty;
var updates: std.ArrayListUnmanaged(Update) = .empty;
var changes: std.ArrayListUnmanaged(FullContents) = .empty;
@@ -521,18 +609,32 @@ const Case = struct {
if (std.mem.startsWith(u8, line, "#")) {
var line_it = std.mem.splitScalar(u8, line, '=');
const key = line_it.first()[1..];
const val = line_it.rest();
const val = std.mem.trimRight(u8, line_it.rest(), "\r"); // windows moment
if (val.len == 0) {
fatal("line {d}: missing value", .{line_n});
} else if (std.mem.eql(u8, key, "target")) {
const split_idx = std.mem.lastIndexOfScalar(u8, val, '-') orelse
fatal("line {d}: target does not include backend", .{line_n});

const query = val[0..split_idx];

const backend_str = val[split_idx + 1 ..];
const backend: Target.Backend = std.meta.stringToEnum(Target.Backend, backend_str) orelse
fatal("line {d}: invalid backend '{s}'", .{ line_n, backend_str });

const parsed_query = std.Build.parseTargetQuery(.{
.arch_os_abi = query,
.object_format = switch (backend) {
.sema, .selfhosted, .llvm => null,
.cbe => "c",
},
}) catch fatal("line {d}: invalid target query '{s}'", .{ line_n, query });

const resolved = try std.zig.system.resolveTargetQuery(parsed_query);

try targets.append(arena, .{
.query = query,
.resolved = resolved,
.backend = backend,
});
} else if (std.mem.eql(u8, key, "update")) {
@@ -603,7 +705,7 @@ const Case = struct {
}
};

fn requestExit(child: *std.process.Child) void {
fn requestExit(child: *std.process.Child, eval: *Eval) void {
if (child.stdin == null) return;

const header: std.zig.Client.Message.Header = .{
@@ -612,7 +714,7 @@ fn requestExit(child: *std.process.Child) void {
};
child.stdin.?.writeAll(std.mem.asBytes(&header)) catch |err| switch (err) {
error.BrokenPipe => {},
else => fatal("failed to send exit: {s}", .{@errorName(err)}),
else => eval.fatal("failed to send exit: {s}", .{@errorName(err)}),
};

// Send EOF to stdin.
@@ -620,11 +722,11 @@ fn requestExit(child: *std.process.Child) void {
child.stdin = null;
}

fn waitChild(child: *std.process.Child) void {
requestExit(child);
const term = child.wait() catch |err| fatal("child process failed: {s}", .{@errorName(err)});
fn waitChild(child: *std.process.Child, eval: *Eval) void {
requestExit(child, eval);
const term = child.wait() catch |err| eval.fatal("child process failed: {s}", .{@errorName(err)});
switch (term) {
.Exited => |code| if (code != 0) fatal("compiler failed with code {d}", .{code}),
.Signal, .Stopped, .Unknown => fatal("compiler terminated unexpectedly", .{}),
.Exited => |code| if (code != 0) eval.fatal("compiler failed with code {d}", .{code}),
.Signal, .Stopped, .Unknown => eval.fatal("compiler terminated unexpectedly", .{}),
}
}