Merge pull request #25029 from mlugg/unit-test-timing

build system: unit test enhancements

Contributes towards https://github.com/ziglang/zig/issues/19821, but does not close it, since the timeout currently cannot be modified per unit test.
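The new --test-timeout option takes a duration written as a number followed by a unit (ns, us, ms, s, m, h); the CI scripts updated below pass values such as 2m and 30m. An illustrative invocation (the flag values here are examples only, not taken from any particular script):

    zig build test --test-timeout 5m --summary line
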
Matthew Lugg 2025-10-18 09:36:32 +01:00 committed by GitHub
commit 35d0790514
38 changed files with 2245 additions and 1511 deletions


@ -50,7 +50,8 @@ stage3-debug/bin/zig build test docs \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib" \
-Denable-superhtml
-Denable-superhtml \
--test-timeout 3m
stage3-debug/bin/zig build \
--prefix stage4-debug \


@ -50,7 +50,8 @@ stage3-release/bin/zig build test docs \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib" \
-Denable-superhtml
-Denable-superhtml \
--test-timeout 3m
# Ensure that stage3 and stage4 are byte-for-byte identical.
stage3-release/bin/zig build \


@ -46,4 +46,5 @@ stage3-debug/bin/zig build test docs \
-Denable-macos-sdk \
-Dstatic-llvm \
-Dskip-non-native \
--search-prefix "$PREFIX"
--search-prefix "$PREFIX" \
--test-timeout 2m


@ -46,7 +46,8 @@ stage3-release/bin/zig build test docs \
-Denable-macos-sdk \
-Dstatic-llvm \
-Dskip-non-native \
--search-prefix "$PREFIX"
--search-prefix "$PREFIX" \
--test-timeout 2m
# Ensure that stage3 and stage4 are byte-for-byte identical.
stage3-release/bin/zig build \


@ -58,7 +58,8 @@ Write-Output "Main test suite..."
--search-prefix "$PREFIX_PATH" `
-Dstatic-llvm `
-Dskip-non-native `
-Denable-symlinks-windows
-Denable-symlinks-windows `
--test-timeout 30m
CheckLastExitCode
# Ensure that stage3 and stage4 are byte-for-byte identical.


@ -50,7 +50,8 @@ stage3-debug/bin/zig build test docs \
-Dskip-non-native \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"
--zig-lib-dir "$PWD/../lib" \
--test-timeout 4m
stage3-debug/bin/zig build \
--prefix stage4-debug \


@ -50,7 +50,8 @@ stage3-release/bin/zig build test docs \
-Dskip-non-native \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"
--zig-lib-dir "$PWD/../lib" \
--test-timeout 4m
# Ensure that stage3 and stage4 are byte-for-byte identical.
stage3-release/bin/zig build \


@ -51,4 +51,5 @@ stage3-debug/bin/zig build test-cases test-modules test-unit test-c-abi test-sta
-Dskip-compile-errors \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"
--zig-lib-dir "$PWD/../lib" \
--test-timeout 4m


@ -51,4 +51,5 @@ stage3-release/bin/zig build test-cases test-modules test-unit test-c-abi test-s
-Dskip-compile-errors \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"
--zig-lib-dir "$PWD/../lib" \
--test-timeout 4m


@ -51,7 +51,8 @@ stage3-debug/bin/zig build test docs \
-Dskip-windows \
-Dskip-macos \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"
--zig-lib-dir "$PWD/../lib" \
--test-timeout 2m
stage3-debug/bin/zig build \
--prefix stage4-debug \


@ -51,7 +51,8 @@ stage3-release/bin/zig build test docs \
-Dskip-windows \
-Dskip-macos \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"
--zig-lib-dir "$PWD/../lib" \
--test-timeout 2m
# Ensure that stage3 and stage4 are byte-for-byte identical.
stage3-release/bin/zig build \


@ -61,4 +61,5 @@ stage3-debug/bin/zig build test docs \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib" \
-Denable-superhtml
-Denable-superhtml \
--test-timeout 12m


@ -61,4 +61,5 @@ stage3-debug/bin/zig build test docs \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib" \
-Denable-superhtml
-Denable-superhtml \
--test-timeout 10m


@ -63,7 +63,8 @@ stage3-release/bin/zig build test docs \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib" \
-Denable-superhtml
-Denable-superhtml \
--test-timeout 12m
# Ensure that stage3 and stage4 are byte-for-byte identical.
stage3-release/bin/zig build \


@ -59,7 +59,8 @@ Write-Output "Main test suite..."
-Dstatic-llvm `
-Dskip-non-native `
-Dskip-release `
-Denable-symlinks-windows
-Denable-symlinks-windows `
--test-timeout 30m
CheckLastExitCode
Write-Output "Build x86_64-windows-msvc behavior tests using the C backend..."


@ -58,7 +58,8 @@ Write-Output "Main test suite..."
--search-prefix "$PREFIX_PATH" `
-Dstatic-llvm `
-Dskip-non-native `
-Denable-symlinks-windows
-Denable-symlinks-windows `
--test-timeout 30m
CheckLastExitCode
# Ensure that stage3 and stage4 are byte-for-byte identical.


@ -139,6 +139,19 @@
<div><slot name="llvm-pass-timings"></slot></div>
</details>
</div>
<div id="runTestReport">
<table class="time-stats">
<thead>
<tr>
<th scope="col">Test Name</th>
<th scope="col">Duration</th>
</tr>
</thead>
<!-- HTML does not allow placing a 'slot' inside of a 'tbody' for backwards-compatibility
reasons, so we unfortunately must template on the `id` here. -->
<tbody id="runTestTableBody"></tbody>
</table>
</div>
</div>
</details>
</template>


@ -46,8 +46,9 @@ WebAssembly.instantiateStreaming(wasm_promise, {
updateCoverage: fuzzUpdateCoverage,
},
time_report: {
updateCompile: timeReportUpdateCompile,
updateGeneric: timeReportUpdateGeneric,
updateCompile: timeReportUpdateCompile,
updateRunTest: timeReportUpdateRunTest,
},
}).then(function(obj) {
setConnectionStatus("Connecting to WebSocket...", true);
@ -248,6 +249,7 @@ function timeReportUpdateCompile(
shadow.getElementById("genericReport").classList.add("hidden");
shadow.getElementById("compileReport").classList.remove("hidden");
shadow.getElementById("runTestReport").classList.add("hidden");
if (!use_llvm) shadow.querySelector(":host > details").classList.add("no-llvm");
host.innerHTML = inner_html;
@ -265,8 +267,26 @@ function timeReportUpdateGeneric(
shadow.querySelector(":host > details").classList.remove("pending", "no-llvm");
shadow.getElementById("genericReport").classList.remove("hidden");
shadow.getElementById("compileReport").classList.add("hidden");
shadow.getElementById("runTestReport").classList.add("hidden");
host.innerHTML = inner_html;
}
function timeReportUpdateRunTest(
step_idx,
table_html_ptr,
table_html_len,
) {
const table_html = decodeString(table_html_ptr, table_html_len);
const host = domTimeReportList.children.item(step_idx);
const shadow = host.shadowRoot;
shadow.querySelector(":host > details").classList.remove("pending", "no-llvm");
shadow.getElementById("genericReport").classList.add("hidden");
shadow.getElementById("compileReport").classList.add("hidden");
shadow.getElementById("runTestReport").classList.remove("hidden");
shadow.getElementById("runTestTableBody").innerHTML = table_html;
}
const fuzz_entry_template = document.getElementById("fuzzEntryTemplate").content;
const domFuzz = document.getElementById("fuzz");


@ -94,6 +94,7 @@ export fn message_end() void {
.time_report_generic_result => return time_report.genericResultMessage(msg_bytes) catch @panic("OOM"),
.time_report_compile_result => return time_report.compileResultMessage(msg_bytes) catch @panic("OOM"),
.time_report_run_test_result => return time_report.runTestResultMessage(msg_bytes) catch @panic("OOM"),
}
}


@ -27,6 +27,13 @@ const js = struct {
/// Whether the LLVM backend was used. If not, LLVM-specific statistics are hidden.
use_llvm: bool,
) void;
extern "time_report" fn updateRunTest(
/// The index of the step.
step_idx: u32,
/// The HTML which will populate the <tbody> of the test table.
table_html_ptr: [*]const u8,
table_html_len: usize,
) void;
};
pub fn genericResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
@ -237,3 +244,37 @@ pub fn compileResultMessage(msg_bytes: []u8) error{ OutOfMemory, WriteFailed }!v
hdr.flags.use_llvm,
);
}
pub fn runTestResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
if (msg_bytes.len < @sizeOf(abi.RunTestResult)) @panic("malformed RunTestResult message");
const hdr: *const abi.RunTestResult = @ptrCast(msg_bytes[0..@sizeOf(abi.RunTestResult)]);
if (hdr.step_idx >= step_list.*.len) @panic("malformed RunTestResult message");
const trailing = msg_bytes[@sizeOf(abi.RunTestResult)..];
const durations: []align(1) const u64 = @ptrCast(trailing[0 .. hdr.tests_len * 8]);
var offset: usize = hdr.tests_len * 8;
var table_html: std.ArrayListUnmanaged(u8) = .empty;
defer table_html.deinit(gpa);
for (durations) |test_ns| {
const test_name_len = std.mem.indexOfScalar(u8, trailing[offset..], 0) orelse @panic("malformed RunTestResult message");
const test_name = trailing[offset..][0..test_name_len];
offset += test_name_len + 1;
try table_html.print(gpa, "<tr><th scope=\"row\"><code>{f}</code></th>", .{fmtEscapeHtml(test_name)});
if (test_ns == std.math.maxInt(u64)) {
try table_html.appendSlice(gpa, "<td class=\"empty-cell\"></td>"); // didn't run
} else {
try table_html.print(gpa, "<td>{D}</td>", .{test_ns});
}
try table_html.appendSlice(gpa, "</tr>\n");
}
if (offset != trailing.len) @panic("malformed RunTestResult message");
js.updateRunTest(
hdr.step_idx,
table_html.items.ptr,
table_html.items.len,
);
}


@ -103,11 +103,13 @@ pub fn main() !void {
var install_prefix: ?[]const u8 = null;
var dir_list = std.Build.DirList{};
var error_style: ErrorStyle = .verbose;
var multiline_errors: MultilineErrors = .indent;
var summary: ?Summary = null;
var max_rss: u64 = 0;
var skip_oom_steps = false;
var test_timeout_ns: ?u64 = null;
var color: Color = .auto;
var prominent_compile_errors = false;
var help_menu = false;
var steps_menu = false;
var output_tmp_nonce: ?[16]u8 = null;
@ -116,6 +118,18 @@ pub fn main() !void {
var debounce_interval_ms: u16 = 50;
var webui_listen: ?std.net.Address = null;
if (try std.zig.EnvVar.ZIG_BUILD_ERROR_STYLE.get(arena)) |str| {
if (std.meta.stringToEnum(ErrorStyle, str)) |style| {
error_style = style;
}
}
if (try std.zig.EnvVar.ZIG_BUILD_MULTILINE_ERRORS.get(arena)) |str| {
if (std.meta.stringToEnum(MultilineErrors, str)) |style| {
multiline_errors = style;
}
}
while (nextArg(args, &arg_idx)) |arg| {
if (mem.startsWith(u8, arg, "-Z")) {
if (arg.len != 18) fatalWithHint("bad argument: '{s}'", .{arg});
@ -175,6 +189,41 @@ pub fn main() !void {
};
} else if (mem.eql(u8, arg, "--skip-oom-steps")) {
skip_oom_steps = true;
} else if (mem.eql(u8, arg, "--test-timeout")) {
const units: []const struct { []const u8, u64 } = &.{
.{ "ns", 1 },
.{ "nanosecond", 1 },
.{ "us", std.time.ns_per_us },
.{ "microsecond", std.time.ns_per_us },
.{ "ms", std.time.ns_per_ms },
.{ "millisecond", std.time.ns_per_ms },
.{ "s", std.time.ns_per_s },
.{ "second", std.time.ns_per_s },
.{ "m", std.time.ns_per_min },
.{ "minute", std.time.ns_per_min },
.{ "h", std.time.ns_per_hour },
.{ "hour", std.time.ns_per_hour },
};
const timeout_str = nextArgOrFatal(args, &arg_idx);
const num_end_idx = std.mem.findLastNone(u8, timeout_str, "abcdefghijklmnopqrstuvwxyz") orelse fatal(
"invalid timeout '{s}': expected unit (ns, us, ms, s, m, h)",
.{timeout_str},
);
const num_str = timeout_str[0 .. num_end_idx + 1];
const unit_str = timeout_str[num_end_idx + 1 ..];
const unit_factor: f64 = for (units) |unit_and_factor| {
if (std.mem.eql(u8, unit_str, unit_and_factor[0])) {
break @floatFromInt(unit_and_factor[1]);
}
} else fatal(
"invalid timeout '{s}': invalid unit '{s}' (expected ns, us, ms, s, m, h)",
.{ timeout_str, unit_str },
);
const num_parsed = std.fmt.parseFloat(f64, num_str) catch |err| fatal(
"invalid timeout '{s}': invalid number '{s}' ({t})",
.{ timeout_str, num_str, err },
);
test_timeout_ns = std.math.lossyCast(u64, unit_factor * num_parsed);
} else if (mem.eql(u8, arg, "--search-prefix")) {
const search_prefix = nextArgOrFatal(args, &arg_idx);
builder.addSearchPrefix(search_prefix);
@ -188,11 +237,23 @@ pub fn main() !void {
arg, next_arg,
});
};
} else if (mem.eql(u8, arg, "--error-style")) {
const next_arg = nextArg(args, &arg_idx) orelse
fatalWithHint("expected style after '{s}'", .{arg});
error_style = std.meta.stringToEnum(ErrorStyle, next_arg) orelse {
fatalWithHint("expected style after '{s}', found '{s}'", .{ arg, next_arg });
};
} else if (mem.eql(u8, arg, "--multiline-errors")) {
const next_arg = nextArg(args, &arg_idx) orelse
fatalWithHint("expected style after '{s}'", .{arg});
multiline_errors = std.meta.stringToEnum(MultilineErrors, next_arg) orelse {
fatalWithHint("expected style after '{s}', found '{s}'", .{ arg, next_arg });
};
} else if (mem.eql(u8, arg, "--summary")) {
const next_arg = nextArg(args, &arg_idx) orelse
fatalWithHint("expected [all|new|failures|none] after '{s}'", .{arg});
fatalWithHint("expected [all|new|failures|line|none] after '{s}'", .{arg});
summary = std.meta.stringToEnum(Summary, next_arg) orelse {
fatalWithHint("expected [all|new|failures|none] after '{s}', found '{s}'", .{
fatalWithHint("expected [all|new|failures|line|none] after '{s}', found '{s}'", .{
arg, next_arg,
});
};
@ -264,8 +325,6 @@ pub fn main() !void {
builder.verbose_cc = true;
} else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) {
builder.verbose_llvm_cpu_features = true;
} else if (mem.eql(u8, arg, "--prominent-compile-errors")) {
prominent_compile_errors = true;
} else if (mem.eql(u8, arg, "--watch")) {
watch = true;
} else if (mem.eql(u8, arg, "--time-report")) {
@ -448,14 +507,17 @@ pub fn main() !void {
.max_rss_is_default = false,
.max_rss_mutex = .{},
.skip_oom_steps = skip_oom_steps,
.unit_test_timeout_ns = test_timeout_ns,
.watch = watch,
.web_server = undefined, // set after `prepare`
.memory_blocked_steps = .empty,
.step_stack = .empty,
.prominent_compile_errors = prominent_compile_errors,
.claimed_rss = 0,
.summary = summary orelse if (watch) .new else .failures,
.error_style = error_style,
.multiline_errors = multiline_errors,
.summary = summary orelse if (watch or webui_listen != null) .line else .failures,
.ttyconf = ttyconf,
.stderr = stderr,
.thread_pool = undefined,
@ -471,8 +533,14 @@ pub fn main() !void {
}
prepare(arena, builder, targets.items, &run, graph.random_seed) catch |err| switch (err) {
error.UncleanExit => process.exit(1),
else => return err,
error.DependencyLoopDetected => {
// Perhaps in the future there could be an Advanced Options flag such as
// --debug-build-runner-leaks which would make this code return instead of
// calling exit.
std.debug.lockStdErr();
process.exit(1);
},
else => |e| return e,
};
var w: Watch = w: {
@ -502,22 +570,20 @@ pub fn main() !void {
ws.start() catch |err| fatal("failed to start web server: {s}", .{@errorName(err)});
}
rebuild: while (true) {
rebuild: while (true) : (if (run.error_style.clearOnUpdate()) {
const bw = std.debug.lockStderrWriter(&stdio_buffer_allocation);
defer std.debug.unlockStderrWriter();
try bw.writeAll("\x1B[2J\x1B[3J\x1B[H");
}) {
if (run.web_server) |*ws| ws.startBuild();
runStepNames(
try runStepNames(
builder,
targets.items,
main_progress_node,
&run,
fuzz,
) catch |err| switch (err) {
error.UncleanExit => {
assert(!run.watch and run.web_server == null);
process.exit(1);
},
else => return err,
};
);
if (run.web_server) |*web_server| {
if (fuzz) |mode| if (mode != .forever) fatal(
@ -528,10 +594,6 @@ pub fn main() !void {
web_server.finishBuild(.{ .fuzz = fuzz != null });
}
if (!watch and run.web_server == null) {
return cleanExit();
}
if (run.web_server) |*ws| {
assert(!watch); // fatal error after CLI parsing
while (true) switch (ws.wait()) {
@ -605,24 +667,21 @@ const Run = struct {
max_rss_is_default: bool,
max_rss_mutex: std.Thread.Mutex,
skip_oom_steps: bool,
unit_test_timeout_ns: ?u64,
watch: bool,
web_server: if (!builtin.single_threaded) ?WebServer else ?noreturn,
/// Allocated into `gpa`.
memory_blocked_steps: std.ArrayListUnmanaged(*Step),
/// Allocated into `gpa`.
step_stack: std.AutoArrayHashMapUnmanaged(*Step, void),
prominent_compile_errors: bool,
thread_pool: std.Thread.Pool,
claimed_rss: usize,
error_style: ErrorStyle,
multiline_errors: MultilineErrors,
summary: Summary,
ttyconf: tty.Config,
stderr: File,
fn cleanExit(run: Run) void {
if (run.watch or run.web_server != null) return;
return runner.cleanExit();
}
};
fn prepare(
@ -656,10 +715,7 @@ fn prepare(
rand.shuffle(*Step, starting_steps);
for (starting_steps) |s| {
constructGraphAndCheckForDependencyLoop(gpa, b, s, &run.step_stack, rand) catch |err| switch (err) {
error.DependencyLoopDetected => return uncleanExit(),
else => |e| return e,
};
try constructGraphAndCheckForDependencyLoop(gpa, b, s, &run.step_stack, rand);
}
{
@ -720,10 +776,12 @@ fn runStepNames(
assert(run.memory_blocked_steps.items.len == 0);
var test_pass_count: usize = 0;
var test_skip_count: usize = 0;
var test_fail_count: usize = 0;
var test_pass_count: usize = 0;
var test_leak_count: usize = 0;
var test_crash_count: usize = 0;
var test_timeout_count: usize = 0;
var test_count: usize = 0;
var success_count: usize = 0;
@ -733,10 +791,12 @@ fn runStepNames(
var total_compile_errors: usize = 0;
for (step_stack.keys()) |s| {
test_fail_count += s.test_results.fail_count;
test_skip_count += s.test_results.skip_count;
test_leak_count += s.test_results.leak_count;
test_pass_count += s.test_results.passCount();
test_skip_count += s.test_results.skip_count;
test_fail_count += s.test_results.fail_count;
test_crash_count += s.test_results.crash_count;
test_timeout_count += s.test_results.timeout_count;
test_count += s.test_results.test_count;
switch (s.state) {
@ -805,38 +865,74 @@ fn runStepNames(
f.waitAndPrintReport();
}
// A proper command line application defaults to silently succeeding.
// The user may request verbose mode if they have a different preference.
const failures_only = switch (run.summary) {
.failures, .none => true,
else => false,
};
// Every test has a state
assert(test_pass_count + test_skip_count + test_fail_count + test_crash_count + test_timeout_count == test_count);
if (failure_count == 0) {
std.Progress.setStatus(.success);
if (failures_only) return run.cleanExit();
} else {
std.Progress.setStatus(.failure);
}
if (run.summary != .none) {
summary: {
switch (run.summary) {
.all, .new, .line => {},
.failures => if (failure_count == 0) break :summary,
.none => break :summary,
}
const w = std.debug.lockStderrWriter(&stdio_buffer_allocation);
defer std.debug.unlockStderrWriter();
const total_count = success_count + failure_count + pending_count + skipped_count;
ttyconf.setColor(w, .cyan) catch {};
w.writeAll("\nBuild Summary:") catch {};
ttyconf.setColor(w, .bold) catch {};
w.writeAll("Build Summary: ") catch {};
ttyconf.setColor(w, .reset) catch {};
w.print(" {d}/{d} steps succeeded", .{ success_count, total_count }) catch {};
if (skipped_count > 0) w.print("; {d} skipped", .{skipped_count}) catch {};
if (failure_count > 0) w.print("; {d} failed", .{failure_count}) catch {};
w.print("{d}/{d} steps succeeded", .{ success_count, total_count }) catch {};
{
ttyconf.setColor(w, .dim) catch {};
var first = true;
if (skipped_count > 0) {
w.print("{s}{d} skipped", .{ if (first) " (" else ", ", skipped_count }) catch {};
first = false;
}
if (failure_count > 0) {
w.print("{s}{d} failed", .{ if (first) " (" else ", ", failure_count }) catch {};
first = false;
}
if (!first) w.writeByte(')') catch {};
ttyconf.setColor(w, .reset) catch {};
}
if (test_count > 0) w.print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {};
if (test_skip_count > 0) w.print("; {d} skipped", .{test_skip_count}) catch {};
if (test_fail_count > 0) w.print("; {d} failed", .{test_fail_count}) catch {};
if (test_leak_count > 0) w.print("; {d} leaked", .{test_leak_count}) catch {};
if (test_count > 0) {
w.print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {};
ttyconf.setColor(w, .dim) catch {};
var first = true;
if (test_skip_count > 0) {
w.print("{s}{d} skipped", .{ if (first) " (" else ", ", test_skip_count }) catch {};
first = false;
}
if (test_fail_count > 0) {
w.print("{s}{d} failed", .{ if (first) " (" else ", ", test_fail_count }) catch {};
first = false;
}
if (test_crash_count > 0) {
w.print("{s}{d} crashed", .{ if (first) " (" else ", ", test_crash_count }) catch {};
first = false;
}
if (test_timeout_count > 0) {
w.print("{s}{d} timed out", .{ if (first) " (" else ", ", test_timeout_count }) catch {};
first = false;
}
if (!first) w.writeByte(')') catch {};
ttyconf.setColor(w, .reset) catch {};
}
w.writeAll("\n") catch {};
if (run.summary == .line) break :summary;
// Print a fancy tree with build results.
var step_stack_copy = try step_stack.clone(gpa);
defer step_stack_copy.deinit(gpa);
@ -852,7 +948,7 @@ fn runStepNames(
i -= 1;
const step = b.top_level_steps.get(step_names[i]).?.step;
const found = switch (run.summary) {
.all, .none => unreachable,
.all, .line, .none => unreachable,
.failures => step.state != .success,
.new => !step.result_cached,
};
@ -869,28 +965,19 @@ fn runStepNames(
w.writeByte('\n') catch {};
}
if (failure_count == 0) {
return run.cleanExit();
}
if (run.watch or run.web_server != null) return;
// Finally, render compile errors at the bottom of the terminal.
if (run.prominent_compile_errors and total_compile_errors > 0) {
for (step_stack.keys()) |s| {
if (s.result_error_bundle.errorMessageCount() > 0) {
s.result_error_bundle.renderToStdErr(.{ .ttyconf = ttyconf });
}
}
// Perhaps in the future there could be an Advanced Options flag such as
// --debug-build-runner-leaks which would make this code return instead of
// calling exit.
if (!run.watch and run.web_server == null) {
// Signal to parent process that we have printed compile errors. The
// parent process may choose to omit the "following command failed"
// line in this case.
std.debug.lockStdErr();
process.exit(2);
}
}
if (!run.watch and run.web_server == null) return uncleanExit();
const code: u8 = code: {
if (failure_count == 0) break :code 0; // success
if (run.error_style.verboseContext()) break :code 1; // failure; print build command
break :code 2; // failure; do not print build command
};
std.debug.lockStdErr();
process.exit(code);
}
const PrintNode = struct {
@ -943,11 +1030,16 @@ fn printStepStatus(
try stderr.writeAll(" cached");
} else if (s.test_results.test_count > 0) {
const pass_count = s.test_results.passCount();
try stderr.print(" {d} passed", .{pass_count});
assert(s.test_results.test_count == pass_count + s.test_results.skip_count);
try stderr.print(" {d} pass", .{pass_count});
if (s.test_results.skip_count > 0) {
try ttyconf.setColor(stderr, .reset);
try stderr.writeAll(", ");
try ttyconf.setColor(stderr, .yellow);
try stderr.print(" {d} skipped", .{s.test_results.skip_count});
try stderr.print("{d} skip", .{s.test_results.skip_count});
}
try ttyconf.setColor(stderr, .reset);
try stderr.print(" ({d} total)", .{s.test_results.test_count});
} else {
try stderr.writeAll(" success");
}
@ -995,7 +1087,10 @@ fn printStepStatus(
try stderr.writeAll("\n");
try ttyconf.setColor(stderr, .reset);
},
.failure => try printStepFailure(s, stderr, ttyconf),
.failure => {
try printStepFailure(s, stderr, ttyconf, false);
try ttyconf.setColor(stderr, .reset);
},
}
}
@ -1003,51 +1098,87 @@ fn printStepFailure(
s: *Step,
stderr: *Writer,
ttyconf: tty.Config,
dim: bool,
) !void {
if (s.result_error_bundle.errorMessageCount() > 0) {
try ttyconf.setColor(stderr, .red);
try stderr.print(" {d} errors\n", .{
s.result_error_bundle.errorMessageCount(),
});
try ttyconf.setColor(stderr, .reset);
} else if (!s.test_results.isSuccess()) {
try stderr.print(" {d}/{d} passed", .{
s.test_results.passCount(), s.test_results.test_count,
});
if (s.test_results.fail_count > 0) {
try stderr.writeAll(", ");
try ttyconf.setColor(stderr, .red);
try stderr.print("{d} failed", .{
s.test_results.fail_count,
});
try ttyconf.setColor(stderr, .reset);
}
// These first values include all of the test "statuses". Every test is either passed,
// skipped, failed, crashed, or timed out.
try ttyconf.setColor(stderr, .green);
try stderr.print(" {d} pass", .{s.test_results.passCount()});
try ttyconf.setColor(stderr, .reset);
if (dim) try ttyconf.setColor(stderr, .dim);
if (s.test_results.skip_count > 0) {
try stderr.writeAll(", ");
try ttyconf.setColor(stderr, .yellow);
try stderr.print("{d} skipped", .{
s.test_results.skip_count,
});
try stderr.print("{d} skip", .{s.test_results.skip_count});
try ttyconf.setColor(stderr, .reset);
if (dim) try ttyconf.setColor(stderr, .dim);
}
if (s.test_results.leak_count > 0) {
if (s.test_results.fail_count > 0) {
try stderr.writeAll(", ");
try ttyconf.setColor(stderr, .red);
try stderr.print("{d} leaked", .{
s.test_results.leak_count,
});
try stderr.print("{d} fail", .{s.test_results.fail_count});
try ttyconf.setColor(stderr, .reset);
if (dim) try ttyconf.setColor(stderr, .dim);
}
if (s.test_results.crash_count > 0) {
try stderr.writeAll(", ");
try ttyconf.setColor(stderr, .red);
try stderr.print("{d} crash", .{s.test_results.crash_count});
try ttyconf.setColor(stderr, .reset);
if (dim) try ttyconf.setColor(stderr, .dim);
}
if (s.test_results.timeout_count > 0) {
try stderr.writeAll(", ");
try ttyconf.setColor(stderr, .red);
try stderr.print("{d} timeout", .{s.test_results.timeout_count});
try ttyconf.setColor(stderr, .reset);
if (dim) try ttyconf.setColor(stderr, .dim);
}
try stderr.print(" ({d} total)", .{s.test_results.test_count});
// Memory leaks are intentionally written after the total, because it isn't a test *status*,
// but just a flag that any tests -- even passed ones -- can have. We also use a different
// separator, so it looks like:
// 2 pass, 1 skip, 2 fail (5 total); 2 leaks
if (s.test_results.leak_count > 0) {
try stderr.writeAll("; ");
try ttyconf.setColor(stderr, .red);
try stderr.print("{d} leaks", .{s.test_results.leak_count});
try ttyconf.setColor(stderr, .reset);
if (dim) try ttyconf.setColor(stderr, .dim);
}
// It's usually not helpful to know how many error logs there were because they tend to
// just come with other errors (e.g. crashes and leaks print stack traces, and clean
// failures print error traces). So only mention them if they're the only thing causing
// the failure.
const show_err_logs: bool = show: {
var alt_results = s.test_results;
alt_results.log_err_count = 0;
break :show alt_results.isSuccess();
};
if (show_err_logs) {
try stderr.writeAll("; ");
try ttyconf.setColor(stderr, .red);
try stderr.print("{d} error logs", .{s.test_results.log_err_count});
try ttyconf.setColor(stderr, .reset);
if (dim) try ttyconf.setColor(stderr, .dim);
}
try stderr.writeAll("\n");
} else if (s.result_error_msgs.items.len > 0) {
try ttyconf.setColor(stderr, .red);
try stderr.writeAll(" failure\n");
try ttyconf.setColor(stderr, .reset);
} else {
assert(s.result_stderr.len > 0);
try ttyconf.setColor(stderr, .red);
try stderr.writeAll(" stderr\n");
try ttyconf.setColor(stderr, .reset);
}
}
@ -1063,7 +1194,7 @@ fn printTreeStep(
const first = step_stack.swapRemove(s);
const summary = run.summary;
const skip = switch (summary) {
.none => unreachable,
.none, .line => unreachable,
.all => false,
.new => s.result_cached,
.failures => s.state == .success,
@ -1096,7 +1227,7 @@ fn printTreeStep(
const step = s.dependencies.items[i];
const found = switch (summary) {
.all, .none => unreachable,
.all, .line, .none => unreachable,
.failures => step.state != .success,
.new => !step.result_cached,
};
@ -1250,19 +1381,18 @@ fn workerMakeOneStep(
.thread_pool = thread_pool,
.watch = run.watch,
.web_server = if (run.web_server) |*ws| ws else null,
.unit_test_timeout_ns = run.unit_test_timeout_ns,
.gpa = run.gpa,
});
// No matter the result, we want to display error/warning messages.
const show_compile_errors = !run.prominent_compile_errors and
s.result_error_bundle.errorMessageCount() > 0;
const show_compile_errors = s.result_error_bundle.errorMessageCount() > 0;
const show_error_msgs = s.result_error_msgs.items.len > 0;
const show_stderr = s.result_stderr.len > 0;
if (show_error_msgs or show_compile_errors or show_stderr) {
const bw = std.debug.lockStderrWriter(&stdio_buffer_allocation);
defer std.debug.unlockStderrWriter();
printErrorMessages(run.gpa, s, .{ .ttyconf = run.ttyconf }, bw, run.prominent_compile_errors) catch {};
printErrorMessages(run.gpa, s, .{ .ttyconf = run.ttyconf }, bw, run.error_style, run.multiline_errors) catch {};
}
handle_result: {
@ -1326,37 +1456,46 @@ pub fn printErrorMessages(
failing_step: *Step,
options: std.zig.ErrorBundle.RenderOptions,
stderr: *Writer,
prominent_compile_errors: bool,
error_style: ErrorStyle,
multiline_errors: MultilineErrors,
) !void {
// Provide context for where these error messages are coming from by
// printing the corresponding Step subtree.
var step_stack: std.ArrayListUnmanaged(*Step) = .empty;
defer step_stack.deinit(gpa);
try step_stack.append(gpa, failing_step);
while (step_stack.items[step_stack.items.len - 1].dependants.items.len != 0) {
try step_stack.append(gpa, step_stack.items[step_stack.items.len - 1].dependants.items[0]);
}
// Now, `step_stack` has the subtree that we want to print, in reverse order.
const ttyconf = options.ttyconf;
try ttyconf.setColor(stderr, .dim);
var indent: usize = 0;
while (step_stack.pop()) |s| : (indent += 1) {
if (indent > 0) {
try stderr.splatByteAll(' ', (indent - 1) * 3);
try printChildNodePrefix(stderr, ttyconf);
if (error_style.verboseContext()) {
// Provide context for where these error messages are coming from by
// printing the corresponding Step subtree.
var step_stack: std.ArrayListUnmanaged(*Step) = .empty;
defer step_stack.deinit(gpa);
try step_stack.append(gpa, failing_step);
while (step_stack.items[step_stack.items.len - 1].dependants.items.len != 0) {
try step_stack.append(gpa, step_stack.items[step_stack.items.len - 1].dependants.items[0]);
}
try stderr.writeAll(s.name);
// Now, `step_stack` has the subtree that we want to print, in reverse order.
try ttyconf.setColor(stderr, .dim);
var indent: usize = 0;
while (step_stack.pop()) |s| : (indent += 1) {
if (indent > 0) {
try stderr.splatByteAll(' ', (indent - 1) * 3);
try printChildNodePrefix(stderr, ttyconf);
}
if (s == failing_step) {
try printStepFailure(s, stderr, ttyconf);
} else {
try stderr.writeAll("\n");
try stderr.writeAll(s.name);
if (s == failing_step) {
try printStepFailure(s, stderr, ttyconf, true);
} else {
try stderr.writeAll("\n");
}
}
try ttyconf.setColor(stderr, .reset);
} else {
// Just print the failing step itself.
try ttyconf.setColor(stderr, .dim);
try stderr.writeAll(failing_step.name);
try printStepFailure(failing_step, stderr, ttyconf, true);
try ttyconf.setColor(stderr, .reset);
}
try ttyconf.setColor(stderr, .reset);
if (failing_step.result_stderr.len > 0) {
try stderr.writeAll(failing_step.result_stderr);
@ -1365,17 +1504,38 @@ pub fn printErrorMessages(
}
}
if (!prominent_compile_errors and failing_step.result_error_bundle.errorMessageCount() > 0) {
try failing_step.result_error_bundle.renderToWriter(options, stderr);
}
try failing_step.result_error_bundle.renderToWriter(options, stderr);
for (failing_step.result_error_msgs.items) |msg| {
try ttyconf.setColor(stderr, .red);
try stderr.writeAll("error: ");
try stderr.writeAll("error:");
try ttyconf.setColor(stderr, .reset);
try stderr.writeAll(msg);
try stderr.writeAll("\n");
if (std.mem.indexOfScalar(u8, msg, '\n') == null) {
try stderr.print(" {s}\n", .{msg});
} else switch (multiline_errors) {
.indent => {
var it = std.mem.splitScalar(u8, msg, '\n');
try stderr.print(" {s}\n", .{it.first()});
while (it.next()) |line| {
try stderr.print(" {s}\n", .{line});
}
},
.newline => try stderr.print("\n{s}\n", .{msg}),
.none => try stderr.print(" {s}\n", .{msg}),
}
}
if (error_style.verboseContext()) {
if (failing_step.result_failed_command) |cmd_str| {
try ttyconf.setColor(stderr, .red);
try stderr.writeAll("failed command: ");
try ttyconf.setColor(stderr, .reset);
try stderr.writeAll(cmd_str);
try stderr.writeByte('\n');
}
}
try stderr.writeByte('\n');
}
fn printSteps(builder: *std.Build, w: *Writer) !void {
@ -1430,15 +1590,26 @@ fn printUsage(b: *std.Build, w: *Writer) !void {
\\ -l, --list-steps Print available steps
\\ --verbose Print commands before executing them
\\ --color [auto|off|on] Enable or disable colored error messages
\\ --prominent-compile-errors Buffer compile errors and display at end
\\ --error-style [style] Control how build errors are printed
\\ verbose (Default) Report errors with full context
\\ minimal Report errors after summary, excluding context like command lines
\\ verbose_clear Like 'verbose', but clear the terminal at the start of each update
\\ minimal_clear Like 'minimal', but clear the terminal at the start of each update
\\ --multiline-errors [style] Control how multi-line error messages are printed
\\ indent (Default) Indent non-initial lines to align with initial line
\\ newline Include a leading newline so that the error message is on its own lines
\\ none Print as usual so the first line is misaligned
\\ --summary [mode] Control the printing of the build summary
\\ all Print the build summary in its entirety
\\ new Omit cached steps
\\ failures (Default) Only print failed steps
\\ failures (Default if short-lived) Only print failed steps
\\ line (Default if long-lived) Only print the single-line summary
\\ none Do not print the build summary
\\ -j<N> Limit concurrent jobs (default is to use all CPU cores)
\\ --maxrss <bytes> Limit memory usage (default is to use available memory)
\\ --skip-oom-steps Instead of failing, skip steps that would exceed --maxrss
\\ --test-timeout <timeout> Limit execution time of unit tests, terminating if exceeded.
\\ The timeout must include a unit: ns, us, ms, s, m, h
\\ --fetch[=mode] Fetch dependency tree (optionally choose laziness) and exit
\\ needed (Default) Lazy dependencies are fetched as needed
\\ all Lazy dependencies are always fetched
@ -1557,24 +1728,27 @@ fn argsRest(args: []const [:0]const u8, idx: usize) ?[]const [:0]const u8 {
return args[idx..];
}
/// Perhaps in the future there could be an Advanced Options flag such as
/// --debug-build-runner-leaks which would make this function return instead of
/// calling exit.
fn cleanExit() void {
std.debug.lockStdErr();
process.exit(0);
}
/// Perhaps in the future there could be an Advanced Options flag such as
/// --debug-build-runner-leaks which would make this function return instead of
/// calling exit.
fn uncleanExit() error{UncleanExit} {
std.debug.lockStdErr();
process.exit(1);
}
const Color = std.zig.Color;
const Summary = enum { all, new, failures, none };
const ErrorStyle = enum {
verbose,
minimal,
verbose_clear,
minimal_clear,
fn verboseContext(s: ErrorStyle) bool {
return switch (s) {
.verbose, .verbose_clear => true,
.minimal, .minimal_clear => false,
};
}
fn clearOnUpdate(s: ErrorStyle) bool {
return switch (s) {
.verbose, .minimal => false,
.verbose_clear, .minimal_clear => true,
};
}
};
const MultilineErrors = enum { indent, newline, none };
const Summary = enum { all, new, failures, line, none };
fn get_tty_conf(color: Color, stderr: File) tty.Config {
return switch (color) {


@ -17,7 +17,9 @@ var fba_buffer: [8192]u8 = undefined;
var stdin_buffer: [4096]u8 = undefined;
var stdout_buffer: [4096]u8 = undefined;
const crippled = switch (builtin.zig_backend) {
/// Keep in sync with logic in `std.Build.addRunArtifact` which decides whether
/// the test runner will communicate with the build runner via `std.zig.Server`.
const need_simple = switch (builtin.zig_backend) {
.stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
@ -33,7 +35,7 @@ pub fn main() void {
return;
}
if (crippled) {
if (need_simple) {
return mainSimple() catch @panic("test failure\n");
}
@ -132,30 +134,39 @@ fn mainServer() !void {
log_err_count = 0;
const index = try server.receiveBody_u32();
const test_fn = builtin.test_functions[index];
var fail = false;
var skip = false;
is_fuzz_test = false;
test_fn.func() catch |err| switch (err) {
error.SkipZigTest => skip = true,
else => {
fail = true;
// let the build server know we're starting the test now
try server.serveStringMessage(.test_started, &.{});
const TestResults = std.zig.Server.Message.TestResults;
const status: TestResults.Status = if (test_fn.func()) |v| s: {
v;
break :s .pass;
} else |err| switch (err) {
error.SkipZigTest => .skip,
else => s: {
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace);
}
break :s .fail;
},
};
const leak = testing.allocator_instance.deinit() == .leak;
const leak_count = testing.allocator_instance.detectLeaks();
testing.allocator_instance.deinitWithoutLeakChecks();
try server.serveTestResults(.{
.index = index,
.flags = .{
.fail = fail,
.skip = skip,
.leak = leak,
.status = status,
.fuzz = is_fuzz_test,
.log_err_count = std.math.lossyCast(
@FieldType(std.zig.Server.Message.TestResults.Flags, "log_err_count"),
@FieldType(TestResults.Flags, "log_err_count"),
log_err_count,
),
.leak_count = std.math.lossyCast(
@FieldType(TestResults.Flags, "leak_count"),
leak_count,
),
},
});
},
@ -371,7 +382,7 @@ pub fn fuzz(
// Some compiler backends are not capable of handling fuzz testing yet but
// we still want CI test coverage enabled.
if (crippled) return;
if (need_simple) return;
// Smoke test to ensure the test did not use conditional compilation to
// contradict itself by making it not actually be a fuzz test when the test


@ -194,37 +194,36 @@ inline fn copyRange4(
copyFixedLength(dest + last, src + last, copy_len);
}
test "memcpy" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
fn testMemcpyImpl(comptime memcpyImpl: anytype) !void {
const max_len = 1024;
var buffer: [max_len + @alignOf(Element) - 1]u8 align(@alignOf(Element)) = undefined;
for (&buffer, 0..) |*b, i| {
b.* = @intCast(i % 97);
}
var dest: [max_len + @alignOf(Element) - 1]u8 align(@alignOf(Element)) = undefined;
const S = struct {
fn testFunc(comptime copy_func: anytype) !void {
const max_len = 1024;
var buffer: [max_len + @alignOf(Element) - 1]u8 align(@alignOf(Element)) = undefined;
for (&buffer, 0..) |*b, i| {
b.* = @intCast(i % 97);
}
var dest: [max_len + @alignOf(Element) - 1]u8 align(@alignOf(Element)) = undefined;
for (0..max_len) |copy_len| {
for (0..@alignOf(Element)) |s_offset| {
for (0..@alignOf(Element)) |d_offset| {
@memset(&dest, 0xff);
const s = buffer[s_offset..][0..copy_len];
const d = dest[d_offset..][0..copy_len];
_ = copy_func(@ptrCast(d.ptr), @ptrCast(s.ptr), s.len);
std.testing.expectEqualSlices(u8, s, d) catch |e| {
std.debug.print("error encountered for length={d}, s_offset={d}, d_offset={d}\n", .{
copy_len, s_offset, d_offset,
});
return e;
};
}
}
for (0..max_len) |copy_len| {
for (0..@alignOf(Element)) |s_offset| {
for (0..@alignOf(Element)) |d_offset| {
@memset(&dest, 0xff);
const s = buffer[s_offset..][0..copy_len];
const d = dest[d_offset..][0..copy_len];
_ = memcpyImpl(@ptrCast(d.ptr), @ptrCast(s.ptr), s.len);
std.testing.expectEqualSlices(u8, s, d) catch |e| {
std.debug.print("error encountered for length={d}, s_offset={d}, d_offset={d}\n", .{
copy_len, s_offset, d_offset,
});
return e;
};
}
}
};
try S.testFunc(memcpySmall);
try S.testFunc(memcpyFast);
}
}
test memcpySmall {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try testMemcpyImpl(memcpySmall);
}
test memcpyFast {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try testMemcpyImpl(memcpyFast);
}


@ -956,8 +956,37 @@ pub fn addRunArtifact(b: *Build, exe: *Step.Compile) *Step.Run {
run_step.addArtifactArg(exe);
}
const test_server_mode = if (exe.test_runner) |r| r.mode == .server else true;
if (test_server_mode) run_step.enableTestRunnerMode();
const test_server_mode: bool = s: {
if (exe.test_runner) |r| break :s r.mode == .server;
if (exe.use_llvm == false) {
// The default test runner does not use the server protocol if the selected backend
// is too immature to support it. Keep this logic in sync with `need_simple` in the
// default test runner implementation.
switch (exe.rootModuleTarget().cpu.arch) {
// stage2_aarch64
.aarch64,
.aarch64_be,
// stage2_powerpc
.powerpc,
.powerpcle,
.powerpc64,
.powerpc64le,
// stage2_riscv64
.riscv64,
=> break :s false,
else => {},
}
}
break :s true;
};
if (test_server_mode) {
run_step.enableTestRunnerMode();
} else if (exe.test_runner == null) {
// If a test runner does not use the `std.zig.Server` protocol, it can instead
// communicate failure via its exit code.
run_step.expectExitCode(0);
}
} else {
run_step.addArtifactArg(exe);
}
@ -1594,20 +1623,6 @@ pub fn validateUserInputDidItFail(b: *Build) bool {
return b.invalid_user_input;
}
fn allocPrintCmd(gpa: Allocator, opt_cwd: ?[]const u8, argv: []const []const u8) error{OutOfMemory}![]u8 {
var buf: ArrayList(u8) = .empty;
if (opt_cwd) |cwd| try buf.print(gpa, "cd {s} && ", .{cwd});
for (argv) |arg| {
try buf.print(gpa, "{s} ", .{arg});
}
return buf.toOwnedSlice(gpa);
}
fn printCmd(ally: Allocator, cwd: ?[]const u8, argv: []const []const u8) void {
const text = allocPrintCmd(ally, cwd, argv) catch @panic("OOM");
std.debug.print("{s}\n", .{text});
}
/// This creates the install step and adds it to the dependencies of the
/// top-level install step, using all the default options.
/// See `addInstallArtifact` for a more flexible function.
@ -1857,14 +1872,14 @@ pub fn runAllowFail(
pub fn run(b: *Build, argv: []const []const u8) []u8 {
if (!process.can_spawn) {
std.debug.print("unable to spawn the following command: cannot spawn child process\n{s}\n", .{
try allocPrintCmd(b.allocator, null, argv),
try Step.allocPrintCmd(b.allocator, null, argv),
});
process.exit(1);
}
var code: u8 = undefined;
return b.runAllowFail(argv, &code, .Inherit) catch |err| {
const printed_cmd = allocPrintCmd(b.allocator, null, argv) catch @panic("OOM");
const printed_cmd = Step.allocPrintCmd(b.allocator, null, argv) catch @panic("OOM");
std.debug.print("unable to spawn the following command: {s}\n{s}\n", .{
@errorName(err), printed_cmd,
});


@ -178,7 +178,7 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, ttyconf: std.Io
var buf: [256]u8 = undefined;
const w = std.debug.lockStderrWriter(&buf);
defer std.debug.unlockStderrWriter();
build_runner.printErrorMessages(gpa, &compile.step, .{ .ttyconf = ttyconf }, w, false) catch {};
build_runner.printErrorMessages(gpa, &compile.step, .{ .ttyconf = ttyconf }, w, .verbose, .indent) catch {};
}
const rebuilt_bin_path = result catch |err| switch (err) {
@ -204,7 +204,7 @@ fn fuzzWorkerRun(
var buf: [256]u8 = undefined;
const w = std.debug.lockStderrWriter(&buf);
defer std.debug.unlockStderrWriter();
build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = fuzz.ttyconf }, w, false) catch {};
build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = fuzz.ttyconf }, w, .verbose, .indent) catch {};
return;
},
else => {


@ -56,6 +56,9 @@ result_cached: bool,
result_duration_ns: ?u64,
/// 0 means unavailable or not reported.
result_peak_rss: usize,
/// If the step failed and this field is populated, this is the command which failed.
/// This field may be populated even if the step succeeded.
result_failed_command: ?[]const u8,
test_results: TestResults,
/// The return address associated with creation of this step that can be useful
@ -63,18 +66,43 @@ test_results: TestResults,
debug_stack_trace: std.builtin.StackTrace,
pub const TestResults = struct {
fail_count: u32 = 0,
skip_count: u32 = 0,
leak_count: u32 = 0,
log_err_count: u32 = 0,
/// The total number of tests in the step. Every test has a "status" from the following:
/// * passed
/// * skipped
/// * failed cleanly
/// * crashed
/// * timed out
test_count: u32 = 0,
/// The number of tests which were skipped (`error.SkipZigTest`).
skip_count: u32 = 0,
/// The number of tests which failed cleanly.
fail_count: u32 = 0,
/// The number of tests which terminated unexpectedly, i.e. crashed.
crash_count: u32 = 0,
/// The number of tests which timed out.
timeout_count: u32 = 0,
/// The number of detected memory leaks. The associated test may still have passed; indeed, *all*
/// individual tests may have passed. However, the step as a whole fails if any test has leaks.
leak_count: u32 = 0,
/// The number of detected error logs. The associated test may still have passed; indeed, *all*
/// individual tests may have passed. However, the step as a whole fails if any test logs errors.
log_err_count: u32 = 0,
pub fn isSuccess(tr: TestResults) bool {
return tr.fail_count == 0 and tr.leak_count == 0 and tr.log_err_count == 0;
// every test either passed or was skipped
return tr.fail_count == 0 and
tr.crash_count == 0 and
tr.timeout_count == 0 and
// no (otherwise successful) test leaked memory or logged errors
tr.leak_count == 0 and
tr.log_err_count == 0;
}
/// Computes the number of tests which passed from the other values.
pub fn passCount(tr: TestResults) u32 {
return tr.test_count - tr.fail_count - tr.skip_count;
return tr.test_count - tr.skip_count - tr.fail_count - tr.crash_count - tr.timeout_count;
}
};
@ -88,6 +116,8 @@ pub const MakeOptions = struct {
// it currently breaks because `std.net.Address` doesn't work there. Work around for now.
.wasm32 => void,
},
/// If set, this is a timeout to enforce on all individual unit tests, in nanoseconds.
unit_test_timeout_ns: ?u64,
/// Not to be confused with `Build.allocator`, which is an alias of `Build.graph.arena`.
gpa: Allocator,
};
@ -230,6 +260,7 @@ pub fn init(options: StepOptions) Step {
.result_cached = false,
.result_duration_ns = null,
.result_peak_rss = 0,
.result_failed_command = null,
.test_results = .{},
};
}
@ -243,6 +274,7 @@ pub fn make(s: *Step, options: MakeOptions) error{ MakeFailed, MakeSkipped }!voi
var timer: ?std.time.Timer = t: {
if (!s.owner.graph.time_report) break :t null;
if (s.id == .compile) break :t null;
if (s.id == .run and s.cast(Run).?.stdio == .zig_test) break :t null;
break :t std.time.Timer.start() catch @panic("--time-report not supported on this host");
};
const make_result = s.makeFn(s, options);
@ -308,20 +340,20 @@ pub fn dump(step: *Step, w: *std.Io.Writer, tty_config: std.Io.tty.Config) void
}
}
pub fn evalChildProcess(s: *Step, argv: []const []const u8) ![]u8 {
const run_result = try captureChildProcess(s, std.Progress.Node.none, argv);
try handleChildProcessTerm(s, run_result.term, null, argv);
return run_result.stdout;
}
/// Populates `s.result_failed_command`.
pub fn captureChildProcess(
s: *Step,
gpa: Allocator,
progress_node: std.Progress.Node,
argv: []const []const u8,
) !std.process.Child.RunResult {
const arena = s.owner.allocator;
try handleChildProcUnsupported(s, null, argv);
// If an error occurs, it's happened in this command:
assert(s.result_failed_command == null);
s.result_failed_command = try allocPrintCmd(gpa, null, argv);
try handleChildProcUnsupported(s);
try handleVerbose(s.owner, null, argv);
const result = std.process.Child.run(.{
@ -358,6 +390,7 @@ pub const ZigProcess = struct {
/// Assumes that argv contains `--listen=-` and that the process being spawned
/// is the zig compiler - the same version that compiled the build runner.
/// Populates `s.result_failed_command`.
pub fn evalZigProcess(
s: *Step,
argv: []const []const u8,
@ -366,6 +399,10 @@ pub fn evalZigProcess(
web_server: ?*Build.WebServer,
gpa: Allocator,
) !?Path {
// If an error occurs, it's happened in this command:
assert(s.result_failed_command == null);
s.result_failed_command = try allocPrintCmd(gpa, null, argv);
if (s.getZigProcess()) |zp| update: {
assert(watch);
if (std.Progress.have_ipc) if (zp.progress_ipc_fd) |fd| prog_node.setIpcFd(fd);
@ -382,8 +419,9 @@ pub fn evalZigProcess(
else => |e| return e,
};
if (s.result_error_bundle.errorMessageCount() > 0)
if (s.result_error_bundle.errorMessageCount() > 0) {
return s.fail("{d} compilation errors", .{s.result_error_bundle.errorMessageCount()});
}
if (s.result_error_msgs.items.len > 0 and result == null) {
// Crash detected.
@ -392,7 +430,7 @@ pub fn evalZigProcess(
};
s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0;
s.clearZigProcess(gpa);
try handleChildProcessTerm(s, term, null, argv);
try handleChildProcessTerm(s, term);
return error.MakeFailed;
}
@ -402,7 +440,7 @@ pub fn evalZigProcess(
const b = s.owner;
const arena = b.allocator;
try handleChildProcUnsupported(s, null, argv);
try handleChildProcUnsupported(s);
try handleVerbose(s.owner, null, argv);
var child = std.process.Child.init(argv, arena);
@ -456,16 +494,11 @@ pub fn evalZigProcess(
else => {},
};
try handleChildProcessTerm(s, term, null, argv);
try handleChildProcessTerm(s, term);
}
// This is intentionally printed for failure on the first build but not for
// subsequent rebuilds.
if (s.result_error_bundle.errorMessageCount() > 0) {
return s.fail("the following command failed with {d} compilation errors:\n{s}", .{
s.result_error_bundle.errorMessageCount(),
try allocPrintCmd(arena, null, argv),
});
return s.fail("{d} compilation errors", .{s.result_error_bundle.errorMessageCount()});
}
return result;
@ -513,7 +546,6 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build.
const header = stdout.takeStruct(Header, .little) catch unreachable;
while (stdout.buffered().len < header.bytes_len) if (!try zp.poller.poll()) break :poll;
const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
@ -669,54 +701,38 @@ pub fn handleVerbose2(
}
}
pub inline fn handleChildProcUnsupported(
s: *Step,
opt_cwd: ?[]const u8,
argv: []const []const u8,
) error{ OutOfMemory, MakeFailed }!void {
/// Asserts that the caller has already populated `s.result_failed_command`.
pub inline fn handleChildProcUnsupported(s: *Step) error{ OutOfMemory, MakeFailed }!void {
if (!std.process.can_spawn) {
return s.fail(
"unable to execute the following command: host cannot spawn child processes\n{s}",
.{try allocPrintCmd(s.owner.allocator, opt_cwd, argv)},
);
return s.fail("unable to spawn process: host cannot spawn child processes", .{});
}
}
pub fn handleChildProcessTerm(
s: *Step,
term: std.process.Child.Term,
opt_cwd: ?[]const u8,
argv: []const []const u8,
) error{ MakeFailed, OutOfMemory }!void {
const arena = s.owner.allocator;
/// Asserts that the caller has already populated `s.result_failed_command`.
pub fn handleChildProcessTerm(s: *Step, term: std.process.Child.Term) error{ MakeFailed, OutOfMemory }!void {
assert(s.result_failed_command != null);
switch (term) {
.Exited => |code| {
if (code != 0) {
return s.fail(
"the following command exited with error code {d}:\n{s}",
.{ code, try allocPrintCmd(arena, opt_cwd, argv) },
);
return s.fail("process exited with error code {d}", .{code});
}
},
.Signal, .Stopped, .Unknown => {
return s.fail(
"the following command terminated unexpectedly:\n{s}",
.{try allocPrintCmd(arena, opt_cwd, argv)},
);
return s.fail("process terminated unexpectedly", .{});
},
}
}
pub fn allocPrintCmd(
arena: Allocator,
gpa: Allocator,
opt_cwd: ?[]const u8,
argv: []const []const u8,
) Allocator.Error![]u8 {
return allocPrintCmd2(arena, opt_cwd, null, argv);
return allocPrintCmd2(gpa, opt_cwd, null, argv);
}
pub fn allocPrintCmd2(
arena: Allocator,
gpa: Allocator,
opt_cwd: ?[]const u8,
opt_env: ?*const std.process.EnvMap,
argv: []const []const u8,
@ -756,11 +772,13 @@ pub fn allocPrintCmd2(
}
};
var aw: std.Io.Writer.Allocating = .init(arena);
var aw: std.Io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const writer = &aw.writer;
if (opt_cwd) |cwd| writer.print("cd {s} && ", .{cwd}) catch return error.OutOfMemory;
if (opt_env) |env| {
const process_env_map = std.process.getEnvMap(arena) catch std.process.EnvMap.init(arena);
var process_env_map = std.process.getEnvMap(gpa) catch std.process.EnvMap.init(gpa);
defer process_env_map.deinit();
var it = env.iterator();
while (it.next()) |entry| {
const key = entry.key_ptr.*;
@ -946,11 +964,14 @@ fn addWatchInputFromPath(step: *Step, path: Build.Cache.Path, basename: []const
pub fn reset(step: *Step, gpa: Allocator) void {
assert(step.state == .precheck_done);
if (step.result_failed_command) |cmd| gpa.free(cmd);
step.result_error_msgs.clearRetainingCapacity();
step.result_stderr = "";
step.result_cached = false;
step.result_duration_ns = null;
step.result_peak_rss = 0;
step.result_failed_command = null;
step.test_results = .{};
step.result_error_bundle.deinit(gpa);


@ -67,7 +67,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
argv.appendAssumeCapacity(b.pathFromRoot(p));
}
const run_result = try step.captureChildProcess(prog_node, argv.items);
const run_result = try step.captureChildProcess(options.gpa, prog_node, argv.items);
if (fmt.check) switch (run_result.term) {
.Exited => |code| if (code != 0 and run_result.stdout.len != 0) {
var it = std.mem.tokenizeScalar(u8, run_result.stdout, '\n');
@ -77,5 +77,5 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
},
else => {},
};
try step.handleChildProcessTerm(run_result.term, null, argv.items);
try step.handleChildProcessTerm(run_result.term);
}


@ -630,6 +630,8 @@ pub fn addCheck(run: *Run, new_check: StdIo.Check) void {
pub fn captureStdErr(run: *Run, options: CapturedStdIo.Options) std.Build.LazyPath {
assert(run.stdio != .inherit);
assert(run.stdio != .zig_test);
const b = run.step.owner;
if (run.captured_stderr) |captured| return .{ .generated = .{ .file = &captured.output.generated_file } };
@ -649,6 +651,8 @@ pub fn captureStdErr(run: *Run, options: CapturedStdIo.Options) std.Build.LazyPa
pub fn captureStdOut(run: *Run, options: CapturedStdIo.Options) std.Build.LazyPath {
assert(run.stdio != .inherit);
assert(run.stdio != .zig_test);
const b = run.step.owner;
if (run.captured_stdout) |captured| return .{ .generated = .{ .file = &captured.output.generated_file } };
@ -756,7 +760,6 @@ const IndexedOutput = struct {
output: *Output,
};
fn make(step: *Step, options: Step.MakeOptions) !void {
const prog_node = options.progress_node;
const b = step.owner;
const arena = b.allocator;
const run: *Run = @fieldParentPtr("step", step);
@ -964,7 +967,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
b.fmt("{s}{s}", .{ placeholder.output.prefix, arg_output_path });
}
try runCommand(run, argv_list.items, has_side_effects, output_dir_path, prog_node, null);
try runCommand(run, argv_list.items, has_side_effects, output_dir_path, options, null);
if (!has_side_effects) try step.writeManifestAndWatch(&man);
return;
};
@ -997,7 +1000,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
});
}
try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, prog_node, null);
try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, options, null);
const dep_file_dir = std.fs.cwd();
const dep_file_basename = dep_output_file.generated_file.getPath2(b, step);
@ -1115,7 +1118,14 @@ pub fn rerunInFuzzMode(
const has_side_effects = false;
const rand_int = std.crypto.random.int(u64);
const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, prog_node, .{
try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, .{
.progress_node = prog_node,
.thread_pool = undefined, // not used by `runCommand`
.watch = undefined, // not used by `runCommand`
.web_server = null, // only needed for time reports
.unit_test_timeout_ns = null, // don't time out fuzz tests for now
.gpa = undefined, // not used by `runCommand`
}, .{
.unit_test_index = unit_test_index,
.fuzz = fuzz,
});
@ -1196,16 +1206,17 @@ fn runCommand(
argv: []const []const u8,
has_side_effects: bool,
output_dir_path: []const u8,
prog_node: std.Progress.Node,
options: Step.MakeOptions,
fuzz_context: ?FuzzContext,
) !void {
const step = &run.step;
const b = step.owner;
const arena = b.allocator;
const gpa = options.gpa;
const cwd: ?[]const u8 = if (run.cwd) |lazy_cwd| lazy_cwd.getPath2(b, step) else null;
try step.handleChildProcUnsupported(cwd, argv);
try step.handleChildProcUnsupported();
try Step.handleVerbose2(step.owner, cwd, run.env_map, argv);
const allow_skip = switch (run.stdio) {
@ -1218,7 +1229,7 @@ fn runCommand(
var env_map = run.env_map orelse &b.graph.env_map;
const result = spawnChildAndCollect(run, argv, env_map, has_side_effects, prog_node, fuzz_context) catch |err| term: {
const opt_generic_result = spawnChildAndCollect(run, argv, env_map, has_side_effects, options, fuzz_context) catch |err| term: {
// InvalidExe: cpu arch mismatch
// FileNotFound: can happen with a wrong dynamic linker path
if (err == error.InvalidExe or err == error.FileNotFound) interpret: {
@ -1355,32 +1366,33 @@ fn runCommand(
run.addPathForDynLibs(exe);
}
gpa.free(step.result_failed_command.?);
step.result_failed_command = null;
try Step.handleVerbose2(step.owner, cwd, run.env_map, interp_argv.items);
break :term spawnChildAndCollect(run, interp_argv.items, env_map, has_side_effects, prog_node, fuzz_context) catch |e| {
break :term spawnChildAndCollect(run, interp_argv.items, env_map, has_side_effects, options, fuzz_context) catch |e| {
if (!run.failing_to_execute_foreign_is_an_error) return error.MakeSkipped;
if (e == error.MakeFailed) return error.MakeFailed; // error already reported
return step.fail("unable to spawn interpreter {s}: {s}", .{
interp_argv.items[0], @errorName(e),
});
};
}
if (err == error.MakeFailed) return error.MakeFailed; // error already reported
return step.fail("failed to spawn and capture stdio from {s}: {s}", .{ argv[0], @errorName(err) });
};
step.result_duration_ns = result.elapsed_ns;
step.result_peak_rss = result.peak_rss;
step.test_results = result.stdio.test_results;
if (result.stdio.test_metadata) |tm|
run.cached_test_metadata = tm.toCachedTestMetadata();
const final_argv = if (interp_argv.items.len == 0) argv else interp_argv.items;
if (fuzz_context != null) {
try step.handleChildProcessTerm(result.term, cwd, final_argv);
const generic_result = opt_generic_result orelse {
assert(run.stdio == .zig_test);
// Specific errors have already been reported, and test results are populated. All we need
// to do is report step failure if any test failed.
if (!step.test_results.isSuccess()) return error.MakeFailed;
return;
}
};
assert(fuzz_context == null);
assert(run.stdio != .zig_test);
// Capture stdout and stderr to GeneratedFile objects.
const Stream = struct {
@ -1390,11 +1402,11 @@ fn runCommand(
for ([_]Stream{
.{
.captured = run.captured_stdout,
.bytes = result.stdio.stdout,
.bytes = generic_result.stdout,
},
.{
.captured = run.captured_stderr,
.bytes = result.stdio.stderr,
.bytes = generic_result.stderr,
},
}) |stream| {
if (stream.captured) |captured| {
@ -1424,133 +1436,94 @@ fn runCommand(
}
switch (run.stdio) {
.zig_test => unreachable,
.check => |checks| for (checks.items) |check| switch (check) {
.expect_stderr_exact => |expected_bytes| {
if (!mem.eql(u8, expected_bytes, result.stdio.stderr.?)) {
if (!mem.eql(u8, expected_bytes, generic_result.stderr.?)) {
return step.fail(
\\
\\========= expected this stderr: =========
\\{s}
\\========= but found: ====================
\\{s}
\\========= from the following command: ===
\\{s}
, .{
expected_bytes,
result.stdio.stderr.?,
try Step.allocPrintCmd(arena, cwd, final_argv),
generic_result.stderr.?,
});
}
},
.expect_stderr_match => |match| {
if (mem.indexOf(u8, result.stdio.stderr.?, match) == null) {
if (mem.indexOf(u8, generic_result.stderr.?, match) == null) {
return step.fail(
\\
\\========= expected to find in stderr: =========
\\{s}
\\========= but stderr does not contain it: =====
\\{s}
\\========= from the following command: =========
\\{s}
, .{
match,
result.stdio.stderr.?,
try Step.allocPrintCmd(arena, cwd, final_argv),
generic_result.stderr.?,
});
}
},
.expect_stdout_exact => |expected_bytes| {
if (!mem.eql(u8, expected_bytes, result.stdio.stdout.?)) {
if (!mem.eql(u8, expected_bytes, generic_result.stdout.?)) {
return step.fail(
\\
\\========= expected this stdout: =========
\\{s}
\\========= but found: ====================
\\{s}
\\========= from the following command: ===
\\{s}
, .{
expected_bytes,
result.stdio.stdout.?,
try Step.allocPrintCmd(arena, cwd, final_argv),
generic_result.stdout.?,
});
}
},
.expect_stdout_match => |match| {
if (mem.indexOf(u8, result.stdio.stdout.?, match) == null) {
if (mem.indexOf(u8, generic_result.stdout.?, match) == null) {
return step.fail(
\\
\\========= expected to find in stdout: =========
\\{s}
\\========= but stdout does not contain it: =====
\\{s}
\\========= from the following command: =========
\\{s}
, .{
match,
result.stdio.stdout.?,
try Step.allocPrintCmd(arena, cwd, final_argv),
generic_result.stdout.?,
});
}
},
.expect_term => |expected_term| {
if (!termMatches(expected_term, result.term)) {
return step.fail("the following command {f} (expected {f}):\n{s}", .{
fmtTerm(result.term),
if (!termMatches(expected_term, generic_result.term)) {
return step.fail("process {f} (expected {f})", .{
fmtTerm(generic_result.term),
fmtTerm(expected_term),
try Step.allocPrintCmd(arena, cwd, final_argv),
});
}
},
},
.zig_test => {
const prefix: []const u8 = p: {
if (result.stdio.test_metadata) |tm| {
if (tm.next_index > 0 and tm.next_index <= tm.names.len) {
const name = tm.testName(tm.next_index - 1);
break :p b.fmt("while executing test '{s}', ", .{name});
}
}
break :p "";
};
const expected_term: std.process.Child.Term = .{ .Exited = 0 };
if (!termMatches(expected_term, result.term)) {
return step.fail("{s}the following command {f} (expected {f}):\n{s}", .{
prefix,
fmtTerm(result.term),
fmtTerm(expected_term),
try Step.allocPrintCmd(arena, cwd, final_argv),
});
}
if (!result.stdio.test_results.isSuccess()) {
return step.fail(
"{s}the following test command failed:\n{s}",
.{ prefix, try Step.allocPrintCmd(arena, cwd, final_argv) },
);
}
},
else => {
// On failure, print stderr if captured.
const bad_exit = switch (result.term) {
// On failure, report captured stderr like normal standard error output.
const bad_exit = switch (generic_result.term) {
.Exited => |code| code != 0,
.Signal, .Stopped, .Unknown => true,
};
if (bad_exit) {
if (generic_result.stderr) |bytes| {
run.step.result_stderr = bytes;
}
}
if (bad_exit) if (result.stdio.stderr) |err| {
try step.addError("stderr:\n{s}", .{err});
};
try step.handleChildProcessTerm(result.term, cwd, final_argv);
try step.handleChildProcessTerm(generic_result.term);
},
}
}
const ChildProcResult = struct {
const EvalZigTestResult = struct {
test_results: Step.TestResults,
test_metadata: ?TestMetadata,
};
const EvalGenericResult = struct {
term: std.process.Child.Term,
elapsed_ns: u64,
peak_rss: usize,
stdio: StdIoResult,
stdout: ?[]const u8,
stderr: ?[]const u8,
};
fn spawnChildAndCollect(
@ -1558,9 +1531,9 @@ fn spawnChildAndCollect(
argv: []const []const u8,
env_map: *EnvMap,
has_side_effects: bool,
prog_node: std.Progress.Node,
options: Step.MakeOptions,
fuzz_context: ?FuzzContext,
) !ChildProcResult {
) !?EvalGenericResult {
const b = run.step.owner;
const arena = b.allocator;
@ -1601,192 +1574,400 @@ fn spawnChildAndCollect(
child.stdin_behavior = .Pipe;
}
const inherit = child.stdout_behavior == .Inherit or child.stderr_behavior == .Inherit;
// If an error occurs, it's caused by this command:
assert(run.step.result_failed_command == null);
run.step.result_failed_command = try Step.allocPrintCmd(options.gpa, child.cwd, argv);
if (run.stdio != .zig_test and !run.disable_zig_progress and !inherit) {
child.progress_node = prog_node;
}
const term, const result, const elapsed_ns = t: {
if (run.stdio == .zig_test) {
var timer = try std.time.Timer.start();
const res = try evalZigTest(run, &child, options, fuzz_context);
run.step.result_duration_ns = timer.read();
run.step.test_results = res.test_results;
if (res.test_metadata) |tm| {
run.cached_test_metadata = tm.toCachedTestMetadata();
if (options.web_server) |ws| ws.updateTimeReportRunTest(
run,
&run.cached_test_metadata.?,
tm.ns_per_test,
);
}
return null;
} else {
const inherit = child.stdout_behavior == .Inherit or child.stderr_behavior == .Inherit;
if (!run.disable_zig_progress and !inherit) {
child.progress_node = options.progress_node;
}
if (inherit) std.debug.lockStdErr();
defer if (inherit) std.debug.unlockStdErr();
try child.spawn();
errdefer {
_ = child.kill() catch {};
}
// We need to report `error.InvalidExe` *now* if applicable.
try child.waitForSpawn();
var timer = try std.time.Timer.start();
const result = if (run.stdio == .zig_test)
try evalZigTest(run, &child, prog_node, fuzz_context)
else
try evalGeneric(run, &child);
break :t .{ try child.wait(), result, timer.read() };
};
return .{
.stdio = result,
.term = term,
.elapsed_ns = elapsed_ns,
.peak_rss = child.resource_usage_statistics.getMaxRss() orelse 0,
};
const res = try evalGeneric(run, &child);
run.step.result_duration_ns = timer.read();
return .{ .term = res.term, .stdout = res.stdout, .stderr = res.stderr };
}
}
const StdIoResult = struct {
stdout: ?[]const u8,
stderr: ?[]const u8,
test_results: Step.TestResults,
test_metadata: ?TestMetadata,
};
const StdioPollEnum = enum { stdout, stderr };
fn evalZigTest(
run: *Run,
child: *std.process.Child,
prog_node: std.Progress.Node,
options: Step.MakeOptions,
fuzz_context: ?FuzzContext,
) !StdIoResult {
) !EvalZigTestResult {
const gpa = run.step.owner.allocator;
const arena = run.step.owner.allocator;
var poller = std.Io.poll(gpa, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
defer poller.deinit();
// We will update this every time a child runs.
run.step.result_peak_rss = 0;
// If this is `true`, we avoid ever entering the polling loop below, because the stdin pipe has
// somehow already closed; instead, we go straight to capturing stderr in case it has anything
// useful.
const first_write_failed = if (fuzz_context) |fctx| failed: {
switch (fctx.fuzz.mode) {
.forever => {
const instance_id = 0; // will be used by multiprocess forever fuzzing
sendRunFuzzTestMessage(child.stdin.?, fctx.unit_test_index, .forever, instance_id) catch |err| {
try run.step.addError("unable to write stdin: {s}", .{@errorName(err)});
break :failed true;
};
break :failed false;
},
.limit => |limit| {
sendRunFuzzTestMessage(child.stdin.?, fctx.unit_test_index, .iterations, limit.amount) catch |err| {
try run.step.addError("unable to write stdin: {s}", .{@errorName(err)});
break :failed true;
};
break :failed false;
},
}
} else failed: {
run.fuzz_tests.clearRetainingCapacity();
sendMessage(child.stdin.?, .query_test_metadata) catch |err| {
try run.step.addError("unable to write stdin: {s}", .{@errorName(err)});
break :failed true;
};
break :failed false;
var result: EvalZigTestResult = .{
.test_results = .{
.test_count = 0,
.skip_count = 0,
.fail_count = 0,
.crash_count = 0,
.timeout_count = 0,
.leak_count = 0,
.log_err_count = 0,
},
.test_metadata = null,
};
var fail_count: u32 = 0;
var skip_count: u32 = 0;
var leak_count: u32 = 0;
var test_count: u32 = 0;
var log_err_count: u32 = 0;
while (true) {
try child.spawn();
var poller = std.Io.poll(gpa, StdioPollEnum, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
var child_killed = false;
defer if (!child_killed) {
_ = child.kill() catch {};
poller.deinit();
run.step.result_peak_rss = @max(
run.step.result_peak_rss,
child.resource_usage_statistics.getMaxRss() orelse 0,
);
};
var metadata: ?TestMetadata = null;
var coverage_id: ?u64 = null;
try child.waitForSpawn();
switch (try pollZigTest(
run,
child,
options,
fuzz_context,
&poller,
&result.test_metadata,
&result.test_results,
)) {
.write_failed => |err| {
// The runner unexpectedly closed a stdio pipe, which means a crash. Make sure we've captured
// all available stderr to make our error output as useful as possible.
while (try poller.poll()) {}
run.step.result_stderr = try arena.dupe(u8, poller.reader(.stderr).buffered());
// Clean up everything and wait for the child to exit.
child.stdin.?.close();
child.stdin = null;
poller.deinit();
child_killed = true;
const term = try child.wait();
run.step.result_peak_rss = @max(
run.step.result_peak_rss,
child.resource_usage_statistics.getMaxRss() orelse 0,
);
try run.step.addError("unable to write stdin ({t}); test process unexpectedly {f}", .{ err, fmtTerm(term) });
return result;
},
.no_poll => |no_poll| {
// This might be a success (we requested exit and the child dutifully closed stdout) or
// a crash of some kind. Either way, the child will terminate by itself -- wait for it.
const stderr_owned = try arena.dupe(u8, poller.reader(.stderr).buffered());
poller.reader(.stderr).tossBuffered();
// Clean up everything and wait for the child to exit.
child.stdin.?.close();
child.stdin = null;
poller.deinit();
child_killed = true;
const term = try child.wait();
run.step.result_peak_rss = @max(
run.step.result_peak_rss,
child.resource_usage_statistics.getMaxRss() orelse 0,
);
if (no_poll.active_test_index) |test_index| {
// A test was running, so this is definitely a crash. Report it against that
// test, and continue to the next test.
result.test_metadata.?.ns_per_test[test_index] = no_poll.ns_elapsed;
result.test_results.crash_count += 1;
try run.step.addError("'{s}' {f}{s}{s}", .{
result.test_metadata.?.testName(test_index),
fmtTerm(term),
if (stderr_owned.len != 0) " with stderr:\n" else "",
std.mem.trim(u8, stderr_owned, "\n"),
});
continue;
}
// Report an error if the child terminated uncleanly or if we were still trying to run more tests.
run.step.result_stderr = stderr_owned;
const tests_done = result.test_metadata != null and result.test_metadata.?.next_index == std.math.maxInt(u32);
if (!tests_done or !termMatches(.{ .Exited = 0 }, term)) {
try run.step.addError("test process unexpectedly {f}", .{fmtTerm(term)});
}
return result;
},
.timeout => |timeout| {
const stderr = poller.reader(.stderr).buffered();
poller.reader(.stderr).tossBuffered();
if (timeout.active_test_index) |test_index| {
// A test was running. Report the timeout against that test, and continue on to
// the next test.
result.test_metadata.?.ns_per_test[test_index] = timeout.ns_elapsed;
result.test_results.timeout_count += 1;
try run.step.addError("'{s}' timed out after {D}{s}{s}", .{
result.test_metadata.?.testName(test_index),
timeout.ns_elapsed,
if (stderr.len != 0) " with stderr:\n" else "",
std.mem.trim(u8, stderr, "\n"),
});
continue;
}
// Just log an error and let the child be killed.
run.step.result_stderr = try arena.dupe(u8, stderr);
return run.step.fail("test runner failed to respond for {D}", .{timeout.ns_elapsed});
},
}
comptime unreachable;
}
}
/// Polls stdout of a Zig test process until a termination condition is reached:
/// * A write fails, indicating the child unexpectedly closed stdin
/// * A test (or a response from the test runner) times out
/// * `poll` fails, indicating the child closed stdout and stderr
fn pollZigTest(
run: *Run,
child: *std.process.Child,
options: Step.MakeOptions,
fuzz_context: ?FuzzContext,
poller: *std.Io.Poller(StdioPollEnum),
opt_metadata: *?TestMetadata,
results: *Step.TestResults,
) !union(enum) {
write_failed: anyerror,
no_poll: struct {
active_test_index: ?u32,
ns_elapsed: u64,
},
timeout: struct {
active_test_index: ?u32,
ns_elapsed: u64,
},
} {
const gpa = run.step.owner.allocator;
const arena = run.step.owner.allocator;
var sub_prog_node: ?std.Progress.Node = null;
defer if (sub_prog_node) |n| n.end();
if (fuzz_context) |ctx| {
assert(opt_metadata.* == null); // fuzz processes are never restarted
switch (ctx.fuzz.mode) {
.forever => {
sendRunFuzzTestMessage(
child.stdin.?,
ctx.unit_test_index,
.forever,
0, // instance ID; will be used by multiprocess forever fuzzing in the future
) catch |err| return .{ .write_failed = err };
},
.limit => |limit| {
sendRunFuzzTestMessage(
child.stdin.?,
ctx.unit_test_index,
.iterations,
limit.amount,
) catch |err| return .{ .write_failed = err };
},
}
} else if (opt_metadata.*) |*md| {
// Previous unit test process died or was killed; we're continuing where it left off
requestNextTest(child.stdin.?, md, &sub_prog_node) catch |err| return .{ .write_failed = err };
} else {
// Running unit tests normally
run.fuzz_tests.clearRetainingCapacity();
sendMessage(child.stdin.?, .query_test_metadata) catch |err| return .{ .write_failed = err };
}
var active_test_index: ?u32 = null;
// `null` means this host does not support `std.time.Timer`. This timer is `reset()` whenever we
// change `active_test_index`, i.e. whenever a test starts or finishes.
var timer: ?std.time.Timer = std.time.Timer.start() catch null;
var coverage_id: ?u64 = null;
// This timeout is used when we're waiting on the test runner itself rather than a user-specified
// test. For instance, if the test runner leaves this much time between us requesting a test to
// start and it acknowledging the test starting, we terminate the child and raise an error. This
// *should* never happen, but could in theory be caused by some very unlucky IB in a test.
const response_timeout_ns = @max(options.unit_test_timeout_ns orelse 0, 60 * std.time.ns_per_s);
const stdout = poller.reader(.stdout);
const stderr = poller.reader(.stderr);
const any_write_failed = first_write_failed or poll: while (true) {
while (true) {
const Header = std.zig.Server.Message.Header;
while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll false;
// This block is exited when `stdout` contains enough bytes for a `Header`.
header_ready: {
if (stdout.buffered().len >= @sizeOf(Header)) {
// We already have one, no need to poll!
break :header_ready;
}
// Always `null` if `timer` is `null`.
const opt_timeout_ns: ?u64 = ns: {
if (timer == null) break :ns null;
if (active_test_index == null) break :ns response_timeout_ns;
break :ns options.unit_test_timeout_ns;
};
if (opt_timeout_ns) |timeout_ns| {
const remaining_ns = timeout_ns -| timer.?.read();
if (!try poller.pollTimeout(remaining_ns)) return .{ .no_poll = .{
.active_test_index = active_test_index,
.ns_elapsed = if (timer) |*t| t.read() else 0,
} };
} else {
if (!try poller.poll()) return .{ .no_poll = .{
.active_test_index = active_test_index,
.ns_elapsed = if (timer) |*t| t.read() else 0,
} };
}
if (stdout.buffered().len >= @sizeOf(Header)) {
// There wasn't a header before, but there is one after the `poll`.
break :header_ready;
}
if (opt_timeout_ns) |timeout_ns| {
const cur_ns = timer.?.read();
if (cur_ns >= timeout_ns) return .{ .timeout = .{
.active_test_index = active_test_index,
.ns_elapsed = cur_ns,
} };
}
continue;
}
// There is definitely a header available now -- read it.
const header = stdout.takeStruct(Header, .little) catch unreachable;
while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll false;
while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) return .{ .no_poll = .{
.active_test_index = active_test_index,
.ns_elapsed = if (timer) |*t| t.read() else 0,
} };
const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
return run.step.fail(
"zig version mismatch build runner vs compiler: '{s}' vs '{s}'",
.{ builtin.zig_version_string, body },
);
}
if (!std.mem.eql(u8, builtin.zig_version_string, body)) return run.step.fail(
"zig version mismatch build runner vs compiler: '{s}' vs '{s}'",
.{ builtin.zig_version_string, body },
);
},
.test_metadata => {
assert(fuzz_context == null);
const TmHdr = std.zig.Server.Message.TestMetadata;
const tm_hdr = @as(*align(1) const TmHdr, @ptrCast(body));
test_count = tm_hdr.tests_len;
const names_bytes = body[@sizeOf(TmHdr)..][0 .. test_count * @sizeOf(u32)];
const expected_panic_msgs_bytes = body[@sizeOf(TmHdr) + names_bytes.len ..][0 .. test_count * @sizeOf(u32)];
// `metadata` would only be populated if we'd already seen a `test_metadata`, but we
// only request it once (and importantly, we don't re-request it if we kill and
// restart the test runner).
assert(opt_metadata.* == null);
const TmHdr = std.zig.Server.Message.TestMetadata;
const tm_hdr: *align(1) const TmHdr = @ptrCast(body);
results.test_count = tm_hdr.tests_len;
const names_bytes = body[@sizeOf(TmHdr)..][0 .. results.test_count * @sizeOf(u32)];
const expected_panic_msgs_bytes = body[@sizeOf(TmHdr) + names_bytes.len ..][0 .. results.test_count * @sizeOf(u32)];
const string_bytes = body[@sizeOf(TmHdr) + names_bytes.len + expected_panic_msgs_bytes.len ..][0..tm_hdr.string_bytes_len];
const names = std.mem.bytesAsSlice(u32, names_bytes);
const expected_panic_msgs = std.mem.bytesAsSlice(u32, expected_panic_msgs_bytes);
const names_aligned = try arena.alloc(u32, names.len);
for (names_aligned, names) |*dest, src| dest.* = src;
const expected_panic_msgs_aligned = try arena.alloc(u32, expected_panic_msgs.len);
for (expected_panic_msgs_aligned, expected_panic_msgs) |*dest, src| dest.* = src;
prog_node.setEstimatedTotalItems(names.len);
metadata = .{
options.progress_node.setEstimatedTotalItems(names.len);
opt_metadata.* = .{
.string_bytes = try arena.dupe(u8, string_bytes),
.ns_per_test = try arena.alloc(u64, results.test_count),
.names = names_aligned,
.expected_panic_msgs = expected_panic_msgs_aligned,
.next_index = 0,
.prog_node = prog_node,
.prog_node = options.progress_node,
};
@memset(opt_metadata.*.?.ns_per_test, std.math.maxInt(u64));
requestNextTest(child.stdin.?, &metadata.?, &sub_prog_node) catch |err| {
try run.step.addError("unable to write stdin: {s}", .{@errorName(err)});
break :poll true;
};
active_test_index = null;
if (timer) |*t| t.reset();
requestNextTest(child.stdin.?, &opt_metadata.*.?, &sub_prog_node) catch |err| return .{ .write_failed = err };
},
.test_started => {
active_test_index = opt_metadata.*.?.next_index - 1;
if (timer) |*t| t.reset();
},
.test_results => {
assert(fuzz_context == null);
const md = metadata.?;
const md = &opt_metadata.*.?;
const TrHdr = std.zig.Server.Message.TestResults;
const tr_hdr = @as(*align(1) const TrHdr, @ptrCast(body));
fail_count +|= @intFromBool(tr_hdr.flags.fail);
skip_count +|= @intFromBool(tr_hdr.flags.skip);
leak_count +|= @intFromBool(tr_hdr.flags.leak);
log_err_count +|= tr_hdr.flags.log_err_count;
const tr_hdr: *align(1) const TrHdr = @ptrCast(body);
assert(tr_hdr.index == active_test_index);
switch (tr_hdr.flags.status) {
.pass => {},
.skip => results.skip_count +|= 1,
.fail => results.fail_count +|= 1,
}
const leak_count = tr_hdr.flags.leak_count;
const log_err_count = tr_hdr.flags.log_err_count;
results.leak_count +|= leak_count;
results.log_err_count +|= log_err_count;
if (tr_hdr.flags.fuzz) try run.fuzz_tests.append(gpa, tr_hdr.index);
if (tr_hdr.flags.fail or tr_hdr.flags.leak or tr_hdr.flags.log_err_count > 0) {
const name = std.mem.sliceTo(md.string_bytes[md.names[tr_hdr.index]..], 0);
const stderr_contents = stderr.buffered();
stderr.toss(stderr_contents.len);
const msg = std.mem.trim(u8, stderr_contents, "\n");
const label = if (tr_hdr.flags.fail)
"failed"
else if (tr_hdr.flags.leak)
"leaked"
else if (tr_hdr.flags.log_err_count > 0)
"logged errors"
else
unreachable;
if (msg.len > 0) {
try run.step.addError("'{s}' {s}: {s}", .{ name, label, msg });
if (tr_hdr.flags.status == .fail) {
const name = std.mem.sliceTo(md.testName(tr_hdr.index), 0);
const stderr_bytes = std.mem.trim(u8, stderr.buffered(), "\n");
stderr.tossBuffered();
if (stderr_bytes.len == 0) {
try run.step.addError("'{s}' failed without output", .{name});
} else {
try run.step.addError("'{s}' {s}", .{ name, label });
try run.step.addError("'{s}' failed:\n{s}", .{ name, stderr_bytes });
}
} else if (leak_count > 0) {
const name = std.mem.sliceTo(md.testName(tr_hdr.index), 0);
const stderr_bytes = std.mem.trim(u8, stderr.buffered(), "\n");
stderr.tossBuffered();
try run.step.addError("'{s}' leaked {d} allocations:\n{s}", .{ name, leak_count, stderr_bytes });
} else if (log_err_count > 0) {
const name = std.mem.sliceTo(md.testName(tr_hdr.index), 0);
const stderr_bytes = std.mem.trim(u8, stderr.buffered(), "\n");
stderr.tossBuffered();
try run.step.addError("'{s}' logged {d} errors:\n{s}", .{ name, log_err_count, stderr_bytes });
}
requestNextTest(child.stdin.?, &metadata.?, &sub_prog_node) catch |err| {
try run.step.addError("unable to write stdin: {s}", .{@errorName(err)});
break :poll true;
};
active_test_index = null;
if (timer) |*t| md.ns_per_test[tr_hdr.index] = t.lap();
requestNextTest(child.stdin.?, md, &sub_prog_node) catch |err| return .{ .write_failed = err };
},
.coverage_id => {
const fuzz = fuzz_context.?.fuzz;
@ -1823,39 +2004,12 @@ fn evalZigTest(
},
else => {}, // ignore other messages
}
};
if (any_write_failed) {
// The compiler unexpectedly closed stdin; something is very wrong and has probably crashed.
// We want to make sure we've captured all of stderr so that it's logged below.
while (try poller.poll()) {}
}
const stderr_contents = std.mem.trim(u8, stderr.buffered(), "\n");
if (stderr_contents.len > 0) {
run.step.result_stderr = try arena.dupe(u8, stderr_contents);
}
// Send EOF to stdin.
child.stdin.?.close();
child.stdin = null;
return .{
.stdout = null,
.stderr = null,
.test_results = .{
.test_count = test_count,
.fail_count = fail_count,
.skip_count = skip_count,
.leak_count = leak_count,
.log_err_count = log_err_count,
},
.test_metadata = metadata,
};
}
const TestMetadata = struct {
names: []const u32,
ns_per_test: []u64,
expected_panic_msgs: []const u32,
string_bytes: []const u8,
next_index: u32,
@ -1896,6 +2050,7 @@ fn requestNextTest(in: fs.File, metadata: *TestMetadata, sub_prog_node: *?std.Pr
try sendRunTestMessage(in, .run_test, i);
return;
} else {
metadata.next_index = std.math.maxInt(u32); // indicate that all tests are done
try sendMessage(in, .exit);
}
}
@ -1933,10 +2088,15 @@ fn sendRunFuzzTestMessage(
try file.writeAll(full_msg);
}
fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
fn evalGeneric(run: *Run, child: *std.process.Child) !EvalGenericResult {
const b = run.step.owner;
const arena = b.allocator;
try child.spawn();
errdefer _ = child.kill() catch {};
try child.waitForSpawn();
switch (run.stdin) {
.bytes => |bytes| {
child.stdin.?.writeAll(bytes) catch |err| {
@ -2026,11 +2186,12 @@ fn evalGeneric(run: *Run, child: *std.process.Child) !StdIoResult {
}
};
run.step.result_peak_rss = child.resource_usage_statistics.getMaxRss() orelse 0;
return .{
.term = try child.wait(),
.stdout = stdout_bytes,
.stderr = stderr_bytes,
.test_results = .{},
.test_metadata = null,
};
}
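The polling loop in `pollZigTest` above picks between two limits: it waits at least 60 seconds on the test runner itself, and applies the configured per-test limit once a test is active. A minimal standalone sketch of that selection, where the helper name `effectiveTimeoutNs` is hypothetical and `null` means no limit:
const std = @import("std");

// Sketch only: mirrors the timeout choice made before each poll in `pollZigTest`.
fn effectiveTimeoutNs(unit_test_timeout_ns: ?u64, active_test_index: ?u32) ?u64 {
    // Waiting on the test runner itself: allow at least a minute, even if the
    // configured per-test timeout is shorter or absent.
    const response_timeout_ns = @max(unit_test_timeout_ns orelse 0, 60 * std.time.ns_per_s);
    if (active_test_index == null) return response_timeout_ns;
    // Waiting on a user test: apply the configured limit, if any.
    return unit_test_timeout_ns;
}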

View File

@ -751,6 +751,64 @@ pub fn updateTimeReportGeneric(ws: *WebServer, step: *Build.Step, ns_total: u64)
ws.notifyUpdate();
}
pub fn updateTimeReportRunTest(
ws: *WebServer,
run: *Build.Step.Run,
tests: *const Build.Step.Run.CachedTestMetadata,
ns_per_test: []const u64,
) void {
const gpa = ws.gpa;
const step_idx: u32 = for (ws.all_steps, 0..) |s, i| {
if (s == &run.step) break @intCast(i);
} else unreachable;
assert(tests.names.len == ns_per_test.len);
const tests_len: u32 = @intCast(tests.names.len);
const new_len: u64 = len: {
var names_len: u64 = 0;
for (0..tests_len) |i| {
names_len += tests.testName(@intCast(i)).len + 1;
}
break :len @sizeOf(abi.time_report.RunTestResult) + names_len + 8 * tests_len;
};
const old_buf = old: {
ws.time_report_mutex.lock();
defer ws.time_report_mutex.unlock();
const old = ws.time_report_msgs[step_idx];
ws.time_report_msgs[step_idx] = &.{};
break :old old;
};
const buf = gpa.realloc(old_buf, new_len) catch @panic("out of memory");
const out_header: *align(1) abi.time_report.RunTestResult = @ptrCast(buf[0..@sizeOf(abi.time_report.RunTestResult)]);
out_header.* = .{
.step_idx = step_idx,
.tests_len = tests_len,
};
var offset: usize = @sizeOf(abi.time_report.RunTestResult);
const ns_per_test_out: []align(1) u64 = @ptrCast(buf[offset..][0 .. tests_len * 8]);
@memcpy(ns_per_test_out, ns_per_test);
offset += tests_len * 8;
for (0..tests_len) |i| {
const name = tests.testName(@intCast(i));
@memcpy(buf[offset..][0..name.len], name);
buf[offset..][name.len] = 0;
offset += name.len + 1;
}
assert(offset == buf.len);
{
ws.time_report_mutex.lock();
defer ws.time_report_mutex.unlock();
assert(ws.time_report_msgs[step_idx].len == 0);
ws.time_report_msgs[step_idx] = buf;
ws.time_report_update_times[step_idx] = ws.now();
}
ws.notifyUpdate();
}
const RunnerRequest = union(enum) {
rebuild,
};

View File

@ -56,6 +56,7 @@ pub const ToClientTag = enum(u8) {
// `--time-report`
time_report_generic_result,
time_report_compile_result,
time_report_run_test_result,
_,
};
@ -342,4 +343,19 @@ pub const time_report = struct {
};
};
};
/// WebSocket server->client.
///
/// Sent after a `Step.Run` for a Zig test executable finishes, providing per-test timings.
///
/// Trailing:
/// * for each `tests_len`:
/// * `test_ns: u64` (nanoseconds spent running this test)
/// * for each `tests_len`:
/// * `name` (null-terminated UTF-8 string)
pub const RunTestResult = extern struct {
tag: ToClientTag = .time_report_run_test_result,
step_idx: u32 align(1),
tests_len: u32 align(1),
};
};
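A minimal sketch of decoding the trailing data described above, assuming `RunTestResult` is in scope and the full message has already been read into `msg`; the function name is hypothetical and not part of this change:
const std = @import("std");

// Sketch only: walks the layout documented above, i.e. the header, then
// `tests_len` u64 nanosecond values, then `tests_len` null-terminated names.
fn decodeRunTestResult(msg: []const u8) void {
    const header: *align(1) const RunTestResult = @ptrCast(msg[0..@sizeOf(RunTestResult)]);
    var offset: usize = @sizeOf(RunTestResult);
    const ns_per_test: []align(1) const u64 = @ptrCast(msg[offset..][0 .. header.tests_len * 8]);
    offset += header.tests_len * 8;
    for (ns_per_test) |ns| {
        const name = std.mem.sliceTo(msg[offset..], 0);
        offset += name.len + 1;
        std.debug.print("{s}: {D}\n", .{ name, ns });
    }
}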

View File

@ -301,81 +301,86 @@ test "concurrent access" {
const num_writers: usize = 2;
const num_readers: usize = 4;
const num_writes: usize = 10000;
const num_reads: usize = num_writes * 2;
const num_writes: usize = 1000;
const num_reads: usize = 2000;
const Runner = struct {
const Self = @This();
const Runner = @This();
rwl: RwLock = .{},
writes: usize = 0,
reads: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),
rwl: RwLock,
writes: usize,
reads: std.atomic.Value(usize),
term1: usize = 0,
term2: usize = 0,
term_sum: usize = 0,
val_a: usize,
val_b: usize,
fn reader(self: *Self) !void {
while (true) {
self.rwl.lockShared();
defer self.rwl.unlockShared();
if (self.writes >= num_writes or self.reads.load(.unordered) >= num_reads)
break;
try self.check();
_ = self.reads.fetchAdd(1, .monotonic);
}
}
fn writer(self: *Self, thread_idx: usize) !void {
fn reader(run: *Runner, thread_idx: usize) !void {
var prng = std.Random.DefaultPrng.init(thread_idx);
var rnd = prng.random();
const rnd = prng.random();
while (true) {
self.rwl.lock();
defer self.rwl.unlock();
run.rwl.lockShared();
defer run.rwl.unlockShared();
if (self.writes >= num_writes)
break;
try testing.expect(run.writes <= num_writes);
if (run.reads.fetchAdd(1, .monotonic) >= num_reads) break;
try self.check();
// We use `volatile` accesses to make sure the memory is accessed on either
// side of a yield, maximising the chance of a race.
const a_ptr: *const volatile usize = &run.val_a;
const b_ptr: *const volatile usize = &run.val_b;
const term1 = rnd.int(usize);
self.term1 = term1;
try std.Thread.yield();
const term2 = rnd.int(usize);
self.term2 = term2;
try std.Thread.yield();
self.term_sum = term1 +% term2;
self.writes += 1;
const old_a = a_ptr.*;
if (rnd.boolean()) try std.Thread.yield();
const old_b = b_ptr.*;
try testing.expect(old_a == old_b);
}
}
fn check(self: *const Self) !void {
const term_sum = self.term_sum;
try std.Thread.yield();
fn writer(run: *Runner, thread_idx: usize) !void {
var prng = std.Random.DefaultPrng.init(thread_idx);
const rnd = prng.random();
while (true) {
run.rwl.lock();
defer run.rwl.unlock();
const term2 = self.term2;
try std.Thread.yield();
try testing.expect(run.writes <= num_writes);
if (run.writes == num_writes) break;
const term1 = self.term1;
try testing.expectEqual(term_sum, term1 +% term2);
// We use `volatile` accesses to make sure the memory is accessed on either
// side of a yield, maximising the chance of a race.
const a_ptr: *volatile usize = &run.val_a;
const b_ptr: *volatile usize = &run.val_b;
const new_val = rnd.int(usize);
const old_a = a_ptr.*;
a_ptr.* = new_val;
if (rnd.boolean()) try std.Thread.yield();
const old_b = b_ptr.*;
b_ptr.* = new_val;
try testing.expect(old_a == old_b);
run.writes += 1;
}
}
};
var runner = Runner{};
var threads: [num_writers + num_readers]std.Thread = undefined;
var run: Runner = .{
.rwl = .{},
.writes = 0,
.reads = .init(0),
.val_a = 0,
.val_b = 0,
};
var write_threads: [num_writers]std.Thread = undefined;
var read_threads: [num_readers]std.Thread = undefined;
for (threads[0..num_writers], 0..) |*t, i| t.* = try std.Thread.spawn(.{}, Runner.writer, .{ &runner, i });
for (threads[num_writers..]) |*t| t.* = try std.Thread.spawn(.{}, Runner.reader, .{&runner});
for (&write_threads, 0..) |*t, i| t.* = try .spawn(.{}, Runner.writer, .{ &run, i });
for (&read_threads, num_writers..) |*t, i| t.* = try .spawn(.{}, Runner.reader, .{ &run, i });
for (threads) |t| t.join();
for (write_threads) |t| t.join();
for (read_threads) |t| t.join();
try testing.expectEqual(num_writes, runner.writes);
//std.debug.print("reads={}\n", .{ runner.reads.load(.unordered)});
try testing.expect(run.writes == num_writes);
try testing.expect(run.reads.raw >= num_reads);
}

View File

@ -587,8 +587,7 @@ test "signature" {
}
test "batch verification" {
var i: usize = 0;
while (i < 100) : (i += 1) {
for (0..16) |_| {
const key_pair = Ed25519.KeyPair.generate();
var msg1: [32]u8 = undefined;
var msg2: [32]u8 = undefined;

File diff suppressed because one or more lines are too long

View File

@ -421,10 +421,10 @@ pub fn DebugAllocator(comptime config: Config) type {
return usedBitsCount(slot_count) * @sizeOf(usize);
}
fn detectLeaksInBucket(bucket: *BucketHeader, size_class_index: usize, used_bits_count: usize) bool {
fn detectLeaksInBucket(bucket: *BucketHeader, size_class_index: usize, used_bits_count: usize) usize {
const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
const slot_count = slot_counts[size_class_index];
var leaks = false;
var leaks: usize = 0;
for (0..used_bits_count) |used_bits_byte| {
const used_int = bucket.usedBits(used_bits_byte).*;
if (used_int != 0) {
@ -437,7 +437,7 @@ pub fn DebugAllocator(comptime config: Config) type {
const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
const addr = page_addr + slot_index * size_class;
log.err("memory address 0x{x} leaked: {f}", .{ addr, stack_trace });
leaks = true;
leaks += 1;
}
}
}
@ -445,16 +445,16 @@ pub fn DebugAllocator(comptime config: Config) type {
return leaks;
}
/// Emits log messages for leaks and then returns whether there were any leaks.
pub fn detectLeaks(self: *Self) bool {
var leaks = false;
/// Emits log messages for leaks and then returns the number of detected leaks (0 if no leaks were detected).
pub fn detectLeaks(self: *Self) usize {
var leaks: usize = 0;
for (self.buckets, 0..) |init_optional_bucket, size_class_index| {
var optional_bucket = init_optional_bucket;
const slot_count = slot_counts[size_class_index];
const used_bits_count = usedBitsCount(slot_count);
while (optional_bucket) |bucket| {
leaks = detectLeaksInBucket(bucket, size_class_index, used_bits_count) or leaks;
leaks += detectLeaksInBucket(bucket, size_class_index, used_bits_count);
optional_bucket = bucket.prev;
}
}
@ -466,7 +466,7 @@ pub fn DebugAllocator(comptime config: Config) type {
log.err("memory address 0x{x} leaked: {f}", .{
@intFromPtr(large_alloc.bytes.ptr), stack_trace,
});
leaks = true;
leaks += 1;
}
return leaks;
}
@ -498,11 +498,17 @@ pub fn DebugAllocator(comptime config: Config) type {
/// Returns `std.heap.Check.leak` if there were leaks; `std.heap.Check.ok` otherwise.
pub fn deinit(self: *Self) std.heap.Check {
const leaks = if (config.safety) self.detectLeaks() else false;
const leaks: usize = if (config.safety) self.detectLeaks() else 0;
self.deinitWithoutLeakChecks();
return if (leaks == 0) .ok else .leak;
}
/// Like `deinit`, but does not check for memory leaks. This is useful if leaks have already
/// been detected manually with `detectLeaks` to avoid reporting them for a second time.
pub fn deinitWithoutLeakChecks(self: *Self) void {
if (config.retain_metadata) self.freeRetainedMetadata();
self.large_allocations.deinit(self.backing_allocator);
self.* = undefined;
return if (leaks) .leak else .ok;
}
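A hypothetical usage sketch of the changed API (not part of this change), assuming the usual `DebugAllocator` setup with safety enabled: log the leak count once, then tear down with `deinitWithoutLeakChecks` so the same leaks are not reported again by `deinit`:
const std = @import("std");

pub fn main() void {
    var debug_allocator: std.heap.DebugAllocator(.{}) = .init;
    const gpa = debug_allocator.allocator();
    _ = gpa.alloc(u8, 16) catch @panic("oom"); // deliberately leaked for illustration
    // `detectLeaks` now returns the number of leaked allocations instead of a bool.
    const leaks = debug_allocator.detectLeaks();
    if (leaks != 0) std.log.err("detected {d} leaked allocations", .{leaks});
    // Avoid the second leak report that `deinit` would otherwise produce.
    debug_allocator.deinitWithoutLeakChecks();
}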
fn collectStackTrace(first_trace_addr: usize, addr_buf: *[stack_n]usize) void {

View File

@ -562,15 +562,11 @@ test "sync" {
if (native_os != .linux)
return error.SkipZigTest;
var tmp = tmpDir(.{});
defer tmp.cleanup();
// Unfortunately, we cannot safely call `sync` or `syncfs`: if file IO is happening
// elsewhere on the system, these calls could block indefinitely while that data is committed to disk.
const test_out_file = "os_tmp_test";
const file = try tmp.dir.createFile(test_out_file, .{});
defer file.close();
posix.sync();
try posix.syncfs(file.handle);
_ = &posix.sync;
_ = &posix.syncfs;
}
test "fsync" {

View File

@ -697,6 +697,8 @@ pub const EnvVar = enum {
ZIG_LIB_DIR,
ZIG_LIBC,
ZIG_BUILD_RUNNER,
ZIG_BUILD_ERROR_STYLE,
ZIG_BUILD_MULTILINE_ERRORS,
ZIG_VERBOSE_LINK,
ZIG_VERBOSE_CC,
ZIG_BTRFS_WORKAROUND,

View File

@ -34,6 +34,12 @@ pub const Message = struct {
test_metadata,
/// Body is a TestResults
test_results,
/// Does not have a body.
/// Notifies the build runner that the next test (requested by `Client.Message.Tag.run_test`)
/// is starting execution. This message helps to ensure that the timestamp used by the build
/// runner to enforce unit test time limits is relatively accurate under extreme system load
/// (where there may be a non-trivial delay before the test process is scheduled).
test_started,
/// Body is a series of strings, delimited by null bytes.
/// Each string is a prefixed file path.
/// The first byte indicates the file prefix path (see prefixes fields
@ -90,15 +96,16 @@ pub const Message = struct {
pub const TestResults = extern struct {
index: u32,
flags: Flags,
flags: Flags align(4),
pub const Flags = packed struct(u32) {
fail: bool,
skip: bool,
leak: bool,
pub const Flags = packed struct(u64) {
status: Status,
fuzz: bool,
log_err_count: u28 = 0,
log_err_count: u30,
leak_count: u31,
};
pub const Status = enum(u2) { pass, fail, skip };
};
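A small layout check, not part of this change, illustrating why `flags` is declared `align(4)`: it keeps the widened 64-bit flags immediately after `index`, so the fixed-size part of the message stays at 12 bytes. `TestResults` is assumed to be in scope:
const std = @import("std");

test "TestResults wire layout sketch" {
    try std.testing.expectEqual(4, @offsetOf(TestResults, "flags"));
    try std.testing.expectEqual(8, @sizeOf(TestResults.Flags));
    try std.testing.expectEqual(12, @sizeOf(TestResults));
}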
/// Trailing is the same as in `std.Build.abi.time_report.CompileResult`, excluding `step_name`.

View File

@ -320,7 +320,7 @@ test "inline call preserves tail call" {
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support always tail calls
const max = std.math.maxInt(u16);
const max_depth = 1000;
const S = struct {
var a: u16 = 0;
fn foo() void {
@ -328,16 +328,16 @@ test "inline call preserves tail call" {
}
inline fn bar() void {
if (a == max) return;
if (a == max_depth) return;
// Stack overflow if not tail called
var buf: [max]u16 = undefined;
var buf: [100_000]u16 = undefined;
buf[a] = a;
a += 1;
return @call(.always_tail, foo, .{});
}
};
S.foo();
try expect(S.a == std.math.maxInt(u16));
try expect(S.a == max_depth);
}
test "inline call doesn't re-evaluate non generic struct" {