Merge remote-tracking branch 'origin/master' into wrangle-writer-buffering

Andrew Kelley 2025-08-01 18:25:15 -07:00
commit 5667435bc4
116 changed files with 4600 additions and 2428 deletions


@@ -1,229 +1,26 @@
const std = @import("std");
const assert = std.debug.assert;
const abi = std.Build.Fuzz.abi;
const gpa = std.heap.wasm_allocator;
const log = std.log;
const Coverage = std.debug.Coverage;
const Allocator = std.mem.Allocator;
const Walk = @import("Walk");
const Decl = Walk.Decl;
const html_render = @import("html_render");
/// Nanoseconds.
var server_base_timestamp: i64 = 0;
/// Milliseconds.
var client_base_timestamp: i64 = 0;
/// Relative to `server_base_timestamp`.
// Server timestamp.
var start_fuzzing_timestamp: i64 = undefined;
const js = struct {
extern "js" fn log(ptr: [*]const u8, len: usize) void;
extern "js" fn panic(ptr: [*]const u8, len: usize) noreturn;
extern "js" fn timestamp() i64;
extern "js" fn emitSourceIndexChange() void;
extern "js" fn emitCoverageUpdate() void;
extern "js" fn emitEntryPointsUpdate() void;
extern "fuzz" fn requestSources() void;
extern "fuzz" fn ready() void;
extern "fuzz" fn updateStats(html_ptr: [*]const u8, html_len: usize) void;
extern "fuzz" fn updateEntryPoints(html_ptr: [*]const u8, html_len: usize) void;
extern "fuzz" fn updateSource(html_ptr: [*]const u8, html_len: usize) void;
extern "fuzz" fn updateCoverage(covered_ptr: [*]const SourceLocationIndex, covered_len: u32) void;
};
pub const std_options: std.Options = .{
.logFn = logFn,
};
pub fn sourceIndexMessage(msg_bytes: []u8) error{OutOfMemory}!void {
Walk.files.clearRetainingCapacity();
Walk.decls.clearRetainingCapacity();
Walk.modules.clearRetainingCapacity();
recent_coverage_update.clearRetainingCapacity();
selected_source_location = null;
pub fn panic(msg: []const u8, st: ?*std.builtin.StackTrace, addr: ?usize) noreturn {
_ = st;
_ = addr;
log.err("panic: {s}", .{msg});
@trap();
}
js.requestSources();
fn logFn(
comptime message_level: log.Level,
comptime scope: @TypeOf(.enum_literal),
comptime format: []const u8,
args: anytype,
) void {
const level_txt = comptime message_level.asText();
const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
var buf: [500]u8 = undefined;
const line = std.fmt.bufPrint(&buf, level_txt ++ prefix2 ++ format, args) catch l: {
buf[buf.len - 3 ..][0..3].* = "...".*;
break :l &buf;
};
js.log(line.ptr, line.len);
}
export fn alloc(n: usize) [*]u8 {
const slice = gpa.alloc(u8, n) catch @panic("OOM");
return slice.ptr;
}
var message_buffer: std.ArrayListAlignedUnmanaged(u8, .of(u64)) = .empty;
/// Resizes the message buffer to be the correct length; returns the pointer to
/// the query string.
export fn message_begin(len: usize) [*]u8 {
message_buffer.resize(gpa, len) catch @panic("OOM");
return message_buffer.items.ptr;
}
export fn message_end() void {
const msg_bytes = message_buffer.items;
const tag: abi.ToClientTag = @enumFromInt(msg_bytes[0]);
switch (tag) {
.current_time => return currentTimeMessage(msg_bytes),
.source_index => return sourceIndexMessage(msg_bytes) catch @panic("OOM"),
.coverage_update => return coverageUpdateMessage(msg_bytes) catch @panic("OOM"),
.entry_points => return entryPointsMessage(msg_bytes) catch @panic("OOM"),
_ => unreachable,
}
}
export fn unpack(tar_ptr: [*]u8, tar_len: usize) void {
const tar_bytes = tar_ptr[0..tar_len];
log.debug("received {d} bytes of tar file", .{tar_bytes.len});
unpackInner(tar_bytes) catch |err| {
fatal("unable to unpack tar: {s}", .{@errorName(err)});
};
}
/// Set by `set_input_string`.
var input_string: std.ArrayListUnmanaged(u8) = .empty;
var string_result: std.ArrayListUnmanaged(u8) = .empty;
export fn set_input_string(len: usize) [*]u8 {
input_string.resize(gpa, len) catch @panic("OOM");
return input_string.items.ptr;
}
/// Looks up the root struct decl corresponding to a file by path.
/// Uses `input_string`.
export fn find_file_root() Decl.Index {
const file: Walk.File.Index = @enumFromInt(Walk.files.getIndex(input_string.items) orelse return .none);
return file.findRootDecl();
}
export fn decl_source_html(decl_index: Decl.Index) String {
const decl = decl_index.get();
string_result.clearRetainingCapacity();
html_render.fileSourceHtml(decl.file, &string_result, decl.ast_node, .{}) catch |err| {
fatal("unable to render source: {s}", .{@errorName(err)});
};
return String.init(string_result.items);
}
export fn totalSourceLocations() usize {
return coverage_source_locations.items.len;
}
export fn coveredSourceLocations() usize {
const covered_bits = recent_coverage_update.items[@sizeOf(abi.CoverageUpdateHeader)..];
var count: usize = 0;
for (covered_bits) |byte| count += @popCount(byte);
return count;
}
fn getCoverageUpdateHeader() *abi.CoverageUpdateHeader {
return @alignCast(@ptrCast(recent_coverage_update.items[0..@sizeOf(abi.CoverageUpdateHeader)]));
}
export fn totalRuns() u64 {
const header = getCoverageUpdateHeader();
return header.n_runs;
}
export fn uniqueRuns() u64 {
const header = getCoverageUpdateHeader();
return header.unique_runs;
}
export fn totalRunsPerSecond() f64 {
@setFloatMode(.optimized);
const header = getCoverageUpdateHeader();
const ns_elapsed: f64 = @floatFromInt(nsSince(start_fuzzing_timestamp));
const n_runs: f64 = @floatFromInt(header.n_runs);
return n_runs / (ns_elapsed / std.time.ns_per_s);
}
const String = Slice(u8);
fn Slice(T: type) type {
return packed struct(u64) {
ptr: u32,
len: u32,
fn init(s: []const T) @This() {
return .{
.ptr = @intFromPtr(s.ptr),
.len = s.len,
};
}
};
}
fn unpackInner(tar_bytes: []u8) !void {
var fbs = std.io.fixedBufferStream(tar_bytes);
var file_name_buffer: [1024]u8 = undefined;
var link_name_buffer: [1024]u8 = undefined;
var it = std.tar.iterator(fbs.reader(), .{
.file_name_buffer = &file_name_buffer,
.link_name_buffer = &link_name_buffer,
});
while (try it.next()) |tar_file| {
switch (tar_file.kind) {
.file => {
if (tar_file.size == 0 and tar_file.name.len == 0) break;
if (std.mem.endsWith(u8, tar_file.name, ".zig")) {
log.debug("found file: '{s}'", .{tar_file.name});
const file_name = try gpa.dupe(u8, tar_file.name);
if (std.mem.indexOfScalar(u8, file_name, '/')) |pkg_name_end| {
const pkg_name = file_name[0..pkg_name_end];
const gop = try Walk.modules.getOrPut(gpa, pkg_name);
const file: Walk.File.Index = @enumFromInt(Walk.files.entries.len);
if (!gop.found_existing or
std.mem.eql(u8, file_name[pkg_name_end..], "/root.zig") or
std.mem.eql(u8, file_name[pkg_name_end + 1 .. file_name.len - ".zig".len], pkg_name))
{
gop.value_ptr.* = file;
}
const file_bytes = tar_bytes[fbs.pos..][0..@intCast(tar_file.size)];
assert(file == try Walk.add_file(file_name, file_bytes));
}
} else {
log.warn("skipping: '{s}' - the tar creation should have done that", .{tar_file.name});
}
},
else => continue,
}
}
}
fn fatal(comptime format: []const u8, args: anytype) noreturn {
var buf: [500]u8 = undefined;
const line = std.fmt.bufPrint(&buf, format, args) catch l: {
buf[buf.len - 3 ..][0..3].* = "...".*;
break :l &buf;
};
js.panic(line.ptr, line.len);
}
fn currentTimeMessage(msg_bytes: []u8) void {
client_base_timestamp = js.timestamp();
server_base_timestamp = @bitCast(msg_bytes[1..][0..8].*);
}
/// Nanoseconds passed since a server timestamp.
fn nsSince(server_timestamp: i64) i64 {
const ms_passed = js.timestamp() - client_base_timestamp;
const ns_passed = server_base_timestamp - server_timestamp;
return ns_passed + ms_passed * std.time.ns_per_ms;
}
fn sourceIndexMessage(msg_bytes: []u8) error{OutOfMemory}!void {
const Header = abi.SourceIndexHeader;
const Header = abi.fuzz.SourceIndexHeader;
const header: Header = @bitCast(msg_bytes[0..@sizeOf(Header)].*);
const directories_start = @sizeOf(Header);
@@ -239,27 +36,55 @@ fn sourceIndexMessage(msg_bytes: []u8) error{OutOfMemory}!void {
const source_locations: []const Coverage.SourceLocation = @alignCast(std.mem.bytesAsSlice(Coverage.SourceLocation, msg_bytes[source_locations_start..source_locations_end]));
start_fuzzing_timestamp = header.start_timestamp;
try updateCoverage(directories, files, source_locations, string_bytes);
js.emitSourceIndexChange();
try updateCoverageSources(directories, files, source_locations, string_bytes);
js.ready();
}
fn coverageUpdateMessage(msg_bytes: []u8) error{OutOfMemory}!void {
var coverage = Coverage.init;
/// Index of type `SourceLocationIndex`.
var coverage_source_locations: std.ArrayListUnmanaged(Coverage.SourceLocation) = .empty;
/// Contains the most recent coverage update message, unmodified.
var recent_coverage_update: std.ArrayListAlignedUnmanaged(u8, .of(u64)) = .empty;
fn updateCoverageSources(
directories: []const Coverage.String,
files: []const Coverage.File,
source_locations: []const Coverage.SourceLocation,
string_bytes: []const u8,
) !void {
coverage.directories.clearRetainingCapacity();
coverage.files.clearRetainingCapacity();
coverage.string_bytes.clearRetainingCapacity();
coverage_source_locations.clearRetainingCapacity();
try coverage_source_locations.appendSlice(gpa, source_locations);
try coverage.string_bytes.appendSlice(gpa, string_bytes);
try coverage.files.entries.resize(gpa, files.len);
@memcpy(coverage.files.entries.items(.key), files);
try coverage.files.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
try coverage.directories.entries.resize(gpa, directories.len);
@memcpy(coverage.directories.entries.items(.key), directories);
try coverage.directories.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
}
pub fn coverageUpdateMessage(msg_bytes: []u8) error{OutOfMemory}!void {
recent_coverage_update.clearRetainingCapacity();
recent_coverage_update.appendSlice(gpa, msg_bytes) catch @panic("OOM");
js.emitCoverageUpdate();
try updateStats();
try updateCoverage();
}
var entry_points: std.ArrayListUnmanaged(u32) = .empty;
var entry_points: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
fn entryPointsMessage(msg_bytes: []u8) error{OutOfMemory}!void {
const header: abi.EntryPointHeader = @bitCast(msg_bytes[0..@sizeOf(abi.EntryPointHeader)].*);
entry_points.resize(gpa, header.flags.locs_len) catch @panic("OOM");
@memcpy(entry_points.items, std.mem.bytesAsSlice(u32, msg_bytes[@sizeOf(abi.EntryPointHeader)..]));
js.emitEntryPointsUpdate();
}
export fn entryPoints() Slice(u32) {
return Slice(u32).init(entry_points.items);
pub fn entryPointsMessage(msg_bytes: []u8) error{OutOfMemory}!void {
const header: abi.fuzz.EntryPointHeader = @bitCast(msg_bytes[0..@sizeOf(abi.fuzz.EntryPointHeader)].*);
const slis: []align(1) const SourceLocationIndex = @ptrCast(msg_bytes[@sizeOf(abi.fuzz.EntryPointHeader)..]);
assert(slis.len == header.locsLen());
try entry_points.resize(gpa, slis.len);
@memcpy(entry_points.items, slis);
try updateEntryPoints();
}
/// Index into `coverage_source_locations`.
@@ -277,11 +102,18 @@ const SourceLocationIndex = enum(u32) {
fn sourceLocationLinkHtml(
sli: SourceLocationIndex,
out: *std.ArrayListUnmanaged(u8),
focused: bool,
) Allocator.Error!void {
const sl = sli.ptr();
try out.writer(gpa).print("<a href=\"#l{d}\">", .{@intFromEnum(sli)});
try out.writer(gpa).print("<code{s}>", .{
@as([]const u8, if (focused) " class=\"status-running\"" else ""),
});
try sli.appendPath(out);
try out.writer(gpa).print(":{d}:{d}</a>", .{ sl.line, sl.column });
try out.writer(gpa).print(":{d}:{d} </code><button class=\"linkish\" onclick=\"wasm_exports.fuzzSelectSli({d});\">View</button>", .{
sl.line,
sl.column,
@intFromEnum(sli),
});
}
fn appendPath(sli: SourceLocationIndex, out: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
@@ -372,84 +204,174 @@ fn computeSourceAnnotations(
}
}
var coverage = Coverage.init;
/// Index of type `SourceLocationIndex`.
var coverage_source_locations: std.ArrayListUnmanaged(Coverage.SourceLocation) = .empty;
/// Contains the most recent coverage update message, unmodified.
var recent_coverage_update: std.ArrayListAlignedUnmanaged(u8, .of(u64)) = .empty;
export fn fuzzUnpackSources(tar_ptr: [*]u8, tar_len: usize) void {
const tar_bytes = tar_ptr[0..tar_len];
log.debug("received {d} bytes of sources.tar", .{tar_bytes.len});
fn updateCoverage(
directories: []const Coverage.String,
files: []const Coverage.File,
source_locations: []const Coverage.SourceLocation,
string_bytes: []const u8,
) !void {
coverage.directories.clearRetainingCapacity();
coverage.files.clearRetainingCapacity();
coverage.string_bytes.clearRetainingCapacity();
coverage_source_locations.clearRetainingCapacity();
try coverage_source_locations.appendSlice(gpa, source_locations);
try coverage.string_bytes.appendSlice(gpa, string_bytes);
try coverage.files.entries.resize(gpa, files.len);
@memcpy(coverage.files.entries.items(.key), files);
try coverage.files.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
try coverage.directories.entries.resize(gpa, directories.len);
@memcpy(coverage.directories.entries.items(.key), directories);
try coverage.directories.reIndexContext(gpa, .{ .string_bytes = coverage.string_bytes.items });
}
export fn sourceLocationLinkHtml(index: SourceLocationIndex) String {
string_result.clearRetainingCapacity();
index.sourceLocationLinkHtml(&string_result) catch @panic("OOM");
return String.init(string_result.items);
}
/// Returns empty string if coverage metadata is not available for this source location.
export fn sourceLocationPath(sli: SourceLocationIndex) String {
string_result.clearRetainingCapacity();
if (sli.haveCoverage()) sli.appendPath(&string_result) catch @panic("OOM");
return String.init(string_result.items);
}
export fn sourceLocationFileHtml(sli: SourceLocationIndex) String {
string_result.clearRetainingCapacity();
sli.fileHtml(&string_result) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"),
error.SourceUnavailable => {},
unpackSourcesInner(tar_bytes) catch |err| {
fatal("unable to unpack sources.tar: {s}", .{@errorName(err)});
};
return String.init(string_result.items);
}
export fn sourceLocationFileCoveredList(sli_file: SourceLocationIndex) Slice(SourceLocationIndex) {
const global = struct {
var result: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
fn add(i: u32, want_file: Coverage.File.Index) void {
const src_loc_index: SourceLocationIndex = @enumFromInt(i);
if (src_loc_index.ptr().file == want_file) result.appendAssumeCapacity(src_loc_index);
fn unpackSourcesInner(tar_bytes: []u8) !void {
var tar_reader: std.Io.Reader = .fixed(tar_bytes);
var file_name_buffer: [1024]u8 = undefined;
var link_name_buffer: [1024]u8 = undefined;
var it: std.tar.Iterator = .init(&tar_reader, .{
.file_name_buffer = &file_name_buffer,
.link_name_buffer = &link_name_buffer,
});
while (try it.next()) |tar_file| {
switch (tar_file.kind) {
.file => {
if (tar_file.size == 0 and tar_file.name.len == 0) break;
if (std.mem.endsWith(u8, tar_file.name, ".zig")) {
log.debug("found file: '{s}'", .{tar_file.name});
const file_name = try gpa.dupe(u8, tar_file.name);
if (std.mem.indexOfScalar(u8, file_name, '/')) |pkg_name_end| {
const pkg_name = file_name[0..pkg_name_end];
const gop = try Walk.modules.getOrPut(gpa, pkg_name);
const file: Walk.File.Index = @enumFromInt(Walk.files.entries.len);
// Use this file as the module root if it is the first file seen for this
// package, or it is "<pkg>/root.zig", or its basename matches the package name.
if (!gop.found_existing or
std.mem.eql(u8, file_name[pkg_name_end..], "/root.zig") or
std.mem.eql(u8, file_name[pkg_name_end + 1 .. file_name.len - ".zig".len], pkg_name))
{
gop.value_ptr.* = file;
}
const file_bytes = tar_reader.take(@intCast(tar_file.size)) catch unreachable;
it.unread_file_bytes = 0; // we have read the whole thing
assert(file == try Walk.add_file(file_name, file_bytes));
}
} else {
log.warn("skipping: '{s}' - the tar creation should have done that", .{tar_file.name});
}
},
else => continue,
}
}
}
fn updateStats() error{OutOfMemory}!void {
@setFloatMode(.optimized);
if (recent_coverage_update.items.len == 0) return;
const hdr: *abi.fuzz.CoverageUpdateHeader = @alignCast(@ptrCast(
recent_coverage_update.items[0..@sizeOf(abi.fuzz.CoverageUpdateHeader)],
));
const covered_src_locs: usize = n: {
var n: usize = 0;
const covered_bits = recent_coverage_update.items[@sizeOf(abi.fuzz.CoverageUpdateHeader)..];
for (covered_bits) |byte| n += @popCount(byte);
break :n n;
};
const want_file = sli_file.ptr().file;
global.result.clearRetainingCapacity();
const total_src_locs = coverage_source_locations.items.len;
const avg_speed: f64 = speed: {
const ns_elapsed: f64 = @floatFromInt(nsSince(start_fuzzing_timestamp));
const n_runs: f64 = @floatFromInt(hdr.n_runs);
break :speed n_runs / (ns_elapsed / std.time.ns_per_s);
};
const html = try std.fmt.allocPrint(gpa,
\\<span slot="stat-total-runs">{d}</span>
\\<span slot="stat-unique-runs">{d} ({d:.1}%)</span>
\\<span slot="stat-coverage">{d} / {d} ({d:.1}%)</span>
\\<span slot="stat-speed">{d:.0}</span>
, .{
hdr.n_runs,
hdr.unique_runs,
@as(f64, @floatFromInt(hdr.unique_runs)) / @as(f64, @floatFromInt(hdr.n_runs)),
covered_src_locs,
total_src_locs,
@as(f64, @floatFromInt(covered_src_locs)) / @as(f64, @floatFromInt(total_src_locs)),
avg_speed,
});
defer gpa.free(html);
js.updateStats(html.ptr, html.len);
}
fn updateEntryPoints() error{OutOfMemory}!void {
var html: std.ArrayListUnmanaged(u8) = .empty;
defer html.deinit(gpa);
for (entry_points.items) |sli| {
try html.appendSlice(gpa, "<li>");
try sli.sourceLocationLinkHtml(&html, selected_source_location == sli);
try html.appendSlice(gpa, "</li>\n");
}
js.updateEntryPoints(html.items.ptr, html.items.len);
}
fn updateCoverage() error{OutOfMemory}!void {
if (recent_coverage_update.items.len == 0) return;
const want_file = (selected_source_location orelse return).ptr().file;
var covered: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
defer covered.deinit(gpa);
// This code assumes 64-bit bitset elements, which is incorrect if the
// executable being fuzzed does not target a 64-bit CPU. It also assumes
// little-endian, which can likewise be incorrect.
comptime assert(abi.CoverageUpdateHeader.trailing[0] == .pc_bits_usize);
comptime assert(abi.fuzz.CoverageUpdateHeader.trailing[0] == .pc_bits_usize);
const n_bitset_elems = (coverage_source_locations.items.len + @bitSizeOf(u64) - 1) / @bitSizeOf(u64);
const covered_bits = std.mem.bytesAsSlice(
u64,
recent_coverage_update.items[@sizeOf(abi.CoverageUpdateHeader)..][0 .. n_bitset_elems * @sizeOf(u64)],
recent_coverage_update.items[@sizeOf(abi.fuzz.CoverageUpdateHeader)..][0 .. n_bitset_elems * @sizeOf(u64)],
);
var sli: u32 = 0;
var sli: SourceLocationIndex = @enumFromInt(0);
for (covered_bits) |elem| {
global.result.ensureUnusedCapacity(gpa, 64) catch @panic("OOM");
try covered.ensureUnusedCapacity(gpa, 64);
for (0..@bitSizeOf(u64)) |i| {
if ((elem & (@as(u64, 1) << @intCast(i))) != 0) global.add(sli, want_file);
sli += 1;
if ((elem & (@as(u64, 1) << @intCast(i))) != 0) {
if (sli.ptr().file == want_file) {
covered.appendAssumeCapacity(sli);
}
}
sli = @enumFromInt(@intFromEnum(sli) + 1);
}
}
return Slice(SourceLocationIndex).init(global.result.items);
js.updateCoverage(covered.items.ptr, covered.items.len);
}
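// A minimal sketch (hypothetical helper, not part of this commit) of the bitset
// addressing performed by the loop above: source location `sli` is covered
// exactly when bit `sli % 64` of `covered_bits[sli / 64]` is set.
fn isCovered(covered_bits: []const u64, sli: u32) bool {
    return (covered_bits[sli / 64] & (@as(u64, 1) << @intCast(sli % 64))) != 0;
}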
fn updateSource() error{OutOfMemory}!void {
if (recent_coverage_update.items.len == 0) return;
const file_sli = selected_source_location.?;
var html: std.ArrayListUnmanaged(u8) = .empty;
defer html.deinit(gpa);
file_sli.fileHtml(&html) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.SourceUnavailable => {},
};
js.updateSource(html.items.ptr, html.items.len);
}
var selected_source_location: ?SourceLocationIndex = null;
/// This function is not used directly by `main.js`, but a reference to it is
/// emitted by `SourceLocationIndex.sourceLocationLinkHtml`.
export fn fuzzSelectSli(sli: SourceLocationIndex) void {
if (!sli.haveCoverage()) return;
selected_source_location = sli;
updateEntryPoints() catch @panic("out of memory"); // highlights the selected one green
updateSource() catch @panic("out of memory");
updateCoverage() catch @panic("out of memory");
}
const std = @import("std");
const Allocator = std.mem.Allocator;
const Coverage = std.debug.Coverage;
const abi = std.Build.abi;
const assert = std.debug.assert;
const gpa = std.heap.wasm_allocator;
const Walk = @import("Walk");
const html_render = @import("html_render");
const nsSince = @import("main.zig").nsSince;
const Slice = @import("main.zig").Slice;
const fatal = @import("main.zig").fatal;
const log = std.log;
const String = Slice(u8);

lib/build-web/index.html Normal file

@@ -0,0 +1,202 @@
<!doctype html>
<meta charset="utf-8">
<title>Zig Build System</title>
<link rel="stylesheet" href="style.css">
<!-- Highly compressed 32x32 Zig logo -->
<link rel="icon" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAABSklEQVRYw8WWXbLDIAiFP5xuURYpi+Q+VDvJTYxaY8pLJ52EA5zDj/AD8wRABCw8DeyJBDiAKMiDGaecNYCKYgCvh4EBjPgGh0UVqAB/MEU3D57efDRMiRhWddprCljRAECPCE0Uw4iz4Jn3tP2zFYAB6on4/8NBM1Es+9kl0aKgaMRnwHPpT5MIDb6YzLzp57wNIyIC7iCCdijeL3gv78jZe6cVENn/drRbXbxl4lXSmB3FtbY0iNrjIEwMm6u2VFFjWQCN0qtov6+wANxG/IV7eR8DHw6gzft4NuEXvA8HcDfv31SgyvsMeDUA90/WTd47bsCdv8PUrWzDyw02uIYv13ktgOVr+IqCouila7gWgNYuly/BfVSEdsP5Vdqyiz7pPC40C+p2e21bL5/dByGtAD6eZPuzeznwjoIN748BfyqwmVDyJHCxPwLSkjUkraEXAAAAAElFTkSuQmCC">
<!-- Templates, to be cloned into shadow DOMs by JavaScript -->
<template id="timeReportEntryTemplate">
<link rel="stylesheet" href="style.css">
<link rel="stylesheet" href="time_report.css">
<details>
<summary><slot name="step-name"></slot></summary>
<div id="genericReport">
<div class="stats">
Time: <slot name="stat-total-time"></slot><br>
</div>
</div>
<div id="compileReport">
<div class="stats">
Files Discovered: <slot name="stat-reachable-files"></slot><br>
Files Analyzed: <slot name="stat-imported-files"></slot><br>
Generic Instances Analyzed: <slot name="stat-generic-instances"></slot><br>
Inline Calls Analyzed: <slot name="stat-inline-calls"></slot><br>
Compilation Time: <slot name="stat-compilation-time"></slot><br>
</div>
<table class="time-stats">
<thead>
<tr>
<th scope="col">Pipeline Component</th>
<th scope="col" class="tooltip">CPU Time
<span class="tooltip-content">Sum across all threads of the time spent in this pipeline component</span>
</th>
<th scope="col" class="tooltip">Real Time
<span class="tooltip-content">Wall-clock time elapsed between the start and end of this compilation phase</span>
</th>
<th scope="col">Compilation Phase</th>
</tr>
</thead>
<tbody>
<tr>
<th scope="row" class="tooltip">Parsing
<span class="tooltip-content"><code>tokenize</code> converts a file of Zig source code into a sequence of tokens, which are then processed by <code>Parse</code> into an Abstract Syntax Tree (AST).</span>
</th>
<td><slot name="cpu-time-parse"></slot></td>
<td rowspan="2"><slot name="real-time-files"></slot></td>
<th scope="row" rowspan="2" class="tooltip">File Lower
<span class="tooltip-content">Tokenization, parsing, and lowering of Zig source files to a high-level IR.<br><br>Starting from module roots, every file theoretically accessible through a chain of <code>@import</code> calls is processed. Individual source files are processed serially, but different files are processed in parallel by a thread pool.<br><br>The results of this phase of compilation are cached on disk per source file, meaning the time spent here is typically only relevant to "clean" builds.</span>
</th>
</tr>
<tr>
<th scope="row" class="tooltip">AST Lowering
<span class="tooltip-content"><code>AstGen</code> converts a file's AST into a high-level SSA IR named Zig Intermediate Representation (ZIR). The resulting ZIR code is cached on disk to avoid, for instance, re-lowering all source files in the Zig standard library each time the compiler is invoked.</span>
</th>
<td><slot name="cpu-time-astgen"></slot></td>
</tr>
<tr>
<th scope="row" class="tooltip">Semantic Analysis
<span class="tooltip-content"><code>Sema</code> interprets ZIR to perform type checking, compile-time code execution, and type resolution, collectively termed "semantic analysis". When a runtime function body is analyzed, it emits Analyzed Intermediate Representation (AIR) code to be sent to the next pipeline component. Semantic analysis is currently entirely single-threaded.</span>
</th>
<td><slot name="cpu-time-sema"></slot></td>
<td rowspan="3"><slot name="real-time-decls"></slot></td>
<th scope="row" rowspan="3" class="tooltip">Declaration Lower
<span class="tooltip-content">Semantic analysis, code generation, and linking, at the granularity of individual declarations (as opposed to whole source files).<br><br>These components are run in parallel with one another. Semantic analysis is almost always the bottleneck, as it is complex and currently can only run single-threaded.<br><br>This phase completes when a work queue empties, but semantic analysis may add work by one declaration referencing another.<br><br>This is the main phase of compilation, typically taking significantly longer than File Lower (even in a clean build).</span>
</th>
</tr>
<tr>
<th scope="row" class="tooltip">Code Generation
<span class="tooltip-content"><code>CodeGen</code> converts AIR from <code>Sema</code> into machine instructions in the form of Machine Intermediate Representation (MIR). This work is usually highly parallel, since in most cases, arbitrarily many functions can be run through <code>CodeGen</code> simultaneously.</span>
</th>
<td><slot name="cpu-time-codegen"></slot></td>
</tr>
<tr>
<th scope="row" class="tooltip">Linking
<span class="tooltip-content"><code>link</code> converts MIR from <code>CodeGen</code>, as well as global constants and variables from <code>Sema</code>, and places them in the output binary. MIR is converted to a finished sequence of real instruction bytes.<br><br>When using the LLVM backend, most of this work is instead deferred to the "LLVM Emit" phase.</span>
</th>
<td><slot name="cpu-time-link"></slot></td>
</tr>
<tr class="llvm-only">
<th class="empty-cell"></th>
<td class="empty-cell"></td>
<td><slot name="real-time-llvm-emit"></slot></td>
<th scope="row" class="tooltip">LLVM Emit
<span class="tooltip-content"><b>Only applicable when using the LLVM backend.</b><br><br>Conversion of generated LLVM bitcode to an object file, including any optimization passes.<br><br>When using LLVM, this phase of compilation is typically the slowest by a significant margin. Unfortunately, the Zig compiler implementation has essentially no control over it.</span>
</th>
</tr>
<tr>
<th class="empty-cell"></th>
<td class="empty-cell"></td>
<td><slot name="real-time-link-flush"></slot></td>
<th scope="row" class="tooltip">Linker Flush
<span class="tooltip-content">Finalizing the emitted binary, and ensuring it is fully written to disk.<br><br>When using LLD, this phase represents the entire linker invocation. Otherwise, the amount of work performed here is dependent on details of Zig's linker implementation for the particular output format, but typically aims to be fairly minimal.</span>
</th>
</tr>
</tbody>
</table>
<details class="section">
<summary>Files</summary>
<table class="time-stats">
<thead>
<tr>
<th scope="col">File</th>
<th scope="col">Semantic Analysis</th>
<th scope="col">Code Generation</th>
<th scope="col">Linking</th>
</tr>
</thead>
<!-- HTML does not allow placing a 'slot' inside of a 'tbody' for backwards-compatibility
reasons, so we unfortunately must template on the `id` here. -->
<tbody id="fileTableBody"></tbody>
</table>
</details>
<details class="section">
<summary>Declarations</summary>
<table class="time-stats">
<thead>
<tr>
<th scope="col">File</th>
<th scope="col">Declaration</th>
<th scope="col" class="tooltip">Analysis Count
<span class="tooltip-content">The number of times the compiler analyzed some part of this declaration. If this is a function, <code>inline</code> and <code>comptime</code> calls to it are <i>not</i> included here. Typically, this value is approximately equal to the number of instances of a generic declaration.</span>
</th>
<th scope="col">Semantic Analysis</th>
<th scope="col">Code Generation</th>
<th scope="col">Linking</th>
</tr>
</thead>
<!-- HTML does not allow placing a 'slot' inside of a 'tbody' for backwards-compatibility
reasons, so we unfortunately must template on the `id` here. -->
<tbody id="declTableBody"></tbody>
</table>
</details>
<details class="section llvm-only">
<summary>LLVM Pass Timings</summary>
<div><slot name="llvm-pass-timings"></slot></div>
</details>
</div>
</details>
</template>
<template id="fuzzEntryTemplate">
<link rel="stylesheet" href="style.css">
<ul>
<li>Total Runs: <slot name="stat-total-runs"></slot></li>
<li>Unique Runs: <slot name="stat-unique-runs"></slot></li>
<li>Speed: <slot name="stat-speed"></slot> runs/sec</li>
<li>Coverage: <slot name="stat-coverage"></slot></li>
</ul>
<!-- I have observed issues in Firefox when clicking frequently-updating slotted links, so the entry
point list is handled separately since it rarely changes. -->
<ul id="entryPointList" class="no-marker"></ul>
<div id="source" class="hidden">
<h2>Source Code</h2>
<pre><code id="sourceText"></code></pre>
</div>
</template>
<!-- The actual body: fairly minimal, content populated by JavaScript -->
<p id="connectionStatus">Loading JavaScript...</p>
<p class="hidden" id="firefoxWebSocketBullshitExplainer">
If you are using Firefox and <code>zig build --listen</code> is definitely running, you may be experiencing an unreasonably aggressive exponential
backoff for WebSocket connection attempts, which is enabled by default and can block connection attempts for up to a minute. To disable this limit,
open <code>about:config</code> and set the <code>network.websocket.delay-failed-reconnects</code> option to <code>false</code>.
</p>
<main class="hidden">
<h1>Zig Build System</h1>
<p><span id="summaryStatus"></span> | <span id="summaryStepCount"></span> steps</p>
<button class="big-btn" id="buttonRebuild" disabled>Rebuild</button>
<ul class="no-marker" id="stepList"></ul>
<hr>
<div id="timeReport" class="hidden">
<h1>Time Report</h1>
<div id="timeReportList"></div>
<hr>
</div>
<div id="fuzz" class="hidden">
<h1>Fuzzer</h1>
<p id="fuzzStatus"></p>
<div id="fuzzEntries"></div>
<hr>
</div>
<h1>Help</h1>
<p>This is the Zig Build System web interface. It allows live interaction with the build system.</p>
<p>The following <code>zig build</code> flags can expose extra features of this interface:</p>
<ul>
<li><code>--time-report</code>: collect and show statistics about the time taken to evaluate a build graph</li>
<li><code>--fuzz</code>: enable the fuzzer for any Zig test binaries in the build graph (experimental)</li>
</ul>
</main>
<!-- JavaScript at the very end -->
<script src="main.js"></script>

lib/build-web/main.js Normal file

@@ -0,0 +1,346 @@
const domConnectionStatus = document.getElementById("connectionStatus");
const domFirefoxWebSocketBullshitExplainer = document.getElementById("firefoxWebSocketBullshitExplainer");
const domMain = document.getElementsByTagName("main")[0];
const domSummary = {
stepCount: document.getElementById("summaryStepCount"),
status: document.getElementById("summaryStatus"),
};
const domButtonRebuild = document.getElementById("buttonRebuild");
const domStepList = document.getElementById("stepList");
let domSteps = [];
let wasm_promise = fetch("main.wasm");
let wasm_exports = null;
const text_decoder = new TextDecoder();
const text_encoder = new TextEncoder();
domButtonRebuild.addEventListener("click", () => wasm_exports.rebuild());
setConnectionStatus("Loading WebAssembly...", false);
WebAssembly.instantiateStreaming(wasm_promise, {
core: {
log: function(ptr, len) {
const msg = decodeString(ptr, len);
console.log(msg);
},
panic: function (ptr, len) {
const msg = decodeString(ptr, len);
throw new Error("panic: " + msg);
},
timestamp: function () {
return BigInt(new Date());
},
hello: hello,
updateBuildStatus: updateBuildStatus,
updateStepStatus: updateStepStatus,
sendWsMessage: (ptr, len) => ws.send(new Uint8Array(wasm_exports.memory.buffer, ptr, len)),
},
fuzz: {
requestSources: fuzzRequestSources,
ready: fuzzReady,
updateStats: fuzzUpdateStats,
updateEntryPoints: fuzzUpdateEntryPoints,
updateSource: fuzzUpdateSource,
updateCoverage: fuzzUpdateCoverage,
},
time_report: {
updateCompile: timeReportUpdateCompile,
updateGeneric: timeReportUpdateGeneric,
},
}).then(function(obj) {
setConnectionStatus("Connecting to WebSocket...", true);
connectWebSocket();
wasm_exports = obj.instance.exports;
window.wasm = obj; // for debugging
});
function connectWebSocket() {
const host = document.location.host;
const pathname = document.location.pathname;
const isHttps = document.location.protocol === 'https:';
const match = host.match(/^(.+):(\d+)$/);
const defaultPort = isHttps ? 443 : 80;
const port = match ? parseInt(match[2], 10) : defaultPort;
const hostName = match ? match[1] : host;
const wsProto = isHttps ? "wss:" : "ws:";
const wsUrl = wsProto + '//' + hostName + ':' + port + pathname;
ws = new WebSocket(wsUrl);
ws.binaryType = "arraybuffer";
ws.addEventListener('message', onWebSocketMessage, false);
ws.addEventListener('error', onWebSocketClose, false);
ws.addEventListener('close', onWebSocketClose, false);
ws.addEventListener('open', onWebSocketOpen, false);
}
function onWebSocketOpen() {
setConnectionStatus("Waiting for data...", false);
}
function onWebSocketMessage(ev) {
const jsArray = new Uint8Array(ev.data);
const ptr = wasm_exports.message_begin(jsArray.length);
const wasmArray = new Uint8Array(wasm_exports.memory.buffer, ptr, jsArray.length);
wasmArray.set(jsArray);
wasm_exports.message_end();
}
function onWebSocketClose() {
setConnectionStatus("WebSocket connection closed. Re-connecting...", true);
ws.removeEventListener('message', onWebSocketMessage, false);
ws.removeEventListener('error', onWebSocketClose, false);
ws.removeEventListener('close', onWebSocketClose, false);
ws.removeEventListener('open', onWebSocketOpen, false);
ws = null;
setTimeout(connectWebSocket, 1000);
}
function setConnectionStatus(msg, is_websocket_connect) {
domConnectionStatus.textContent = msg;
if (msg.length > 0) {
domConnectionStatus.classList.remove("hidden");
domMain.classList.add("hidden");
} else {
domConnectionStatus.classList.add("hidden");
domMain.classList.remove("hidden");
}
if (is_websocket_connect) {
domFirefoxWebSocketBullshitExplainer.classList.remove("hidden");
} else {
domFirefoxWebSocketBullshitExplainer.classList.add("hidden");
}
}
function hello(
steps_len,
build_status,
time_report,
) {
domSummary.stepCount.textContent = steps_len;
updateBuildStatus(build_status);
setConnectionStatus("", false);
{
let entries = [];
for (let i = 0; i < steps_len; i += 1) {
const step_name = unwrapString(wasm_exports.stepName(i));
const code = document.createElement("code");
code.textContent = step_name;
const li = document.createElement("li");
li.appendChild(code);
entries.push(li);
}
domStepList.replaceChildren(...entries);
for (let i = 0; i < steps_len; i += 1) {
updateStepStatus(i);
}
}
if (time_report) timeReportReset(steps_len);
fuzzReset();
}
function updateBuildStatus(s) {
let text;
let active = false;
let reset_time_reports = false;
if (s == 0) {
text = "Idle";
} else if (s == 1) {
text = "Watching for changes...";
} else if (s == 2) {
text = "Running...";
active = true;
reset_time_reports = true;
} else if (s == 3) {
text = "Starting fuzzer...";
active = true;
} else {
console.log(`bad build status: ${s}`);
}
domSummary.status.textContent = text;
if (active) {
domSummary.status.classList.add("status-running");
domSummary.status.classList.remove("status-idle");
domButtonRebuild.disabled = true;
} else {
domSummary.status.classList.remove("status-running");
domSummary.status.classList.add("status-idle");
domButtonRebuild.disabled = false;
}
if (reset_time_reports) {
// Grey out and collapse all the time reports
for (const time_report_host of domTimeReportList.children) {
const details = time_report_host.shadowRoot.querySelector(":host > details");
details.classList.add("pending");
details.open = false;
}
}
}
function updateStepStatus(step_idx) {
const li = domStepList.children[step_idx];
const step_status = wasm_exports.stepStatus(step_idx);
li.classList.remove("step-wip", "step-success", "step-failure");
if (step_status == 0) {
// pending
} else if (step_status == 1) {
li.classList.add("step-wip");
} else if (step_status == 2) {
li.classList.add("step-success");
} else if (step_status == 3) {
li.classList.add("step-failure");
} else {
console.log(`bad step status: ${step_status}`);
}
}
function decodeString(ptr, len) {
if (len === 0) return "";
return text_decoder.decode(new Uint8Array(wasm_exports.memory.buffer, ptr, len));
}
function getU32Array(ptr, len) {
if (len === 0) return new Uint32Array();
return new Uint32Array(wasm_exports.memory.buffer, ptr, len);
}
function unwrapString(bigint) {
const ptr = Number(bigint & 0xffffffffn);
const len = Number(bigint >> 32n);
return decodeString(ptr, len);
}
const time_report_entry_template = document.getElementById("timeReportEntryTemplate").content;
const domTimeReport = document.getElementById("timeReport");
const domTimeReportList = document.getElementById("timeReportList");
function timeReportReset(steps_len) {
let entries = [];
for (let i = 0; i < steps_len; i += 1) {
const step_name = unwrapString(wasm_exports.stepName(i));
const host = document.createElement("div");
const shadow = host.attachShadow({ mode: "open" });
shadow.appendChild(time_report_entry_template.cloneNode(true));
shadow.querySelector(":host > details").classList.add("pending");
const slotted_name = document.createElement("code");
slotted_name.setAttribute("slot", "step-name");
slotted_name.textContent = step_name;
host.appendChild(slotted_name);
entries.push(host);
}
domTimeReportList.replaceChildren(...entries);
domTimeReport.classList.remove("hidden");
}
function timeReportUpdateCompile(
step_idx,
inner_html_ptr,
inner_html_len,
file_table_html_ptr,
file_table_html_len,
decl_table_html_ptr,
decl_table_html_len,
use_llvm,
) {
const inner_html = decodeString(inner_html_ptr, inner_html_len);
const file_table_html = decodeString(file_table_html_ptr, file_table_html_len);
const decl_table_html = decodeString(decl_table_html_ptr, decl_table_html_len);
const host = domTimeReportList.children.item(step_idx);
const shadow = host.shadowRoot;
shadow.querySelector(":host > details").classList.remove("pending", "no-llvm");
shadow.getElementById("genericReport").classList.add("hidden");
shadow.getElementById("compileReport").classList.remove("hidden");
if (!use_llvm) shadow.querySelector(":host > details").classList.add("no-llvm");
host.innerHTML = inner_html;
shadow.getElementById("fileTableBody").innerHTML = file_table_html;
shadow.getElementById("declTableBody").innerHTML = decl_table_html;
}
function timeReportUpdateGeneric(
step_idx,
inner_html_ptr,
inner_html_len,
) {
const inner_html = decodeString(inner_html_ptr, inner_html_len);
const host = domTimeReportList.children.item(step_idx);
const shadow = host.shadowRoot;
shadow.querySelector(":host > details").classList.remove("pending", "no-llvm");
shadow.getElementById("genericReport").classList.remove("hidden");
shadow.getElementById("compileReport").classList.add("hidden");
host.innerHTML = inner_html;
}
const fuzz_entry_template = document.getElementById("fuzzEntryTemplate").content;
const domFuzz = document.getElementById("fuzz");
const domFuzzStatus = document.getElementById("fuzzStatus");
const domFuzzEntries = document.getElementById("fuzzEntries");
let domFuzzInstance = null;
function fuzzRequestSources() {
domFuzzStatus.classList.remove("hidden");
domFuzzStatus.textContent = "Loading sources tarball...";
fetch("sources.tar").then(function(response) {
if (!response.ok) throw new Error("unable to download sources");
domFuzzStatus.textContent = "Parsing fuzz test sources...";
return response.arrayBuffer();
}).then(function(buffer) {
if (buffer.byteLength === 0) throw new Error("sources.tar was empty");
const js_array = new Uint8Array(buffer);
const ptr = wasm_exports.alloc(js_array.length);
const wasm_array = new Uint8Array(wasm_exports.memory.buffer, ptr, js_array.length);
wasm_array.set(js_array);
wasm_exports.fuzzUnpackSources(ptr, js_array.length);
domFuzzStatus.textContent = "";
domFuzzStatus.classList.add("hidden");
});
}
function fuzzReady() {
domFuzz.classList.remove("hidden");
// TODO: multiple fuzzer instances
if (domFuzzInstance !== null) return;
const host = document.createElement("div");
const shadow = host.attachShadow({ mode: "open" });
shadow.appendChild(fuzz_entry_template.cloneNode(true));
domFuzzInstance = host;
domFuzzEntries.appendChild(host);
}
function fuzzReset() {
domFuzz.classList.add("hidden");
domFuzzEntries.replaceChildren();
domFuzzInstance = null;
}
function fuzzUpdateStats(stats_html_ptr, stats_html_len) {
if (domFuzzInstance === null) throw new Error("fuzzUpdateStats called when fuzzer inactive");
const stats_html = decodeString(stats_html_ptr, stats_html_len);
const host = domFuzzInstance;
host.innerHTML = stats_html;
}
function fuzzUpdateEntryPoints(entry_points_html_ptr, entry_points_html_len) {
if (domFuzzInstance === null) throw new Error("fuzzUpdateEntryPoints called when fuzzer inactive");
const entry_points_html = decodeString(entry_points_html_ptr, entry_points_html_len);
const domEntryPointList = domFuzzInstance.shadowRoot.getElementById("entryPointList");
domEntryPointList.innerHTML = entry_points_html;
}
function fuzzUpdateSource(source_html_ptr, source_html_len) {
if (domFuzzInstance === null) throw new Error("fuzzUpdateSource called when fuzzer inactive");
const source_html = decodeString(source_html_ptr, source_html_len);
const domSourceText = domFuzzInstance.shadowRoot.getElementById("sourceText");
domSourceText.innerHTML = source_html;
domFuzzInstance.shadowRoot.getElementById("source").classList.remove("hidden");
}
function fuzzUpdateCoverage(covered_ptr, covered_len) {
if (domFuzzInstance === null) throw new Error("fuzzUpdateCoverage called when fuzzer inactive");
const shadow = domFuzzInstance.shadowRoot;
const domSourceText = shadow.getElementById("sourceText");
const covered = getU32Array(covered_ptr, covered_len);
for (let i = 0; i < domSourceText.children.length; i += 1) {
const childDom = domSourceText.children[i];
if (childDom.id != null && childDom.id[0] == "l") {
childDom.classList.add("l");
childDom.classList.remove("c");
}
}
for (const sli of covered) {
shadow.getElementById(`l${sli}`).classList.add("c");
}
}

lib/build-web/main.zig Normal file

@@ -0,0 +1,213 @@
const std = @import("std");
const assert = std.debug.assert;
const abi = std.Build.abi;
const gpa = std.heap.wasm_allocator;
const log = std.log;
const Allocator = std.mem.Allocator;
const fuzz = @import("fuzz.zig");
const time_report = @import("time_report.zig");
/// Nanoseconds.
var server_base_timestamp: i64 = 0;
/// Milliseconds.
var client_base_timestamp: i64 = 0;
pub var step_list: []Step = &.{};
/// Not accessed after initialization, but must be freed alongside `step_list`.
pub var step_list_data: []u8 = &.{};
const Step = struct {
name: []const u8,
status: abi.StepUpdate.Status,
};
const js = struct {
extern "core" fn log(ptr: [*]const u8, len: usize) void;
extern "core" fn panic(ptr: [*]const u8, len: usize) noreturn;
extern "core" fn timestamp() i64;
extern "core" fn hello(
steps_len: u32,
status: abi.BuildStatus,
time_report: bool,
) void;
extern "core" fn updateBuildStatus(status: abi.BuildStatus) void;
extern "core" fn updateStepStatus(step_idx: u32) void;
extern "core" fn sendWsMessage(ptr: [*]const u8, len: usize) void;
};
pub const std_options: std.Options = .{
.logFn = logFn,
};
pub fn panic(msg: []const u8, st: ?*std.builtin.StackTrace, addr: ?usize) noreturn {
_ = st;
_ = addr;
log.err("panic: {s}", .{msg});
@trap();
}
fn logFn(
comptime message_level: log.Level,
comptime scope: @TypeOf(.enum_literal),
comptime format: []const u8,
args: anytype,
) void {
const level_txt = comptime message_level.asText();
const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
var buf: [500]u8 = undefined;
const line = std.fmt.bufPrint(&buf, level_txt ++ prefix2 ++ format, args) catch l: {
buf[buf.len - 3 ..][0..3].* = "...".*;
break :l &buf;
};
js.log(line.ptr, line.len);
}
export fn alloc(n: usize) [*]u8 {
const slice = gpa.alloc(u8, n) catch @panic("OOM");
return slice.ptr;
}
var message_buffer: std.ArrayListAlignedUnmanaged(u8, .of(u64)) = .empty;
/// Resizes the message buffer to be the correct length; returns the pointer to
/// the message buffer.
export fn message_begin(len: usize) [*]u8 {
message_buffer.resize(gpa, len) catch @panic("OOM");
return message_buffer.items.ptr;
}
export fn message_end() void {
const msg_bytes = message_buffer.items;
const tag: abi.ToClientTag = @enumFromInt(msg_bytes[0]);
switch (tag) {
_ => @panic("malformed message"),
.hello => return helloMessage(msg_bytes) catch @panic("OOM"),
.status_update => return statusUpdateMessage(msg_bytes) catch @panic("OOM"),
.step_update => return stepUpdateMessage(msg_bytes) catch @panic("OOM"),
.fuzz_source_index => return fuzz.sourceIndexMessage(msg_bytes) catch @panic("OOM"),
.fuzz_coverage_update => return fuzz.coverageUpdateMessage(msg_bytes) catch @panic("OOM"),
.fuzz_entry_points => return fuzz.entryPointsMessage(msg_bytes) catch @panic("OOM"),
.time_report_generic_result => return time_report.genericResultMessage(msg_bytes) catch @panic("OOM"),
.time_report_compile_result => return time_report.compileResultMessage(msg_bytes) catch @panic("OOM"),
}
}
const String = Slice(u8);
pub fn Slice(T: type) type {
return packed struct(u64) {
ptr: u32,
len: u32,
pub fn init(s: []const T) @This() {
return .{
.ptr = @intFromPtr(s.ptr),
.len = s.len,
};
}
};
}
pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
var buf: [500]u8 = undefined;
const line = std.fmt.bufPrint(&buf, format, args) catch l: {
buf[buf.len - 3 ..][0..3].* = "...".*;
break :l &buf;
};
js.panic(line.ptr, line.len);
}
fn helloMessage(msg_bytes: []align(4) u8) Allocator.Error!void {
if (msg_bytes.len < @sizeOf(abi.Hello)) @panic("malformed Hello message");
const hdr: *const abi.Hello = @ptrCast(msg_bytes[0..@sizeOf(abi.Hello)]);
const trailing = msg_bytes[@sizeOf(abi.Hello)..];
client_base_timestamp = js.timestamp();
server_base_timestamp = hdr.timestamp;
const steps = try gpa.alloc(Step, hdr.steps_len);
errdefer gpa.free(steps);
const step_name_lens: []align(1) const u32 = @ptrCast(trailing[0 .. steps.len * 4]);
const step_name_data_len: usize = len: {
var sum: usize = 0;
for (step_name_lens) |n| sum += n;
break :len sum;
};
const step_name_data: []const u8 = trailing[steps.len * 4 ..][0..step_name_data_len];
const step_status_bits: []const u8 = trailing[steps.len * 4 + step_name_data_len ..];
const duped_step_name_data = try gpa.dupe(u8, step_name_data);
errdefer gpa.free(duped_step_name_data);
var name_off: usize = 0;
for (steps, step_name_lens, 0..) |*step_out, name_len, step_idx| {
step_out.* = .{
.name = duped_step_name_data[name_off..][0..name_len],
// Step statuses are packed 2 bits per step, 4 steps per byte, in step order.
.status = @enumFromInt(@as(u2, @truncate(step_status_bits[step_idx / 4] >> @intCast((step_idx % 4) * 2)))),
};
name_off += name_len;
}
gpa.free(step_list);
gpa.free(step_list_data);
step_list = steps;
step_list_data = duped_step_name_data;
js.hello(step_list.len, hdr.status, hdr.flags.time_report);
}
fn statusUpdateMessage(msg_bytes: []u8) Allocator.Error!void {
if (msg_bytes.len < @sizeOf(abi.StatusUpdate)) @panic("malformed StatusUpdate message");
const msg: *const abi.StatusUpdate = @ptrCast(msg_bytes[0..@sizeOf(abi.StatusUpdate)]);
js.updateBuildStatus(msg.new);
}
fn stepUpdateMessage(msg_bytes: []u8) Allocator.Error!void {
if (msg_bytes.len < @sizeOf(abi.StepUpdate)) @panic("malformed StepUpdate message");
const msg: *const abi.StepUpdate = @ptrCast(msg_bytes[0..@sizeOf(abi.StepUpdate)]);
if (msg.step_idx >= step_list.len) @panic("malformed StepUpdate message");
step_list[msg.step_idx].status = msg.bits.status;
js.updateStepStatus(msg.step_idx);
}
export fn stepName(idx: usize) String {
return .init(step_list[idx].name);
}
export fn stepStatus(idx: usize) u8 {
return @intFromEnum(step_list[idx].status);
}
export fn rebuild() void {
const msg: abi.Rebuild = .{};
const raw: []const u8 = @ptrCast(&msg);
js.sendWsMessage(raw.ptr, raw.len);
}
/// Nanoseconds passed since a server timestamp.
pub fn nsSince(server_timestamp: i64) i64 {
const ms_passed = js.timestamp() - client_base_timestamp;
const ns_passed = server_base_timestamp - server_timestamp;
return ns_passed + ms_passed * std.time.ns_per_ms;
}
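// A minimal sketch (hypothetical helper and test, not part of this commit) of the
// arithmetic `nsSince` performs, with the extern `js.timestamp()` call replaced by
// an explicit client-clock parameter so the example can run as an ordinary test:
fn nsSincePure(server_timestamp: i64, server_base_ns: i64, client_base_ms: i64, client_now_ms: i64) i64 {
    // Milliseconds elapsed on the client clock since the base timestamps were paired.
    const ms_passed = client_now_ms - client_base_ms;
    // Nanoseconds between the event and the base, measured on the server clock.
    const ns_passed = server_base_ns - server_timestamp;
    return ns_passed + ms_passed * std.time.ns_per_ms;
}
test nsSincePure {
    // The server reported 2_000_000_000 ns when the client clock read 1_000 ms.
    // 500 ms later on the client, an event stamped 1_500_000_000 ns on the server
    // is (2_000_000_000 - 1_500_000_000) + 500 * 1_000_000 = 1_000_000_000 ns old.
    try std.testing.expectEqual(@as(i64, 1_000_000_000), nsSincePure(1_500_000_000, 2_000_000_000, 1_000, 1_500));
}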
pub fn fmtEscapeHtml(unescaped: []const u8) HtmlEscaper {
return .{ .unescaped = unescaped };
}
const HtmlEscaper = struct {
unescaped: []const u8,
pub fn format(he: HtmlEscaper, w: *std.Io.Writer) !void {
for (he.unescaped) |c| switch (c) {
'&' => try w.writeAll("&amp;"),
'<' => try w.writeAll("&lt;"),
'>' => try w.writeAll("&gt;"),
'"' => try w.writeAll("&quot;"),
'\'' => try w.writeAll("&#39;"),
else => try w.writeByte(c),
};
}
};
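// A minimal usage sketch (test not part of this commit) of `fmtEscapeHtml` with the
// `{f}` specifier, which invokes `HtmlEscaper.format`, as the time report code
// further below does:
test fmtEscapeHtml {
    const escaped = try std.fmt.allocPrint(std.testing.allocator, "<code>{f}</code>", .{fmtEscapeHtml("a < b & \"c\"")});
    defer std.testing.allocator.free(escaped);
    try std.testing.expectEqualStrings("<code>a &lt; b &amp; &quot;c&quot;</code>", escaped);
}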

lib/build-web/style.css Normal file

@@ -0,0 +1,240 @@
body {
font-family: system-ui, -apple-system, Roboto, "Segoe UI", sans-serif;
color: #000000;
padding: 1em 10%;
}
ul.no-marker {
list-style-type: none;
padding-left: 0;
}
hr {
margin: 2em 0;
}
.hidden {
display: none;
}
.empty-cell {
background: #ccc;
}
table.time-stats > tbody > tr > th {
text-align: left;
}
table.time-stats > tbody > tr > td {
text-align: right;
}
details > summary {
cursor: pointer;
font-size: 1.5em;
}
.tooltip {
text-decoration: underline;
cursor: help;
}
.tooltip-content {
border-radius: 6px;
display: none;
position: absolute;
background: #fff;
border: 1px solid black;
max-width: 500px;
padding: 1em;
text-align: left;
font-weight: normal;
pointer-events: none;
}
.tooltip:hover > .tooltip-content {
display: block;
}
table {
margin: 1.0em auto 1.5em 0;
border-collapse: collapse;
}
th, td {
padding: 0.5em 1em 0.5em 1em;
border: 1px solid;
border-color: black;
}
a, button {
color: #2A6286;
}
button {
background: #eee;
cursor: pointer;
border: none;
border-radius: 3px;
padding: 0.2em 0.5em;
}
button.big-btn {
font-size: 1.3em;
}
button.linkish {
background: none;
text-decoration: underline;
padding: 0;
}
button:disabled {
color: #888;
cursor: not-allowed;
}
pre {
font-family: "Source Code Pro", monospace;
font-size: 1em;
background-color: #F5F5F5;
padding: 1em;
margin: 0;
overflow-x: auto;
}
:not(pre) > code {
white-space: break-spaces;
}
code {
font-family: "Source Code Pro", monospace;
font-size: 0.9em;
}
code a {
color: #000000;
}
kbd {
color: #000;
background-color: #fafbfc;
border-color: #d1d5da;
border-bottom-color: #c6cbd1;
box-shadow-color: #c6cbd1;
display: inline-block;
padding: 0.3em 0.2em;
font: 1.2em monospace;
line-height: 0.8em;
vertical-align: middle;
border: solid 1px;
border-radius: 3px;
box-shadow: inset 0 -1px 0;
cursor: default;
}
.status-running { color: #181; }
.status-idle { color: #444; }
.step-success { color: #181; }
.step-failure { color: #d11; }
.step-wip::before {
content: '';
position: absolute;
margin-left: -1.5em;
width: 1em;
text-align: center;
animation-name: spinner;
animation-duration: 0.5s;
animation-iteration-count: infinite;
animation-timing-function: step-start;
}
@keyframes spinner {
0% { content: '|'; }
25% { content: '/'; }
50% { content: '-'; }
75% { content: '\\'; }
100% { content: '|'; }
}
.l {
display: inline-block;
background: red;
width: 1em;
height: 1em;
border-radius: 1em;
}
.c {
background-color: green;
}
.tok-kw {
color: #333;
font-weight: bold;
}
.tok-str {
color: #d14;
}
.tok-builtin {
color: #0086b3;
}
.tok-comment {
color: #777;
font-style: italic;
}
.tok-fn {
color: #900;
font-weight: bold;
}
.tok-null {
color: #008080;
}
.tok-number {
color: #008080;
}
.tok-type {
color: #458;
font-weight: bold;
}
@media (prefers-color-scheme: dark) {
body {
background-color: #111;
color: #ddd;
}
pre {
background-color: #222;
}
a, button {
color: #88f;
}
button {
background: #333;
}
button:disabled {
color: #555;
}
code a {
color: #eee;
}
th, td {
border-color: white;
}
.empty-cell {
background: #000;
}
.tooltip-content {
background: #060606;
border-color: white;
}
.status-running { color: #90ee90; }
.status-idle { color: #bbb; }
.step-success { color: #90ee90; }
.step-failure { color: #f66; }
.l {
background-color: red;
}
.c {
background-color: green;
}
.tok-kw {
color: #eee;
}
.tok-str {
color: #2e5;
}
.tok-builtin {
color: #ff894c;
}
.tok-comment {
color: #aa7;
}
.tok-fn {
color: #B1A0F8;
}
.tok-null {
color: #ff8080;
}
.tok-number {
color: #ff8080;
}
.tok-type {
color: #68f;
}
}

lib/build-web/time_report.css Normal file

@@ -0,0 +1,43 @@
:host > details {
padding: 0.5em 1em;
background: #f2f2f2;
margin-bottom: 1.0em;
overflow-x: scroll;
}
:host > details.pending {
pointer-events: none;
background: #fafafa;
color: #666;
}
:host > details > div {
margin: 1em 2em;
overflow: scroll; /* we'll try to avoid overflow, but if it does happen, this makes sense */
}
.stats {
font-size: 1.2em;
}
details.section {
margin: 1.0em 0 0 0;
}
details.section > summary {
font-weight: bold;
}
details.section > :not(summary) {
margin-left: 2em;
}
:host > details.no-llvm .llvm-only {
display: none;
}
@media (prefers-color-scheme: dark) {
:host > details {
background: #222;
}
:host > details.pending {
background: #181818;
color: #888;
}
}
th {
max-width: 20em; /* don't let the 'file' column get crazy long */
overflow-wrap: anywhere; /* avoid overflow where possible */
}

lib/build-web/time_report.zig Normal file

@@ -0,0 +1,234 @@
const std = @import("std");
const gpa = std.heap.wasm_allocator;
const abi = std.Build.abi.time_report;
const fmtEscapeHtml = @import("root").fmtEscapeHtml;
const step_list = &@import("root").step_list;
const js = struct {
extern "time_report" fn updateGeneric(
/// The index of the step.
step_idx: u32,
// The HTML which will be used to populate the template slots.
inner_html_ptr: [*]const u8,
inner_html_len: usize,
) void;
extern "time_report" fn updateCompile(
/// The index of the step.
step_idx: u32,
// The HTML which will be used to populate the template slots.
inner_html_ptr: [*]const u8,
inner_html_len: usize,
// The HTML which will populate the <tbody> of the file table.
file_table_html_ptr: [*]const u8,
file_table_html_len: usize,
// The HTML which will populate the <tbody> of the decl table.
decl_table_html_ptr: [*]const u8,
decl_table_html_len: usize,
/// Whether the LLVM backend was used. If not, LLVM-specific statistics are hidden.
use_llvm: bool,
) void;
};
pub fn genericResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
if (msg_bytes.len != @sizeOf(abi.GenericResult)) @panic("malformed GenericResult message");
const msg: *const abi.GenericResult = @ptrCast(msg_bytes);
if (msg.step_idx >= step_list.*.len) @panic("malformed GenericResult message");
const inner_html = try std.fmt.allocPrint(gpa,
\\<code slot="step-name">{[step_name]f}</code>
\\<span slot="stat-total-time">{[stat_total_time]D}</span>
, .{
.step_name = fmtEscapeHtml(step_list.*[msg.step_idx].name),
.stat_total_time = msg.ns_total,
});
defer gpa.free(inner_html);
js.updateGeneric(msg.step_idx, inner_html.ptr, inner_html.len);
}
pub fn compileResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
const max_table_rows = 500;
if (msg_bytes.len < @sizeOf(abi.CompileResult)) @panic("malformed CompileResult message");
const hdr: *const abi.CompileResult = @ptrCast(msg_bytes[0..@sizeOf(abi.CompileResult)]);
if (hdr.step_idx >= step_list.*.len) @panic("malformed CompileResult message");
var trailing = msg_bytes[@sizeOf(abi.CompileResult)..];
const llvm_pass_timings = trailing[0..hdr.llvm_pass_timings_len];
trailing = trailing[hdr.llvm_pass_timings_len..];
const FileTimeReport = struct {
name: []const u8,
ns_sema: u64,
ns_codegen: u64,
ns_link: u64,
};
const DeclTimeReport = struct {
file_name: []const u8,
name: []const u8,
sema_count: u32,
ns_sema: u64,
ns_codegen: u64,
ns_link: u64,
};
const slowest_files = try gpa.alloc(FileTimeReport, hdr.files_len);
defer gpa.free(slowest_files);
const slowest_decls = try gpa.alloc(DeclTimeReport, hdr.decls_len);
defer gpa.free(slowest_decls);
for (slowest_files) |*file_out| {
const i = std.mem.indexOfScalar(u8, trailing, 0) orelse @panic("malformed CompileResult message");
file_out.* = .{
.name = trailing[0..i],
.ns_sema = 0,
.ns_codegen = 0,
.ns_link = 0,
};
trailing = trailing[i + 1 ..];
}
// Each decl record is a NUL-terminated name followed by file_idx: u32,
// sema_count: u32, ns_sema: u64, ns_codegen: u64, ns_link: u64 (little-endian).
for (slowest_decls) |*decl_out| {
const i = std.mem.indexOfScalar(u8, trailing, 0) orelse @panic("malformed CompileResult message");
const file_idx = std.mem.readInt(u32, trailing[i..][1..5], .little);
const sema_count = std.mem.readInt(u32, trailing[i..][5..9], .little);
const sema_ns = std.mem.readInt(u64, trailing[i..][9..17], .little);
const codegen_ns = std.mem.readInt(u64, trailing[i..][17..25], .little);
const link_ns = std.mem.readInt(u64, trailing[i..][25..33], .little);
const file = &slowest_files[file_idx];
decl_out.* = .{
.file_name = file.name,
.name = trailing[0..i],
.sema_count = sema_count,
.ns_sema = sema_ns,
.ns_codegen = codegen_ns,
.ns_link = link_ns,
};
trailing = trailing[i + 33 ..];
file.ns_sema += sema_ns;
file.ns_codegen += codegen_ns;
file.ns_link += link_ns;
}
const S = struct {
fn fileLessThan(_: void, lhs: FileTimeReport, rhs: FileTimeReport) bool {
const lhs_ns = lhs.ns_sema + lhs.ns_codegen + lhs.ns_link;
const rhs_ns = rhs.ns_sema + rhs.ns_codegen + rhs.ns_link;
return lhs_ns > rhs_ns; // flipped to sort in reverse order
}
fn declLessThan(_: void, lhs: DeclTimeReport, rhs: DeclTimeReport) bool {
//if (true) return lhs.sema_count > rhs.sema_count;
const lhs_ns = lhs.ns_sema + lhs.ns_codegen + lhs.ns_link;
const rhs_ns = rhs.ns_sema + rhs.ns_codegen + rhs.ns_link;
return lhs_ns > rhs_ns; // flipped to sort in reverse order
}
};
std.mem.sort(FileTimeReport, slowest_files, {}, S.fileLessThan);
std.mem.sort(DeclTimeReport, slowest_decls, {}, S.declLessThan);
const stats = hdr.stats;
const inner_html = try std.fmt.allocPrint(gpa,
\\<code slot="step-name">{[step_name]f}</code>
\\<span slot="stat-reachable-files">{[stat_reachable_files]d}</span>
\\<span slot="stat-imported-files">{[stat_imported_files]d}</span>
\\<span slot="stat-generic-instances">{[stat_generic_instances]d}</span>
\\<span slot="stat-inline-calls">{[stat_inline_calls]d}</span>
\\<span slot="stat-compilation-time">{[stat_compilation_time]D}</span>
\\<span slot="cpu-time-parse">{[cpu_time_parse]D}</span>
\\<span slot="cpu-time-astgen">{[cpu_time_astgen]D}</span>
\\<span slot="cpu-time-sema">{[cpu_time_sema]D}</span>
\\<span slot="cpu-time-codegen">{[cpu_time_codegen]D}</span>
\\<span slot="cpu-time-link">{[cpu_time_link]D}</span>
\\<span slot="real-time-files">{[real_time_files]D}</span>
\\<span slot="real-time-decls">{[real_time_decls]D}</span>
\\<span slot="real-time-llvm-emit">{[real_time_llvm_emit]D}</span>
\\<span slot="real-time-link-flush">{[real_time_link_flush]D}</span>
\\<pre slot="llvm-pass-timings"><code>{[llvm_pass_timings]f}</code></pre>
\\
, .{
.step_name = fmtEscapeHtml(step_list.*[hdr.step_idx].name),
.stat_reachable_files = stats.n_reachable_files,
.stat_imported_files = stats.n_imported_files,
.stat_generic_instances = stats.n_generic_instances,
.stat_inline_calls = stats.n_inline_calls,
.stat_compilation_time = hdr.ns_total,
.cpu_time_parse = stats.cpu_ns_parse,
.cpu_time_astgen = stats.cpu_ns_astgen,
.cpu_time_sema = stats.cpu_ns_sema,
.cpu_time_codegen = stats.cpu_ns_codegen,
.cpu_time_link = stats.cpu_ns_link,
.real_time_files = stats.real_ns_files,
.real_time_decls = stats.real_ns_decls,
.real_time_llvm_emit = stats.real_ns_llvm_emit,
.real_time_link_flush = stats.real_ns_link_flush,
.llvm_pass_timings = fmtEscapeHtml(llvm_pass_timings),
});
defer gpa.free(inner_html);
var file_table_html: std.ArrayListUnmanaged(u8) = .empty;
defer file_table_html.deinit(gpa);
for (slowest_files[0..@min(max_table_rows, slowest_files.len)]) |file| {
try file_table_html.writer(gpa).print(
\\<tr>
\\ <th scope="row"><code>{f}</code></th>
\\ <td>{D}</td>
\\ <td>{D}</td>
\\ <td>{D}</td>
\\</tr>
\\
, .{
fmtEscapeHtml(file.name),
file.ns_sema,
file.ns_codegen,
file.ns_link,
});
}
if (slowest_files.len > max_table_rows) {
try file_table_html.writer(gpa).print(
\\<tr><td colspan="4">{d} more rows omitted</td></tr>
\\
, .{slowest_files.len - max_table_rows});
}
var decl_table_html: std.ArrayListUnmanaged(u8) = .empty;
defer decl_table_html.deinit(gpa);
for (slowest_decls[0..@min(max_table_rows, slowest_decls.len)]) |decl| {
try decl_table_html.writer(gpa).print(
\\<tr>
\\ <th scope="row"><code>{f}</code></th>
\\ <th scope="row"><code>{f}</code></th>
\\ <td>{d}</td>
\\ <td>{D}</td>
\\ <td>{D}</td>
\\ <td>{D}</td>
\\</tr>
\\
, .{
fmtEscapeHtml(decl.file_name),
fmtEscapeHtml(decl.name),
decl.sema_count,
decl.ns_sema,
decl.ns_codegen,
decl.ns_link,
});
}
if (slowest_decls.len > max_table_rows) {
try decl_table_html.writer(gpa).print(
\\<tr><td colspan="6">{d} more rows omitted</td></tr>
\\
, .{slowest_decls.len - max_table_rows});
}
js.updateCompile(
hdr.step_idx,
inner_html.ptr,
inner_html.len,
file_table_html.items.ptr,
file_table_html.items.len,
decl_table_html.items.ptr,
decl_table_html.items.len,
hdr.flags.use_llvm,
);
}
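// For reference, a minimal sketch of the trailing-data layout assumed by the
// decl-parsing loop above: each entry is a NUL-terminated declaration name
// followed by little-endian integer fields. The helper name `appendDeclEntry`
// is illustrative only; the authoritative encoder lives on the build-runner /
// compiler side, not in this file.
fn appendDeclEntry(
    list: *std.ArrayListUnmanaged(u8),
    allocator: std.mem.Allocator,
    name: []const u8,
    file_idx: u32,
    sema_count: u32,
    ns_sema: u64,
    ns_codegen: u64,
    ns_link: u64,
) error{OutOfMemory}!void {
    try list.appendSlice(allocator, name);
    try list.append(allocator, 0); // terminator found by `indexOfScalar` in the parser
    var fields: [32]u8 = undefined;
    std.mem.writeInt(u32, fields[0..4], file_idx, .little); // index into the file table
    std.mem.writeInt(u32, fields[4..8], sema_count, .little);
    std.mem.writeInt(u64, fields[8..16], ns_sema, .little);
    std.mem.writeInt(u64, fields[16..24], ns_codegen, .little);
    std.mem.writeInt(u64, fields[24..32], ns_link, .little);
    try list.appendSlice(allocator, &fields); // parser advances by `i + 33`, where `i` is the name length
}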

View File

@ -9,7 +9,7 @@ const ArrayList = std.ArrayList;
const File = std.fs.File;
const Step = std.Build.Step;
const Watch = std.Build.Watch;
const Fuzz = std.Build.Fuzz;
const WebServer = std.Build.WebServer;
const Allocator = std.mem.Allocator;
const fatal = std.process.fatal;
const Writer = std.io.Writer;
@ -25,15 +25,16 @@ pub const std_options: std.Options = .{
};
pub fn main() !void {
// Here we use an ArenaAllocator backed by a page allocator because a build is a short-lived,
// one shot program. We don't need to waste time freeing memory and finding places to squish
// bytes into. So we free everything all at once at the very end.
var single_threaded_arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer single_threaded_arena.deinit();
// The build runner is often short-lived, but thanks to `--watch` and `--webui`, that's not
// always the case. So, we do need a true gpa for some things.
var debug_gpa_state: std.heap.DebugAllocator(.{}) = .init;
defer _ = debug_gpa_state.deinit();
const gpa = debug_gpa_state.allocator();
var thread_safe_arena: std.heap.ThreadSafeAllocator = .{
.child_allocator = single_threaded_arena.allocator(),
};
// ...but we'll back our arena by `std.heap.page_allocator` for efficiency.
var single_threaded_arena: std.heap.ArenaAllocator = .init(std.heap.page_allocator);
defer single_threaded_arena.deinit();
var thread_safe_arena: std.heap.ThreadSafeAllocator = .{ .child_allocator = single_threaded_arena.allocator() };
const arena = thread_safe_arena.allocator();
const args = try process.argsAlloc(arena);
@ -81,6 +82,7 @@ pub fn main() !void {
.query = .{},
.result = try std.zig.system.resolveTargetQuery(.{}),
},
.time_report = false,
};
graph.cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
@ -113,7 +115,7 @@ pub fn main() !void {
var watch = false;
var fuzz = false;
var debounce_interval_ms: u16 = 50;
var listen_port: u16 = 0;
var webui_listen: ?std.net.Address = null;
while (nextArg(args, &arg_idx)) |arg| {
if (mem.startsWith(u8, arg, "-Z")) {
@ -220,13 +222,13 @@ pub fn main() !void {
next_arg, @errorName(err),
});
};
} else if (mem.eql(u8, arg, "--port")) {
const next_arg = nextArg(args, &arg_idx) orelse
fatalWithHint("expected u16 after '{s}'", .{arg});
listen_port = std.fmt.parseUnsigned(u16, next_arg, 10) catch |err| {
fatal("unable to parse port '{s}' as unsigned 16-bit integer: {s}\n", .{
next_arg, @errorName(err),
});
} else if (mem.eql(u8, arg, "--webui")) {
webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
} else if (mem.startsWith(u8, arg, "--webui=")) {
const addr_str = arg["--webui=".len..];
if (std.mem.eql(u8, addr_str, "-")) fatal("web interface cannot listen on stdio", .{});
webui_listen = std.net.Address.parseIpAndPort(addr_str) catch |err| {
fatal("invalid web UI address '{s}': {s}", .{ addr_str, @errorName(err) });
};
} else if (mem.eql(u8, arg, "--debug-log")) {
const next_arg = nextArgOrFatal(args, &arg_idx);
@ -267,8 +269,16 @@ pub fn main() !void {
prominent_compile_errors = true;
} else if (mem.eql(u8, arg, "--watch")) {
watch = true;
} else if (mem.eql(u8, arg, "--time-report")) {
graph.time_report = true;
if (webui_listen == null) {
webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
}
} else if (mem.eql(u8, arg, "--fuzz")) {
fuzz = true;
if (webui_listen == null) {
webui_listen = std.net.Address.parseIp("::1", 0) catch unreachable;
}
} else if (mem.eql(u8, arg, "-fincremental")) {
graph.incremental = true;
} else if (mem.eql(u8, arg, "-fno-incremental")) {
@ -331,6 +341,10 @@ pub fn main() !void {
}
}
if (webui_listen != null and watch) fatal(
\\the build system does not yet support combining '--webui' and '--watch'; consider omitting '--watch' in favour of the web UI "Rebuild" button
, .{});
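// Illustrative invocations for the flags handled above (addresses and ports are
// examples, not defaults mandated here):
//
//   zig build --fuzz                  web UI + fuzzer on an ephemeral [::1] port
//   zig build --time-report           implies the web UI as well
//   zig build --webui=127.0.0.1:8080  pin the listen address and port explicitly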
const stderr: std.fs.File = .stderr();
const ttyconf = get_tty_conf(color, stderr);
switch (ttyconf) {
@ -394,14 +408,16 @@ pub fn main() !void {
}
var run: Run = .{
.gpa = gpa,
.max_rss = max_rss,
.max_rss_is_default = false,
.max_rss_mutex = .{},
.skip_oom_steps = skip_oom_steps,
.watch = watch,
.fuzz = fuzz,
.memory_blocked_steps = std.ArrayList(*Step).init(arena),
.step_stack = .{},
.web_server = undefined, // set after `prepare`
.memory_blocked_steps = .empty,
.step_stack = .empty,
.prominent_compile_errors = prominent_compile_errors,
.claimed_rss = 0,
@ -410,74 +426,81 @@ pub fn main() !void {
.stderr = stderr,
.thread_pool = undefined,
};
defer {
run.memory_blocked_steps.deinit(gpa);
run.step_stack.deinit(gpa);
}
if (run.max_rss == 0) {
run.max_rss = process.totalSystemMemory() catch std.math.maxInt(u64);
run.max_rss_is_default = true;
}
const gpa = arena;
prepare(gpa, arena, builder, targets.items, &run, graph.random_seed) catch |err| switch (err) {
prepare(arena, builder, targets.items, &run, graph.random_seed) catch |err| switch (err) {
error.UncleanExit => process.exit(1),
else => return err,
};
var w: Watch = if (watch and Watch.have_impl) try Watch.init() else undefined;
var w: Watch = w: {
if (!watch) break :w undefined;
if (!Watch.have_impl) fatal("--watch not yet implemented for {s}", .{@tagName(builtin.os.tag)});
break :w try .init();
};
try run.thread_pool.init(thread_pool_options);
defer run.thread_pool.deinit();
run.web_server = if (webui_listen) |listen_address| .init(.{
.gpa = gpa,
.thread_pool = &run.thread_pool,
.graph = &graph,
.all_steps = run.step_stack.keys(),
.ttyconf = run.ttyconf,
.root_prog_node = main_progress_node,
.watch = watch,
.listen_address = listen_address,
}) else null;
if (run.web_server) |*ws| {
ws.start() catch |err| fatal("failed to start web server: {s}", .{@errorName(err)});
}
rebuild: while (true) {
if (run.web_server) |*ws| ws.startBuild();
runStepNames(
gpa,
builder,
targets.items,
main_progress_node,
&run,
) catch |err| switch (err) {
error.UncleanExit => {
assert(!run.watch);
assert(!run.watch and run.web_server == null);
process.exit(1);
},
else => return err,
};
if (fuzz) {
if (builtin.single_threaded) {
fatal("--fuzz not yet implemented for single-threaded builds", .{});
}
switch (builtin.os.tag) {
// Current implementation depends on two things that need to be ported to Windows:
// * Memory-mapping to share data between the fuzzer and build runner.
// * COFF/PE support added to `std.debug.Info` (it needs a batching API for resolving
// many addresses to source locations).
.windows => fatal("--fuzz not yet implemented for {s}", .{@tagName(builtin.os.tag)}),
else => {},
}
if (@bitSizeOf(usize) != 64) {
// Current implementation depends on posix.mmap()'s second parameter, `length: usize`,
// being compatible with `std.fs.getEndPos() u64`'s return value. This is not the case
// on 32-bit platforms.
// Affects, or is affected by, issues #5185, #22523, and #22464.
fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)});
}
const listen_address = std.net.Address.parseIp("127.0.0.1", listen_port) catch unreachable;
try Fuzz.start(
gpa,
arena,
global_cache_directory,
zig_lib_directory,
zig_exe,
&run.thread_pool,
run.step_stack.keys(),
run.ttyconf,
listen_address,
main_progress_node,
);
if (run.web_server) |*web_server| {
web_server.finishBuild(.{ .fuzz = fuzz });
}
if (!watch) return cleanExit();
if (!watch and run.web_server == null) {
return cleanExit();
}
if (!Watch.have_impl) fatal("--watch not yet implemented for {s}", .{@tagName(builtin.os.tag)});
if (run.web_server) |*ws| {
assert(!watch); // fatal error after CLI parsing
while (true) switch (ws.wait()) {
.rebuild => {
for (run.step_stack.keys()) |step| {
step.state = .precheck_done;
step.reset(gpa);
}
continue :rebuild;
},
};
}
try w.update(gpa, run.step_stack.keys());
@ -491,15 +514,16 @@ pub fn main() !void {
w.dir_table.entries.len, countSubProcesses(run.step_stack.keys()),
}) catch &caption_buf;
var debouncing_node = main_progress_node.start(caption, 0);
var debounce_timeout: Watch.Timeout = .none;
while (true) switch (try w.wait(gpa, debounce_timeout)) {
var in_debounce = false;
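// Debounce: once a change is seen, keep waiting with a short timeout; the
// rebuild only triggers after `debounce_interval_ms` elapses with no further
// changes arriving (the `.timeout` branch below).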
while (true) switch (try w.wait(gpa, if (in_debounce) .{ .ms = debounce_interval_ms } else .none)) {
.timeout => {
assert(in_debounce);
debouncing_node.end();
markFailedStepsDirty(gpa, run.step_stack.keys());
continue :rebuild;
},
.dirty => if (debounce_timeout == .none) {
debounce_timeout = .{ .ms = debounce_interval_ms };
.dirty => if (!in_debounce) {
in_debounce = true;
debouncing_node.end();
debouncing_node = main_progress_node.start("Debouncing (Change Detected)", 0);
},
@ -530,13 +554,16 @@ fn countSubProcesses(all_steps: []const *Step) usize {
}
const Run = struct {
gpa: Allocator,
max_rss: u64,
max_rss_is_default: bool,
max_rss_mutex: std.Thread.Mutex,
skip_oom_steps: bool,
watch: bool,
fuzz: bool,
memory_blocked_steps: std.ArrayList(*Step),
web_server: ?WebServer,
/// Allocated into `gpa`.
memory_blocked_steps: std.ArrayListUnmanaged(*Step),
/// Allocated into `gpa`.
step_stack: std.AutoArrayHashMapUnmanaged(*Step, void),
prominent_compile_errors: bool,
thread_pool: std.Thread.Pool,
@ -547,19 +574,19 @@ const Run = struct {
stderr: File,
fn cleanExit(run: Run) void {
if (run.watch or run.fuzz) return;
if (run.watch or run.web_server != null) return;
return runner.cleanExit();
}
};
fn prepare(
gpa: Allocator,
arena: Allocator,
b: *std.Build,
step_names: []const []const u8,
run: *Run,
seed: u32,
) !void {
const gpa = run.gpa;
const step_stack = &run.step_stack;
if (step_names.len == 0) {
@ -583,7 +610,7 @@ fn prepare(
rand.shuffle(*Step, starting_steps);
for (starting_steps) |s| {
constructGraphAndCheckForDependencyLoop(b, s, &run.step_stack, rand) catch |err| switch (err) {
constructGraphAndCheckForDependencyLoop(gpa, b, s, &run.step_stack, rand) catch |err| switch (err) {
error.DependencyLoopDetected => return uncleanExit(),
else => |e| return e,
};
@ -614,12 +641,12 @@ fn prepare(
}
fn runStepNames(
gpa: Allocator,
b: *std.Build,
step_names: []const []const u8,
parent_prog_node: std.Progress.Node,
run: *Run,
) !void {
const gpa = run.gpa;
const step_stack = &run.step_stack;
const thread_pool = &run.thread_pool;
@ -675,6 +702,7 @@ fn runStepNames(
// B will be marked as dependency_failure, while A may never be queued, and thus
// remain in the initial state of precheck_done.
s.state = .dependency_failure;
if (run.web_server) |*ws| ws.updateStepStatus(s, .failure);
pending_count += 1;
},
.dependency_failure => pending_count += 1,
@ -768,7 +796,7 @@ fn runStepNames(
}
}
if (!run.watch) {
if (!run.watch and run.web_server == null) {
// Signal to parent process that we have printed compile errors. The
// parent process may choose to omit the "following command failed"
// line in this case.
@ -777,7 +805,7 @@ fn runStepNames(
}
}
if (!run.watch) return uncleanExit();
if (!run.watch and run.web_server == null) return uncleanExit();
}
const PrintNode = struct {
@ -1022,6 +1050,7 @@ fn printTreeStep(
/// when it finishes executing in `workerMakeOneStep`, it spawns next steps
/// to run in random order
fn constructGraphAndCheckForDependencyLoop(
gpa: Allocator,
b: *std.Build,
s: *Step,
step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
@ -1035,17 +1064,19 @@ fn constructGraphAndCheckForDependencyLoop(
.precheck_unstarted => {
s.state = .precheck_started;
try step_stack.ensureUnusedCapacity(b.allocator, s.dependencies.items.len);
try step_stack.ensureUnusedCapacity(gpa, s.dependencies.items.len);
// We dupe to avoid shuffling the steps in the summary, which depends
// on s.dependencies' order.
const deps = b.allocator.dupe(*Step, s.dependencies.items) catch @panic("OOM");
const deps = gpa.dupe(*Step, s.dependencies.items) catch @panic("OOM");
defer gpa.free(deps);
rand.shuffle(*Step, deps);
for (deps) |dep| {
try step_stack.put(b.allocator, dep, {});
try step_stack.put(gpa, dep, {});
try dep.dependants.append(b.allocator, s);
constructGraphAndCheckForDependencyLoop(b, dep, step_stack, rand) catch |err| {
constructGraphAndCheckForDependencyLoop(gpa, b, dep, step_stack, rand) catch |err| {
if (err == error.DependencyLoopDetected) {
std.debug.print(" {s}\n", .{s.name});
}
@ -1084,6 +1115,7 @@ fn workerMakeOneStep(
.success, .skipped => continue,
.failure, .dependency_failure, .skipped_oom => {
@atomicStore(Step.State, &s.state, .dependency_failure, .seq_cst);
if (run.web_server) |*ws| ws.updateStepStatus(s, .failure);
return;
},
.precheck_done, .running => {
@ -1109,7 +1141,7 @@ fn workerMakeOneStep(
if (new_claimed_rss > run.max_rss) {
// Running this step right now could possibly exceed the allotted RSS.
// Add this step to the queue of memory-blocked steps.
run.memory_blocked_steps.append(s) catch @panic("OOM");
run.memory_blocked_steps.append(run.gpa, s) catch @panic("OOM");
return;
}
@ -1126,10 +1158,14 @@ fn workerMakeOneStep(
const sub_prog_node = prog_node.start(s.name, 0);
defer sub_prog_node.end();
if (run.web_server) |*ws| ws.updateStepStatus(s, .wip);
const make_result = s.make(.{
.progress_node = sub_prog_node,
.thread_pool = thread_pool,
.watch = run.watch,
.web_server = if (run.web_server) |*ws| ws else null,
.gpa = run.gpa,
});
// No matter the result, we want to display error/warning messages.
@ -1141,21 +1177,24 @@ fn workerMakeOneStep(
if (show_error_msgs or show_compile_errors or show_stderr) {
const bw = std.debug.lockStderrWriter(&stdio_buffer_allocation);
defer std.debug.unlockStderrWriter();
const gpa = b.allocator;
printErrorMessages(gpa, s, .{ .ttyconf = run.ttyconf }, bw, run.prominent_compile_errors) catch {};
printErrorMessages(run.gpa, s, .{ .ttyconf = run.ttyconf }, bw, run.prominent_compile_errors) catch {};
}
handle_result: {
if (make_result) |_| {
@atomicStore(Step.State, &s.state, .success, .seq_cst);
if (run.web_server) |*ws| ws.updateStepStatus(s, .success);
} else |err| switch (err) {
error.MakeFailed => {
@atomicStore(Step.State, &s.state, .failure, .seq_cst);
if (run.web_server) |*ws| ws.updateStepStatus(s, .failure);
std.Progress.setStatus(.failure_working);
break :handle_result;
},
error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .seq_cst),
error.MakeSkipped => {
@atomicStore(Step.State, &s.state, .skipped, .seq_cst);
if (run.web_server) |*ws| ws.updateStepStatus(s, .success);
},
}
// Successful completion of a step, so we queue up its dependants as well.
@ -1255,10 +1294,10 @@ pub fn printErrorMessages(
}
fn printSteps(builder: *std.Build, w: *Writer) !void {
const allocator = builder.allocator;
const arena = builder.graph.arena;
for (builder.top_level_steps.values()) |top_level_step| {
const name = if (&top_level_step.step == builder.default_step)
try fmt.allocPrint(allocator, "{s} (default)", .{top_level_step.step.name})
try fmt.allocPrint(arena, "{s} (default)", .{top_level_step.step.name})
else
top_level_step.step.name;
try w.print(" {s:<28} {s}\n", .{ name, top_level_step.description });
@ -1319,8 +1358,11 @@ fn printUsage(b: *std.Build, w: *Writer) !void {
\\ needed (Default) Lazy dependencies are fetched as needed
\\ all Lazy dependencies are always fetched
\\ --watch Continuously rebuild when source files are modified
\\ --fuzz Continuously search for unit test failures
\\ --debounce <ms> Delay before rebuilding after changed file detected
\\ --webui[=ip] Enable the web interface on the given IP address
\\ --fuzz Continuously search for unit test failures (implies '--webui')
\\ --time-report Force full rebuild and provide detailed information on
\\ compilation time of Zig source code (implies '--webui')
\\ -fincremental Enable incremental compilation
\\ -fno-incremental Disable incremental compilation
\\
@ -1328,7 +1370,7 @@ fn printUsage(b: *std.Build, w: *Writer) !void {
\\
);
const arena = b.allocator;
const arena = b.graph.arena;
if (b.available_options_list.items.len == 0) {
try w.print(" (none)\n", .{});
} else {

View File

@ -3,7 +3,7 @@ const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fatal = std.process.fatal;
const SeenPcsHeader = std.Build.Fuzz.abi.SeenPcsHeader;
const SeenPcsHeader = std.Build.abi.fuzz.SeenPcsHeader;
pub const std_options = std.Options{
.logFn = logOverride,

View File

@ -1,161 +0,0 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>Zig Build System Interface</title>
<style type="text/css">
body {
font-family: system-ui, -apple-system, Roboto, "Segoe UI", sans-serif;
color: #000000;
}
.hidden {
display: none;
}
table {
width: 100%;
}
a {
color: #2A6286;
}
pre{
font-family:"Source Code Pro",monospace;
font-size:1em;
background-color:#F5F5F5;
padding: 1em;
margin: 0;
overflow-x: auto;
}
:not(pre) > code {
white-space: break-spaces;
}
code {
font-family:"Source Code Pro",monospace;
font-size: 0.9em;
}
code a {
color: #000000;
}
kbd {
color: #000;
background-color: #fafbfc;
border-color: #d1d5da;
border-bottom-color: #c6cbd1;
box-shadow-color: #c6cbd1;
display: inline-block;
padding: 0.3em 0.2em;
font: 1.2em monospace;
line-height: 0.8em;
vertical-align: middle;
border: solid 1px;
border-radius: 3px;
box-shadow: inset 0 -1px 0;
cursor: default;
}
.l {
display: inline-block;
background: red;
width: 1em;
height: 1em;
border-radius: 1em;
}
.c {
background-color: green;
}
.tok-kw {
color: #333;
font-weight: bold;
}
.tok-str {
color: #d14;
}
.tok-builtin {
color: #0086b3;
}
.tok-comment {
color: #777;
font-style: italic;
}
.tok-fn {
color: #900;
font-weight: bold;
}
.tok-null {
color: #008080;
}
.tok-number {
color: #008080;
}
.tok-type {
color: #458;
font-weight: bold;
}
@media (prefers-color-scheme: dark) {
body {
background-color: #111;
color: #bbb;
}
pre {
background-color: #222;
color: #ccc;
}
a {
color: #88f;
}
code a {
color: #ccc;
}
.l {
background-color: red;
}
.c {
background-color: green;
}
.tok-kw {
color: #eee;
}
.tok-str {
color: #2e5;
}
.tok-builtin {
color: #ff894c;
}
.tok-comment {
color: #aa7;
}
.tok-fn {
color: #B1A0F8;
}
.tok-null {
color: #ff8080;
}
.tok-number {
color: #ff8080;
}
.tok-type {
color: #68f;
}
}
</style>
</head>
<body>
<p id="status">Loading JavaScript...</p>
<div id="sectStats" class="hidden">
<ul>
<li>Total Runs: <span id="statTotalRuns"></span></li>
<li>Unique Runs: <span id="statUniqueRuns"></span></li>
<li>Speed (Runs/Second): <span id="statSpeed"></span></li>
<li>Coverage: <span id="statCoverage"></span></li>
<li>Entry Points: <ul id="entryPointsList"></ul></li>
</ul>
</div>
<div id="sectSource" class="hidden">
<h2>Source Code</h2>
<pre><code id="sourceText"></code></pre>
</div>
<script src="main.js"></script>
</body>
</html>

View File

@ -1,252 +0,0 @@
(function() {
const domStatus = document.getElementById("status");
const domSectSource = document.getElementById("sectSource");
const domSectStats = document.getElementById("sectStats");
const domSourceText = document.getElementById("sourceText");
const domStatTotalRuns = document.getElementById("statTotalRuns");
const domStatUniqueRuns = document.getElementById("statUniqueRuns");
const domStatSpeed = document.getElementById("statSpeed");
const domStatCoverage = document.getElementById("statCoverage");
const domEntryPointsList = document.getElementById("entryPointsList");
let wasm_promise = fetch("main.wasm");
let sources_promise = fetch("sources.tar").then(function(response) {
if (!response.ok) throw new Error("unable to download sources");
return response.arrayBuffer();
});
var wasm_exports = null;
var curNavSearch = null;
var curNavLocation = null;
const text_decoder = new TextDecoder();
const text_encoder = new TextEncoder();
domStatus.textContent = "Loading WebAssembly...";
WebAssembly.instantiateStreaming(wasm_promise, {
js: {
log: function(ptr, len) {
const msg = decodeString(ptr, len);
console.log(msg);
},
panic: function (ptr, len) {
const msg = decodeString(ptr, len);
throw new Error("panic: " + msg);
},
timestamp: function () {
return BigInt(new Date());
},
emitSourceIndexChange: onSourceIndexChange,
emitCoverageUpdate: onCoverageUpdate,
emitEntryPointsUpdate: renderStats,
},
}).then(function(obj) {
wasm_exports = obj.instance.exports;
window.wasm = obj; // for debugging
domStatus.textContent = "Loading sources tarball...";
sources_promise.then(function(buffer) {
domStatus.textContent = "Parsing sources...";
const js_array = new Uint8Array(buffer);
const ptr = wasm_exports.alloc(js_array.length);
const wasm_array = new Uint8Array(wasm_exports.memory.buffer, ptr, js_array.length);
wasm_array.set(js_array);
wasm_exports.unpack(ptr, js_array.length);
window.addEventListener('popstate', onPopState, false);
onHashChange(null);
domStatus.textContent = "Waiting for server to send source location metadata...";
connectWebSocket();
});
});
function onPopState(ev) {
onHashChange(ev.state);
}
function onHashChange(state) {
history.replaceState({}, "");
navigate(location.hash);
if (state == null) window.scrollTo({top: 0});
}
function navigate(location_hash) {
domSectSource.classList.add("hidden");
curNavLocation = null;
curNavSearch = null;
if (location_hash.length > 1 && location_hash[0] === '#') {
const query = location_hash.substring(1);
const qpos = query.indexOf("?");
let nonSearchPart;
if (qpos === -1) {
nonSearchPart = query;
} else {
nonSearchPart = query.substring(0, qpos);
curNavSearch = decodeURIComponent(query.substring(qpos + 1));
}
if (nonSearchPart[0] == "l") {
curNavLocation = +nonSearchPart.substring(1);
renderSource(curNavLocation);
}
}
render();
}
function connectWebSocket() {
const host = document.location.host;
const pathname = document.location.pathname;
const isHttps = document.location.protocol === 'https:';
const match = host.match(/^(.+):(\d+)$/);
const defaultPort = isHttps ? 443 : 80;
const port = match ? parseInt(match[2], 10) : defaultPort;
const hostName = match ? match[1] : host;
const wsProto = isHttps ? "wss:" : "ws:";
const wsUrl = wsProto + '//' + hostName + ':' + port + pathname;
ws = new WebSocket(wsUrl);
ws.binaryType = "arraybuffer";
ws.addEventListener('message', onWebSocketMessage, false);
ws.addEventListener('error', timeoutThenCreateNew, false);
ws.addEventListener('close', timeoutThenCreateNew, false);
ws.addEventListener('open', onWebSocketOpen, false);
}
function onWebSocketOpen() {
//console.log("web socket opened");
}
function onWebSocketMessage(ev) {
wasmOnMessage(ev.data);
}
function timeoutThenCreateNew() {
ws.removeEventListener('message', onWebSocketMessage, false);
ws.removeEventListener('error', timeoutThenCreateNew, false);
ws.removeEventListener('close', timeoutThenCreateNew, false);
ws.removeEventListener('open', onWebSocketOpen, false);
ws = null;
setTimeout(connectWebSocket, 1000);
}
function wasmOnMessage(data) {
const jsArray = new Uint8Array(data);
const ptr = wasm_exports.message_begin(jsArray.length);
const wasmArray = new Uint8Array(wasm_exports.memory.buffer, ptr, jsArray.length);
wasmArray.set(jsArray);
wasm_exports.message_end();
}
function onSourceIndexChange() {
render();
if (curNavLocation != null) renderSource(curNavLocation);
}
function onCoverageUpdate() {
renderStats();
renderCoverage();
}
function render() {
domStatus.classList.add("hidden");
}
function renderStats() {
const totalRuns = wasm_exports.totalRuns();
const uniqueRuns = wasm_exports.uniqueRuns();
const totalSourceLocations = wasm_exports.totalSourceLocations();
const coveredSourceLocations = wasm_exports.coveredSourceLocations();
domStatTotalRuns.innerText = totalRuns;
domStatUniqueRuns.innerText = uniqueRuns + " (" + percent(uniqueRuns, totalRuns) + "%)";
domStatCoverage.innerText = coveredSourceLocations + " / " + totalSourceLocations + " (" + percent(coveredSourceLocations, totalSourceLocations) + "%)";
domStatSpeed.innerText = wasm_exports.totalRunsPerSecond().toFixed(0);
const entryPoints = unwrapInt32Array(wasm_exports.entryPoints());
resizeDomList(domEntryPointsList, entryPoints.length, "<li></li>");
for (let i = 0; i < entryPoints.length; i += 1) {
const liDom = domEntryPointsList.children[i];
liDom.innerHTML = unwrapString(wasm_exports.sourceLocationLinkHtml(entryPoints[i]));
}
domSectStats.classList.remove("hidden");
}
function renderCoverage() {
if (curNavLocation == null) return;
const sourceLocationIndex = curNavLocation;
for (let i = 0; i < domSourceText.children.length; i += 1) {
const childDom = domSourceText.children[i];
if (childDom.id != null && childDom.id[0] == "l") {
childDom.classList.add("l");
childDom.classList.remove("c");
}
}
const coveredList = unwrapInt32Array(wasm_exports.sourceLocationFileCoveredList(sourceLocationIndex));
for (let i = 0; i < coveredList.length; i += 1) {
document.getElementById("l" + coveredList[i]).classList.add("c");
}
}
function resizeDomList(listDom, desiredLen, templateHtml) {
for (let i = listDom.childElementCount; i < desiredLen; i += 1) {
listDom.insertAdjacentHTML('beforeend', templateHtml);
}
while (desiredLen < listDom.childElementCount) {
listDom.removeChild(listDom.lastChild);
}
}
function percent(a, b) {
return ((Number(a) / Number(b)) * 100).toFixed(1);
}
function renderSource(sourceLocationIndex) {
const pathName = unwrapString(wasm_exports.sourceLocationPath(sourceLocationIndex));
if (pathName.length === 0) return;
const h2 = domSectSource.children[0];
h2.innerText = pathName;
domSourceText.innerHTML = unwrapString(wasm_exports.sourceLocationFileHtml(sourceLocationIndex));
domSectSource.classList.remove("hidden");
// Empirically, Firefox needs this requestAnimationFrame in order for the scrollIntoView to work.
requestAnimationFrame(function() {
const slDom = document.getElementById("l" + sourceLocationIndex);
if (slDom != null) slDom.scrollIntoView({
behavior: "smooth",
block: "center",
});
});
}
function decodeString(ptr, len) {
if (len === 0) return "";
return text_decoder.decode(new Uint8Array(wasm_exports.memory.buffer, ptr, len));
}
function unwrapInt32Array(bigint) {
const ptr = Number(bigint & 0xffffffffn);
const len = Number(bigint >> 32n);
if (len === 0) return new Uint32Array();
return new Uint32Array(wasm_exports.memory.buffer, ptr, len);
}
function setInputString(s) {
const jsArray = text_encoder.encode(s);
const len = jsArray.length;
const ptr = wasm_exports.set_input_string(len);
const wasmArray = new Uint8Array(wasm_exports.memory.buffer, ptr, len);
wasmArray.set(jsArray);
}
function unwrapString(bigint) {
const ptr = Number(bigint & 0xffffffffn);
const len = Number(bigint >> 32n);
return decodeString(ptr, len);
}
})();

Binary file not shown.

View File

@ -837,12 +837,15 @@ typedef struct
#define NT_ARM_ZT 0x40d /* ARM SME ZT registers. */
#define NT_ARM_FPMR 0x40e /* ARM floating point mode register. */
#define NT_ARM_POE 0x40f /* ARM POE registers. */
#define NT_ARM_GCS 0x410 /* ARM GCS state. */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note. */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers. */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode. */
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers. */
#define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */
#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
#define NT_RISCV_TAGGED_ADDR_CTRL 0x902 /* RISC-V tagged
address control */
#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers. */
#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and
status registers. */
@ -2906,19 +2909,6 @@ enum
#define R_AARCH64_NONE 0 /* No relocation. */
/* ILP32 AArch64 relocs. */
#define R_AARCH64_P32_ABS32 1 /* Direct 32 bit. */
#define R_AARCH64_P32_COPY 180 /* Copy symbol at runtime. */
#define R_AARCH64_P32_GLOB_DAT 181 /* Create GOT entry. */
#define R_AARCH64_P32_JUMP_SLOT 182 /* Create PLT entry. */
#define R_AARCH64_P32_RELATIVE 183 /* Adjust by program base. */
#define R_AARCH64_P32_TLS_DTPMOD 184 /* Module number, 32 bit. */
#define R_AARCH64_P32_TLS_DTPREL 185 /* Module-relative offset, 32 bit. */
#define R_AARCH64_P32_TLS_TPREL 186 /* TP-relative offset, 32 bit. */
#define R_AARCH64_P32_TLSDESC 187 /* TLS Descriptor. */
#define R_AARCH64_P32_IRELATIVE 188 /* STT_GNU_IFUNC relocation. */
/* LP64 AArch64 relocs. */
#define R_AARCH64_ABS64 257 /* Direct 64 bit. */
#define R_AARCH64_ABS32 258 /* Direct 32 bit. */
#define R_AARCH64_ABS16 259 /* Direct 16-bit. */
@ -4091,6 +4081,7 @@ enum
#define R_RISCV_TLS_DTPREL64 9
#define R_RISCV_TLS_TPREL32 10
#define R_RISCV_TLS_TPREL64 11
#define R_RISCV_TLSDESC 12
#define R_RISCV_BRANCH 16
#define R_RISCV_JAL 17
#define R_RISCV_CALL 18
@ -4116,16 +4107,10 @@ enum
#define R_RISCV_SUB16 38
#define R_RISCV_SUB32 39
#define R_RISCV_SUB64 40
#define R_RISCV_GNU_VTINHERIT 41
#define R_RISCV_GNU_VTENTRY 42
#define R_RISCV_GOT32_PCREL 41
#define R_RISCV_ALIGN 43
#define R_RISCV_RVC_BRANCH 44
#define R_RISCV_RVC_JUMP 45
#define R_RISCV_RVC_LUI 46
#define R_RISCV_GPREL_I 47
#define R_RISCV_GPREL_S 48
#define R_RISCV_TPREL_I 49
#define R_RISCV_TPREL_S 50
#define R_RISCV_RELAX 51
#define R_RISCV_SUB6 52
#define R_RISCV_SET6 53
@ -4137,8 +4122,12 @@ enum
#define R_RISCV_PLT32 59
#define R_RISCV_SET_ULEB128 60
#define R_RISCV_SUB_ULEB128 61
#define R_RISCV_TLSDESC_HI20 62
#define R_RISCV_TLSDESC_LOAD_LO12 63
#define R_RISCV_TLSDESC_ADD_LO12 64
#define R_RISCV_TLSDESC_CALL 65
#define R_RISCV_NUM 62
#define R_RISCV_NUM 66
/* RISC-V specific values for the st_other field. */
#define STO_RISCV_VARIANT_CC 0x80 /* Function uses variant calling
@ -4147,7 +4136,7 @@ enum
/* RISC-V specific values for the sh_type field. */
#define SHT_RISCV_ATTRIBUTES (SHT_LOPROC + 3)
/* RISC-V specific values for the p_type field. */
/* RISC-V specific values for the p_type field (deprecated). */
#define PT_RISCV_ATTRIBUTES (PT_LOPROC + 3)
/* RISC-V specific values for the d_tag field. */

View File

@ -15,6 +15,19 @@
# define ELF_NOTE_NEXT_OFFSET(namesz, descsz, align) \
ALIGN_UP (ELF_NOTE_DESC_OFFSET ((namesz), (align)) + (descsz), (align))
# ifdef HIDDEN_VAR_NEEDS_DYNAMIC_RELOC
# define DL_ADDRESS_WITHOUT_RELOC(expr) (expr)
# else
/* Evaluate EXPR without run-time relocation for it. EXPR should be an
array, an address of an object, or a string literal. */
# define DL_ADDRESS_WITHOUT_RELOC(expr) \
({ \
__auto_type _result = (expr); \
asm ("" : "+r" (_result)); \
_result; \
})
# endif
/* Some information which is not meant for the public and therefore not
in <elf.h>. */
# include <dl-dtprocnum.h>

View File

@ -155,7 +155,7 @@
extern __typeof (name) aliasname __attribute__ ((weak, alias (#name))) \
__attribute_copy__ (name);
/* Zig patch. weak_hidden_alias was removed from glibc v2.36 (v2.37?), Zig
/* zig patch: weak_hidden_alias was removed from glibc v2.36 (v2.37?), Zig
needs it for the v2.32 and earlier {f,l,}stat wrappers, so only include
in this header for 2.32 and earlier. */
#if (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 32) || __GLIBC__ < 2
@ -220,7 +220,7 @@
#define __make_section_unallocated(section_string) \
asm (".section " section_string "\n\t.previous");
/* Tacking on "\n\t#" to the section name makes gcc put it's bogus
/* Tacking on "\n\t#" to the section name makes gcc put its bogus
section attributes on what looks like a comment to the assembler. */
#ifdef HAVE_SECTION_QUOTES
# define __sec_comment "\"\n\t#\""
@ -280,7 +280,7 @@ for linking")
/*
*/
#ifdef HAVE_GNU_RETAIN
@ -807,7 +807,7 @@ for linking")
#define libm_ifunc_init()
#define libm_ifunc(name, expr) \
__ifunc (name, name, expr, void, libm_ifunc_init)
/* These macros facilitate sharing source files with gnulib.
They are here instead of sys/cdefs.h because they should not be

View File

@ -368,6 +368,21 @@ struct abort_msg_s
extern struct abort_msg_s *__abort_msg;
libc_hidden_proto (__abort_msg)
enum readonly_error_type
{
readonly_noerror,
readonly_area_writable,
readonly_procfs_inaccessible,
readonly_procfs_open_fail,
};
extern enum readonly_error_type __readonly_area (const void *ptr,
size_t size)
attribute_hidden;
extern enum readonly_error_type __readonly_area_fallback (const void *ptr,
size_t size)
attribute_hidden;
# if IS_IN (rtld)
extern __typeof (unsetenv) unsetenv attribute_hidden;
extern __typeof (__strtoul_internal) __strtoul_internal attribute_hidden;

View File

@ -168,7 +168,7 @@ typedef __pid_t pid_t;
#endif
/* fcntl was a simple symbol until glibc 2.27 inclusive. glibc 2.28 onwards
/* zig patch: fcntl was a simple symbol until glibc 2.27 inclusive. glibc 2.28 onwards
* re-defines it to fcntl64 (via #define) if _FILE_OFFSET_BITS == 64. */
#if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 28) || __GLIBC__ > 2
/* Do the file control operation described by CMD on FD.
@ -288,16 +288,17 @@ extern int creat64 (const char *__file, mode_t __mode) __nonnull ((1));
# define F_TEST 3 /* Test a region for other processes locks. */
# ifndef __USE_FILE_OFFSET64
extern int lockf (int __fd, int __cmd, off_t __len);
extern int lockf (int __fd, int __cmd, off_t __len) __wur;
# else
# ifdef __REDIRECT
extern int __REDIRECT (lockf, (int __fd, int __cmd, __off64_t __len), lockf64);
extern int __REDIRECT (lockf, (int __fd, int __cmd, __off64_t __len),
lockf64) __wur;
# else
# define lockf lockf64
# endif
# endif
# ifdef __USE_LARGEFILE64
extern int lockf64 (int __fd, int __cmd, off64_t __len);
extern int lockf64 (int __fd, int __cmd, off64_t __len) __wur;
# endif
#endif

View File

@ -217,7 +217,7 @@ typedef int __sig_atomic_t;
/* Seconds since the Epoch, visible to user code when time_t is too
narrow only for consistency with the old way of widening too-narrow
types. User code should never use __time64_t. */
/* Zig patch: Don't check __LIBC here because it breaks fstatat.c on x86. */
/* zig patch: Don't check __LIBC here because it breaks fstatat.c on x86. */
#if __TIMESIZE == 64
# define __time64_t __time_t
#elif __TIMESIZE != 64

View File

@ -985,6 +985,12 @@ __extension__ extern long long int llabs (long long int __x)
__THROW __attribute__ ((__const__)) __wur;
#endif
#if __GLIBC_USE (ISOC2Y)
extern unsigned int uabs (int __x) __THROW __attribute__ ((__const__)) __wur;
extern unsigned long int ulabs (long int __x) __THROW __attribute__ ((__const__)) __wur;
__extension__ extern unsigned long long int ullabs (long long int __x)
__THROW __attribute__ ((__const__)) __wur;
#endif
/* Return the `div_t', `ldiv_t' or `lldiv_t' representation
of the value of NUMER over DENOM. */

View File

@ -21,23 +21,13 @@
#include <bits/endian.h>
#ifdef __ILP32__
# define __SIZEOF_PTHREAD_ATTR_T 32
# define __SIZEOF_PTHREAD_MUTEX_T 32
# define __SIZEOF_PTHREAD_MUTEXATTR_T 4
# define __SIZEOF_PTHREAD_CONDATTR_T 4
# define __SIZEOF_PTHREAD_RWLOCK_T 48
# define __SIZEOF_PTHREAD_BARRIER_T 20
# define __SIZEOF_PTHREAD_BARRIERATTR_T 4
#else
# define __SIZEOF_PTHREAD_ATTR_T 64
# define __SIZEOF_PTHREAD_MUTEX_T 48
# define __SIZEOF_PTHREAD_MUTEXATTR_T 8
# define __SIZEOF_PTHREAD_CONDATTR_T 8
# define __SIZEOF_PTHREAD_RWLOCK_T 56
# define __SIZEOF_PTHREAD_BARRIER_T 32
# define __SIZEOF_PTHREAD_BARRIERATTR_T 8
#endif
#define __SIZEOF_PTHREAD_ATTR_T 64
#define __SIZEOF_PTHREAD_MUTEX_T 48
#define __SIZEOF_PTHREAD_MUTEXATTR_T 8
#define __SIZEOF_PTHREAD_CONDATTR_T 8
#define __SIZEOF_PTHREAD_RWLOCK_T 56
#define __SIZEOF_PTHREAD_BARRIER_T 32
#define __SIZEOF_PTHREAD_BARRIERATTR_T 8
#define __SIZEOF_PTHREAD_COND_T 48
#define __SIZEOF_PTHREAD_RWLOCKATTR_T 8

View File

@ -54,8 +54,8 @@ _start:
mov x5, x0
/* Load argc and a pointer to argv */
ldr PTR_REG (1), [sp, #0]
add x2, sp, #PTR_SIZE
ldr x1, [sp, #0]
add x2, sp, 8
/* Setup stack limit in argument register */
mov x6, sp
@ -63,13 +63,13 @@ _start:
#ifdef PIC
# ifdef SHARED
adrp x0, :got:main
ldr PTR_REG (0), [x0, #:got_lo12:main]
ldr x0, [x0, #:got_lo12:main]
adrp x3, :got:__libc_csu_init
ldr PTR_REG (3), [x3, #:got_lo12:__libc_csu_init]
ldr x3, [x3, #:got_lo12:__libc_csu_init]
adrp x4, :got:__libc_csu_fini
ldr PTR_REG (4), [x4, #:got_lo12:__libc_csu_fini]
ldr x4, [x4, #:got_lo12:__libc_csu_fini]
# else
adrp x0, __wrap_main
add x0, x0, :lo12:__wrap_main
@ -80,9 +80,18 @@ _start:
# endif
#else
/* Set up the other arguments in registers */
MOVL (0, main)
MOVL (3, __libc_csu_init)
MOVL (4, __libc_csu_fini)
movz x0, :abs_g3:main
movk x0, :abs_g2_nc:main
movk x0, :abs_g1_nc:main
movk x0, :abs_g0_nc:main
movz x3, :abs_g3:__libc_csu_init
movk x3, :abs_g2_nc:__libc_csu_init
movk x3, :abs_g1_nc:__libc_csu_init
movk x3, :abs_g0_nc:__libc_csu_init
movz x4, :abs_g3:__libc_csu_fini
movk x4, :abs_g2_nc:__libc_csu_fini
movk x4, :abs_g1_nc:__libc_csu_fini
movk x4, :abs_g0_nc:__libc_csu_fini
#endif
/* __libc_start_main (main, argc, argv, init, fini, rtld_fini,

View File

@ -70,8 +70,8 @@ ENTRY(_start)
mov x5, x0
/* Load argc and a pointer to argv */
ldr PTR_REG (1), [sp, #0]
add x2, sp, #PTR_SIZE
ldr x1, [sp, #0]
add x2, sp, 8
/* Setup stack limit in argument register */
mov x6, sp
@ -79,14 +79,16 @@ ENTRY(_start)
#ifdef PIC
# ifdef SHARED
adrp x0, :got:main
ldr PTR_REG (0), [x0, #:got_lo12:main]
ldr x0, [x0, #:got_lo12:main]
# else
adrp x0, __wrap_main
add x0, x0, :lo12:__wrap_main
# endif
#else
/* Set up the other arguments in registers */
MOVL (0, main)
movz x0, :abs_g3:main
movk x0, :abs_g2_nc:main
movk x0, :abs_g1_nc:main
movk x0, :abs_g0_nc:main
#endif
mov x3, #0 /* Used to be init. */
mov x4, #0 /* Used to be fini. */
@ -106,7 +108,7 @@ ENTRY(_start)
because crt1.o and rcrt1.o share code and the later must avoid the
use of GOT relocations before __libc_start_main is called. */
__wrap_main:
BTI_C
bti c
b main
#endif
END(_start)

View File

@ -21,59 +21,15 @@
#include <sysdeps/generic/sysdep.h>
#ifdef __LP64__
# define AARCH64_R(NAME) R_AARCH64_ ## NAME
# define PTR_REG(n) x##n
# define PTR_LOG_SIZE 3
# define PTR_ARG(n)
# define SIZE_ARG(n)
#else
# define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
# define PTR_REG(n) w##n
# define PTR_LOG_SIZE 2
# define PTR_ARG(n) mov w##n, w##n
# define SIZE_ARG(n) mov w##n, w##n
#endif
#define PTR_SIZE (1<<PTR_LOG_SIZE)
#ifndef __ASSEMBLER__
/* Strip pointer authentication code from pointer p. */
static inline void *
strip_pac (void *p)
{
register void *ra asm ("x30") = (p);
asm ("hint 7 // xpaclri" : "+r"(ra));
return ra;
}
/* This is needed when glibc is built with -mbranch-protection=pac-ret
with a gcc that is affected by PR target/94891. */
# if HAVE_AARCH64_PAC_RET
# undef RETURN_ADDRESS
# define RETURN_ADDRESS(n) strip_pac (__builtin_return_address (n))
# endif
#endif
#ifdef __ASSEMBLER__
/* CFI directive for return address. */
#define cfi_negate_ra_state .cfi_negate_ra_state
/* Syntactic details of assembler. */
#define ASM_SIZE_DIRECTIVE(name) .size name,.-name
/* Branch Target Identitication support. */
#if HAVE_AARCH64_BTI
# define BTI_C hint 34
# define BTI_J hint 36
#else
# define BTI_C nop
# define BTI_J nop
#endif
/* Return address signing support (pac-ret). */
#define PACIASP hint 25
#define AUTIASP hint 29
/* Guarded Control Stack support. */
#define CHKFEAT_X16 hint 40
#define MRS_GCSPR(x) mrs x, s3_3_c2_c5_1
@ -103,11 +59,7 @@ strip_pac (void *p)
/* Add GNU property note with the supported features to all asm code
where sysdep.h is included. */
#if HAVE_AARCH64_BTI && HAVE_AARCH64_PAC_RET
GNU_PROPERTY (FEATURE_1_AND, FEATURE_1_BTI|FEATURE_1_PAC|FEATURE_1_GCS)
#elif HAVE_AARCH64_BTI
GNU_PROPERTY (FEATURE_1_AND, FEATURE_1_BTI|FEATURE_1_GCS)
#endif
/* Define an entry point visible from C. */
#define ENTRY(name) \
@ -116,7 +68,7 @@ GNU_PROPERTY (FEATURE_1_AND, FEATURE_1_BTI|FEATURE_1_GCS)
.p2align 6; \
C_LABEL(name) \
cfi_startproc; \
BTI_C; \
bti c; \
CALL_MCOUNT
/* Define an entry point visible from C. */
@ -126,7 +78,7 @@ GNU_PROPERTY (FEATURE_1_AND, FEATURE_1_BTI|FEATURE_1_GCS)
.p2align align; \
C_LABEL(name) \
cfi_startproc; \
BTI_C; \
bti c; \
CALL_MCOUNT
/* Define an entry point visible from C with a specified alignment and
@ -143,7 +95,7 @@ GNU_PROPERTY (FEATURE_1_AND, FEATURE_1_BTI|FEATURE_1_GCS)
.endr; \
C_LABEL(name) \
cfi_startproc; \
BTI_C; \
bti c; \
CALL_MCOUNT
#undef END
@ -195,33 +147,6 @@ GNU_PROPERTY (FEATURE_1_AND, FEATURE_1_BTI|FEATURE_1_GCS)
# define L(name) .L##name
#endif
/* Load or store to/from a pc-relative EXPR into/from R, using T.
Note R and T are register numbers and not register names. */
#define LDST_PCREL(OP, R, T, EXPR) \
adrp x##T, EXPR; \
OP PTR_REG (R), [x##T, #:lo12:EXPR]; \
/* Load or store to/from a got-relative EXPR into/from R, using T.
Note R and T are register numbers and not register names. */
#define LDST_GLOBAL(OP, R, T, EXPR) \
adrp x##T, :got:EXPR; \
ldr PTR_REG (T), [x##T, #:got_lo12:EXPR]; \
OP PTR_REG (R), [x##T];
/* Load an immediate into R.
Note R is a register number and not a register name. */
#ifdef __LP64__
# define MOVL(R, NAME) \
movz PTR_REG (R), #:abs_g3:NAME; \
movk PTR_REG (R), #:abs_g2_nc:NAME; \
movk PTR_REG (R), #:abs_g1_nc:NAME; \
movk PTR_REG (R), #:abs_g0_nc:NAME;
#else
# define MOVL(R, NAME) \
movz PTR_REG (R), #:abs_g1:NAME; \
movk PTR_REG (R), #:abs_g0_nc:NAME;
#endif
/* Since C identifiers are not normally prefixed with an underscore
on this system, the asm identifier `syscall_error' intrudes on the
C name space. Make sure we use an innocuous name. */

View File

@ -45,6 +45,7 @@
# define cfi_adjust_cfa_offset(off) .cfi_adjust_cfa_offset off
# define cfi_offset(reg, off) .cfi_offset reg, off
# define cfi_rel_offset(reg, off) .cfi_rel_offset reg, off
# define cfi_val_offset(reg, off) .cfi_val_offset reg, off
# define cfi_register(r1, r2) .cfi_register r1, r2
# define cfi_return_column(reg) .cfi_return_column reg
# define cfi_restore(reg) .cfi_restore reg
@ -74,6 +75,8 @@
".cfi_offset " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off)
# define CFI_REL_OFFSET(reg, off) \
".cfi_rel_offset " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off)
# define CFI_VAL_OFFSET(reg, off) \
".cfi_val_offset " CFI_STRINGIFY(reg) "," CFI_STRINGIFY(off)
# define CFI_REGISTER(r1, r2) \
".cfi_register " CFI_STRINGIFY(r1) "," CFI_STRINGIFY(r2)
# define CFI_RETURN_COLUMN(reg) \

View File

@ -75,7 +75,6 @@
extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
const pthread_mutexattr_t *__mutex_attr);
extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);
libc_hidden_proto (__pthread_mutex_destroy)
@ -91,75 +90,47 @@ libc_hidden_proto (__pthread_mutexattr_init)
extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);
libc_hidden_proto (__pthread_mutexattr_destroy)
extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
int __kind);
extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
const pthread_rwlockattr_t *__attr);
libc_hidden_proto (__pthread_rwlock_init)
extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);
libc_hidden_proto (__pthread_rwlock_destroy)
extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);
libc_hidden_proto (__pthread_rwlock_rdlock)
extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);
libc_hidden_proto (__pthread_rwlock_tryrdlock)
extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);
libc_hidden_proto (__pthread_rwlock_wrlock)
extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);
libc_hidden_proto (__pthread_rwlock_trywrlock)
extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
libc_hidden_proto (__pthread_rwlock_unlock)
extern int __pthread_once (pthread_once_t *__once_control,
void (*__init_routine) (void));
libc_hidden_proto (__pthread_once);
extern int __pthread_atfork (void (*__prepare) (void),
void (*__parent) (void),
void (*__child) (void));
extern int __pthread_setcancelstate (int state, int *oldstate);
libc_hidden_proto (__pthread_setcancelstate)
/* Make the pthread functions weak so that we can elide them from
single-threaded processes. */
#if !defined(__NO_WEAK_PTHREAD_ALIASES) && !IS_IN (libpthread)
# ifdef weak_extern
weak_extern (__pthread_mutex_init)
weak_extern (__pthread_mutex_destroy)
weak_extern (__pthread_mutex_lock)
weak_extern (__pthread_mutex_trylock)
weak_extern (__pthread_mutex_unlock)
weak_extern (__pthread_mutexattr_settype)
weak_extern (__pthread_rwlock_init)
weak_extern (__pthread_rwlock_destroy)
weak_extern (__pthread_rwlock_rdlock)
weak_extern (__pthread_rwlock_tryrdlock)
weak_extern (__pthread_rwlock_wrlock)
weak_extern (__pthread_rwlock_trywrlock)
weak_extern (__pthread_rwlock_unlock)
weak_extern (__pthread_key_create)
weak_extern (__pthread_setspecific)
weak_extern (__pthread_getspecific)
weak_extern (__pthread_once)
weak_extern (__pthread_initialize)
weak_extern (__pthread_atfork)
weak_extern (__pthread_setcancelstate)
# else
# pragma weak __pthread_mutex_init
# pragma weak __pthread_mutex_destroy
# pragma weak __pthread_mutex_lock
# pragma weak __pthread_mutex_trylock
# pragma weak __pthread_mutex_unlock
# pragma weak __pthread_mutexattr_settype
# pragma weak __pthread_rwlock_destroy
# pragma weak __pthread_rwlock_rdlock
# pragma weak __pthread_rwlock_tryrdlock
# pragma weak __pthread_rwlock_wrlock
# pragma weak __pthread_rwlock_trywrlock
# pragma weak __pthread_rwlock_unlock
# pragma weak __pthread_key_create
# pragma weak __pthread_setspecific
# pragma weak __pthread_getspecific
# pragma weak __pthread_once
# pragma weak __pthread_initialize
# pragma weak __pthread_atfork
# pragma weak __pthread_setcancelstate
# endif
#endif

View File

@ -20,6 +20,11 @@
/* Get the Mach definitions of ENTRY and kernel_trap. */
#include <mach/machine/syscall_sw.h>
/* This macro is defined in Mach system headers, but string functions use it
with different definitions depending on whether they are being compiled for
wide characters or not. */
#undef P2ALIGN
/* The Mach definitions assume underscores should be prepended to
symbol names. Redefine them to do so only when appropriate. */
#undef EXT

View File

@ -1317,6 +1317,11 @@ extern int pthread_getcpuclockid (pthread_t __thread_id,
__THROW __nonnull ((2));
#endif
#ifdef __USE_GNU
/* Return the Linux TID for THREAD_ID. Returns -1 on failure. */
extern pid_t pthread_gettid_np (pthread_t __thread_id);
#endif
/* Install handlers to be called when a new process is created with FORK.
The PREPARE handler is called in the parent process just before performing

View File

@ -35,6 +35,7 @@
#include <sysdep.h>
#define FRAME_SIZE 104
.section ".text"
.align 4
@ -48,12 +49,12 @@ _start:
/* Terminate the stack frame, and reserve space for functions to
drop their arguments. */
mov %g0, %fp
sub %sp, 6*4, %sp
sub %sp, FRAME_SIZE, %sp
/* Extract the arguments and environment as encoded on the stack. The
argument info starts after one register window (16 words) past the SP. */
ld [%sp+22*4], %o1
add %sp, 23*4, %o2
ld [%sp+168], %o1
add %sp, 172, %o2
/* Load the addresses of the user entry points. */
#ifndef PIC
@ -73,6 +74,10 @@ _start:
be NULL. */
mov %g1, %o5
/* Provide the highest stack address to update the __libc_stack_end (used
to enable executable stacks if required). */
st %sp, [%sp+23*4]
/* Let libc do the rest of the initialization, and call main. */
call __libc_start_main
nop

View File

@ -74,6 +74,10 @@ _start:
be NULL. */
mov %g1, %o5
/* Provide the highest stack address to update the __libc_stack_end (used
to enable executable stacks if required). */
stx %sp, [%sp+STACK_BIAS+22*8]
/* Let libc do the rest of the initialization, and call main. */
call __libc_start_main
nop

View File

@ -152,13 +152,8 @@
#else /* not __ASSEMBLER__ */
# ifdef __LP64__
# define VDSO_NAME "LINUX_2.6.39"
# define VDSO_HASH 123718537
# else
# define VDSO_NAME "LINUX_4.9"
# define VDSO_HASH 61765625
# endif
# define VDSO_NAME "LINUX_2.6.39"
# define VDSO_HASH 123718537
/* List of system calls which are supported as vsyscalls. */
# define HAVE_CLOCK_GETRES64_VSYSCALL "__kernel_clock_getres"

View File

@ -54,6 +54,10 @@
configurations). */
#define __ASSUME_SET_ROBUST_LIST 1
/* The termios2 interface was introduced across all architectures except
Alpha in kernel 2.6.22. */
#define __ASSUME_TERMIOS2 1
/* Support for various CLOEXEC and NONBLOCK flags was added in
2.6.27. */
#define __ASSUME_IN_NONBLOCK 1

View File

@ -145,11 +145,12 @@
# define HAVE_CLOCK_GETRES64_VSYSCALL "__vdso_clock_getres"
# define HAVE_CLOCK_GETTIME64_VSYSCALL "__vdso_clock_gettime"
# define HAVE_GETTIMEOFDAY_VSYSCALL "__vdso_gettimeofday"
# define HAVE_GETRANDOM_VSYSCALL "__vdso_getrandom"
# else
# define VDSO_NAME "LINUX_5.4"
# define VDSO_HASH 61765876
/* RV32 does not support the gettime VDSO syscalls. */
/* RV32 does not support the gettime and getrandom VDSO syscalls. */
# endif
# define HAVE_CLONE3_WRAPPER 1

View File

@ -102,6 +102,9 @@
| (1 << X86_XSTATE_ZMM_ID) \
| (1 << X86_XSTATE_APX_F_ID))
/* The maximum supported xstate ID. */
# define X86_XSTATE_MAX_ID X86_XSTATE_APX_F_ID
/* AMX state mask. */
# define AMX_STATE_SAVE_MASK \
((1 << X86_XSTATE_TILECFG_ID) | (1 << X86_XSTATE_TILEDATA_ID))
@ -123,6 +126,9 @@
| (1 << X86_XSTATE_K_ID) \
| (1 << X86_XSTATE_ZMM_H_ID))
/* The maximum supported xstate ID. */
# define X86_XSTATE_MAX_ID X86_XSTATE_ZMM_H_ID
/* States to be included in xsave_state_size. */
# define FULL_STATE_SAVE_MASK STATE_SAVE_MASK
#endif
@ -177,6 +183,29 @@
#define atom_text_section .section ".text.atom", "ax"
#ifndef DL_STACK_ALIGNMENT
/* Due to GCC bug:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
__tls_get_addr may be called with 8-byte/4-byte stack alignment.
Although this bug has been fixed in GCC 4.9.4, 5.3 and 6, we can't
assume that stack will be always aligned at 16 bytes. */
# ifdef __x86_64__
# define DL_STACK_ALIGNMENT 8
# define MINIMUM_ALIGNMENT 16
# else
# define DL_STACK_ALIGNMENT 4
# endif
#endif
/* True if _dl_runtime_resolve/_dl_tlsdesc_dynamic should align stack for
STATE_SAVE or align stack to MINIMUM_ALIGNMENT bytes before calling
_dl_fixup/__tls_get_addr. */
#define DL_RUNTIME_RESOLVE_REALIGN_STACK \
(STATE_SAVE_ALIGNMENT > DL_STACK_ALIGNMENT \
|| MINIMUM_ALIGNMENT > DL_STACK_ALIGNMENT)
#endif /* __ASSEMBLER__ */
#endif /* _X86_SYSDEP_H */

View File

@ -25,17 +25,11 @@
#define __O_NOFOLLOW 0100000
#define __O_DIRECT 0200000
#ifdef __ILP32__
# define __O_LARGEFILE 0400000
#else
# define __O_LARGEFILE 0
#endif
#define __O_LARGEFILE 0
#ifdef __LP64__
# define F_GETLK64 5
# define F_SETLK64 6
# define F_SETLKW64 7
#endif
#define F_GETLK64 5
#define F_SETLK64 6
#define F_SETLKW64 7
struct flock
{

View File

@ -37,6 +37,10 @@
# define __DECL_SIMD_acosh __DECL_SIMD_aarch64
# undef __DECL_SIMD_acoshf
# define __DECL_SIMD_acoshf __DECL_SIMD_aarch64
# undef __DECL_SIMD_acospi
# define __DECL_SIMD_acospi __DECL_SIMD_aarch64
# undef __DECL_SIMD_acospif
# define __DECL_SIMD_acospif __DECL_SIMD_aarch64
# undef __DECL_SIMD_asin
# define __DECL_SIMD_asin __DECL_SIMD_aarch64
# undef __DECL_SIMD_asinf
@ -45,6 +49,10 @@
# define __DECL_SIMD_asinh __DECL_SIMD_aarch64
# undef __DECL_SIMD_asinhf
# define __DECL_SIMD_asinhf __DECL_SIMD_aarch64
# undef __DECL_SIMD_asinpi
# define __DECL_SIMD_asinpi __DECL_SIMD_aarch64
# undef __DECL_SIMD_asinpif
# define __DECL_SIMD_asinpif __DECL_SIMD_aarch64
# undef __DECL_SIMD_atan
# define __DECL_SIMD_atan __DECL_SIMD_aarch64
# undef __DECL_SIMD_atanf
@ -53,10 +61,18 @@
# define __DECL_SIMD_atanh __DECL_SIMD_aarch64
# undef __DECL_SIMD_atanhf
# define __DECL_SIMD_atanhf __DECL_SIMD_aarch64
# undef __DECL_SIMD_atanpi
# define __DECL_SIMD_atanpi __DECL_SIMD_aarch64
# undef __DECL_SIMD_atanpif
# define __DECL_SIMD_atanpif __DECL_SIMD_aarch64
# undef __DECL_SIMD_atan2
# define __DECL_SIMD_atan2 __DECL_SIMD_aarch64
# undef __DECL_SIMD_atan2f
# define __DECL_SIMD_atan2f __DECL_SIMD_aarch64
# undef __DECL_SIMD_atan2pi
# define __DECL_SIMD_atan2pi __DECL_SIMD_aarch64
# undef __DECL_SIMD_atan2pif
# define __DECL_SIMD_atan2pif __DECL_SIMD_aarch64
# undef __DECL_SIMD_cbrt
# define __DECL_SIMD_cbrt __DECL_SIMD_aarch64
# undef __DECL_SIMD_cbrtf
@ -176,12 +192,16 @@ typedef __SVBool_t __sv_bool_t;
# define __vpcs __attribute__ ((__aarch64_vector_pcs__))
__vpcs __f32x4_t _ZGVnN4vv_atan2f (__f32x4_t, __f32x4_t);
__vpcs __f32x4_t _ZGVnN4vv_atan2pif (__f32x4_t, __f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_acosf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_acoshf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_acospif (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_asinf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_asinhf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_asinpif (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_atanf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_atanhf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_atanpif (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_cbrtf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_cosf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_coshf (__f32x4_t);
@ -207,12 +227,16 @@ __vpcs __f32x4_t _ZGVnN4v_tanhf (__f32x4_t);
__vpcs __f32x4_t _ZGVnN4v_tanpif (__f32x4_t);
__vpcs __f64x2_t _ZGVnN2vv_atan2 (__f64x2_t, __f64x2_t);
__vpcs __f64x2_t _ZGVnN2vv_atan2pi (__f64x2_t, __f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_acos (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_acosh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_acospi (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_asin (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_asinh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_asinpi (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_atan (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_atanh (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_atanpi (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_cbrt (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_cos (__f64x2_t);
__vpcs __f64x2_t _ZGVnN2v_cosh (__f64x2_t);
@ -243,12 +267,16 @@ __vpcs __f64x2_t _ZGVnN2v_tanpi (__f64x2_t);
#ifdef __SVE_VEC_MATH_SUPPORTED
__sv_f32_t _ZGVsMxvv_atan2f (__sv_f32_t, __sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxvv_atan2pif (__sv_f32_t, __sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_acosf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_acoshf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_acospif (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_asinf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_asinhf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_asinpif (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_atanf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_atanhf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_atanpif (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_cbrtf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_cosf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_coshf (__sv_f32_t, __sv_bool_t);
@ -274,12 +302,16 @@ __sv_f32_t _ZGVsMxv_tanhf (__sv_f32_t, __sv_bool_t);
__sv_f32_t _ZGVsMxv_tanpif (__sv_f32_t, __sv_bool_t);
__sv_f64_t _ZGVsMxvv_atan2 (__sv_f64_t, __sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxvv_atan2pi (__sv_f64_t, __sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_acos (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_acosh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_acospi (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_asin (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_asinh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_asinpi (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_atan (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_atanh (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_atanpi (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_cbrt (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_cos (__sv_f64_t, __sv_bool_t);
__sv_f64_t _ZGVsMxv_cosh (__sv_f64_t, __sv_bool_t);

View File

@ -21,23 +21,13 @@
#include <bits/endian.h>
#ifdef __ILP32__
# define __SIZEOF_PTHREAD_ATTR_T 32
# define __SIZEOF_PTHREAD_MUTEX_T 32
# define __SIZEOF_PTHREAD_MUTEXATTR_T 4
# define __SIZEOF_PTHREAD_CONDATTR_T 4
# define __SIZEOF_PTHREAD_RWLOCK_T 48
# define __SIZEOF_PTHREAD_BARRIER_T 20
# define __SIZEOF_PTHREAD_BARRIERATTR_T 4
#else
# define __SIZEOF_PTHREAD_ATTR_T 64
# define __SIZEOF_PTHREAD_MUTEX_T 48
# define __SIZEOF_PTHREAD_MUTEXATTR_T 8
# define __SIZEOF_PTHREAD_CONDATTR_T 8
# define __SIZEOF_PTHREAD_RWLOCK_T 56
# define __SIZEOF_PTHREAD_BARRIER_T 32
# define __SIZEOF_PTHREAD_BARRIERATTR_T 8
#endif
#define __SIZEOF_PTHREAD_ATTR_T 64
#define __SIZEOF_PTHREAD_MUTEX_T 48
#define __SIZEOF_PTHREAD_MUTEXATTR_T 8
#define __SIZEOF_PTHREAD_CONDATTR_T 8
#define __SIZEOF_PTHREAD_RWLOCK_T 56
#define __SIZEOF_PTHREAD_BARRIER_T 32
#define __SIZEOF_PTHREAD_BARRIERATTR_T 8
#define __SIZEOF_PTHREAD_COND_T 48
#define __SIZEOF_PTHREAD_RWLOCKATTR_T 8

View File

@ -20,13 +20,7 @@
# error "Never use <bits/semaphore.h> directly; include <semaphore.h> instead."
#endif
#ifdef __ILP32__
# define __SIZEOF_SEM_T 16
#else
# define __SIZEOF_SEM_T 32
#endif
#define __SIZEOF_SEM_T 32
/* Value returned if `sem_open' failed. */
#define SEM_FAILED ((sem_t *) 0)

View File

@ -17,12 +17,5 @@
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#ifdef __LP64__
# define __WORDSIZE 64
#else
# define __WORDSIZE 32
# define __WORDSIZE32_SIZE_ULONG 1
# define __WORDSIZE32_PTRDIFF_LONG 1
#endif
#define __WORDSIZE 64
#define __WORDSIZE_TIME64_COMPAT32 0

View File

@ -101,6 +101,11 @@ extern char *inet_nsap_ntoa (int __len, const unsigned char *__cp,
char *__buf) __THROW;
#endif
#if __USE_FORTIFY_LEVEL > 0 && defined __fortify_function
/* Include functions with security checks. */
# include <bits/inet-fortified.h>
#endif
__END_DECLS
#endif /* arpa/inet.h */

View File

@ -379,6 +379,8 @@ struct file_handle
identity and may not
be usable to
open_by_handle_at. */
# define AT_HANDLE_MNT_ID_UNIQUE 1 /* Return the 64-bit unique mount
ID. */
#endif
__BEGIN_DECLS

View File

@ -0,0 +1,42 @@
/* Declarations of checking macros for inet functions.
Copyright (C) 2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#ifndef _BITS_INET_FORTIFIED_DEC_H
#define _BITS_INET_FORTIFIED_DEC_H 1
#ifndef _ARPA_INET_H
# error "Never include <bits/inet-fortified-decl.h> directly; use <arpa/inet.h> instead."
#endif
extern const char *__inet_ntop_chk (int, const void *, char *, socklen_t, size_t);
extern const char *__REDIRECT_FORTIFY_NTH (__inet_ntop_alias,
(int, const void *, char *, socklen_t), inet_ntop);
extern const char *__REDIRECT_NTH (__inet_ntop_chk_warn,
(int, const void *, char *, socklen_t, size_t), __inet_ntop_chk)
__warnattr ("inet_ntop called with bigger length than "
"size of destination buffer");
extern int __inet_pton_chk (int, const char *, void *, size_t);
extern int __REDIRECT_FORTIFY_NTH (__inet_pton_alias,
(int, const char *, void *), inet_pton);
extern int __REDIRECT_NTH (__inet_pton_chk_warn,
(int, const char *, void *, size_t), __inet_pton_chk)
__warnattr ("inet_pton called with a destination buffer size too small");
#endif /* bits/inet-fortified-decl.h. */

View File

@ -0,0 +1,61 @@
/* Checking macros for inet functions.
Copyright (C) 2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#ifndef _BITS_INET_FORTIFIED_H
#define _BITS_INET_FORTIFIED_H 1
#ifndef _ARPA_INET_H
# error "Never include <bits/inet-fortified.h> directly; use <arpa/inet.h> instead."
#endif
#include <bits/inet-fortified-decl.h>
__fortify_function __attribute_overloadable__ const char *
__NTH (inet_ntop (int __af,
__fortify_clang_overload_arg (const void *, __restrict, __src),
char *__restrict __dst, socklen_t __dst_size))
__fortify_clang_warning_only_if_bos_lt (__dst_size, __dst,
"inet_ntop called with bigger length "
"than size of destination buffer")
{
return __glibc_fortify (inet_ntop, __dst_size, sizeof (char),
__glibc_objsize (__dst),
__af, __src, __dst, __dst_size);
};
__fortify_function __attribute_overloadable__ int
__NTH (inet_pton (int __af,
const char *__restrict __src,
__fortify_clang_overload_arg (void *, __restrict, __dst)))
__fortify_clang_warning_only_if_bos0_lt
(4, __dst, "inet_pton called with destination buffer size less than 4")
{
size_t sz = 0;
if (__af == AF_INET)
sz = sizeof (struct in_addr);
else if (__af == AF_INET6)
sz = sizeof (struct in6_addr);
else
return __inet_pton_alias (__af, __src, __dst);
return __glibc_fortify (inet_pton, sz, sizeof (char),
__glibc_objsize (__dst),
__af, __src, __dst);
};
#endif /* bits/inet-fortified.h. */
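The fortified wrappers above do not change the calling convention; they only add __glibc_objsize checks on the destination buffer. A minimal sketch of calls that satisfy those checks, assuming _FORTIFY_SOURCE is enabled and the usual <arpa/inet.h>/<netinet/in.h> declarations:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>

    int main (void)
    {
      struct in_addr addr4;
      char text[INET_ADDRSTRLEN];

      /* Destination is sizeof (struct in_addr) bytes, so the size the
         inet_pton wrapper derives for AF_INET is satisfied.  */
      if (inet_pton (AF_INET, "192.0.2.1", &addr4) != 1)
        return 1;

      /* An INET_ADDRSTRLEN buffer satisfies the inet_ntop length check.  */
      if (inet_ntop (AF_INET, &addr4, text, sizeof text) == NULL)
        return 1;

      puts (text);
      return 0;
    }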

View File

@ -32,17 +32,6 @@ struct winsize
unsigned short int ws_ypixel;
};
#define NCC 8
struct termio
{
unsigned short int c_iflag; /* input mode flags */
unsigned short int c_oflag; /* output mode flags */
unsigned short int c_cflag; /* control mode flags */
unsigned short int c_lflag; /* local mode flags */
unsigned char c_line; /* line discipline */
unsigned char c_cc[NCC]; /* control characters */
};
/* modem lines */
#define TIOCM_LE 0x001
#define TIOCM_DTR 0x002

View File

@ -22,87 +22,4 @@
/* Use the definitions from the kernel header files. */
#include <asm/ioctls.h>
/* Routing table calls. */
#define SIOCADDRT 0x890B /* add routing table entry */
#define SIOCDELRT 0x890C /* delete routing table entry */
#define SIOCRTMSG 0x890D /* call to routing system */
/* Socket configuration controls. */
#define SIOCGIFNAME 0x8910 /* get iface name */
#define SIOCSIFLINK 0x8911 /* set iface channel */
#define SIOCGIFCONF 0x8912 /* get iface list */
#define SIOCGIFFLAGS 0x8913 /* get flags */
#define SIOCSIFFLAGS 0x8914 /* set flags */
#define SIOCGIFADDR 0x8915 /* get PA address */
#define SIOCSIFADDR 0x8916 /* set PA address */
#define SIOCGIFDSTADDR 0x8917 /* get remote PA address */
#define SIOCSIFDSTADDR 0x8918 /* set remote PA address */
#define SIOCGIFBRDADDR 0x8919 /* get broadcast PA address */
#define SIOCSIFBRDADDR 0x891a /* set broadcast PA address */
#define SIOCGIFNETMASK 0x891b /* get network PA mask */
#define SIOCSIFNETMASK 0x891c /* set network PA mask */
#define SIOCGIFMETRIC 0x891d /* get metric */
#define SIOCSIFMETRIC 0x891e /* set metric */
#define SIOCGIFMEM 0x891f /* get memory address (BSD) */
#define SIOCSIFMEM 0x8920 /* set memory address (BSD) */
#define SIOCGIFMTU 0x8921 /* get MTU size */
#define SIOCSIFMTU 0x8922 /* set MTU size */
#define SIOCSIFNAME 0x8923 /* set interface name */
#define SIOCSIFHWADDR 0x8924 /* set hardware address */
#define SIOCGIFENCAP 0x8925 /* get/set encapsulations */
#define SIOCSIFENCAP 0x8926
#define SIOCGIFHWADDR 0x8927 /* Get hardware address */
#define SIOCGIFSLAVE 0x8929 /* Driver slaving support */
#define SIOCSIFSLAVE 0x8930
#define SIOCADDMULTI 0x8931 /* Multicast address lists */
#define SIOCDELMULTI 0x8932
#define SIOCGIFINDEX 0x8933 /* name -> if_index mapping */
#define SIOGIFINDEX SIOCGIFINDEX /* misprint compatibility :-) */
#define SIOCSIFPFLAGS 0x8934 /* set/get extended flags set */
#define SIOCGIFPFLAGS 0x8935
#define SIOCDIFADDR 0x8936 /* delete PA address */
#define SIOCSIFHWBROADCAST 0x8937 /* set hardware broadcast addr */
#define SIOCGIFCOUNT 0x8938 /* get number of devices */
#define SIOCGIFBR 0x8940 /* Bridging support */
#define SIOCSIFBR 0x8941 /* Set bridging options */
#define SIOCGIFTXQLEN 0x8942 /* Get the tx queue length */
#define SIOCSIFTXQLEN 0x8943 /* Set the tx queue length */
/* ARP cache control calls. */
/* 0x8950 - 0x8952 * obsolete calls, don't re-use */
#define SIOCDARP 0x8953 /* delete ARP table entry */
#define SIOCGARP 0x8954 /* get ARP table entry */
#define SIOCSARP 0x8955 /* set ARP table entry */
/* RARP cache control calls. */
#define SIOCDRARP 0x8960 /* delete RARP table entry */
#define SIOCGRARP 0x8961 /* get RARP table entry */
#define SIOCSRARP 0x8962 /* set RARP table entry */
/* Driver configuration calls */
#define SIOCGIFMAP 0x8970 /* Get device parameters */
#define SIOCSIFMAP 0x8971 /* Set device parameters */
/* DLCI configuration calls */
#define SIOCADDDLCI 0x8980 /* Create new DLCI device */
#define SIOCDELDLCI 0x8981 /* Delete DLCI device */
/* Device private ioctl calls. */
/* These 16 ioctls are available to devices via the do_ioctl() device
vector. Each device should include this file and redefine these
names as their own. Because these are device dependent it is a good
idea _NOT_ to issue them to random objects and hope. */
#define SIOCDEVPRIVATE 0x89F0 /* to 89FF */
/*
* These 16 ioctl calls are protocol private
*/
#define SIOCPROTOPRIVATE 0x89E0 /* to 89EF */
#include <linux/sockios.h>

View File

@ -373,4 +373,48 @@
#define __DECL_SIMD_tanpif32x
#define __DECL_SIMD_tanpif64x
#define __DECL_SIMD_tanpif128x
#define __DECL_SIMD_acospi
#define __DECL_SIMD_acospif
#define __DECL_SIMD_acospil
#define __DECL_SIMD_acospif16
#define __DECL_SIMD_acospif32
#define __DECL_SIMD_acospif64
#define __DECL_SIMD_acospif128
#define __DECL_SIMD_acospif32x
#define __DECL_SIMD_acospif64x
#define __DECL_SIMD_acospif128x
#define __DECL_SIMD_asinpi
#define __DECL_SIMD_asinpif
#define __DECL_SIMD_asinpil
#define __DECL_SIMD_asinpif16
#define __DECL_SIMD_asinpif32
#define __DECL_SIMD_asinpif64
#define __DECL_SIMD_asinpif128
#define __DECL_SIMD_asinpif32x
#define __DECL_SIMD_asinpif64x
#define __DECL_SIMD_asinpif128x
#define __DECL_SIMD_atanpi
#define __DECL_SIMD_atanpif
#define __DECL_SIMD_atanpil
#define __DECL_SIMD_atanpif16
#define __DECL_SIMD_atanpif32
#define __DECL_SIMD_atanpif64
#define __DECL_SIMD_atanpif128
#define __DECL_SIMD_atanpif32x
#define __DECL_SIMD_atanpif64x
#define __DECL_SIMD_atanpif128x
#define __DECL_SIMD_atan2pi
#define __DECL_SIMD_atan2pif
#define __DECL_SIMD_atan2pil
#define __DECL_SIMD_atan2pif16
#define __DECL_SIMD_atan2pif32
#define __DECL_SIMD_atan2pif64
#define __DECL_SIMD_atan2pif128
#define __DECL_SIMD_atan2pif32x
#define __DECL_SIMD_atan2pif64x
#define __DECL_SIMD_atan2pif128x
#endif

View File

@ -34,7 +34,7 @@
#define __MATHCALLX(function,suffix, args, attrib) \
__MATHDECLX (_Mdouble_,function,suffix, args, attrib)
#define __MATHDECLX(type, function,suffix, args, attrib) \
__MATHDECL_1(type, function,suffix, args) __attribute__ (attrib);
__MATHDECL_1(type, function,suffix, args) __attribute__ (attrib)
#define __MATHDECL_1_IMPL(type, function, suffix, args) \
extern type __MATH_PRECNAME(function,suffix) args __THROW
#define __MATHDECL_1(type, function, suffix, args) \

View File

@ -68,12 +68,16 @@ __MATHCALL_VEC (tan,, (_Mdouble_ __x));
#if __GLIBC_USE (IEC_60559_FUNCS_EXT_C23)
/* Arc cosine of X, divided by pi. */
__MATHCALL (acospi,, (_Mdouble_ __x));
__MATHCALL_VEC (acospi,, (_Mdouble_ __x));
/* Arc sine of X, divided by pi. */
__MATHCALL (asinpi,, (_Mdouble_ __x));
__MATHCALL_VEC (asinpi,, (_Mdouble_ __x));
/* Arc tangent of X, divided by pi. */
__MATHCALL (atanpi,, (_Mdouble_ __x));
__MATHCALL_VEC (atanpi,, (_Mdouble_ __x));
/* Arc tangent of Y/X, divided by pi. */
__MATHCALL (atan2pi,, (_Mdouble_ __y, _Mdouble_ __x));
__MATHCALL_VEC (atan2pi,, (_Mdouble_ __y, _Mdouble_ __x));
/* Cosine of pi * X. */
__MATHCALL_VEC (cospi,, (_Mdouble_ __x));
@ -185,6 +189,23 @@ __MATHCALL_VEC (hypot,, (_Mdouble_ __x, _Mdouble_ __y));
__MATHCALL_VEC (cbrt,, (_Mdouble_ __x));
#endif
#if __GLIBC_USE (IEC_60559_FUNCS_EXT_C23)
/* Return 1+X to the Y power. */
__MATHCALL (compoundn,, (_Mdouble_ __x, long long int __y));
/* Return X to the Y power. */
__MATHCALL (pown,, (_Mdouble_ __x, long long int __y));
/* Return X to the Y power. */
__MATHCALL (powr,, (_Mdouble_ __x, _Mdouble_ __y));
/* Return the Yth root of X. */
__MATHCALL (rootn,, (_Mdouble_ __x, long long int __y));
/* Return the reciprocal of the square root of X. */
__MATHCALL (rsqrt,, (_Mdouble_ __x));
#endif
/* Nearest integer, absolute value, and remainder functions. */
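The half-revolution and power functions added above follow their C23 definitions: acospi (x) is acos (x) / pi, rootn (x, n) is the nth root of x, and rsqrt (x) is 1 / sqrt (x). A sketch, assuming a toolchain and glibc that expose the IEC 60559 C23 extensions:

    #include <math.h>
    #include <stdio.h>

    int main (void)
    {
      printf ("%g\n", acospi (0.5));    /* acos (0.5) / pi == 1.0 / 3.0 */
      printf ("%g\n", rootn (27.0, 3)); /* cube root of 27 == 3.0 */
      printf ("%g\n", rsqrt (4.0));     /* 1 / sqrt (4) == 0.5 */
      return 0;
    }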

View File

@ -113,6 +113,8 @@
locked pages too. */
# define MADV_COLLAPSE 25 /* Synchronous hugepage collapse. */
# define MADV_HWPOISON 100 /* Poison a page for testing. */
# define MADV_GUARD_INSTALL 102 /* Fatal signal on access to range */
# define MADV_GUARD_REMOVE 103 /* Unguard range */
#endif
/* The POSIX people had to invent similar names for the same things. */
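MADV_GUARD_INSTALL and MADV_GUARD_REMOVE are plain madvise advice values; an installed guard range delivers a fatal signal on any access instead of mapping memory. A sketch, assuming a kernel recent enough (6.13+) to implement guard regions:

    #include <sys/mman.h>
    #include <unistd.h>

    int main (void)
    {
      long page = sysconf (_SC_PAGESIZE);
      char *p = mmap (NULL, 4 * page, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED)
        return 1;

      /* Make the last page a guard page; touching it is now fatal.  */
      if (madvise (p + 3 * page, page, MADV_GUARD_INSTALL) != 0)
        return 1;

      /* ... use the first three pages normally ...  */

      /* Lift the guard before reusing or unmapping the range.  */
      madvise (p + 3 * page, page, MADV_GUARD_REMOVE);
      munmap (p, 4 * page);
      return 0;
    }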

View File

@ -43,10 +43,9 @@
# endif
/* Access restrictions for pkey_alloc. */
# ifndef PKEY_DISABLE_ACCESS
# define PKEY_DISABLE_ACCESS 0x1
# define PKEY_DISABLE_WRITE 0x2
# endif
# define PKEY_UNRESTRICTED 0x0
# define PKEY_DISABLE_ACCESS 0x1
# define PKEY_DISABLE_WRITE 0x2
__BEGIN_DECLS
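PKEY_UNRESTRICTED is simply the named zero value for the access-rights argument, added so callers do not have to pass a bare 0. A one-line sketch, assuming hardware and kernel support for memory protection keys:

    #define _GNU_SOURCE
    #include <sys/mman.h>

    int allocate_open_pkey (void)
    {
      /* Same as pkey_alloc (0, 0), but self-documenting.  */
      return pkey_alloc (0, PKEY_UNRESTRICTED);
    }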

View File

@ -152,7 +152,7 @@ int sched_setattr (pid_t tid, struct sched_attr *attr, unsigned int flags)
store it in *ATTR. */
int sched_getattr (pid_t tid, struct sched_attr *attr, unsigned int size,
unsigned int flags)
__THROW __nonnull ((2)) __attr_access ((__write_only__, 2, 3));
__THROW __nonnull ((2));
#endif

View File

@ -151,7 +151,7 @@ __NTH (strncat (__fortify_clang_overload_arg (char *, __restrict, __dest),
}
/*
* strlcpy and strlcat introduced in glibc 2.38
* zig patch: strlcpy and strlcat introduced in glibc 2.38
* https://sourceware.org/git/?p=glibc.git;a=commit;h=2e0bbbfbf95fc9e22692e93658a6fbdd2d4554da
*/
#if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 38) || __GLIBC__ > 2

View File

@ -1,11 +1,11 @@
/* Generated at libc build time from syscall list. */
/* The system call list corresponds to kernel 6.12. */
/* The system call list corresponds to kernel 6.15. */
#ifndef _SYSCALL_H
# error "Never use <bits/syscall.h> directly; include <sys/syscall.h> instead."
#endif
#define __GLIBC_LINUX_VERSION_CODE 396288
#define __GLIBC_LINUX_VERSION_CODE 397056
#ifdef __NR_FAST_atomic_update
# define SYS_FAST_atomic_update __NR_FAST_atomic_update
@ -703,6 +703,10 @@
# define SYS_getxattr __NR_getxattr
#endif
#ifdef __NR_getxattrat
# define SYS_getxattrat __NR_getxattrat
#endif
#ifdef __NR_getxgid
# define SYS_getxgid __NR_getxgid
#endif
@ -875,6 +879,10 @@
# define SYS_listxattr __NR_listxattr
#endif
#ifdef __NR_listxattrat
# define SYS_listxattrat __NR_listxattrat
#endif
#ifdef __NR_llistxattr
# define SYS_llistxattr __NR_llistxattr
#endif
@ -1167,6 +1175,10 @@
# define SYS_open_tree __NR_open_tree
#endif
#ifdef __NR_open_tree_attr
# define SYS_open_tree_attr __NR_open_tree_attr
#endif
#ifdef __NR_openat
# define SYS_openat __NR_openat
#endif
@ -1839,6 +1851,10 @@
# define SYS_removexattr __NR_removexattr
#endif
#ifdef __NR_removexattrat
# define SYS_removexattrat __NR_removexattrat
#endif
#ifdef __NR_rename
# define SYS_rename __NR_rename
#endif
@ -2199,6 +2215,10 @@
# define SYS_setxattr __NR_setxattr
#endif
#ifdef __NR_setxattrat
# define SYS_setxattrat __NR_setxattrat
#endif
#ifdef __NR_sgetmask
# define SYS_sgetmask __NR_sgetmask
#endif

View File

@ -1,4 +1,4 @@
/* termios baud rate selection definitions. Linux/generic version.
/* termios baud rate selection definitions. Universal version for sane speed_t.
Copyright (C) 2019-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
@ -20,29 +20,56 @@
# error "Never include <bits/termios-baud.h> directly; use <termios.h> instead."
#endif
#ifdef __USE_MISC
# define CBAUD 000000010017 /* Baud speed mask (not in POSIX). */
# define CBAUDEX 000000010000 /* Extra baud speed mask, included in CBAUD.
(not in POSIX). */
# define CIBAUD 002003600000 /* Input baud rate (not used). */
# define CMSPAR 010000000000 /* Mark or space (stick) parity. */
# define CRTSCTS 020000000000 /* Flow control. */
/* POSIX required baud rates */
#define B0 0U /* Hang up or ispeed == ospeed */
#define B50 50U
#define B75 75U
#define B110 110U
#define B134 134U /* Really 134.5 baud by POSIX spec */
#define B150 150U
#define B200 200U
#define B300 300U
#define B600 600U
#define B1200 1200U
#define B1800 1800U
#define B2400 2400U
#define B4800 4800U
#define B9600 9600U
#define B19200 19200U
#define B38400 38400U
#ifdef __USE_MISC
# define EXTA B19200
# define EXTB B38400
#endif
/* Extra output baud rates (not in POSIX). */
#define B57600 0010001
#define B115200 0010002
#define B230400 0010003
#define B460800 0010004
#define B500000 0010005
#define B576000 0010006
#define B921600 0010007
#define B1000000 0010010
#define B1152000 0010011
#define B1500000 0010012
#define B2000000 0010013
#define B2500000 0010014
#define B3000000 0010015
#define B3500000 0010016
#define B4000000 0010017
#define __MAX_BAUD B4000000
/* Other baud rates, "nonstandard" but known to be used */
#define B7200 7200U
#define B14400 14400U
#define B28800 28800U
#define B33600 33600U
#define B57600 57600U
#define B76800 76800U
#define B115200 115200U
#define B153600 153600U
#define B230400 230400U
#define B307200 307200U
#define B460800 460800U
#define B500000 500000U
#define B576000 576000U
#define B614400 614400U
#define B921600 921600U
#define B1000000 1000000U
#define B1152000 1152000U
#define B1500000 1500000U
#define B2000000 2000000U
#define B2500000 2500000U
#define B3000000 3000000U
#define B3500000 3500000U
#define B4000000 4000000U
#define B5000000 5000000U
#define B10000000 10000000U
#ifdef __USE_GNU
#define SPEED_MAX 4294967295U /* maximum valid speed_t value */
#endif
#define __MAX_BAUD 4294967295U /* legacy alias for SPEED_MAX */
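In this layout the Bnnn constants are the literal rates (B9600 is 9600U, B115200 is 115200U), so speed_t values are directly meaningful numbers while the traditional cfset*speed interfaces keep working. A sketch under that assumption:

    #include <termios.h>

    int set_115200 (int fd)
    {
      struct termios tio;
      if (tcgetattr (fd, &tio) != 0)
        return -1;
      cfsetispeed (&tio, B115200);   /* B115200 == 115200U here */
      cfsetospeed (&tio, B115200);
      return tcsetattr (fd, TCSANOW, &tio);
    }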

View File

@ -34,5 +34,7 @@
#define CLOCAL 0004000
#ifdef __USE_MISC
# define ADDRB 04000000000
# define ADDRB 04000000000
# define CMSPAR 010000000000 /* Mark or space (stick) parity. */
# define CRTSCTS 020000000000 /* Flow control. */
#endif

View File

@ -0,0 +1,47 @@
/* termios baud rate selection definitions. Linux/generic version.
Copyright (C) 2019-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<https://www.gnu.org/licenses/>. */
#ifndef _TERMIOS_H
# error "Never include <bits/termios-cbaud.h> directly; use <termios.h> instead."
#endif
#ifdef __USE_MISC
# define CBAUD 000000010017 /* Baud speed mask (not in POSIX). */
# define CBAUDEX 000000010000 /* Extra baud speed mask, included in CBAUD.
(not in POSIX). */
# define CIBAUD 002003600000 /* Input baud rate. */
# define IBSHIFT 16
#endif
/* Extra output baud rates (not in POSIX). */
#define __BOTHER 0010000
#define __B57600 0010001
#define __B115200 0010002
#define __B230400 0010003
#define __B460800 0010004
#define __B500000 0010005
#define __B576000 0010006
#define __B921600 0010007
#define __B1000000 0010010
#define __B1152000 0010011
#define __B1500000 0010012
#define __B2000000 0010013
#define __B2500000 0010014
#define __B3000000 0010015
#define __B3500000 0010016
#define __B4000000 0010017

View File

@ -29,8 +29,15 @@ struct termios
tcflag_t c_lflag; /* local mode flags */
cc_t c_line; /* line discipline */
cc_t c_cc[NCCS]; /* control characters */
speed_t c_ispeed; /* input speed */
speed_t c_ospeed; /* output speed */
/* Input and output baud rates. */
__extension__ union {
speed_t __ispeed;
speed_t c_ispeed;
};
#define _HAVE_STRUCT_TERMIOS_C_ISPEED 1
__extension__ union {
speed_t __ospeed;
speed_t c_ospeed;
};
#define _HAVE_STRUCT_TERMIOS_C_OSPEED 1
};

View File

@ -24,35 +24,41 @@ typedef unsigned char cc_t;
typedef unsigned int speed_t;
typedef unsigned int tcflag_t;
#include <bits/termios-struct.h>
#ifdef _TERMIOS_H
# include <bits/termios-struct.h>
#endif
#include <bits/termios-c_cc.h>
#include <bits/termios-c_iflag.h>
#include <bits/termios-c_oflag.h>
/* c_cflag bit meaning */
#define B0 0000000 /* hang up */
#define B50 0000001
#define B75 0000002
#define B110 0000003
#define B134 0000004
#define B150 0000005
#define B200 0000006
#define B300 0000007
#define B600 0000010
#define B1200 0000011
#define B1800 0000012
#define B2400 0000013
#define B4800 0000014
#define B9600 0000015
#define B19200 0000016
#define B38400 0000017
#ifdef __USE_MISC
# define EXTA B19200
# define EXTB B38400
#endif
#include <bits/termios-baud.h>
#include <bits/termios-c_cflag.h>
#ifdef __USE_MISC
#define __B0 0000000 /* hang up */
#define __B50 0000001
#define __B75 0000002
#define __B110 0000003
#define __B134 0000004
#define __B150 0000005
#define __B200 0000006
#define __B300 0000007
#define __B600 0000010
#define __B1200 0000011
#define __B1800 0000012
#define __B2400 0000013
#define __B4800 0000014
#define __B9600 0000015
#define __B19200 0000016
#define __B38400 0000017
#include <bits/termios-cbaud.h>
# define __EXTA __B19200
# define __EXTB __B38400
# define BOTHER __BOTHER
#endif
#include <bits/termios-c_lflag.h>
#ifdef __USE_MISC
@ -73,4 +79,6 @@ typedef unsigned int tcflag_t;
#include <bits/termios-tcflow.h>
#include <bits/termios-misc.h>
#include <bits/termios-misc.h>
#include <bits/termios-baud.h>

View File

@ -32,6 +32,7 @@
#endif
#include <bits/types.h>
#include <bits/wordsize.h>
struct _IO_FILE;
struct _IO_marker;
@ -97,8 +98,15 @@ struct _IO_FILE_complete
void *_freeres_buf;
struct _IO_FILE **_prevchain;
int _mode;
#if __WORDSIZE == 64
int _unused3;
#endif
__uint64_t _total_written;
#if __WORDSIZE == 32
int _unused3;
#endif
/* Make sure we don't get into trouble again. */
char _unused2[15 * sizeof (int) - 5 * sizeof (void *)];
char _unused2[12 * sizeof (int) - 5 * sizeof (void *)];
};
/* These macros are used by bits/stdio.h and internal headers. */

View File

@ -217,15 +217,21 @@ struct dl_find_object
int dlfo_eh_count; /* Number of exception handling entries. */
unsigned int __dlfo_eh_count_pad;
# endif
__extension__ unsigned long long int __dflo_reserved[7];
void *dlfo_sframe; /* SFrame stack trace data of the object. */
#if __WORDSIZE == 32
unsigned int __dlfo_sframe_pad;
#endif
__extension__ unsigned long long int __dlfo_reserved[6];
};
/* If ADDRESS is found in an object, fill in *RESULT and return 0.
Otherwise, return -1. */
int _dl_find_object (void *__address, struct dl_find_object *__result) __THROW;
#endif /* __USE_GNU */
/* SFrame stack trace data is valid. */
#define DLFO_FLAG_SFRAME (1ULL << 0)
#endif /* __USE_GNU */
__END_DECLS
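Whether the new dlfo_sframe pointer is valid is signalled through DLFO_FLAG_SFRAME in the flags word; objects without SFrame data keep reporting only the EH frame. A sketch, assuming the pre-existing dlfo_flags and dlfo_eh_frame members of struct dl_find_object:

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stdio.h>

    static void probe (void *addr)
    {
      struct dl_find_object dlfo;
      if (_dl_find_object (addr, &dlfo) != 0)
        return;                                /* address not in any object */
      if (dlfo.dlfo_flags & DLFO_FLAG_SFRAME)
        printf ("SFrame data at %p\n", dlfo.dlfo_sframe);
      else
        printf ("EH frame at %p\n", dlfo.dlfo_eh_frame);
    }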

View File

@ -837,12 +837,15 @@ typedef struct
#define NT_ARM_ZT 0x40d /* ARM SME ZT registers. */
#define NT_ARM_FPMR 0x40e /* ARM floating point mode register. */
#define NT_ARM_POE 0x40f /* ARM POE registers. */
#define NT_ARM_GCS 0x410 /* ARM GCS state. */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note. */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers. */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode. */
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers. */
#define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */
#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
#define NT_RISCV_TAGGED_ADDR_CTRL 0x902 /* RISC-V tagged
address control */
#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers. */
#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and
status registers. */
@ -2906,19 +2909,6 @@ enum
#define R_AARCH64_NONE 0 /* No relocation. */
/* ILP32 AArch64 relocs. */
#define R_AARCH64_P32_ABS32 1 /* Direct 32 bit. */
#define R_AARCH64_P32_COPY 180 /* Copy symbol at runtime. */
#define R_AARCH64_P32_GLOB_DAT 181 /* Create GOT entry. */
#define R_AARCH64_P32_JUMP_SLOT 182 /* Create PLT entry. */
#define R_AARCH64_P32_RELATIVE 183 /* Adjust by program base. */
#define R_AARCH64_P32_TLS_DTPMOD 184 /* Module number, 32 bit. */
#define R_AARCH64_P32_TLS_DTPREL 185 /* Module-relative offset, 32 bit. */
#define R_AARCH64_P32_TLS_TPREL 186 /* TP-relative offset, 32 bit. */
#define R_AARCH64_P32_TLSDESC 187 /* TLS Descriptor. */
#define R_AARCH64_P32_IRELATIVE 188 /* STT_GNU_IFUNC relocation. */
/* LP64 AArch64 relocs. */
#define R_AARCH64_ABS64 257 /* Direct 64 bit. */
#define R_AARCH64_ABS32 258 /* Direct 32 bit. */
#define R_AARCH64_ABS16 259 /* Direct 16-bit. */
@ -4091,6 +4081,7 @@ enum
#define R_RISCV_TLS_DTPREL64 9
#define R_RISCV_TLS_TPREL32 10
#define R_RISCV_TLS_TPREL64 11
#define R_RISCV_TLSDESC 12
#define R_RISCV_BRANCH 16
#define R_RISCV_JAL 17
#define R_RISCV_CALL 18
@ -4116,16 +4107,10 @@ enum
#define R_RISCV_SUB16 38
#define R_RISCV_SUB32 39
#define R_RISCV_SUB64 40
#define R_RISCV_GNU_VTINHERIT 41
#define R_RISCV_GNU_VTENTRY 42
#define R_RISCV_GOT32_PCREL 41
#define R_RISCV_ALIGN 43
#define R_RISCV_RVC_BRANCH 44
#define R_RISCV_RVC_JUMP 45
#define R_RISCV_RVC_LUI 46
#define R_RISCV_GPREL_I 47
#define R_RISCV_GPREL_S 48
#define R_RISCV_TPREL_I 49
#define R_RISCV_TPREL_S 50
#define R_RISCV_RELAX 51
#define R_RISCV_SUB6 52
#define R_RISCV_SET6 53
@ -4137,8 +4122,12 @@ enum
#define R_RISCV_PLT32 59
#define R_RISCV_SET_ULEB128 60
#define R_RISCV_SUB_ULEB128 61
#define R_RISCV_TLSDESC_HI20 62
#define R_RISCV_TLSDESC_LOAD_LO12 63
#define R_RISCV_TLSDESC_ADD_LO12 64
#define R_RISCV_TLSDESC_CALL 65
#define R_RISCV_NUM 62
#define R_RISCV_NUM 66
/* RISC-V specific values for the st_other field. */
#define STO_RISCV_VARIANT_CC 0x80 /* Function uses variant calling
@ -4147,7 +4136,7 @@ enum
/* RISC-V specific values for the sh_type field. */
#define SHT_RISCV_ATTRIBUTES (SHT_LOPROC + 3)
/* RISC-V specific values for the p_type field. */
/* RISC-V specific values for the p_type field (deprecated). */
#define PT_RISCV_ATTRIBUTES (PT_LOPROC + 3)
/* RISC-V specific values for the d_tag field. */

View File

@ -168,7 +168,7 @@ typedef __pid_t pid_t;
#endif
/* fcntl was a simple symbol until glibc 2.27 inclusive.
/* zig patch: fcntl was a simple symbol until glibc 2.27 inclusive.
* glibc 2.28 onwards converted it to a macro when compiled with
* USE_LARGEFILE64. */
#if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 28) || __GLIBC__ > 2
@ -289,16 +289,17 @@ extern int creat64 (const char *__file, mode_t __mode) __nonnull ((1));
# define F_TEST 3 /* Test a region for other processes locks. */
# ifndef __USE_FILE_OFFSET64
extern int lockf (int __fd, int __cmd, off_t __len);
extern int lockf (int __fd, int __cmd, off_t __len) __wur;
# else
# ifdef __REDIRECT
extern int __REDIRECT (lockf, (int __fd, int __cmd, __off64_t __len), lockf64);
extern int __REDIRECT (lockf, (int __fd, int __cmd, __off64_t __len),
lockf64) __wur;
# else
# define lockf lockf64
# endif
# endif
# ifdef __USE_LARGEFILE64
extern int lockf64 (int __fd, int __cmd, off64_t __len);
extern int lockf64 (int __fd, int __cmd, off64_t __len) __wur;
# endif
#endif
@ -351,4 +352,4 @@ extern int posix_fallocate64 (int __fd, off64_t __offset, off64_t __len);
__END_DECLS
#endif /* fcntl.h */
#endif /* fcntl.h */

View File

@ -491,7 +491,7 @@
or without -D_GNU_SOURCE, but -std=c89 -D_GNU_SOURCE will have the
old extension. */
#if (__GLIBC__ == 2 && __GLIBC_MINOR__ < 7)
/* support for ISOC99 was added in glibc-2.7 */
/* zig patch: support for ISOC99 was added in glibc-2.7 */
# define __GLIBC_USE_DEPRECATED_SCANF 1
#elif (defined __USE_GNU \
&& (defined __cplusplus \
@ -503,7 +503,7 @@
#endif
/* support for ISO C2X strtol was added in 2.38
/* zig patch: support for ISO C2X strtol was added in 2.38
* glibc commit 64924422a99690d147a166b4de3103f3bf3eaf6c
*/
#if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 38) || __GLIBC__ > 2
@ -564,4 +564,4 @@
#include <gnu/stubs.h>
#endif /* features.h */
#endif /* features.h */

View File

@ -195,7 +195,8 @@ extern void globfree64 (glob64_t *__pglob) __THROW;
This function is not part of the interface specified by POSIX.2
but several programs want to use it. */
extern int glob_pattern_p (const char *__pattern, int __quote) __THROW;
extern int glob_pattern_p (const char *__pattern, int __quote) __THROW
__nonnull ((1));
#endif
__END_DECLS

View File

@ -350,6 +350,11 @@ typedef struct
/* Compute absolute value of N. */
extern intmax_t imaxabs (intmax_t __n) __THROW __attribute__ ((__const__));
#if __GLIBC_USE (ISOC2Y)
extern uintmax_t uimaxabs (intmax_t __n) __THROW __attribute__ ((__const__));
#endif
/* Return the `imaxdiv_t' representation of the value of NUMER over DENOM. */
extern imaxdiv_t imaxdiv (intmax_t __numer, intmax_t __denom)
__THROW __attribute__ ((__const__));

View File

@ -52,7 +52,7 @@ extern void *realloc (void *__ptr, size_t __size)
__THROW __attribute_warn_unused_result__ __attribute_alloc_size__ ((2));
/*
* reallocarray introduced in glibc 2.26
* zig patch: reallocarray introduced in glibc 2.26
* https://sourceware.org/git/?p=glibc.git;a=commit;h=2e0bbbfbf95fc9e22692e93658a6fbdd2d4554da
*/
#if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 26) || __GLIBC__ > 2
@ -164,4 +164,4 @@ extern void malloc_stats (void) __THROW;
extern int malloc_info (int __options, FILE *__fp) __THROW;
__END_DECLS
#endif /* malloc.h */
#endif /* malloc.h */

View File

@ -212,6 +212,9 @@ enum
# define TCPI_OPT_ECN 8 /* ECN was negotiated at TCP session init */
# define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */
# define TCPI_OPT_SYN_DATA 32 /* SYN-ACK acked data in SYN sent or rcvd */
# define TCPI_OPT_USEC_TS 64 /* usec timestamps */
# define TCPI_OPT_TFO_CHILD 128 /* child from a Fast Open option on SYN */
/* Values for tcpi_state. */
enum tcp_ca_state

View File

@ -1317,6 +1317,11 @@ extern int pthread_getcpuclockid (pthread_t __thread_id,
__THROW __nonnull ((2));
#endif
#ifdef __USE_GNU
/* Return the Linux TID for THREAD_ID. Returns -1 on failure. */
extern pid_t pthread_gettid_np (pthread_t __thread_id);
#endif
/* Install handlers to be called when a new process is created with FORK.
The PREPARE handler is called in the parent process just before performing

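pthread_gettid_np returns the kernel TID behind a pthread_t, which is the identifier expected by Linux-specific interfaces such as sched_setaffinity or tgkill. A minimal sketch based on the declaration above:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <stdio.h>
    #include <sys/types.h>

    int main (void)
    {
      pid_t tid = pthread_gettid_np (pthread_self ());
      if (tid == -1)
        return 1;                      /* thread already exited or invalid */
      printf ("kernel TID: %d\n", (int) tid);
      return 0;
    }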
View File

@ -171,7 +171,7 @@ __END_DECLS
#define res_init __res_init
#define res_isourserver __res_isourserver
/* In glibc 2.33 and earlier res_search, res_nsearch, res_query, res_nquery,
/* zig patch: In glibc 2.33 and earlier res_search, res_nsearch, res_query, res_nquery,
* res_querydomain, res_nquerydomain, dn_skipname, dn_comp, dn_expand were
* #define'd to __res_search, __res_nsearch, etc. glibc 2.34 onwards removes
* the macros and exposes the symbols directly. New glibc exposes compat
@ -336,4 +336,4 @@ void res_nclose (res_state) __THROW;
__END_DECLS
#endif /* !_RESOLV_H_ */
#endif /* !_RESOLV_H_ */

View File

@ -168,8 +168,11 @@ extern int renameat (int __oldfd, const char *__old, int __newfd,
#ifdef __USE_GNU
/* Flags for renameat2. */
# define RENAME_NOREPLACE (1 << 0)
# define AT_RENAME_NOREPLACE RENAME_NOREPLACE
# define RENAME_EXCHANGE (1 << 1)
# define AT_RENAME_EXCHANGE RENAME_EXCHANGE
# define RENAME_WHITEOUT (1 << 2)
# define AT_RENAME_WHITEOUT RENAME_WHITEOUT
/* Rename file OLD relative to OLDFD to NEW relative to NEWFD, with
additional flags. */
@ -604,9 +607,6 @@ extern int fgetc_unlocked (FILE *__stream) __nonnull ((1));
/* Write a character to STREAM.
These functions are possible cancellation points and therefore not
marked with __THROW.
These functions is a possible cancellation point and therefore not
marked with __THROW. */
extern int fputc (int __c, FILE *__stream) __nonnull ((2));
extern int putc (int __c, FILE *__stream) __nonnull ((2));
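The AT_RENAME_* names are aliases for the existing RENAME_* values, so either spelling can be handed to renameat2. A sketch of an atomic no-clobber rename, assuming _GNU_SOURCE:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>

    /* Fails with EEXIST instead of silently replacing an existing target.  */
    int rename_noclobber (const char *from, const char *to)
    {
      return renameat2 (AT_FDCWD, from, AT_FDCWD, to, AT_RENAME_NOREPLACE);
    }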

View File

@ -43,43 +43,43 @@ __BEGIN_DECLS
/* Return the size of the buffer of FP in bytes currently in use by
the given stream. */
extern size_t __fbufsize (FILE *__fp) __THROW;
extern size_t __fbufsize (FILE *__fp) __THROW __nonnull ((1));
/* Return non-zero value iff the stream FP is opened readonly, or if the
last operation on the stream was a read operation. */
extern int __freading (FILE *__fp) __THROW;
extern int __freading (FILE *__fp) __THROW __nonnull ((1));
/* Return non-zero value iff the stream FP is opened write-only or
append-only, or if the last operation on the stream was a write
operation. */
extern int __fwriting (FILE *__fp) __THROW;
extern int __fwriting (FILE *__fp) __THROW __nonnull ((1));
/* Return non-zero value iff stream FP is not opened write-only or
append-only. */
extern int __freadable (FILE *__fp) __THROW;
extern int __freadable (FILE *__fp) __THROW __nonnull ((1));
/* Return non-zero value iff stream FP is not opened read-only. */
extern int __fwritable (FILE *__fp) __THROW;
extern int __fwritable (FILE *__fp) __THROW __nonnull ((1));
/* Return non-zero value iff the stream FP is line-buffered. */
extern int __flbf (FILE *__fp) __THROW;
extern int __flbf (FILE *__fp) __THROW __nonnull ((1));
/* Discard all pending buffered I/O on the stream FP. */
extern void __fpurge (FILE *__fp) __THROW;
extern void __fpurge (FILE *__fp) __THROW __nonnull ((1));
/* Return amount of output in bytes pending on a stream FP. */
extern size_t __fpending (FILE *__fp) __THROW;
extern size_t __fpending (FILE *__fp) __THROW __nonnull ((1));
/* Flush all line-buffered files. */
extern void _flushlbf (void);
/* Set locking status of stream FP to TYPE. */
extern int __fsetlocking (FILE *__fp, int __type) __THROW;
extern int __fsetlocking (FILE *__fp, int __type) __THROW __nonnull ((1));
__END_DECLS

View File

@ -654,7 +654,7 @@ extern int lcong48_r (unsigned short int __param[7],
__THROW __nonnull ((1, 2));
/*
* arc4random* symbols introduced in glibc 2.36:
* zig patch: arc4random* symbols introduced in glibc 2.36:
* https://sourceware.org/git/?p=glibc.git;a=blob;f=NEWS;h=8420a65cd06874ee09518366b8fba746a557212a;hb=6f4e0fcfa2d2b0915816a3a3a1d48b4763a7dee2
*/
# if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 36) || __GLIBC__ > 2
@ -693,7 +693,7 @@ extern void *realloc (void *__ptr, size_t __size)
extern void free (void *__ptr) __THROW;
/*
* reallocarray introduced in glibc 2.26
* zig patch: reallocarray introduced in glibc 2.26
* https://sourceware.org/git/?p=glibc.git;a=commit;h=2e0bbbfbf95fc9e22692e93658a6fbdd2d4554da
*/
#if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 26) || __GLIBC__ > 2
@ -997,6 +997,12 @@ __extension__ extern long long int llabs (long long int __x)
__THROW __attribute__ ((__const__)) __wur;
#endif
#if __GLIBC_USE (ISOC2Y)
extern unsigned int uabs (int __x) __THROW __attribute__ ((__const__)) __wur;
extern unsigned long int ulabs (long int __x) __THROW __attribute__ ((__const__)) __wur;
__extension__ extern unsigned long long int ullabs (long long int __x)
__THROW __attribute__ ((__const__)) __wur;
#endif
/* Return the `div_t', `ldiv_t' or `lldiv_t' representation
of the value of NUMER over DENOM. */
@ -1178,4 +1184,4 @@ extern int ttyslot (void) __THROW;
__END_DECLS
#endif /* stdlib.h */
#endif /* stdlib.h */
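The C2Y u*abs functions return unsigned results, so the magnitude of the most negative value is representable and the classic abs (INT_MIN) undefined-behaviour trap disappears. A sketch, assuming a 32-bit int and a toolchain that enables the ISO C2Y declarations:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main (void)
    {
      unsigned int m = uabs (INT_MIN);   /* 2147483648u, no overflow */
      printf ("%u\n", m);
      return 0;
    }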

View File

@ -502,7 +502,7 @@ extern char *stpncpy (char *__restrict __dest,
#endif
/*
* strlcpy and strlcat introduced in glibc 2.38
* zig patch: strlcpy and strlcat introduced in glibc 2.38
* https://sourceware.org/git/?p=glibc.git;a=commit;h=2e0bbbfbf95fc9e22692e93658a6fbdd2d4554da
*/
#if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 38) || __GLIBC__ > 2
@ -557,4 +557,4 @@ extern char *basename (const char *__filename) __THROW __nonnull ((1));
__END_DECLS
#endif /* string.h */
#endif /* string.h */

View File

@ -21,6 +21,7 @@
#define _SYS_HWPROBE_H 1
#include <features.h>
#include <sched.h>
#include <stddef.h>
#include <errno.h>
#ifdef __has_include
@ -63,22 +64,39 @@ struct riscv_hwprobe {
__BEGIN_DECLS
extern int __riscv_hwprobe (struct riscv_hwprobe *__pairs, size_t __pair_count,
size_t __cpu_count, unsigned long int *__cpus,
unsigned int __flags)
__nonnull ((1)) __wur
__fortified_attr_access (__read_write__, 1, 2)
__fortified_attr_access (__read_only__, 4, 3);
#if defined __cplusplus || !__GNUC_PREREQ (2, 7)
# define __RISCV_HWPROBE_CPUS_TYPE cpu_set_t *
#else
/* The fourth argument to __riscv_hwprobe should be a null pointer or a
pointer to a cpu_set_t (either the fixed-size type or allocated with
CPU_ALLOC). However, early versions of this header file used the
argument type unsigned long int *. The transparent union allows
the argument to be either cpu_set_t * or unsigned long int * for
compatibility. The older header file requiring unsigned long int *
can be identified by the lack of the __RISCV_HWPROBE_CPUS_TYPE macro.
In C++ and with compilers that do not support transparent unions, the
argument type must be cpu_set_t *. */
typedef union {
cpu_set_t *__cs;
unsigned long int *__ul;
} __RISCV_HWPROBE_CPUS_TYPE __attribute__ ((__transparent_union__));
# define __RISCV_HWPROBE_CPUS_TYPE __RISCV_HWPROBE_CPUS_TYPE
#endif
/* A pointer to the __riscv_hwprobe vDSO function is passed as the second
extern int __riscv_hwprobe (struct riscv_hwprobe *__pairs,
size_t __pair_count, size_t __cpusetsize,
__RISCV_HWPROBE_CPUS_TYPE __cpus,
unsigned int __flags)
__THROW __nonnull ((1)) __attr_access ((__read_write__, 1, 2));
/* A pointer to the __riscv_hwprobe function is passed as the second
argument to ifunc selector routines. Include a function pointer type for
convenience in calling the function in those settings. */
typedef int (*__riscv_hwprobe_t) (struct riscv_hwprobe *__pairs, size_t __pair_count,
size_t __cpu_count, unsigned long int *__cpus,
typedef int (*__riscv_hwprobe_t) (struct riscv_hwprobe *__pairs,
size_t __pair_count, size_t __cpusetsize,
__RISCV_HWPROBE_CPUS_TYPE __cpus,
unsigned int __flags)
__nonnull ((1)) __wur
__fortified_attr_access (__read_write__, 1, 2)
__fortified_attr_access (__read_only__, 4, 3);
__nonnull ((1)) __attr_access ((__read_write__, 1, 2));
/* Helper function usable from ifunc selectors that probes a single key. */
static __inline int

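Thanks to the transparent union, existing callers passing unsigned long int * keep compiling while new code can pass a cpu_set_t * (or a null pointer to query capabilities common to all harts). A sketch of a single-key query; the RISCV_HWPROBE_KEY_IMA_EXT_0 constant is assumed to come from <asm/hwprobe.h>:

    #include <sys/hwprobe.h>
    #include <stdio.h>

    int main (void)
    {
      struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };

      /* Null CPU set and zero size: report what every hart supports.  */
      if (__riscv_hwprobe (&pair, 1, 0, NULL, 0) != 0)
        return 1;
      printf ("IMA extension bitmask: 0x%llx\n",
              (unsigned long long) pair.value);
      return 0;
    }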
View File

@ -19,24 +19,77 @@
#ifndef _SYS_IFUNC_H
#define _SYS_IFUNC_H
#include <sys/cdefs.h>
/* A second argument is passed to the ifunc resolver. */
#define _IFUNC_ARG_HWCAP (1ULL << 62)
/* The prototype of a gnu indirect function resolver on AArch64 is
/* Maximum number of HWCAP elements that are currently supported. */
#define _IFUNC_HWCAP_MAX 4
/* The prototype of a GNU indirect function resolver on AArch64 is
ElfW(Addr) ifunc_resolver (uint64_t, const uint64_t *);
The following prototype is also compatible:
ElfW(Addr) ifunc_resolver (uint64_t, const __ifunc_arg_t *);
the first argument should have the _IFUNC_ARG_HWCAP bit set and
the remaining bits should match the AT_HWCAP settings. */
The first argument might have the _IFUNC_ARG_HWCAP bit set and
the remaining bits should match the AT_HWCAP settings.
If the _IFUNC_ARG_HWCAP bit is set in the first argument, then
the second argument is passed to the resolver function. In
this case, the second argument is a const pointer to a buffer
that allows access to all available HWCAP elements.
This buffer has its size in bytes at offset 0. The HWCAP elements
are available at offsets 8, 16, 24, 32... respectively for AT_HWCAP,
AT_HWCAP2, AT_HWCAP3, AT_HWCAP4... (these offsets are multiples of
sizeof (unsigned long)).
Indirect function resolvers must check availability of HWCAP
elements at runtime before accessing them using the size of the
buffer. */
/* Second argument to an ifunc resolver. */
struct __ifunc_arg_t
{
unsigned long _size; /* Size of the struct, so it can grow. */
unsigned long _size; /* Size of the struct, so it can grow. */
unsigned long _hwcap;
unsigned long _hwcap2;
unsigned long _hwcap2; /* End of 1st published struct. */
unsigned long _hwcap3;
unsigned long _hwcap4; /* End of 2nd published struct. */
};
typedef struct __ifunc_arg_t __ifunc_arg_t;
/* Constants for IDs of HWCAP elements to be used with the
__ifunc_hwcap function below. */
enum
{
_IFUNC_ARG_AT_HWCAP = 1,
_IFUNC_ARG_AT_HWCAP2 = 2,
_IFUNC_ARG_AT_HWCAP3 = 3,
_IFUNC_ARG_AT_HWCAP4 = 4,
};
/* A helper function to obtain HWCAP element by its ID from the
parameters ARG0 and ARG1 passed to the ifunc resolver. Note that
ID 1 corresponds to AT_HWCAP, ID 2 corresponds to AT_HWCAP2, etc.
If there is no element available for the requested ID then 0 is
returned. If ID doesn't match any supported AT_HWCAP{,2,...} value,
then 0 is also returned. */
static __inline unsigned long __attribute__ ((unused, always_inline))
__ifunc_hwcap (unsigned long __id,
unsigned long __arg0, const unsigned long *__arg1)
{
if (__glibc_likely (__arg0 & _IFUNC_ARG_HWCAP))
{
const unsigned long size = __arg1[0];
const unsigned long offset = __id * sizeof (unsigned long);
return offset < size && __id > 0 ? __arg1[__id] : 0;
}
return __id == 1 ? __arg0 : 0;
}
#endif
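A resolver reads any HWCAP word through __ifunc_hwcap without caring how many words the kernel and libc actually passed; missing words simply read as 0. A sketch of an AArch64 ifunc resolver; the SVE2 bit position is an assumption for illustration only:

    #include <sys/ifunc.h>

    extern void impl_sve2 (void);
    extern void impl_generic (void);
    typedef void (*impl_fn) (void);

    static impl_fn
    resolve_impl (unsigned long arg0, const unsigned long *arg1)
    {
      unsigned long hwcap2 = __ifunc_hwcap (_IFUNC_ARG_AT_HWCAP2, arg0, arg1);
      /* Bit 1 of AT_HWCAP2 is HWCAP2_SVE2 on current kernels (assumption).  */
      return (hwcap2 & (1UL << 1)) ? impl_sve2 : impl_generic;
    }

    void entry_point (void) __attribute__ ((ifunc ("resolve_impl")));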

View File

@ -121,7 +121,7 @@ enum
MS_ACTIVE = 1 << 30,
#define MS_ACTIVE MS_ACTIVE
#undef MS_NOUSER
MS_NOUSER = 1 << 31
MS_NOUSER = 1U << 31
#define MS_NOUSER MS_NOUSER
};

View File

@ -54,8 +54,4 @@ struct ttychars {
char tc_lnextc; /* literal next character */
};
#ifdef __USE_OLD_TTY
#include <sys/ttydefaults.h> /* to pick up character defaults */
#endif
#endif /* sys/ttychars.h */

View File

@ -1,6 +0,0 @@
/* Compatible <termio.h> for old `struct termio' ioctl interface.
This is obsolete; use the POSIX.1 `struct termios' interface
defined in <termios.h> instead. */
#include <termios.h>
#include <sys/ioctl.h>

View File

@ -61,6 +61,26 @@ extern int cfsetispeed (struct termios *__termios_p, speed_t __speed) __THROW;
extern int cfsetspeed (struct termios *__termios_p, speed_t __speed) __THROW;
#endif
#ifdef __USE_GNU
/* Interfaces that are explicitly numeric representations of baud rates */
typedef speed_t baud_t;
#define BAUD_MAX SPEED_MAX
/* Return the output baud rate stored in *TERMIOS_P. */
extern baud_t cfgetobaud (const struct termios *__termios_p) __THROW;
/* Return the input baud rate stored in *TERMIOS_P. */
extern baud_t cfgetibaud (const struct termios *__termios_p) __THROW;
/* Set the output baud rate stored in *TERMIOS_P to BAUD. */
extern int cfsetobaud (struct termios *__termios_p, baud_t __baud) __THROW;
/* Set the input baud rate stored in *TERMIOS_P to BAUD. */
extern int cfsetibaud (struct termios *__termios_p, baud_t __baud) __THROW;
/* Set both the input and output baud rates in *TERMIOS_OP to BAUD. */
extern int cfsetbaud (struct termios *__termios_p, baud_t __baud) __THROW;
#endif
/* Put the state of FD into *TERMIOS_P. */
extern int tcgetattr (int __fd, struct termios *__termios_p) __THROW;
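Unlike the legacy cfset*speed calls, the baud_t functions take plain numeric rates, so arbitrary rates need no Bnnn constant. A sketch, assuming a glibc that ships these __USE_GNU declarations:

    #define _GNU_SOURCE
    #include <termios.h>

    /* Request an arbitrary rate, e.g. 250000, in both directions.  */
    int set_custom_baud (int fd, baud_t rate)
    {
      struct termios tio;
      if (tcgetattr (fd, &tio) != 0)
        return -1;
      if (cfsetbaud (&tio, rate) != 0)
        return -1;
      return tcsetattr (fd, TCSANOW, &tio);
    }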

View File

@ -923,6 +923,24 @@
/* Return the cube root of X. */
#define cbrt(Val) __TGMATH_UNARY_REAL_ONLY (Val, cbrt)
#if __GLIBC_USE (IEC_60559_FUNCS_EXT_C23)
/* Return 1+X to the Y power. */
# define compoundn(Val1, Val2) \
__TGMATH_BINARY_FIRST_REAL_ONLY (Val1, Val2, compoundn)
/* Return X to the Y power. */
# define pown(Val1, Val2) __TGMATH_BINARY_FIRST_REAL_ONLY (Val1, Val2, pown)
/* Return X to the Y power. */
# define powr(Val1, Val2) __TGMATH_BINARY_REAL_ONLY (Val1, Val2, powr)
/* Return the Yth root of X. */
# define rootn(Val1, Val2) __TGMATH_BINARY_FIRST_REAL_ONLY (Val1, Val2, rootn)
/* Return 1/sqrt(X). */
# define rsqrt(Val) __TGMATH_UNARY_REAL_ONLY (Val, rsqrt)
#endif
/* Nearest integer, absolute value, and remainder functions. */

View File

@ -1231,4 +1231,4 @@ extern int close_range (unsigned int __fd, unsigned int __max_fd,
__END_DECLS
#endif /* unistd.h */
#endif /* unistd.h */

View File

@ -31,18 +31,6 @@ struct winsize
unsigned short int ws_ypixel;
};
#define NCC 8
struct termio
{
unsigned short int c_iflag; /* input mode flags */
unsigned short int c_oflag; /* output mode flags */
unsigned short int c_cflag; /* control mode flags */
unsigned short int c_lflag; /* local mode flags */
char c_line; /* line discipline */
/* Yes, this is really NCCS. */
unsigned char c_cc[32 /* NCCS */]; /* control characters */
};
/* modem lines */
#define TIOCM_LE 0x001 /* line enable */
#define TIOCM_DTR 0x002 /* data terminal ready */

View File

@ -1,34 +0,0 @@
/* struct termios definition. Linux/mips version.
Copyright (C) 2019-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<https://www.gnu.org/licenses/>. */
#ifndef _TERMIOS_H
# error "Never include <bits/termios-struct.h> directly; use <termios.h> instead."
#endif
#define NCCS 32
struct termios
{
tcflag_t c_iflag; /* input mode flags */
tcflag_t c_oflag; /* output mode flags */
tcflag_t c_cflag; /* control mode flags */
tcflag_t c_lflag; /* local mode flags */
cc_t c_line; /* line discipline */
cc_t c_cc[NCCS]; /* control characters */
#define _HAVE_STRUCT_TERMIOS_C_ISPEED 0
#define _HAVE_STRUCT_TERMIOS_C_OSPEED 0
};

View File

@ -32,17 +32,6 @@ struct winsize
unsigned short int ws_ypixel;
};
#define NCC 10
struct termio
{
unsigned short int c_iflag; /* input mode flags */
unsigned short int c_oflag; /* output mode flags */
unsigned short int c_cflag; /* control mode flags */
unsigned short int c_lflag; /* local mode flags */
unsigned char c_line; /* line discipline */
unsigned char c_cc[NCC]; /* control characters */
};
/* modem lines */
#define TIOCM_LE 0x001
#define TIOCM_DTR 0x002

View File

@ -35,5 +35,7 @@
#define CLOCAL 00100000
#ifdef __USE_MISC
# define ADDRB 04000000000
# define ADDRB 04000000000
# define CMSPAR 010000000000 /* Mark or space (stick) parity. */
# define CRTSCTS 020000000000 /* Flow control. */
#endif

View File

@ -17,29 +17,29 @@
<https://www.gnu.org/licenses/>. */
#ifndef _TERMIOS_H
# error "Never include <bits/termios-baud.h> directly; use <termios.h> instead."
# error "Never include <bits/termios-cbaud.h> directly; use <termios.h> instead."
#endif
#ifdef __USE_MISC
# define CBAUD 0000377
# define CBAUDEX 0000020
# define CMSPAR 010000000000 /* mark or space (stick) parity */
# define CRTSCTS 020000000000 /* flow control */
# define CBAUD 000000377
# define CBAUDEX 000000020
# define CIBAUD 077600000
# define IBSHIFT 16
#endif
#define B57600 00020
#define B115200 00021
#define B230400 00022
#define B460800 00023
#define B500000 00024
#define B576000 00025
#define B921600 00026
#define B1000000 00027
#define B1152000 00030
#define B1500000 00031
#define B2000000 00032
#define B2500000 00033
#define B3000000 00034
#define B3500000 00035
#define B4000000 00036
#define __MAX_BAUD B4000000
#define __B57600 00020
#define __B115200 00021
#define __B230400 00022
#define __B460800 00023
#define __B500000 00024
#define __B576000 00025
#define __B921600 00026
#define __B1000000 00027
#define __B1152000 00030
#define __B1500000 00031
#define __B2000000 00032
#define __B2500000 00033
#define __B3000000 00034
#define __B3500000 00035
#define __B4000000 00036
#define __BOTHER 00037

View File

@ -17,30 +17,29 @@
<https://www.gnu.org/licenses/>. */
#ifndef _TERMIOS_H
# error "Never include <bits/termios-baud.h> directly; use <termios.h> instead."
# error "Never include <bits/termios-cbaud.h> directly; use <termios.h> instead."
#endif
#ifdef __USE_MISC
# define CBAUD 0x0000100f
# define CBAUDEX 0x00001000
# define CIBAUD 0x100f0000 /* input baud rate (not used) */
# define CMSPAR 0x40000000 /* mark or space (stick) parity */
# define CRTSCTS 0x80000000 /* flow control */
# define CIBAUD 0x100f0000 /* input baud rate */
# define IBSHIFT 16
#endif
#define B57600 0x00001001
#define B115200 0x00001002
#define B230400 0x00001003
#define B460800 0x00001004
#define B76800 0x00001005
#define B153600 0x00001006
#define B307200 0x00001007
#define B614400 0x00001008
#define B921600 0x00001009
#define B500000 0x0000100a
#define B576000 0x0000100b
#define B1000000 0x0000100c
#define B1152000 0x0000100d
#define B1500000 0x0000100e
#define B2000000 0x0000100f
#define __MAX_BAUD B2000000
#define __B57600 0x00001001
#define __B115200 0x00001002
#define __B230400 0x00001003
#define __B460800 0x00001004
#define __B76800 0x00001005
#define __B153600 0x00001006
#define __B307200 0x00001007
#define __B614400 0x00001008
#define __B921600 0x00001009
#define __B500000 0x0000100a
#define __B576000 0x0000100b
#define __B1000000 0x0000100c
#define __B1152000 0x0000100d
#define __B1500000 0x0000100e
#define __B2000000 0x0000100f
#define __BOTHER 0x00001000

View File

@ -1,34 +0,0 @@
/* struct termios definition. Linux/sparc version.
Copyright (C) 2019-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library. If not, see
<https://www.gnu.org/licenses/>. */
#ifndef _TERMIOS_H
# error "Never include <bits/termios-struct.h> directly; use <termios.h> instead."
#endif
#define NCCS 17
struct termios
{
tcflag_t c_iflag; /* input mode flags */
tcflag_t c_oflag; /* output mode flags */
tcflag_t c_cflag; /* control mode flags */
tcflag_t c_lflag; /* local mode flags */
cc_t c_line; /* line discipline */
cc_t c_cc[NCCS]; /* control characters */
#define _HAVE_STRUCT_TERMIOS_C_ISPEED 0
#define _HAVE_STRUCT_TERMIOS_C_OSPEED 0
};

View File

@ -25,11 +25,15 @@
floating-point type with the IEEE 754 binary128 format, and this
glibc includes corresponding *f128 interfaces for it. The required
libgcc support was added some time after the basic compiler
support, for x86_64 and x86. */
support, for x86_64 and x86. Intel SYCL compiler doesn't support
_Float128: https://github.com/intel/llvm/issues/16903
*/
#if (defined __x86_64__ \
? __GNUC_PREREQ (4, 3) \
: (defined __GNU__ ? __GNUC_PREREQ (4, 5) : __GNUC_PREREQ (4, 4))) \
|| __glibc_clang_prereq (3, 4)
|| (__glibc_clang_prereq (3, 9) \
&& (!defined __INTEL_LLVM_COMPILER \
|| !defined SYCL_LANGUAGE_VERSION))
# define __HAVE_FLOAT128 1
#else
# define __HAVE_FLOAT128 0
@ -89,7 +93,7 @@ typedef _Complex float __cfloat128 __attribute__ ((__mode__ (__TC__)));
/* The type _Float128 exists only since GCC 7.0. */
# if !__GNUC_PREREQ (7, 0) \
|| (defined __cplusplus && !__GNUC_PREREQ (13, 0)) \
|| __glibc_clang_prereq (3, 4)
|| __glibc_clang_prereq (3, 9)
typedef __float128 _Float128;
# endif

View File

@ -22,6 +22,8 @@ pub const Step = @import("Build/Step.zig");
pub const Module = @import("Build/Module.zig");
pub const Watch = @import("Build/Watch.zig");
pub const Fuzz = @import("Build/Fuzz.zig");
pub const WebServer = @import("Build/WebServer.zig");
pub const abi = @import("Build/abi.zig");
/// Shared state among all Build instances.
graph: *Graph,
@ -125,6 +127,7 @@ pub const Graph = struct {
random_seed: u32 = 0,
dependency_cache: InitializedDepMap = .empty,
allow_so_scripts: ?bool = null,
time_report: bool,
};
const AvailableDeps = []const struct { []const u8, []const u8 };

View File

@ -1,108 +1,134 @@
const builtin = @import("builtin");
const std = @import("../std.zig");
const Build = std.Build;
const Cache = Build.Cache;
const Step = std.Build.Step;
const assert = std.debug.assert;
const fatal = std.process.fatal;
const Allocator = std.mem.Allocator;
const log = std.log;
const Coverage = std.debug.Coverage;
const abi = Build.abi.fuzz;
const Fuzz = @This();
const build_runner = @import("root");
pub const WebServer = @import("Fuzz/WebServer.zig");
pub const abi = @import("Fuzz/abi.zig");
ws: *Build.WebServer,
pub fn start(
gpa: Allocator,
arena: Allocator,
global_cache_directory: Build.Cache.Directory,
zig_lib_directory: Build.Cache.Directory,
zig_exe_path: []const u8,
thread_pool: *std.Thread.Pool,
all_steps: []const *Step,
ttyconf: std.io.tty.Config,
listen_address: std.net.Address,
prog_node: std.Progress.Node,
) Allocator.Error!void {
const fuzz_run_steps = block: {
const rebuild_node = prog_node.start("Rebuilding Unit Tests", 0);
/// Allocated into `ws.gpa`.
run_steps: []const *Step.Run,
wait_group: std.Thread.WaitGroup,
prog_node: std.Progress.Node,
/// Protects `coverage_files`.
coverage_mutex: std.Thread.Mutex,
coverage_files: std.AutoArrayHashMapUnmanaged(u64, CoverageMap),
queue_mutex: std.Thread.Mutex,
queue_cond: std.Thread.Condition,
msg_queue: std.ArrayListUnmanaged(Msg),
const Msg = union(enum) {
coverage: struct {
id: u64,
run: *Step.Run,
},
entry_point: struct {
coverage_id: u64,
addr: u64,
},
};
const CoverageMap = struct {
mapped_memory: []align(std.heap.page_size_min) const u8,
coverage: Coverage,
source_locations: []Coverage.SourceLocation,
/// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested.
entry_points: std.ArrayListUnmanaged(u32),
start_timestamp: i64,
fn deinit(cm: *CoverageMap, gpa: Allocator) void {
std.posix.munmap(cm.mapped_memory);
cm.coverage.deinit(gpa);
cm.* = undefined;
}
};
pub fn init(ws: *Build.WebServer) Allocator.Error!Fuzz {
const gpa = ws.gpa;
const run_steps: []const *Step.Run = steps: {
var steps: std.ArrayListUnmanaged(*Step.Run) = .empty;
defer steps.deinit(gpa);
const rebuild_node = ws.root_prog_node.start("Rebuilding Unit Tests", 0);
defer rebuild_node.end();
var wait_group: std.Thread.WaitGroup = .{};
defer wait_group.wait();
var fuzz_run_steps: std.ArrayListUnmanaged(*Step.Run) = .empty;
defer fuzz_run_steps.deinit(gpa);
for (all_steps) |step| {
const run = step.cast(Step.Run) orelse continue;
if (run.fuzz_tests.items.len > 0 and run.producer != null) {
thread_pool.spawnWg(&wait_group, rebuildTestsWorkerRun, .{ run, ttyconf, rebuild_node });
try fuzz_run_steps.append(gpa, run);
}
}
if (fuzz_run_steps.items.len == 0) fatal("no fuzz tests found", .{});
rebuild_node.setEstimatedTotalItems(fuzz_run_steps.items.len);
break :block try arena.dupe(*Step.Run, fuzz_run_steps.items);
};
var rebuild_wg: std.Thread.WaitGroup = .{};
defer rebuild_wg.wait();
// Detect failure.
for (fuzz_run_steps) |run| {
for (ws.all_steps) |step| {
const run = step.cast(Step.Run) orelse continue;
if (run.producer == null) continue;
if (run.fuzz_tests.items.len == 0) continue;
try steps.append(gpa, run);
ws.thread_pool.spawnWg(&rebuild_wg, rebuildTestsWorkerRun, .{ run, gpa, ws.ttyconf, rebuild_node });
}
if (steps.items.len == 0) fatal("no fuzz tests found", .{});
rebuild_node.setEstimatedTotalItems(steps.items.len);
break :steps try gpa.dupe(*Step.Run, steps.items);
};
errdefer gpa.free(run_steps);
for (run_steps) |run| {
assert(run.fuzz_tests.items.len > 0);
if (run.rebuilt_executable == null)
fatal("one or more unit tests failed to be rebuilt in fuzz mode", .{});
}
var web_server: WebServer = .{
.gpa = gpa,
.global_cache_directory = global_cache_directory,
.zig_lib_directory = zig_lib_directory,
.zig_exe_path = zig_exe_path,
.listen_address = listen_address,
.fuzz_run_steps = fuzz_run_steps,
.msg_queue = .{},
.mutex = .{},
.condition = .{},
.coverage_files = .{},
return .{
.ws = ws,
.run_steps = run_steps,
.wait_group = .{},
.prog_node = .none,
.coverage_files = .empty,
.coverage_mutex = .{},
.coverage_condition = .{},
.base_timestamp = std.time.nanoTimestamp(),
.queue_mutex = .{},
.queue_cond = .{},
.msg_queue = .empty,
};
// For accepting HTTP connections.
const web_server_thread = std.Thread.spawn(.{}, WebServer.run, .{&web_server}) catch |err| {
fatal("unable to spawn web server thread: {s}", .{@errorName(err)});
};
defer web_server_thread.join();
// For polling messages and sending updates to subscribers.
const coverage_thread = std.Thread.spawn(.{}, WebServer.coverageRun, .{&web_server}) catch |err| {
fatal("unable to spawn coverage thread: {s}", .{@errorName(err)});
};
defer coverage_thread.join();
{
const fuzz_node = prog_node.start("Fuzzing", fuzz_run_steps.len);
defer fuzz_node.end();
var wait_group: std.Thread.WaitGroup = .{};
defer wait_group.wait();
for (fuzz_run_steps) |run| {
for (run.fuzz_tests.items) |unit_test_index| {
assert(run.rebuilt_executable != null);
thread_pool.spawnWg(&wait_group, fuzzWorkerRun, .{
run, &web_server, unit_test_index, ttyconf, fuzz_node,
});
}
}
}
log.err("all fuzz workers crashed", .{});
}
fn rebuildTestsWorkerRun(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) void {
rebuildTestsWorkerRunFallible(run, ttyconf, parent_prog_node) catch |err| {
pub fn start(fuzz: *Fuzz) void {
const ws = fuzz.ws;
fuzz.prog_node = ws.root_prog_node.start("Fuzzing", fuzz.run_steps.len);
// For polling messages and sending updates to subscribers.
fuzz.wait_group.start();
_ = std.Thread.spawn(.{}, coverageRun, .{fuzz}) catch |err| {
fuzz.wait_group.finish();
fatal("unable to spawn coverage thread: {s}", .{@errorName(err)});
};
for (fuzz.run_steps) |run| {
for (run.fuzz_tests.items) |unit_test_index| {
assert(run.rebuilt_executable != null);
ws.thread_pool.spawnWg(&fuzz.wait_group, fuzzWorkerRun, .{
fuzz, run, unit_test_index,
});
}
}
}
pub fn deinit(fuzz: *Fuzz) void {
if (true) @panic("TODO: terminate the fuzzer processes");
fuzz.wait_group.wait();
fuzz.prog_node.end();
const gpa = fuzz.ws.gpa;
gpa.free(fuzz.run_steps);
}
fn rebuildTestsWorkerRun(run: *Step.Run, gpa: Allocator, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) void {
rebuildTestsWorkerRunFallible(run, gpa, ttyconf, parent_prog_node) catch |err| {
const compile = run.producer.?;
log.err("step '{s}': failed to rebuild in fuzz mode: {s}", .{
compile.step.name, @errorName(err),
@ -110,14 +136,12 @@ fn rebuildTestsWorkerRun(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog
};
}
fn rebuildTestsWorkerRunFallible(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) !void {
const gpa = run.step.owner.allocator;
fn rebuildTestsWorkerRunFallible(run: *Step.Run, gpa: Allocator, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) !void {
const compile = run.producer.?;
const prog_node = parent_prog_node.start(compile.step.name, 0);
defer prog_node.end();
const result = compile.rebuildInFuzzMode(prog_node);
const result = compile.rebuildInFuzzMode(gpa, prog_node);
const show_compile_errors = compile.step.result_error_bundle.errorMessageCount() > 0;
const show_error_msgs = compile.step.result_error_msgs.items.len > 0;
@ -138,24 +162,22 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, ttyconf: std.io.tty.Config, par
}
fn fuzzWorkerRun(
fuzz: *Fuzz,
run: *Step.Run,
web_server: *WebServer,
unit_test_index: u32,
ttyconf: std.io.tty.Config,
parent_prog_node: std.Progress.Node,
) void {
const gpa = run.step.owner.allocator;
const test_name = run.cached_test_metadata.?.testName(unit_test_index);
const prog_node = parent_prog_node.start(test_name, 0);
const prog_node = fuzz.prog_node.start(test_name, 0);
defer prog_node.end();
run.rerunInFuzzMode(web_server, unit_test_index, prog_node) catch |err| switch (err) {
run.rerunInFuzzMode(fuzz, unit_test_index, prog_node) catch |err| switch (err) {
error.MakeFailed => {
var buf: [256]u8 = undefined;
const w = std.debug.lockStderrWriter(&buf);
defer std.debug.unlockStderrWriter();
build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = ttyconf }, w, false) catch {};
build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = fuzz.ws.ttyconf }, w, false) catch {};
return;
},
else => {
@ -166,3 +188,270 @@ fn fuzzWorkerRun(
},
};
}
pub fn serveSourcesTar(fuzz: *Fuzz, req: *std.http.Server.Request) !void {
const gpa = fuzz.ws.gpa;
var arena_state: std.heap.ArenaAllocator = .init(gpa);
defer arena_state.deinit();
const arena = arena_state.allocator();
const DedupTable = std.ArrayHashMapUnmanaged(Build.Cache.Path, void, Build.Cache.Path.TableAdapter, false);
var dedup_table: DedupTable = .empty;
defer dedup_table.deinit(gpa);
for (fuzz.run_steps) |run_step| {
const compile_inputs = run_step.producer.?.step.inputs.table;
for (compile_inputs.keys(), compile_inputs.values()) |dir_path, *file_list| {
try dedup_table.ensureUnusedCapacity(gpa, file_list.items.len);
for (file_list.items) |sub_path| {
if (!std.mem.endsWith(u8, sub_path, ".zig")) continue;
const joined_path = try dir_path.join(arena, sub_path);
dedup_table.putAssumeCapacity(joined_path, {});
}
}
}
const deduped_paths = dedup_table.keys();
const SortContext = struct {
pub fn lessThan(this: @This(), lhs: Build.Cache.Path, rhs: Build.Cache.Path) bool {
_ = this;
return switch (std.mem.order(u8, lhs.root_dir.path orelse ".", rhs.root_dir.path orelse ".")) {
.lt => true,
.gt => false,
.eq => std.mem.lessThan(u8, lhs.sub_path, rhs.sub_path),
};
}
};
std.mem.sortUnstable(Build.Cache.Path, deduped_paths, SortContext{}, SortContext.lessThan);
return fuzz.ws.serveTarFile(req, deduped_paths);
}
pub const Previous = struct {
unique_runs: usize,
entry_points: usize,
pub const init: Previous = .{ .unique_runs = 0, .entry_points = 0 };
};
pub fn sendUpdate(
fuzz: *Fuzz,
socket: *std.http.WebSocket,
prev: *Previous,
) !void {
fuzz.coverage_mutex.lock();
defer fuzz.coverage_mutex.unlock();
const coverage_maps = fuzz.coverage_files.values();
if (coverage_maps.len == 0) return;
// TODO: handle multiple fuzz steps in the WebSocket packets
const coverage_map = &coverage_maps[0];
const cov_header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
// TODO: this isn't sound! We need to do volatile reads of these bits rather than handing the
// buffer off to the kernel, because we might race with the fuzzer process[es]. This brings the
// whole mmap strategy into question. Incidentally, I wonder if post-writergate we could pass
// this data straight to the socket with sendfile...
const seen_pcs = cov_header.seenBits();
const n_runs = @atomicLoad(usize, &cov_header.n_runs, .monotonic);
const unique_runs = @atomicLoad(usize, &cov_header.unique_runs, .monotonic);
if (prev.unique_runs != unique_runs) {
// There has been an update.
if (prev.unique_runs == 0) {
// We need to send initial context.
const header: abi.SourceIndexHeader = .{
.directories_len = @intCast(coverage_map.coverage.directories.entries.len),
.files_len = @intCast(coverage_map.coverage.files.entries.len),
.source_locations_len = @intCast(coverage_map.source_locations.len),
.string_bytes_len = @intCast(coverage_map.coverage.string_bytes.items.len),
.start_timestamp = coverage_map.start_timestamp,
};
const iovecs: [5]std.posix.iovec_const = .{
makeIov(@ptrCast(&header)),
makeIov(@ptrCast(coverage_map.coverage.directories.keys())),
makeIov(@ptrCast(coverage_map.coverage.files.keys())),
makeIov(@ptrCast(coverage_map.source_locations)),
makeIov(coverage_map.coverage.string_bytes.items),
};
try socket.writeMessagev(&iovecs, .binary);
}
const header: abi.CoverageUpdateHeader = .{
.n_runs = n_runs,
.unique_runs = unique_runs,
};
const iovecs: [2]std.posix.iovec_const = .{
makeIov(@ptrCast(&header)),
makeIov(@ptrCast(seen_pcs)),
};
try socket.writeMessagev(&iovecs, .binary);
prev.unique_runs = unique_runs;
}
if (prev.entry_points != coverage_map.entry_points.items.len) {
const header: abi.EntryPointHeader = .init(@intCast(coverage_map.entry_points.items.len));
const iovecs: [2]std.posix.iovec_const = .{
makeIov(@ptrCast(&header)),
makeIov(@ptrCast(coverage_map.entry_points.items)),
};
try socket.writeMessagev(&iovecs, .binary);
prev.entry_points = coverage_map.entry_points.items.len;
}
}
fn coverageRun(fuzz: *Fuzz) void {
defer fuzz.wait_group.finish();
fuzz.queue_mutex.lock();
defer fuzz.queue_mutex.unlock();
while (true) {
fuzz.queue_cond.wait(&fuzz.queue_mutex);
for (fuzz.msg_queue.items) |msg| switch (msg) {
.coverage => |coverage| prepareTables(fuzz, coverage.run, coverage.id) catch |err| switch (err) {
error.AlreadyReported => continue,
else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
},
.entry_point => |entry_point| addEntryPoint(fuzz, entry_point.coverage_id, entry_point.addr) catch |err| switch (err) {
error.AlreadyReported => continue,
else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
},
};
fuzz.msg_queue.clearRetainingCapacity();
}
}
fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutOfMemory, AlreadyReported }!void {
const ws = fuzz.ws;
const gpa = ws.gpa;
fuzz.coverage_mutex.lock();
defer fuzz.coverage_mutex.unlock();
const gop = try fuzz.coverage_files.getOrPut(gpa, coverage_id);
if (gop.found_existing) {
// We are fuzzing the same executable with multiple threads.
// Perhaps the same unit test; perhaps a different one. In any
// case, since the coverage file is the same, we only have to
// notice changes to that one file in order to learn coverage for
// this particular executable.
return;
}
errdefer _ = fuzz.coverage_files.pop();
gop.value_ptr.* = .{
.coverage = std.debug.Coverage.init,
.mapped_memory = undefined, // populated below
.source_locations = undefined, // populated below
.entry_points = .{},
.start_timestamp = ws.now(),
};
errdefer gop.value_ptr.coverage.deinit(gpa);
const rebuilt_exe_path = run_step.rebuilt_executable.?;
var debug_info = std.debug.Info.load(gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| {
log.err("step '{s}': failed to load debug information for '{f}': {s}", .{
run_step.step.name, rebuilt_exe_path, @errorName(err),
});
return error.AlreadyReported;
};
defer debug_info.deinit(gpa);
const coverage_file_path: Build.Cache.Path = .{
.root_dir = run_step.step.owner.cache_root,
.sub_path = "v/" ++ std.fmt.hex(coverage_id),
};
var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| {
log.err("step '{s}': failed to load coverage file '{f}': {s}", .{
run_step.step.name, coverage_file_path, @errorName(err),
});
return error.AlreadyReported;
};
defer coverage_file.close();
const file_size = coverage_file.getEndPos() catch |err| {
log.err("unable to check len of coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) });
return error.AlreadyReported;
};
const mapped_memory = std.posix.mmap(
null,
file_size,
std.posix.PROT.READ,
.{ .TYPE = .SHARED },
coverage_file.handle,
0,
) catch |err| {
log.err("failed to map coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) });
return error.AlreadyReported;
};
gop.value_ptr.mapped_memory = mapped_memory;
const header: *const abi.SeenPcsHeader = @ptrCast(mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
const pcs = header.pcAddrs();
const source_locations = try gpa.alloc(Coverage.SourceLocation, pcs.len);
errdefer gpa.free(source_locations);
// Unfortunately the PCs array that LLVM gives us from the 8-bit PC
// counters feature is not sorted.
var sorted_pcs: std.MultiArrayList(struct { pc: u64, index: u32, sl: Coverage.SourceLocation }) = .{};
defer sorted_pcs.deinit(gpa);
try sorted_pcs.resize(gpa, pcs.len);
@memcpy(sorted_pcs.items(.pc), pcs);
for (sorted_pcs.items(.index), 0..) |*v, i| v.* = @intCast(i);
sorted_pcs.sortUnstable(struct {
addrs: []const u64,
pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
return ctx.addrs[a_index] < ctx.addrs[b_index];
}
}{ .addrs = sorted_pcs.items(.pc) });
debug_info.resolveAddresses(gpa, sorted_pcs.items(.pc), sorted_pcs.items(.sl)) catch |err| {
log.err("failed to resolve addresses to source locations: {s}", .{@errorName(err)});
return error.AlreadyReported;
};
for (sorted_pcs.items(.index), sorted_pcs.items(.sl)) |i, sl| source_locations[i] = sl;
gop.value_ptr.source_locations = source_locations;
ws.notifyUpdate();
}
fn addEntryPoint(fuzz: *Fuzz, coverage_id: u64, addr: u64) error{ AlreadyReported, OutOfMemory }!void {
fuzz.coverage_mutex.lock();
defer fuzz.coverage_mutex.unlock();
const coverage_map = fuzz.coverage_files.getPtr(coverage_id).?;
const header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
const pcs = header.pcAddrs();
// Since this pcs list is unsorted, we must do a linear scan for the best index.
const index = i: {
var best: usize = 0;
for (pcs[1..], 1..) |elem_addr, i| {
if (elem_addr == addr) break :i i;
if (elem_addr > addr) continue;
if (elem_addr > pcs[best]) best = i;
}
break :i best;
};
if (index >= pcs.len) {
log.err("unable to find unit test entry address 0x{x} in source locations (range: 0x{x} to 0x{x})", .{
addr, pcs[0], pcs[pcs.len - 1],
});
return error.AlreadyReported;
}
if (false) {
const sl = coverage_map.source_locations[index];
const file_name = coverage_map.coverage.stringAt(coverage_map.coverage.fileAt(sl.file).basename);
log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index {d} between {x} and {x}", .{
addr, file_name, sl.line, sl.column, index, pcs[index - 1], pcs[index + 1],
});
}
try coverage_map.entry_points.append(fuzz.ws.gpa, @intCast(index));
}
fn makeIov(s: []const u8) std.posix.iovec_const {
return .{
.base = s.ptr,
.len = s.len,
};
}

View File

@ -1,711 +0,0 @@
const builtin = @import("builtin");
const std = @import("../../std.zig");
const Allocator = std.mem.Allocator;
const Build = std.Build;
const Step = std.Build.Step;
const Coverage = std.debug.Coverage;
const abi = std.Build.Fuzz.abi;
const log = std.log;
const assert = std.debug.assert;
const Cache = std.Build.Cache;
const Path = Cache.Path;
const WebServer = @This();
gpa: Allocator,
global_cache_directory: Build.Cache.Directory,
zig_lib_directory: Build.Cache.Directory,
zig_exe_path: []const u8,
listen_address: std.net.Address,
fuzz_run_steps: []const *Step.Run,
/// Messages from fuzz workers. Protected by mutex.
msg_queue: std.ArrayListUnmanaged(Msg),
/// Protects `msg_queue` only.
mutex: std.Thread.Mutex,
/// Signaled when there is a message in `msg_queue`.
condition: std.Thread.Condition,
coverage_files: std.AutoArrayHashMapUnmanaged(u64, CoverageMap),
/// Protects `coverage_files` only.
coverage_mutex: std.Thread.Mutex,
/// Signaled when `coverage_files` changes.
coverage_condition: std.Thread.Condition,
/// Time at initialization of WebServer.
base_timestamp: i128,
const fuzzer_bin_name = "fuzzer";
const fuzzer_arch_os_abi = "wasm32-freestanding";
const fuzzer_cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext";
const CoverageMap = struct {
mapped_memory: []align(std.heap.page_size_min) const u8,
coverage: Coverage,
source_locations: []Coverage.SourceLocation,
/// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested.
entry_points: std.ArrayListUnmanaged(u32),
start_timestamp: i64,
fn deinit(cm: *CoverageMap, gpa: Allocator) void {
std.posix.munmap(cm.mapped_memory);
cm.coverage.deinit(gpa);
cm.* = undefined;
}
};
const Msg = union(enum) {
coverage: struct {
id: u64,
run: *Step.Run,
},
entry_point: struct {
coverage_id: u64,
addr: u64,
},
};
pub fn run(ws: *WebServer) void {
var http_server = ws.listen_address.listen(.{
.reuse_address = true,
}) catch |err| {
log.err("failed to listen to port {d}: {s}", .{ ws.listen_address.in.getPort(), @errorName(err) });
return;
};
const port = http_server.listen_address.in.getPort();
log.info("web interface listening at http://127.0.0.1:{d}/", .{port});
if (ws.listen_address.in.getPort() == 0)
log.info("hint: pass --port {d} to use this same port next time", .{port});
while (true) {
const connection = http_server.accept() catch |err| {
log.err("failed to accept connection: {s}", .{@errorName(err)});
return;
};
_ = std.Thread.spawn(.{}, accept, .{ ws, connection }) catch |err| {
log.err("unable to spawn connection thread: {s}", .{@errorName(err)});
connection.stream.close();
continue;
};
}
}
fn now(s: *const WebServer) i64 {
return @intCast(std.time.nanoTimestamp() - s.base_timestamp);
}
fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
defer connection.stream.close();
var sr = connection.stream.reader();
var rb: [0x4000]u8 = undefined;
var br = sr.interface().buffered(&rb);
var sw = connection.stream.writer();
var wb: [0x4000]u8 = undefined;
var bw = sw.interface().buffered(&wb);
var server: std.http.Server = .init(&br, &bw);
var web_socket: std.http.WebSocket = undefined;
var ws_recv_buffer: [0x4000]u8 align(4) = undefined;
while (server.reader.state == .ready) {
var request = server.receiveHead() catch |err| switch (err) {
error.HttpConnectionClosing => return,
else => {
log.err("closing http connection: {s}", .{@errorName(err)});
return;
},
};
if (web_socket.init(&request, &ws_recv_buffer) catch |err| {
log.err("initializing web socket: {s}", .{@errorName(err)});
return;
}) {
serveWebSocket(ws, &web_socket) catch |err| {
log.err("unable to serve web socket connection: {s}", .{@errorName(err)});
return;
};
} else {
serveRequest(ws, &request) catch |err| switch (err) {
error.AlreadyReported => return,
else => |e| {
log.err("unable to serve {s}: {s}", .{ request.head.target, @errorName(e) });
return;
},
};
}
}
}
fn serveRequest(ws: *WebServer, request: *std.http.Server.Request) !void {
if (std.mem.eql(u8, request.head.target, "/") or
std.mem.eql(u8, request.head.target, "/debug") or
std.mem.eql(u8, request.head.target, "/debug/"))
{
try serveFile(ws, request, "fuzzer/web/index.html", "text/html");
} else if (std.mem.eql(u8, request.head.target, "/main.js") or
std.mem.eql(u8, request.head.target, "/debug/main.js"))
{
try serveFile(ws, request, "fuzzer/web/main.js", "application/javascript");
} else if (std.mem.eql(u8, request.head.target, "/main.wasm")) {
try serveWasm(ws, request, .ReleaseFast);
} else if (std.mem.eql(u8, request.head.target, "/debug/main.wasm")) {
try serveWasm(ws, request, .Debug);
} else if (std.mem.eql(u8, request.head.target, "/sources.tar") or
std.mem.eql(u8, request.head.target, "/debug/sources.tar"))
{
try serveSourcesTar(ws, request);
} else {
try request.respond("not found", .{
.status = .not_found,
.extra_headers = &.{
.{ .name = "content-type", .value = "text/plain" },
},
});
}
}
fn serveFile(
ws: *WebServer,
request: *std.http.Server.Request,
name: []const u8,
content_type: []const u8,
) !void {
const gpa = ws.gpa;
// The desired API is actually sendfile, which will require enhancing std.http.Server.
// We load the file with every request so that the user can make changes to the file
// and refresh the HTML page without restarting this server.
const file_contents = ws.zig_lib_directory.handle.readFileAlloc(name, gpa, .limited(10 * 1024 * 1024)) catch |err| {
log.err("failed to read '{f}{s}': {t}", .{ ws.zig_lib_directory, name, err });
return error.AlreadyReported;
};
defer gpa.free(file_contents);
try request.respond(file_contents, .{
.extra_headers = &.{
.{ .name = "content-type", .value = content_type },
cache_control_header,
},
});
}
fn serveWasm(
ws: *WebServer,
request: *std.http.Server.Request,
optimize_mode: std.builtin.OptimizeMode,
) !void {
const gpa = ws.gpa;
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
const arena = arena_instance.allocator();
// Do the compilation every request, so that the user can edit the files
// and see the changes without restarting the server.
const wasm_base_path = try buildWasmBinary(ws, arena, optimize_mode);
const bin_name = try std.zig.binNameAlloc(arena, .{
.root_name = fuzzer_bin_name,
.target = &(std.zig.system.resolveTargetQuery(std.Build.parseTargetQuery(.{
.arch_os_abi = fuzzer_arch_os_abi,
.cpu_features = fuzzer_cpu_features,
}) catch unreachable) catch unreachable),
.output_mode = .Exe,
});
// std.http.Server does not have a sendfile API yet.
const bin_path = try wasm_base_path.join(arena, bin_name);
const file_contents = try bin_path.root_dir.handle.readFileAlloc(bin_path.sub_path, gpa, .limited(10 * 1024 * 1024));
defer gpa.free(file_contents);
try request.respond(file_contents, .{
.extra_headers = &.{
.{ .name = "content-type", .value = "application/wasm" },
cache_control_header,
},
});
}
fn buildWasmBinary(
ws: *WebServer,
arena: Allocator,
optimize_mode: std.builtin.OptimizeMode,
) !Path {
const gpa = ws.gpa;
const main_src_path: Build.Cache.Path = .{
.root_dir = ws.zig_lib_directory,
.sub_path = "fuzzer/web/main.zig",
};
const walk_src_path: Build.Cache.Path = .{
.root_dir = ws.zig_lib_directory,
.sub_path = "docs/wasm/Walk.zig",
};
const html_render_src_path: Build.Cache.Path = .{
.root_dir = ws.zig_lib_directory,
.sub_path = "docs/wasm/html_render.zig",
};
var argv: std.ArrayListUnmanaged([]const u8) = .empty;
try argv.appendSlice(arena, &.{
ws.zig_exe_path, "build-exe", //
"-fno-entry", //
"-O", @tagName(optimize_mode), //
"-target", fuzzer_arch_os_abi, //
"-mcpu", fuzzer_cpu_features, //
"--cache-dir", ws.global_cache_directory.path orelse ".", //
"--global-cache-dir", ws.global_cache_directory.path orelse ".", //
"--name", fuzzer_bin_name, //
"-rdynamic", //
"-fsingle-threaded", //
"--dep", "Walk", //
"--dep", "html_render", //
try std.fmt.allocPrint(arena, "-Mroot={f}", .{main_src_path}), //
try std.fmt.allocPrint(arena, "-MWalk={f}", .{walk_src_path}), //
"--dep", "Walk", //
try std.fmt.allocPrint(arena, "-Mhtml_render={f}", .{html_render_src_path}), //
"--listen=-",
});
var child = std.process.Child.init(argv.items, gpa);
child.stdin_behavior = .Pipe;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Pipe;
try child.spawn();
var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
defer poller.deinit();
try sendMessage(child.stdin.?, .update);
try sendMessage(child.stdin.?, .exit);
var result: ?Path = null;
var result_error_bundle = std.zig.ErrorBundle.empty;
const stdout = poller.reader(.stdout);
poll: while (true) {
const Header = std.zig.Server.Message.Header;
while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll;
const header = stdout.takeStruct(Header, .little) catch unreachable;
while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
return error.ZigProtocolVersionMismatch;
}
},
.error_bundle => {
const EbHdr = std.zig.Server.Message.ErrorBundle;
const eb_hdr = @as(*align(1) const EbHdr, @ptrCast(body));
const extra_bytes =
body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len];
const string_bytes =
body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len];
// TODO: use @ptrCast when the compiler supports it
const unaligned_extra = std.mem.bytesAsSlice(u32, extra_bytes);
const extra_array = try arena.alloc(u32, unaligned_extra.len);
@memcpy(extra_array, unaligned_extra);
result_error_bundle = .{
.string_bytes = try arena.dupe(u8, string_bytes),
.extra = extra_array,
};
},
.emit_digest => {
const EmitDigest = std.zig.Server.Message.EmitDigest;
const ebp_hdr = @as(*align(1) const EmitDigest, @ptrCast(body));
if (!ebp_hdr.flags.cache_hit) {
log.info("source changes detected; rebuilt wasm component", .{});
}
const digest = body[@sizeOf(EmitDigest)..][0..Cache.bin_digest_len];
result = .{
.root_dir = ws.global_cache_directory,
.sub_path = try arena.dupe(u8, "o" ++ std.fs.path.sep_str ++ Cache.binToHex(digest.*)),
};
},
else => {}, // ignore other messages
}
}
const stderr_contents = try poller.toOwnedSlice(.stderr);
if (stderr_contents.len > 0) {
std.debug.print("{s}", .{stderr_contents});
}
// Send EOF to stdin.
child.stdin.?.close();
child.stdin = null;
switch (try child.wait()) {
.Exited => |code| {
if (code != 0) {
log.err(
"the following command exited with error code {d}:\n{s}",
.{ code, try Build.Step.allocPrintCmd(arena, null, argv.items) },
);
return error.WasmCompilationFailed;
}
},
.Signal, .Stopped, .Unknown => {
log.err(
"the following command terminated unexpectedly:\n{s}",
.{try Build.Step.allocPrintCmd(arena, null, argv.items)},
);
return error.WasmCompilationFailed;
},
}
if (result_error_bundle.errorMessageCount() > 0) {
const color = std.zig.Color.auto;
result_error_bundle.renderToStdErr(color.renderOptions());
log.err("the following command failed with {d} compilation errors:\n{s}", .{
result_error_bundle.errorMessageCount(),
try Build.Step.allocPrintCmd(arena, null, argv.items),
});
return error.WasmCompilationFailed;
}
return result orelse {
log.err("child process failed to report result\n{s}", .{
try Build.Step.allocPrintCmd(arena, null, argv.items),
});
return error.WasmCompilationFailed;
};
}
fn sendMessage(file: std.fs.File, tag: std.zig.Client.Message.Tag) !void {
const header: std.zig.Client.Message.Header = .{
.tag = tag,
.bytes_len = 0,
};
try file.writeAll(std.mem.asBytes(&header));
}
fn serveWebSocket(ws: *WebServer, web_socket: *std.http.WebSocket) !void {
ws.coverage_mutex.lock();
defer ws.coverage_mutex.unlock();
// On first connection, the client needs to know what time the server
// thinks it is to rebase timestamps.
{
const timestamp_message: abi.CurrentTime = .{ .base = ws.now() };
try web_socket.writeMessage(std.mem.asBytes(&timestamp_message), .binary);
}
// On first connection, the client needs all the coverage information
// so that subsequent updates can contain only the updated bits.
var prev_unique_runs: usize = 0;
var prev_entry_points: usize = 0;
try sendCoverageContext(ws, web_socket, &prev_unique_runs, &prev_entry_points);
while (true) {
ws.coverage_condition.timedWait(&ws.coverage_mutex, std.time.ns_per_ms * 500) catch {};
try sendCoverageContext(ws, web_socket, &prev_unique_runs, &prev_entry_points);
}
}
fn sendCoverageContext(
ws: *WebServer,
web_socket: *std.http.WebSocket,
prev_unique_runs: *usize,
prev_entry_points: *usize,
) !void {
const coverage_maps = ws.coverage_files.values();
if (coverage_maps.len == 0) return;
// TODO: make each events URL correspond to one coverage map
const coverage_map = &coverage_maps[0];
const cov_header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
const seen_pcs = cov_header.seenBits();
const n_runs = @atomicLoad(usize, &cov_header.n_runs, .monotonic);
const unique_runs = @atomicLoad(usize, &cov_header.unique_runs, .monotonic);
if (prev_unique_runs.* != unique_runs) {
// There has been an update.
if (prev_unique_runs.* == 0) {
// We need to send initial context.
const header: abi.SourceIndexHeader = .{
.flags = .{},
.directories_len = @intCast(coverage_map.coverage.directories.entries.len),
.files_len = @intCast(coverage_map.coverage.files.entries.len),
.source_locations_len = @intCast(coverage_map.source_locations.len),
.string_bytes_len = @intCast(coverage_map.coverage.string_bytes.items.len),
.start_timestamp = coverage_map.start_timestamp,
};
const iovecs: [5]std.posix.iovec_const = .{
makeIov(std.mem.asBytes(&header)),
makeIov(std.mem.sliceAsBytes(coverage_map.coverage.directories.keys())),
makeIov(std.mem.sliceAsBytes(coverage_map.coverage.files.keys())),
makeIov(std.mem.sliceAsBytes(coverage_map.source_locations)),
makeIov(coverage_map.coverage.string_bytes.items),
};
try web_socket.writeMessagev(&iovecs, .binary);
}
const header: abi.CoverageUpdateHeader = .{
.n_runs = n_runs,
.unique_runs = unique_runs,
};
const iovecs: [2]std.posix.iovec_const = .{
makeIov(std.mem.asBytes(&header)),
makeIov(std.mem.sliceAsBytes(seen_pcs)),
};
try web_socket.writeMessagev(&iovecs, .binary);
prev_unique_runs.* = unique_runs;
}
if (prev_entry_points.* != coverage_map.entry_points.items.len) {
const header: abi.EntryPointHeader = .{
.flags = .{
.locs_len = @intCast(coverage_map.entry_points.items.len),
},
};
const iovecs: [2]std.posix.iovec_const = .{
makeIov(std.mem.asBytes(&header)),
makeIov(std.mem.sliceAsBytes(coverage_map.entry_points.items)),
};
try web_socket.writeMessagev(&iovecs, .binary);
prev_entry_points.* = coverage_map.entry_points.items.len;
}
}
fn serveSourcesTar(ws: *WebServer, request: *std.http.Server.Request) !void {
const gpa = ws.gpa;
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var body = try request.respondStreaming(.{
.respond_options = .{
.extra_headers = &.{
.{ .name = "content-type", .value = "application/x-tar" },
cache_control_header,
},
},
});
const DedupeTable = std.ArrayHashMapUnmanaged(Build.Cache.Path, void, Build.Cache.Path.TableAdapter, false);
var dedupe_table: DedupeTable = .{};
defer dedupe_table.deinit(gpa);
for (ws.fuzz_run_steps) |run_step| {
const compile_step_inputs = run_step.producer.?.step.inputs.table;
for (compile_step_inputs.keys(), compile_step_inputs.values()) |dir_path, *file_list| {
try dedupe_table.ensureUnusedCapacity(gpa, file_list.items.len);
for (file_list.items) |sub_path| {
// Special file "." means the entire directory.
if (std.mem.eql(u8, sub_path, ".")) continue;
const joined_path = try dir_path.join(arena, sub_path);
_ = dedupe_table.getOrPutAssumeCapacity(joined_path);
}
}
}
const deduped_paths = dedupe_table.keys();
const SortContext = struct {
pub fn lessThan(this: @This(), lhs: Build.Cache.Path, rhs: Build.Cache.Path) bool {
_ = this;
return switch (std.mem.order(u8, lhs.root_dir.path orelse ".", rhs.root_dir.path orelse ".")) {
.lt => true,
.gt => false,
.eq => std.mem.lessThan(u8, lhs.sub_path, rhs.sub_path),
};
}
};
std.mem.sortUnstable(Build.Cache.Path, deduped_paths, SortContext{}, SortContext.lessThan);
var cwd_cache: ?[]const u8 = null;
var response_writer = body.writer().unbuffered();
var archiver: std.tar.Writer = .{ .underlying_writer = &response_writer };
var read_buffer: [1024]u8 = undefined;
for (deduped_paths) |joined_path| {
var file = joined_path.root_dir.handle.openFile(joined_path.sub_path, .{}) catch |err| {
log.err("failed to open {f}: {s}", .{ joined_path, @errorName(err) });
continue;
};
defer file.close();
const stat = try file.stat();
var file_reader: std.fs.File.Reader = .initSize(file, &read_buffer, stat.size);
archiver.prefix = joined_path.root_dir.path orelse try memoizedCwd(arena, &cwd_cache);
try archiver.writeFile(joined_path.sub_path, &file_reader, stat.mtime);
}
try body.end();
}
fn memoizedCwd(arena: Allocator, opt_ptr: *?[]const u8) ![]const u8 {
if (opt_ptr.*) |cached| return cached;
const result = try std.process.getCwdAlloc(arena);
opt_ptr.* = result;
return result;
}
const cache_control_header: std.http.Header = .{
.name = "cache-control",
.value = "max-age=0, must-revalidate",
};
pub fn coverageRun(ws: *WebServer) void {
ws.mutex.lock();
defer ws.mutex.unlock();
while (true) {
ws.condition.wait(&ws.mutex);
for (ws.msg_queue.items) |msg| switch (msg) {
.coverage => |coverage| prepareTables(ws, coverage.run, coverage.id) catch |err| switch (err) {
error.AlreadyReported => continue,
else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
},
.entry_point => |entry_point| addEntryPoint(ws, entry_point.coverage_id, entry_point.addr) catch |err| switch (err) {
error.AlreadyReported => continue,
else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
},
};
ws.msg_queue.clearRetainingCapacity();
}
}
fn prepareTables(
ws: *WebServer,
run_step: *Step.Run,
coverage_id: u64,
) error{ OutOfMemory, AlreadyReported }!void {
const gpa = ws.gpa;
ws.coverage_mutex.lock();
defer ws.coverage_mutex.unlock();
const gop = try ws.coverage_files.getOrPut(gpa, coverage_id);
if (gop.found_existing) {
// We are fuzzing the same executable with multiple threads.
// Perhaps the same unit test; perhaps a different one. In any
// case, since the coverage file is the same, we only have to
// notice changes to that one file in order to learn coverage for
// this particular executable.
return;
}
errdefer _ = ws.coverage_files.pop();
gop.value_ptr.* = .{
.coverage = std.debug.Coverage.init,
.mapped_memory = undefined, // populated below
.source_locations = undefined, // populated below
.entry_points = .{},
.start_timestamp = ws.now(),
};
errdefer gop.value_ptr.coverage.deinit(gpa);
const rebuilt_exe_path = run_step.rebuilt_executable.?;
var debug_info = std.debug.Info.load(gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| {
log.err("step '{s}': failed to load debug information for '{f}': {s}", .{
run_step.step.name, rebuilt_exe_path, @errorName(err),
});
return error.AlreadyReported;
};
defer debug_info.deinit(gpa);
const coverage_file_path: Build.Cache.Path = .{
.root_dir = run_step.step.owner.cache_root,
.sub_path = "v/" ++ std.fmt.hex(coverage_id),
};
var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| {
log.err("step '{s}': failed to load coverage file '{f}': {s}", .{
run_step.step.name, coverage_file_path, @errorName(err),
});
return error.AlreadyReported;
};
defer coverage_file.close();
const file_size = coverage_file.getEndPos() catch |err| {
log.err("unable to check len of coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) });
return error.AlreadyReported;
};
const mapped_memory = std.posix.mmap(
null,
file_size,
std.posix.PROT.READ,
.{ .TYPE = .SHARED },
coverage_file.handle,
0,
) catch |err| {
log.err("failed to map coverage file '{f}': {s}", .{ coverage_file_path, @errorName(err) });
return error.AlreadyReported;
};
gop.value_ptr.mapped_memory = mapped_memory;
const header: *const abi.SeenPcsHeader = @ptrCast(mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
const pcs = header.pcAddrs();
const source_locations = try gpa.alloc(Coverage.SourceLocation, pcs.len);
errdefer gpa.free(source_locations);
// Unfortunately the PCs array that LLVM gives us from the 8-bit PC
// counters feature is not sorted.
var sorted_pcs: std.MultiArrayList(struct { pc: u64, index: u32, sl: Coverage.SourceLocation }) = .{};
defer sorted_pcs.deinit(gpa);
try sorted_pcs.resize(gpa, pcs.len);
@memcpy(sorted_pcs.items(.pc), pcs);
for (sorted_pcs.items(.index), 0..) |*v, i| v.* = @intCast(i);
sorted_pcs.sortUnstable(struct {
addrs: []const u64,
pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
return ctx.addrs[a_index] < ctx.addrs[b_index];
}
}{ .addrs = sorted_pcs.items(.pc) });
debug_info.resolveAddresses(gpa, sorted_pcs.items(.pc), sorted_pcs.items(.sl)) catch |err| {
log.err("failed to resolve addresses to source locations: {s}", .{@errorName(err)});
return error.AlreadyReported;
};
for (sorted_pcs.items(.index), sorted_pcs.items(.sl)) |i, sl| source_locations[i] = sl;
gop.value_ptr.source_locations = source_locations;
ws.coverage_condition.broadcast();
}
fn addEntryPoint(ws: *WebServer, coverage_id: u64, addr: u64) error{ AlreadyReported, OutOfMemory }!void {
ws.coverage_mutex.lock();
defer ws.coverage_mutex.unlock();
const coverage_map = ws.coverage_files.getPtr(coverage_id).?;
const header: *const abi.SeenPcsHeader = @ptrCast(coverage_map.mapped_memory[0..@sizeOf(abi.SeenPcsHeader)]);
const pcs = header.pcAddrs();
// Since this pcs list is unsorted, we must do a linear scan for the best index.
const index = i: {
var best: usize = 0;
for (pcs[1..], 1..) |elem_addr, i| {
if (elem_addr == addr) break :i i;
if (elem_addr > addr) continue;
if (elem_addr > pcs[best]) best = i;
}
break :i best;
};
if (index >= pcs.len) {
log.err("unable to find unit test entry address 0x{x} in source locations (range: 0x{x} to 0x{x})", .{
addr, pcs[0], pcs[pcs.len - 1],
});
return error.AlreadyReported;
}
if (false) {
const sl = coverage_map.source_locations[index];
const file_name = coverage_map.coverage.stringAt(coverage_map.coverage.fileAt(sl.file).basename);
log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index {d} between {x} and {x}", .{
addr, file_name, sl.line, sl.column, index, pcs[index - 1], pcs[index + 1],
});
}
const gpa = ws.gpa;
try coverage_map.entry_points.append(gpa, @intCast(index));
}
fn makeIov(s: []const u8) std.posix.iovec_const {
return .{
.base = s.ptr,
.len = s.len,
};
}

View File

@ -1,112 +0,0 @@
//! This file is shared among Zig code running in wildly different contexts:
//! libfuzzer, compiled alongside unit tests, the build runner, running on the
//! host computer, and the fuzzing web interface webassembly code running in
//! the browser. All of these components interface to some degree via an ABI.
/// libfuzzer uses this, and its usize is the one that counts. To match the ABI,
/// make the integers the size of the target used with libfuzzer.
///
/// Trailing:
/// * 1 bit per pc_addr, usize elements
/// * pc_addr: usize for each pcs_len
pub const SeenPcsHeader = extern struct {
n_runs: usize,
unique_runs: usize,
pcs_len: usize,
/// Used for comptime assertions. Provides a mechanism for strategically
/// causing compile errors.
pub const trailing = .{
.pc_bits_usize,
.pc_addr,
};
pub fn headerEnd(header: *const SeenPcsHeader) []const usize {
const ptr: [*]align(@alignOf(usize)) const u8 = @ptrCast(header);
const header_end_ptr: [*]const usize = @ptrCast(ptr + @sizeOf(SeenPcsHeader));
const pcs_len = header.pcs_len;
return header_end_ptr[0 .. pcs_len + seenElemsLen(pcs_len)];
}
pub fn seenBits(header: *const SeenPcsHeader) []const usize {
return header.headerEnd()[0..seenElemsLen(header.pcs_len)];
}
pub fn seenElemsLen(pcs_len: usize) usize {
return (pcs_len + @bitSizeOf(usize) - 1) / @bitSizeOf(usize);
}
pub fn pcAddrs(header: *const SeenPcsHeader) []const usize {
const pcs_len = header.pcs_len;
return header.headerEnd()[seenElemsLen(pcs_len)..][0..pcs_len];
}
};
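// Hedged, illustrative sketch (not part of the original file): given the trailing
// layout documented above, a consumer of this header could count covered PCs by
// popcounting the seen bits. Assumes padding bits past `pcs_len` in the final
// element are never set.
pub fn exampleCountSeenPcs(header: *const SeenPcsHeader) usize {
var total: usize = 0;
for (header.seenBits()) |elem| total += @popCount(elem);
return total;
}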
pub const ToClientTag = enum(u8) {
current_time,
source_index,
coverage_update,
entry_points,
_,
};
pub const CurrentTime = extern struct {
tag: ToClientTag = .current_time,
/// Base timestamp, in nanoseconds, that all other timestamps are relative to.
base: i64 align(1),
};
/// Sent to the fuzzer web client on first connection to the websocket URL.
///
/// Trailing:
/// * std.debug.Coverage.String for each directories_len
/// * std.debug.Coverage.File for each files_len
/// * std.debug.Coverage.SourceLocation for each source_locations_len
/// * u8 for each string_bytes_len
pub const SourceIndexHeader = extern struct {
flags: Flags,
directories_len: u32,
files_len: u32,
source_locations_len: u32,
string_bytes_len: u32,
/// When, according to the server, fuzzing started.
start_timestamp: i64 align(4),
pub const Flags = packed struct(u32) {
tag: ToClientTag = .source_index,
_: u24 = 0,
};
};
/// Sent to the fuzzer web client whenever the set of covered source locations
/// changes.
///
/// Trailing:
/// * one bit per source_locations_len, contained in u64 elements
pub const CoverageUpdateHeader = extern struct {
flags: Flags = .{},
n_runs: u64,
unique_runs: u64,
pub const Flags = packed struct(u64) {
tag: ToClientTag = .coverage_update,
_: u56 = 0,
};
pub const trailing = .{
.pc_bits_usize,
};
};
/// Sent to the fuzzer web client when the set of entry points is updated.
///
/// Trailing:
/// * one u32 index of source_locations per locs_len
pub const EntryPointHeader = extern struct {
flags: Flags,
pub const Flags = packed struct(u32) {
tag: ToClientTag = .entry_points,
locs_len: u24,
};
};

View File

@ -72,6 +72,14 @@ pub const MakeOptions = struct {
progress_node: std.Progress.Node,
thread_pool: *std.Thread.Pool,
watch: bool,
web_server: switch (builtin.target.cpu.arch) {
else => ?*Build.WebServer,
// WASM code references `Build.abi`, which incidentally references this type, but that
// currently breaks because `std.net.Address` doesn't work there. Work around it for now.
.wasm32 => void,
},
/// Not to be confused with `Build.allocator`, which is an alias of `Build.graph.arena`.
gpa: Allocator,
};
pub const MakeFn = *const fn (step: *Step, options: MakeOptions) anyerror!void;
@ -229,7 +237,17 @@ pub fn init(options: StepOptions) Step {
pub fn make(s: *Step, options: MakeOptions) error{ MakeFailed, MakeSkipped }!void {
const arena = s.owner.allocator;
s.makeFn(s, options) catch |err| switch (err) {
var timer: ?std.time.Timer = t: {
if (!s.owner.graph.time_report) break :t null;
if (s.id == .compile) break :t null;
break :t std.time.Timer.start() catch @panic("--time-report not supported on this host");
};
const make_result = s.makeFn(s, options);
if (timer) |*t| {
options.web_server.?.updateTimeReportGeneric(s, t.read());
}
make_result catch |err| switch (err) {
error.MakeFailed => return error.MakeFailed,
error.MakeSkipped => return error.MakeSkipped,
else => {
@ -372,18 +390,20 @@ pub fn evalZigProcess(
argv: []const []const u8,
prog_node: std.Progress.Node,
watch: bool,
web_server: ?*Build.WebServer,
gpa: Allocator,
) !?Path {
if (s.getZigProcess()) |zp| update: {
assert(watch);
if (std.Progress.have_ipc) if (zp.progress_ipc_fd) |fd| prog_node.setIpcFd(fd);
const result = zigProcessUpdate(s, zp, watch) catch |err| switch (err) {
const result = zigProcessUpdate(s, zp, watch, web_server, gpa) catch |err| switch (err) {
error.BrokenPipe => {
// Process restart required.
const term = zp.child.wait() catch |e| {
return s.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(e) });
};
_ = term;
s.clearZigProcess();
s.clearZigProcess(gpa);
break :update;
},
else => |e| return e,
@ -398,7 +418,7 @@ pub fn evalZigProcess(
return s.fail("unable to wait for {s}: {s}", .{ argv[0], @errorName(e) });
};
s.result_peak_rss = zp.child.resource_usage_statistics.getMaxRss() orelse 0;
s.clearZigProcess();
s.clearZigProcess(gpa);
try handleChildProcessTerm(s, term, null, argv);
return error.MakeFailed;
}
@ -408,7 +428,6 @@ pub fn evalZigProcess(
assert(argv.len != 0);
const b = s.owner;
const arena = b.allocator;
const gpa = arena;
try handleChildProcUnsupported(s, null, argv);
try handleVerbose(s.owner, null, argv);
@ -435,9 +454,12 @@ pub fn evalZigProcess(
.progress_ipc_fd = if (std.Progress.have_ipc) child.progress_node.getIpcFd() else {},
};
if (watch) s.setZigProcess(zp);
defer if (!watch) zp.poller.deinit();
defer if (!watch) {
zp.poller.deinit();
gpa.destroy(zp);
};
const result = try zigProcessUpdate(s, zp, watch);
const result = try zigProcessUpdate(s, zp, watch, web_server, gpa);
if (!watch) {
// Send EOF to stdin.
@ -499,7 +521,7 @@ pub fn installDir(s: *Step, dest_path: []const u8) !std.fs.Dir.MakePathStatus {
};
}
fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build.WebServer, gpa: Allocator) !?Path {
const b = s.owner;
const arena = b.allocator;
@ -537,12 +559,14 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len];
// TODO: use @ptrCast when the compiler supports it
const unaligned_extra = std.mem.bytesAsSlice(u32, extra_bytes);
const extra_array = try arena.alloc(u32, unaligned_extra.len);
@memcpy(extra_array, unaligned_extra);
s.result_error_bundle = .{
.string_bytes = try arena.dupe(u8, string_bytes),
.extra = extra_array,
};
{
s.result_error_bundle = .{ .string_bytes = &.{}, .extra = &.{} };
errdefer s.result_error_bundle.deinit(gpa);
s.result_error_bundle.string_bytes = try gpa.dupe(u8, string_bytes);
const extra = try gpa.alloc(u32, unaligned_extra.len);
@memcpy(extra, unaligned_extra);
s.result_error_bundle.extra = extra;
}
// This message indicates the end of the update.
if (watch) break :poll;
},
@ -602,6 +626,20 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool) !?Path {
}
}
},
.time_report => if (web_server) |ws| {
const TimeReport = std.zig.Server.Message.TimeReport;
const tr: *align(1) const TimeReport = @ptrCast(body[0..@sizeOf(TimeReport)]);
ws.updateTimeReportCompile(.{
.compile = s.cast(Step.Compile).?,
.use_llvm = tr.flags.use_llvm,
.stats = tr.stats,
.ns_total = timer.read(),
.llvm_pass_timings_len = tr.llvm_pass_timings_len,
.files_len = tr.files_len,
.decls_len = tr.decls_len,
.trailing = body[@sizeOf(TimeReport)..],
});
},
else => {}, // ignore other messages
}
}
@ -630,8 +668,7 @@ fn setZigProcess(s: *Step, zp: *ZigProcess) void {
}
}
fn clearZigProcess(s: *Step) void {
const gpa = s.owner.allocator;
fn clearZigProcess(s: *Step, gpa: Allocator) void {
switch (s.id) {
.compile => {
const compile = s.cast(Compile).?;
@ -947,7 +984,8 @@ fn addWatchInputFromPath(step: *Step, path: Build.Cache.Path, basename: []const
try gop.value_ptr.append(gpa, basename);
}
fn reset(step: *Step, gpa: Allocator) void {
/// Implementation detail of file watching and forced rebuilds. Prepares the step for being re-evaluated.
pub fn reset(step: *Step, gpa: Allocator) void {
assert(step.state == .precheck_done);
step.result_error_msgs.clearRetainingCapacity();

View File

@ -1491,6 +1491,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
if (b.verbose_link or compile.verbose_link) try zig_args.append("--verbose-link");
if (b.verbose_cc or compile.verbose_cc) try zig_args.append("--verbose-cc");
if (b.verbose_llvm_cpu_features) try zig_args.append("--verbose-llvm-cpu-features");
if (b.graph.time_report) try zig_args.append("--time-report");
if (compile.generated_asm != null) try zig_args.append("-femit-asm");
if (compile.generated_bin == null) try zig_args.append("-fno-emit-bin");
@ -1851,6 +1852,8 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
zig_args,
options.progress_node,
(b.graph.incremental == true) and options.watch,
options.web_server,
options.gpa,
) catch |err| switch (err) {
error.NeedCompileErrorCheck => {
assert(compile.expect_errors != null);
@ -1905,9 +1908,7 @@ fn outputPath(c: *Compile, out_dir: std.Build.Cache.Path, ea: std.zig.EmitArtifa
return out_dir.joinString(arena, name) catch @panic("OOM");
}
pub fn rebuildInFuzzMode(c: *Compile, progress_node: std.Progress.Node) !Path {
const gpa = c.step.owner.allocator;
pub fn rebuildInFuzzMode(c: *Compile, gpa: Allocator, progress_node: std.Progress.Node) !Path {
c.step.result_error_msgs.clearRetainingCapacity();
c.step.result_stderr = "";
@ -1915,7 +1916,7 @@ pub fn rebuildInFuzzMode(c: *Compile, progress_node: std.Progress.Node) !Path {
c.step.result_error_bundle = std.zig.ErrorBundle.empty;
const zig_args = try getZigArgs(c, true);
const maybe_output_bin_path = try c.step.evalZigProcess(zig_args, progress_node, false);
const maybe_output_bin_path = try c.step.evalZigProcess(zig_args, progress_node, false, null, gpa);
return maybe_output_bin_path.?;
}

View File

@ -236,7 +236,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
try argv.appendSlice(&.{ full_src_path, full_dest_path });
try argv.append("--listen=-");
_ = try step.evalZigProcess(argv.items, prog_node, false);
_ = try step.evalZigProcess(argv.items, prog_node, false, options.web_server, options.gpa);
objcopy.output_file.path = full_dest_path;
if (objcopy.output_file_debug) |*file| file.path = full_dest_path_debug;

View File

@ -549,6 +549,7 @@ test Options {
.result = try std.zig.system.resolveTargetQuery(.{}),
},
.zig_lib_directory = std.Build.Cache.Directory.cwd(),
.time_report = false,
};
var builder = try std.Build.create(

View File

@ -944,7 +944,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
pub fn rerunInFuzzMode(
run: *Run,
web_server: *std.Build.Fuzz.WebServer,
fuzz: *std.Build.Fuzz,
unit_test_index: u32,
prog_node: std.Progress.Node,
) !void {
@ -984,7 +984,7 @@ pub fn rerunInFuzzMode(
const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
try runCommand(run, argv_list.items, has_side_effects, tmp_dir_path, prog_node, .{
.unit_test_index = unit_test_index,
.web_server = web_server,
.fuzz = fuzz,
});
}
@ -1054,7 +1054,7 @@ fn termMatches(expected: ?std.process.Child.Term, actual: std.process.Child.Term
}
const FuzzContext = struct {
web_server: *std.Build.Fuzz.WebServer,
fuzz: *std.Build.Fuzz,
unit_test_index: u32,
};
@ -1638,31 +1638,31 @@ fn evalZigTest(
};
},
.coverage_id => {
const web_server = fuzz_context.?.web_server;
const fuzz = fuzz_context.?.fuzz;
const msg_ptr: *align(1) const u64 = @ptrCast(body);
coverage_id = msg_ptr.*;
{
web_server.mutex.lock();
defer web_server.mutex.unlock();
try web_server.msg_queue.append(web_server.gpa, .{ .coverage = .{
fuzz.queue_mutex.lock();
defer fuzz.queue_mutex.unlock();
try fuzz.msg_queue.append(fuzz.ws.gpa, .{ .coverage = .{
.id = coverage_id.?,
.run = run,
} });
web_server.condition.signal();
fuzz.queue_cond.signal();
}
},
.fuzz_start_addr => {
const web_server = fuzz_context.?.web_server;
const fuzz = fuzz_context.?.fuzz;
const msg_ptr: *align(1) const u64 = @ptrCast(body);
const addr = msg_ptr.*;
{
web_server.mutex.lock();
defer web_server.mutex.unlock();
try web_server.msg_queue.append(web_server.gpa, .{ .entry_point = .{
fuzz.queue_mutex.lock();
defer fuzz.queue_mutex.unlock();
try fuzz.msg_queue.append(fuzz.ws.gpa, .{ .entry_point = .{
.addr = addr,
.coverage_id = coverage_id.?,
} });
web_server.condition.signal();
fuzz.queue_cond.signal();
}
},
else => {}, // ignore other messages

View File

@ -187,7 +187,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const c_source_path = translate_c.source.getPath2(b, step);
try argv_list.append(c_source_path);
const output_dir = try step.evalZigProcess(argv_list.items, prog_node, false);
const output_dir = try step.evalZigProcess(argv_list.items, prog_node, false, options.web_server, options.gpa);
const basename = std.fs.path.stem(std.fs.path.basename(c_source_path));
translate_c.out_basename = b.fmt("{s}.zig", .{basename});

823
lib/std/Build/WebServer.zig Normal file
View File

@ -0,0 +1,823 @@
gpa: Allocator,
thread_pool: *std.Thread.Pool,
graph: *const Build.Graph,
all_steps: []const *Build.Step,
listen_address: std.net.Address,
ttyconf: std.io.tty.Config,
root_prog_node: std.Progress.Node,
watch: bool,
tcp_server: ?std.net.Server,
serve_thread: ?std.Thread,
base_timestamp: i128,
/// The "step name" data which trails `abi.Hello`, for the steps in `all_steps`.
step_names_trailing: []u8,
/// The bit-packed "step status" data. Values are `abi.StepUpdate.Status`. LSBs are earlier steps.
/// Accessed atomically.
step_status_bits: []u8,
fuzz: ?Fuzz,
time_report_mutex: std.Thread.Mutex,
time_report_msgs: [][]u8,
time_report_update_times: []i64,
build_status: std.atomic.Value(abi.BuildStatus),
/// When an event occurs that means WebSocket clients should be sent updates, call `notifyUpdate`
/// to increment this value. Each client thread waits for this increment with `std.Thread.Futex`, so
/// `notifyUpdate` wakes those threads. Updates are sent on a short interval regardless, so it is
/// recommended to use `notifyUpdate` only for changes which the user should see immediately. For
/// instance, we do not call `notifyUpdate` when the number of "unique runs" in the fuzzer changes,
/// because that value changes so quickly that it would constantly spam all clients with an
/// unreasonable number of packets.
update_id: std.atomic.Value(u32),
runner_request_mutex: std.Thread.Mutex,
runner_request_ready_cond: std.Thread.Condition,
runner_request_empty_cond: std.Thread.Condition,
runner_request: ?RunnerRequest,
/// If a client is not explicitly notified of changes with `notifyUpdate`, it will be sent updates
/// on a fixed interval of this many milliseconds.
const default_update_interval_ms = 500;
/// Thread-safe. Triggers updates to be sent to connected WebSocket clients; see `update_id`.
pub fn notifyUpdate(ws: *WebServer) void {
_ = ws.update_id.rmw(.Add, 1, .release);
std.Thread.Futex.wake(&ws.update_id, 16);
}
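// Hedged sketch of the client-thread side of this mechanism (the real per-client
// logic lives elsewhere in this file): wait on `update_id` with a timeout so that
// updates still go out every `default_update_interval_ms` even when nothing calls
// `notifyUpdate`. `sendUpdatesToClient` is a hypothetical stand-in.
fn exampleClientLoop(ws: *WebServer) void {
while (true) {
const observed = ws.update_id.load(.acquire);
// sendUpdatesToClient(ws, ...);
// Wake early if `notifyUpdate` bumps `update_id`; otherwise time out and resend.
std.Thread.Futex.timedWait(
&ws.update_id,
observed,
default_update_interval_ms * std.time.ns_per_ms,
) catch {};
}
}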
pub const Options = struct {
gpa: Allocator,
thread_pool: *std.Thread.Pool,
graph: *const std.Build.Graph,
all_steps: []const *Build.Step,
ttyconf: std.io.tty.Config,
root_prog_node: std.Progress.Node,
watch: bool,
listen_address: std.net.Address,
};
pub fn init(opts: Options) WebServer {
if (builtin.single_threaded) {
// The upcoming `std.Io` interface should allow us to use `Io.async` and `Io.concurrent`
// instead of threads, so that the web server can function in single-threaded builds.
std.process.fatal("--webui not yet implemented for single-threaded builds", .{});
}
if (builtin.os.tag == .windows) {
// At the time of writing, there are two bugs in the standard library which break this feature on Windows:
// * Reading from a socket on one thread while writing to it on another seems to deadlock.
// * Vectored writes to sockets currently trigger an infinite loop when a buffer has length 0.
//
// Both of these bugs are expected to be solved by changes which are currently in the unmerged
// 'wrangle-writer-buffering' branch. Until that makes it in, this must remain disabled.
std.process.fatal("--webui is currently disabled on Windows due to bugs", .{});
}
const all_steps = opts.all_steps;
const step_names_trailing = opts.gpa.alloc(u8, len: {
var name_bytes: usize = 0;
for (all_steps) |step| name_bytes += step.name.len;
break :len name_bytes + all_steps.len * 4;
}) catch @panic("out of memory");
{
const step_name_lens: []align(1) u32 = @ptrCast(step_names_trailing[0 .. all_steps.len * 4]);
var idx: usize = all_steps.len * 4;
for (all_steps, step_name_lens) |step, *name_len| {
name_len.* = @intCast(step.name.len);
@memcpy(step_names_trailing[idx..][0..step.name.len], step.name);
idx += step.name.len;
}
assert(idx == step_names_trailing.len);
}
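// Hedged sketch of how a client of this data could decode `step_names_trailing`,
// assuming only the layout built above (one native-endian u32 length per step,
// followed by the concatenated name bytes):
//
//     const lens: []align(1) const u32 = @ptrCast(trailing[0 .. steps_len * 4]);
//     var offset: usize = steps_len * 4;
//     for (lens) |len| {
//         const name = trailing[offset..][0..len];
//         offset += len;
//         // ... use `name` ...
//     }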
const step_status_bits = opts.gpa.alloc(
u8,
std.math.divCeil(usize, all_steps.len, 4) catch unreachable,
) catch @panic("out of memory");
@memset(step_status_bits, 0);
const time_reports_len: usize = if (opts.graph.time_report) all_steps.len else 0;
const time_report_msgs = opts.gpa.alloc([]u8, time_reports_len) catch @panic("out of memory");
const time_report_update_times = opts.gpa.alloc(i64, time_reports_len) catch @panic("out of memory");
@memset(time_report_msgs, &.{});
@memset(time_report_update_times, std.math.minInt(i64));
return .{
.gpa = opts.gpa,
.thread_pool = opts.thread_pool,
.graph = opts.graph,
.all_steps = all_steps,
.listen_address = opts.listen_address,
.ttyconf = opts.ttyconf,
.root_prog_node = opts.root_prog_node,
.watch = opts.watch,
.tcp_server = null,
.serve_thread = null,
.base_timestamp = std.time.nanoTimestamp(),
.step_names_trailing = step_names_trailing,
.step_status_bits = step_status_bits,
.fuzz = null,
.time_report_mutex = .{},
.time_report_msgs = time_report_msgs,
.time_report_update_times = time_report_update_times,
.build_status = .init(.idle),
.update_id = .init(0),
.runner_request_mutex = .{},
.runner_request_ready_cond = .{},
.runner_request_empty_cond = .{},
.runner_request = null,
};
}
pub fn deinit(ws: *WebServer) void {
const gpa = ws.gpa;
gpa.free(ws.step_names_trailing);
gpa.free(ws.step_status_bits);
if (ws.fuzz) |*f| f.deinit();
for (ws.time_report_msgs) |msg| gpa.free(msg);
gpa.free(ws.time_report_msgs);
gpa.free(ws.time_report_update_times);
if (ws.serve_thread) |t| {
if (ws.tcp_server) |*s| s.stream.close();
t.join();
}
if (ws.tcp_server) |*s| s.deinit();
}
pub fn start(ws: *WebServer) error{AlreadyReported}!void {
assert(ws.tcp_server == null);
assert(ws.serve_thread == null);
ws.tcp_server = ws.listen_address.listen(.{ .reuse_address = true }) catch |err| {
log.err("failed to listen to port {d}: {s}", .{ ws.listen_address.getPort(), @errorName(err) });
return error.AlreadyReported;
};
ws.serve_thread = std.Thread.spawn(.{}, serve, .{ws}) catch |err| {
log.err("unable to spawn web server thread: {s}", .{@errorName(err)});
ws.tcp_server.?.deinit();
ws.tcp_server = null;
return error.AlreadyReported;
};
log.info("web interface listening at http://{f}/", .{ws.tcp_server.?.listen_address});
if (ws.listen_address.getPort() == 0) {
log.info("hint: pass '--webui={f}' to use the same port next time", .{ws.tcp_server.?.listen_address});
}
}
fn serve(ws: *WebServer) void {
while (true) {
const connection = ws.tcp_server.?.accept() catch |err| {
log.err("failed to accept connection: {s}", .{@errorName(err)});
return;
};
_ = std.Thread.spawn(.{}, accept, .{ ws, connection }) catch |err| {
log.err("unable to spawn connection thread: {s}", .{@errorName(err)});
connection.stream.close();
continue;
};
}
}
pub fn startBuild(ws: *WebServer) void {
if (ws.fuzz) |*fuzz| {
fuzz.deinit();
ws.fuzz = null;
}
for (ws.step_status_bits) |*bits| @atomicStore(u8, bits, 0, .monotonic);
ws.build_status.store(.running, .monotonic);
ws.notifyUpdate();
}
pub fn updateStepStatus(ws: *WebServer, step: *Build.Step, new_status: abi.StepUpdate.Status) void {
const step_idx: u32 = for (ws.all_steps, 0..) |s, i| {
if (s == step) break @intCast(i);
} else unreachable;
const ptr = &ws.step_status_bits[step_idx / 4];
const bit_offset: u3 = @intCast((step_idx % 4) * 2);
const old_bits: u2 = @truncate(@atomicLoad(u8, ptr, .monotonic) >> bit_offset);
const mask = @as(u8, @intFromEnum(new_status) ^ old_bits) << bit_offset;
_ = @atomicRmw(u8, ptr, .Xor, mask, .monotonic);
ws.notifyUpdate();
}
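// Illustrative sketch (not part of the original change): `updateStepStatus` above and
// `serveWebSocket` below pack four 2-bit `abi.StepUpdate.Status` values per byte, least
// significant bits first. This test only demonstrates that index arithmetic on a local
// buffer; the real state lives in `ws.step_status_bits` and is updated atomically.
test "step status two-bit packing round-trips" {
    var bits = [_]u8{ 0, 0 };
    const statuses = [_]abi.StepUpdate.Status{ .pending, .wip, .success, .failure, .wip };
    for (statuses, 0..) |status, step_idx| {
        const bit_offset: u3 = @intCast((step_idx % 4) * 2);
        const old: u2 = @truncate(bits[step_idx / 4] >> bit_offset);
        bits[step_idx / 4] ^= @as(u8, @intFromEnum(status) ^ old) << bit_offset;
    }
    for (statuses, 0..) |expected, step_idx| {
        const bit_offset: u3 = @intCast((step_idx % 4) * 2);
        const got: abi.StepUpdate.Status = @enumFromInt(@as(u2, @truncate(bits[step_idx / 4] >> bit_offset)));
        try std.testing.expectEqual(expected, got);
    }
}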
pub fn finishBuild(ws: *WebServer, opts: struct {
fuzz: bool,
}) void {
if (opts.fuzz) {
switch (builtin.os.tag) {
// Current implementation depends on two things that need to be ported to Windows:
// * Memory-mapping to share data between the fuzzer and build runner.
// * COFF/PE support added to `std.debug.Info` (it needs a batching API for resolving
// many addresses to source locations).
.windows => std.process.fatal("--fuzz not yet implemented for {s}", .{@tagName(builtin.os.tag)}),
else => {},
}
if (@bitSizeOf(usize) != 64) {
// Current implementation depends on posix.mmap()'s second parameter, `length: usize`,
// being compatible with the `u64` returned by `std.fs.File.getEndPos`. This is not the
// case on 32-bit platforms.
// Affects or affected by issues #5185, #22523, and #22464.
std.process.fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)});
}
assert(ws.fuzz == null);
ws.build_status.store(.fuzz_init, .monotonic);
ws.notifyUpdate();
ws.fuzz = Fuzz.init(ws) catch |err| std.process.fatal("failed to start fuzzer: {s}", .{@errorName(err)});
ws.fuzz.?.start();
}
ws.build_status.store(if (ws.watch) .watching else .idle, .monotonic);
ws.notifyUpdate();
}
pub fn now(s: *const WebServer) i64 {
return @intCast(std.time.nanoTimestamp() - s.base_timestamp);
}
fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
defer connection.stream.close();
var read_buf: [0x4000]u8 = undefined;
var server: std.http.Server = .init(connection, &read_buf);
while (true) {
var request = server.receiveHead() catch |err| switch (err) {
error.HttpConnectionClosing => return,
else => {
log.err("failed to receive http request: {s}", .{@errorName(err)});
return;
},
};
var ws_send_buf: [0x4000]u8 = undefined;
var ws_recv_buf: [0x4000]u8 align(4) = undefined;
if (std.http.WebSocket.init(&request, &ws_send_buf, &ws_recv_buf) catch |err| {
log.err("failed to initialize websocket connection: {s}", .{@errorName(err)});
return;
}) |ws_init| {
var web_socket = ws_init;
ws.serveWebSocket(&web_socket) catch |err| {
log.err("failed to serve websocket: {s}", .{@errorName(err)});
return;
};
comptime unreachable;
} else {
ws.serveRequest(&request) catch |err| switch (err) {
error.AlreadyReported => return,
else => {
log.err("failed to serve '{s}': {s}", .{ request.head.target, @errorName(err) });
return;
},
};
}
}
}
fn makeIov(s: []const u8) std.posix.iovec_const {
return .{
.base = s.ptr,
.len = s.len,
};
}
fn serveWebSocket(ws: *WebServer, sock: *std.http.WebSocket) !noreturn {
var prev_build_status = ws.build_status.load(.monotonic);
const prev_step_status_bits = try ws.gpa.alloc(u8, ws.step_status_bits.len);
defer ws.gpa.free(prev_step_status_bits);
for (prev_step_status_bits, ws.step_status_bits) |*copy, *shared| {
copy.* = @atomicLoad(u8, shared, .monotonic);
}
_ = try std.Thread.spawn(.{}, recvWebSocketMessages, .{ ws, sock });
{
const hello_header: abi.Hello = .{
.status = prev_build_status,
.flags = .{
.time_report = ws.graph.time_report,
},
.timestamp = ws.now(),
.steps_len = @intCast(ws.all_steps.len),
};
try sock.writeMessagev(&.{
makeIov(@ptrCast(&hello_header)),
makeIov(ws.step_names_trailing),
makeIov(prev_step_status_bits),
}, .binary);
}
var prev_fuzz: Fuzz.Previous = .init;
var prev_time: i64 = std.math.minInt(i64);
while (true) {
const start_time = ws.now();
const start_update_id = ws.update_id.load(.acquire);
if (ws.fuzz) |*fuzz| {
try fuzz.sendUpdate(sock, &prev_fuzz);
}
{
ws.time_report_mutex.lock();
defer ws.time_report_mutex.unlock();
for (ws.time_report_msgs, ws.time_report_update_times) |msg, update_time| {
if (update_time <= prev_time) continue;
// We want to send `msg`, but shouldn't hold `ws.time_report_mutex` while we do, so
// that we don't hold up the build system on the client accepting this packet. Copy
// the message first, since `msg` may be replaced once the mutex is released.
const owned_msg = try ws.gpa.dupe(u8, msg);
defer ws.gpa.free(owned_msg);
// Temporarily unlock, then re-lock after the message is sent.
ws.time_report_mutex.unlock();
defer ws.time_report_mutex.lock();
try sock.writeMessage(owned_msg, .binary);
}
}
{
const build_status = ws.build_status.load(.monotonic);
if (build_status != prev_build_status) {
prev_build_status = build_status;
const msg: abi.StatusUpdate = .{ .new = build_status };
try sock.writeMessage(@ptrCast(&msg), .binary);
}
}
for (prev_step_status_bits, ws.step_status_bits, 0..) |*prev_byte, *shared, byte_idx| {
const cur_byte = @atomicLoad(u8, shared, .monotonic);
if (prev_byte.* == cur_byte) continue;
const cur: [4]abi.StepUpdate.Status = .{
@enumFromInt(@as(u2, @truncate(cur_byte >> 0))),
@enumFromInt(@as(u2, @truncate(cur_byte >> 2))),
@enumFromInt(@as(u2, @truncate(cur_byte >> 4))),
@enumFromInt(@as(u2, @truncate(cur_byte >> 6))),
};
const prev: [4]abi.StepUpdate.Status = .{
@enumFromInt(@as(u2, @truncate(prev_byte.* >> 0))),
@enumFromInt(@as(u2, @truncate(prev_byte.* >> 2))),
@enumFromInt(@as(u2, @truncate(prev_byte.* >> 4))),
@enumFromInt(@as(u2, @truncate(prev_byte.* >> 6))),
};
for (cur, prev, byte_idx * 4..) |cur_status, prev_status, step_idx| {
const msg: abi.StepUpdate = .{ .step_idx = @intCast(step_idx), .bits = .{ .status = cur_status } };
if (cur_status != prev_status) try sock.writeMessage(@ptrCast(&msg), .binary);
}
prev_byte.* = cur_byte;
}
prev_time = start_time;
std.Thread.Futex.timedWait(&ws.update_id, start_update_id, std.time.ns_per_ms * default_update_interval_ms) catch {};
}
}
fn recvWebSocketMessages(ws: *WebServer, sock: *std.http.WebSocket) void {
while (true) {
const msg = sock.readSmallMessage() catch return;
if (msg.opcode != .binary) continue;
if (msg.data.len == 0) continue;
const tag: abi.ToServerTag = @enumFromInt(msg.data[0]);
switch (tag) {
_ => continue,
.rebuild => while (true) {
ws.runner_request_mutex.lock();
defer ws.runner_request_mutex.unlock();
if (ws.runner_request == null) {
ws.runner_request = .rebuild;
ws.runner_request_ready_cond.signal();
break;
}
ws.runner_request_empty_cond.wait(&ws.runner_request_mutex);
},
}
}
}
fn serveRequest(ws: *WebServer, req: *std.http.Server.Request) !void {
// Strip an optional leading '/debug' component from the request.
const target: []const u8, const debug: bool = target: {
if (mem.eql(u8, req.head.target, "/debug")) break :target .{ "/", true };
if (mem.eql(u8, req.head.target, "/debug/")) break :target .{ "/", true };
if (mem.startsWith(u8, req.head.target, "/debug/")) break :target .{ req.head.target["/debug".len..], true };
break :target .{ req.head.target, false };
};
if (mem.eql(u8, target, "/")) return serveLibFile(ws, req, "build-web/index.html", "text/html");
if (mem.eql(u8, target, "/main.js")) return serveLibFile(ws, req, "build-web/main.js", "application/javascript");
if (mem.eql(u8, target, "/style.css")) return serveLibFile(ws, req, "build-web/style.css", "text/css");
if (mem.eql(u8, target, "/time_report.css")) return serveLibFile(ws, req, "build-web/time_report.css", "text/css");
if (mem.eql(u8, target, "/main.wasm")) return serveClientWasm(ws, req, if (debug) .Debug else .ReleaseFast);
if (ws.fuzz) |*fuzz| {
if (mem.eql(u8, target, "/sources.tar")) return fuzz.serveSourcesTar(req);
}
try req.respond("not found", .{
.status = .not_found,
.extra_headers = &.{
.{ .name = "Content-Type", .value = "text/plain" },
},
});
}
fn serveLibFile(
ws: *WebServer,
request: *std.http.Server.Request,
sub_path: []const u8,
content_type: []const u8,
) !void {
return serveFile(ws, request, .{
.root_dir = ws.graph.zig_lib_directory,
.sub_path = sub_path,
}, content_type);
}
fn serveClientWasm(
ws: *WebServer,
req: *std.http.Server.Request,
optimize_mode: std.builtin.OptimizeMode,
) !void {
var arena_state: std.heap.ArenaAllocator = .init(ws.gpa);
defer arena_state.deinit();
const arena = arena_state.allocator();
// We always rebuild the wasm on-the-fly, so that if it is edited the user can just refresh the page.
const bin_path = try buildClientWasm(ws, arena, optimize_mode);
return serveFile(ws, req, bin_path, "application/wasm");
}
pub fn serveFile(
ws: *WebServer,
request: *std.http.Server.Request,
path: Cache.Path,
content_type: []const u8,
) !void {
const gpa = ws.gpa;
// The desired API is actually sendfile, which will require enhancing std.http.Server.
// We load the file with every request so that the user can make changes to the file
// and refresh the HTML page without restarting this server.
const file_contents = path.root_dir.handle.readFileAlloc(gpa, path.sub_path, 10 * 1024 * 1024) catch |err| {
log.err("failed to read '{f}': {s}", .{ path, @errorName(err) });
return error.AlreadyReported;
};
defer gpa.free(file_contents);
try request.respond(file_contents, .{
.extra_headers = &.{
.{ .name = "Content-Type", .value = content_type },
cache_control_header,
},
});
}
pub fn serveTarFile(
ws: *WebServer,
request: *std.http.Server.Request,
paths: []const Cache.Path,
) !void {
const gpa = ws.gpa;
var send_buf: [0x4000]u8 = undefined;
var response = request.respondStreaming(.{
.send_buffer = &send_buf,
.respond_options = .{
.extra_headers = &.{
.{ .name = "Content-Type", .value = "application/x-tar" },
cache_control_header,
},
},
});
var cached_cwd_path: ?[]const u8 = null;
defer if (cached_cwd_path) |p| gpa.free(p);
var response_buf: [1024]u8 = undefined;
var adapter = response.writer().adaptToNewApi();
adapter.new_interface.buffer = &response_buf;
var archiver: std.tar.Writer = .{ .underlying_writer = &adapter.new_interface };
for (paths) |path| {
var file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| {
log.err("failed to open '{f}': {s}", .{ path, @errorName(err) });
continue;
};
defer file.close();
const stat = try file.stat();
var read_buffer: [1024]u8 = undefined;
var file_reader: std.fs.File.Reader = .initSize(file, &read_buffer, stat.size);
// TODO: this logic is completely bogus -- obviously so, because `path.root_dir.path` can
// be cwd-relative. This is also related to why linkification doesn't work in the fuzzer UI:
// it turns out the WASM treats the first path component as the module name, typically
// resulting in modules named "" and "src". The compiler needs to tell the build system
// about the module graph so that the build system can correctly encode this information in
// the tar file.
archiver.prefix = path.root_dir.path orelse cwd: {
if (cached_cwd_path == null) cached_cwd_path = try std.process.getCwdAlloc(gpa);
break :cwd cached_cwd_path.?;
};
try archiver.writeFile(path.sub_path, &file_reader, stat.mtime);
}
// intentionally not calling `archiver.finishPedantically`
try adapter.new_interface.flush();
try response.end();
}
fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.OptimizeMode) !Cache.Path {
const root_name = "build-web";
const arch_os_abi = "wasm32-freestanding";
const cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext";
const gpa = ws.gpa;
const graph = ws.graph;
const main_src_path: Cache.Path = .{
.root_dir = graph.zig_lib_directory,
.sub_path = "build-web/main.zig",
};
const walk_src_path: Cache.Path = .{
.root_dir = graph.zig_lib_directory,
.sub_path = "docs/wasm/Walk.zig",
};
const html_render_src_path: Cache.Path = .{
.root_dir = graph.zig_lib_directory,
.sub_path = "docs/wasm/html_render.zig",
};
var argv: std.ArrayListUnmanaged([]const u8) = .empty;
try argv.appendSlice(arena, &.{
graph.zig_exe, "build-exe", //
"-fno-entry", //
"-O", @tagName(optimize), //
"-target", arch_os_abi, //
"-mcpu", cpu_features, //
"--cache-dir", graph.global_cache_root.path orelse ".", //
"--global-cache-dir", graph.global_cache_root.path orelse ".", //
"--zig-lib-dir", graph.zig_lib_directory.path orelse ".", //
"--name", root_name, //
"-rdynamic", //
"-fsingle-threaded", //
"--dep", "Walk", //
"--dep", "html_render", //
try std.fmt.allocPrint(arena, "-Mroot={f}", .{main_src_path}), //
try std.fmt.allocPrint(arena, "-MWalk={f}", .{walk_src_path}), //
"--dep", "Walk", //
try std.fmt.allocPrint(arena, "-Mhtml_render={f}", .{html_render_src_path}), //
"--listen=-",
});
var child: std.process.Child = .init(argv.items, gpa);
child.stdin_behavior = .Pipe;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Pipe;
try child.spawn();
var poller = std.io.poll(gpa, enum { stdout, stderr }, .{
.stdout = child.stdout.?,
.stderr = child.stderr.?,
});
defer poller.deinit();
try child.stdin.?.writeAll(@ptrCast(@as([]const std.zig.Client.Message.Header, &.{
.{ .tag = .update, .bytes_len = 0 },
.{ .tag = .exit, .bytes_len = 0 },
})));
const Header = std.zig.Server.Message.Header;
var result: ?Cache.Path = null;
var result_error_bundle = std.zig.ErrorBundle.empty;
const stdout = poller.reader(.stdout);
poll: while (true) {
while (stdout.buffered().len < @sizeOf(Header)) if (!(try poller.poll())) break :poll;
const header = stdout.takeStruct(Header, .little) catch unreachable;
while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll;
const body = stdout.take(header.bytes_len) catch unreachable;
switch (header.tag) {
.zig_version => {
if (!std.mem.eql(u8, builtin.zig_version_string, body)) {
return error.ZigProtocolVersionMismatch;
}
},
.error_bundle => {
const EbHdr = std.zig.Server.Message.ErrorBundle;
const eb_hdr = @as(*align(1) const EbHdr, @ptrCast(body));
const extra_bytes =
body[@sizeOf(EbHdr)..][0 .. @sizeOf(u32) * eb_hdr.extra_len];
const string_bytes =
body[@sizeOf(EbHdr) + extra_bytes.len ..][0..eb_hdr.string_bytes_len];
const unaligned_extra: []align(1) const u32 = @ptrCast(extra_bytes);
const extra_array = try arena.alloc(u32, unaligned_extra.len);
@memcpy(extra_array, unaligned_extra);
result_error_bundle = .{
.string_bytes = try arena.dupe(u8, string_bytes),
.extra = extra_array,
};
},
.emit_digest => {
const EmitDigest = std.zig.Server.Message.EmitDigest;
const ebp_hdr: *align(1) const EmitDigest = @ptrCast(body);
if (!ebp_hdr.flags.cache_hit) {
log.info("source changes detected; rebuilt wasm component", .{});
}
const digest = body[@sizeOf(EmitDigest)..][0..Cache.bin_digest_len];
result = .{
.root_dir = graph.global_cache_root,
.sub_path = try arena.dupe(u8, "o" ++ std.fs.path.sep_str ++ Cache.binToHex(digest.*)),
};
},
else => {}, // ignore other messages
}
}
const stderr_contents = try poller.toOwnedSlice(.stderr);
if (stderr_contents.len > 0) {
std.debug.print("{s}", .{stderr_contents});
}
// Send EOF to stdin.
child.stdin.?.close();
child.stdin = null;
switch (try child.wait()) {
.Exited => |code| {
if (code != 0) {
log.err(
"the following command exited with error code {d}:\n{s}",
.{ code, try Build.Step.allocPrintCmd(arena, null, argv.items) },
);
return error.WasmCompilationFailed;
}
},
.Signal, .Stopped, .Unknown => {
log.err(
"the following command terminated unexpectedly:\n{s}",
.{try Build.Step.allocPrintCmd(arena, null, argv.items)},
);
return error.WasmCompilationFailed;
},
}
if (result_error_bundle.errorMessageCount() > 0) {
const color = std.zig.Color.auto;
result_error_bundle.renderToStdErr(color.renderOptions());
log.err("the following command failed with {d} compilation errors:\n{s}", .{
result_error_bundle.errorMessageCount(),
try Build.Step.allocPrintCmd(arena, null, argv.items),
});
return error.WasmCompilationFailed;
}
const base_path = result orelse {
log.err("child process failed to report result\n{s}", .{
try Build.Step.allocPrintCmd(arena, null, argv.items),
});
return error.WasmCompilationFailed;
};
const bin_name = try std.zig.binNameAlloc(arena, .{
.root_name = root_name,
.target = &(std.zig.system.resolveTargetQuery(std.Build.parseTargetQuery(.{
.arch_os_abi = arch_os_abi,
.cpu_features = cpu_features,
}) catch unreachable) catch unreachable),
.output_mode = .Exe,
});
return base_path.join(arena, bin_name);
}
pub fn updateTimeReportCompile(ws: *WebServer, opts: struct {
compile: *Build.Step.Compile,
use_llvm: bool,
stats: abi.time_report.CompileResult.Stats,
ns_total: u64,
llvm_pass_timings_len: u32,
files_len: u32,
decls_len: u32,
/// The trailing data of `abi.time_report.CompileResult`, except the step name.
trailing: []const u8,
}) void {
const gpa = ws.gpa;
const step_idx: u32 = for (ws.all_steps, 0..) |s, i| {
if (s == &opts.compile.step) break @intCast(i);
} else unreachable;
const old_buf = old: {
ws.time_report_mutex.lock();
defer ws.time_report_mutex.unlock();
const old = ws.time_report_msgs[step_idx];
ws.time_report_msgs[step_idx] = &.{};
break :old old;
};
const buf = gpa.realloc(old_buf, @sizeOf(abi.time_report.CompileResult) + opts.trailing.len) catch @panic("out of memory");
const out_header: *align(1) abi.time_report.CompileResult = @ptrCast(buf[0..@sizeOf(abi.time_report.CompileResult)]);
out_header.* = .{
.step_idx = step_idx,
.flags = .{
.use_llvm = opts.use_llvm,
},
.stats = opts.stats,
.ns_total = opts.ns_total,
.llvm_pass_timings_len = opts.llvm_pass_timings_len,
.files_len = opts.files_len,
.decls_len = opts.decls_len,
};
@memcpy(buf[@sizeOf(abi.time_report.CompileResult)..], opts.trailing);
{
ws.time_report_mutex.lock();
defer ws.time_report_mutex.unlock();
assert(ws.time_report_msgs[step_idx].len == 0);
ws.time_report_msgs[step_idx] = buf;
ws.time_report_update_times[step_idx] = ws.now();
}
ws.notifyUpdate();
}
pub fn updateTimeReportGeneric(ws: *WebServer, step: *Build.Step, ns_total: u64) void {
const gpa = ws.gpa;
const step_idx: u32 = for (ws.all_steps, 0..) |s, i| {
if (s == step) break @intCast(i);
} else unreachable;
const old_buf = old: {
ws.time_report_mutex.lock();
defer ws.time_report_mutex.unlock();
const old = ws.time_report_msgs[step_idx];
ws.time_report_msgs[step_idx] = &.{};
break :old old;
};
const buf = gpa.realloc(old_buf, @sizeOf(abi.time_report.GenericResult)) catch @panic("out of memory");
const out: *align(1) abi.time_report.GenericResult = @ptrCast(buf);
out.* = .{
.step_idx = step_idx,
.ns_total = ns_total,
};
{
ws.time_report_mutex.lock();
defer ws.time_report_mutex.unlock();
assert(ws.time_report_msgs[step_idx].len == 0);
ws.time_report_msgs[step_idx] = buf;
ws.time_report_update_times[step_idx] = ws.now();
}
ws.notifyUpdate();
}
const RunnerRequest = union(enum) {
rebuild,
};
pub fn getRunnerRequest(ws: *WebServer) ?RunnerRequest {
ws.runner_request_mutex.lock();
defer ws.runner_request_mutex.unlock();
if (ws.runner_request) |req| {
ws.runner_request = null;
ws.runner_request_empty_cond.signal();
return req;
}
return null;
}
pub fn wait(ws: *WebServer) RunnerRequest {
ws.runner_request_mutex.lock();
defer ws.runner_request_mutex.unlock();
while (true) {
if (ws.runner_request) |req| {
ws.runner_request = null;
ws.runner_request_empty_cond.signal();
return req;
}
ws.runner_request_ready_cond.wait(&ws.runner_request_mutex);
}
}
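// Illustrative usage sketch (an assumption about the build runner, which is not part of
// this file): the runner either polls `getRunnerRequest` between its own work items, or
// parks in `wait` when it has nothing else to do, e.g.
//
//     while (true) switch (web_server.wait()) {
//         .rebuild => doRebuild(), // hypothetical runner callback
//     };
//
// `recvWebSocketMessages` is the producing side: it publishes at most one pending request
// and blocks further `rebuild` messages until the runner has consumed the previous one.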
const cache_control_header: std.http.Header = .{
.name = "Cache-Control",
.value = "max-age=0, must-revalidate",
};
const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const log = std.log.scoped(.web_server);
const Allocator = std.mem.Allocator;
const Build = std.Build;
const Cache = Build.Cache;
const Fuzz = Build.Fuzz;
const abi = Build.abi;
const WebServer = @This();

lib/std/Build/abi.zig Normal file

@ -0,0 +1,313 @@
//! This file is shared among Zig code running in wildly different contexts:
//! * The build runner, running on the host computer
//! * The build system web interface Wasm code, running in the browser
//! * `libfuzzer`, compiled alongside unit tests
//!
//! All of these components interface to some degree via an ABI:
//! * The build runner communicates with the web interface over a WebSocket connection
//! * The build runner communicates with `libfuzzer` over a shared memory-mapped file
// Check that no WebSocket message type has implicit padding bits. This ensures we never send any
// undefined bits over the wire, and also helps validate that the layout doesn't differ between, for
// instance, the web server in `std.Build` and the Wasm client.
comptime {
const check = struct {
fn check(comptime T: type) void {
const std = @import("std");
std.debug.assert(@typeInfo(T) == .@"struct");
std.debug.assert(@typeInfo(T).@"struct".layout == .@"extern");
std.debug.assert(std.meta.hasUniqueRepresentation(T));
}
}.check;
// server->client
check(Hello);
check(StatusUpdate);
check(StepUpdate);
check(fuzz.SourceIndexHeader);
check(fuzz.CoverageUpdateHeader);
check(fuzz.EntryPointHeader);
check(time_report.GenericResult);
check(time_report.CompileResult);
// client->server
check(Rebuild);
}
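// For example, `extern struct { tag: u8, x: u32 }` would fail this check: it has three
// padding bytes after `tag`, so `std.meta.hasUniqueRepresentation` returns `false`. That
// is why the message structs below either mark multi-byte fields `align(1)` or spell out
// explicit padding arrays such as `_: [3]u8`.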
/// All WebSocket messages sent by the server to the client begin with a `ToClientTag` byte. This
/// enum is non-exhaustive only to avoid Illegal Behavior when malformed messages are sent over the
/// socket; unnamed tags are an error condition and should terminate the connection.
///
/// Every tag has a corresponding `extern struct` representing the full message (or a header of the
/// message if it is variable-length). For instance, `.hello` corresponds to `Hello`.
///
/// When introducing a tag, make sure to add a corresponding `extern struct` whose first field is
/// this enum, and `check` its layout in the `comptime` block above.
pub const ToClientTag = enum(u8) {
hello,
status_update,
step_update,
// `--fuzz`
fuzz_source_index,
fuzz_coverage_update,
fuzz_entry_points,
// `--time-report`
time_report_generic_result,
time_report_compile_result,
_,
};
/// Like `ToClientTag`, but for messages sent by the client to the server.
pub const ToServerTag = enum(u8) {
rebuild,
_,
};
/// The current overall status of the build runner.
/// Keep in sync with indices in web UI `main.js:updateBuildStatus`.
pub const BuildStatus = enum(u8) {
idle,
watching,
running,
fuzz_init,
};
/// WebSocket server->client.
///
/// Sent by the server as the first message after a WebSocket connection opens to provide basic
/// information about the server, the build graph, etc.
///
/// Trailing:
/// * `step_name_len: u32` for each `steps_len`
/// * `step_name: [step_name_len]u8` for each `step_name_len`
/// * `step_status: u8` for every 4 `steps_len`; every 2 bits is a `StepUpdate.Status`, LSBs first
pub const Hello = extern struct {
tag: ToClientTag = .hello,
status: BuildStatus,
flags: Flags,
/// Any message containing a timestamp represents it as a number of nanoseconds relative to when
/// the build began. This field is the current timestamp, represented in that form.
timestamp: i64 align(4),
/// The number of steps in the build graph which are reachable from the top-level step[s] being
/// run; in other words, the number of steps which will be executed by this build. The name of
/// each step trails this message.
steps_len: u32 align(1),
pub const Flags = packed struct(u16) {
/// Whether time reporting is enabled.
time_report: bool,
_: u15 = 0,
};
};
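// Illustrative sketch (not part of the protocol definition): decoding the step names that
// trail a `Hello` message, for a hypothetical build with two steps named "install" and
// "test". The explicit little-endian byte order is an assumption that both ends are
// little-endian, which holds for the wasm32 client.
test "decode the step names trailing a Hello message" {
    const std = @import("std");
    const trailing = "\x07\x00\x00\x00" ++ // "install".len
        "\x04\x00\x00\x00" ++ // "test".len
        "install" ++ "test";
    const steps_len = 2;
    var names: [steps_len][]const u8 = undefined;
    var offset: usize = steps_len * @sizeOf(u32);
    for (&names, 0..) |*name, i| {
        const name_len = std.mem.readInt(u32, trailing[i * 4 ..][0..4], .little);
        name.* = trailing[offset..][0..name_len];
        offset += name_len;
    }
    try std.testing.expectEqualStrings("install", names[0]);
    try std.testing.expectEqualStrings("test", names[1]);
}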
/// WebSocket server->client.
///
/// Indicates that the build status has changed.
pub const StatusUpdate = extern struct {
tag: ToClientTag = .status_update,
new: BuildStatus,
};
/// WebSocket server->client.
///
/// Indicates a change in a step's status.
pub const StepUpdate = extern struct {
tag: ToClientTag = .step_update,
step_idx: u32 align(1),
bits: packed struct(u8) {
status: Status,
_: u6 = 0,
},
/// Keep in sync with indices in web UI `main.js:updateStepStatus`.
pub const Status = enum(u2) {
pending,
wip,
success,
failure,
};
};
pub const Rebuild = extern struct {
tag: ToServerTag = .rebuild,
};
/// ABI bits specifically relating to the fuzzer interface.
pub const fuzz = struct {
/// libfuzzer uses this struct, so its `usize` is the one that counts. To match the ABI,
/// these integers must be the pointer size of the target that libfuzzer is built for.
///
/// Trailing:
/// * 1 bit per pc_addr, usize elements
/// * pc_addr: usize for each pcs_len
pub const SeenPcsHeader = extern struct {
n_runs: usize,
unique_runs: usize,
pcs_len: usize,
/// Used for comptime assertions. Provides a mechanism for strategically
/// causing compile errors.
pub const trailing = .{
.pc_bits_usize,
.pc_addr,
};
pub fn headerEnd(header: *const SeenPcsHeader) []const usize {
const ptr: [*]align(@alignOf(usize)) const u8 = @ptrCast(header);
const header_end_ptr: [*]const usize = @ptrCast(ptr + @sizeOf(SeenPcsHeader));
const pcs_len = header.pcs_len;
return header_end_ptr[0 .. pcs_len + seenElemsLen(pcs_len)];
}
pub fn seenBits(header: *const SeenPcsHeader) []const usize {
return header.headerEnd()[0..seenElemsLen(header.pcs_len)];
}
pub fn seenElemsLen(pcs_len: usize) usize {
return (pcs_len + @bitSizeOf(usize) - 1) / @bitSizeOf(usize);
}
pub fn pcAddrs(header: *const SeenPcsHeader) []const usize {
const pcs_len = header.pcs_len;
return header.headerEnd()[seenElemsLen(pcs_len)..][0..pcs_len];
}
};
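// Illustrative sketch (not part of the original change): `seenElemsLen` is a
// divide-rounding-up — the number of `usize` elements needed to hold one "seen" bit per
// program counter.
test "SeenPcsHeader.seenElemsLen rounds up to whole usize elements" {
    const std = @import("std");
    const bits = @bitSizeOf(usize);
    try std.testing.expectEqual(@as(usize, 0), SeenPcsHeader.seenElemsLen(0));
    try std.testing.expectEqual(@as(usize, 1), SeenPcsHeader.seenElemsLen(1));
    try std.testing.expectEqual(@as(usize, 1), SeenPcsHeader.seenElemsLen(bits));
    try std.testing.expectEqual(@as(usize, 2), SeenPcsHeader.seenElemsLen(bits + 1));
}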
/// WebSocket server->client.
///
/// Sent once, when fuzzing starts, to indicate the available coverage data.
///
/// Trailing:
/// * std.debug.Coverage.String for each directories_len
/// * std.debug.Coverage.File for each files_len
/// * std.debug.Coverage.SourceLocation for each source_locations_len
/// * u8 for each string_bytes_len
pub const SourceIndexHeader = extern struct {
tag: ToClientTag = .fuzz_source_index,
_: [3]u8 = @splat(0),
directories_len: u32,
files_len: u32,
source_locations_len: u32,
string_bytes_len: u32,
/// When, according to the server, fuzzing started.
start_timestamp: i64 align(4),
};
/// WebSocket server->client.
///
/// Sent whenever the set of covered source locations is updated.
///
/// Trailing:
/// * one bit per source_locations_len, contained in u64 elements
pub const CoverageUpdateHeader = extern struct {
tag: ToClientTag = .fuzz_coverage_update,
_: [7]u8 = @splat(0),
n_runs: u64,
unique_runs: u64,
pub const trailing = .{
.pc_bits_usize,
};
};
/// WebSocket server->client.
///
/// Sent whenever the set of entry points is updated.
///
/// Trailing:
/// * one `u32` index into `source_locations` for each of `locsLen()` entry points
pub const EntryPointHeader = extern struct {
tag: ToClientTag = .fuzz_entry_points,
locs_len_raw: [3]u8,
pub fn locsLen(hdr: EntryPointHeader) u24 {
return @bitCast(hdr.locs_len_raw);
}
pub fn init(locs_len: u24) EntryPointHeader {
return .{ .locs_len_raw = @bitCast(locs_len) };
}
};
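// Illustrative sketch (not part of the original change): the entry point count is stored
// as three raw bytes, so the whole header is exactly 4 bytes with no padding bits; `init`
// and `locsLen` simply round-trip the count through `u24`.
test "EntryPointHeader round-trips its u24 length" {
    const std = @import("std");
    try std.testing.expectEqual(@as(u24, 0), EntryPointHeader.init(0).locsLen());
    try std.testing.expectEqual(@as(u24, 123_456), EntryPointHeader.init(123_456).locsLen());
    try std.testing.expectEqual(4, @sizeOf(EntryPointHeader));
}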
};
/// ABI bits specifically relating to the time report interface.
pub const time_report = struct {
/// WebSocket server->client.
///
/// Sent after a `Step` finishes, providing the time taken to execute the step.
pub const GenericResult = extern struct {
tag: ToClientTag = .time_report_generic_result,
step_idx: u32 align(1),
ns_total: u64 align(1),
};
/// WebSocket server->client.
///
/// Sent after a `Step.Compile` finishes, providing the step's time report.
///
/// Trailing:
/// * `llvm_pass_timings: [llvm_pass_timings_len]u8` (ASCII-encoded)
/// * for each `files_len`:
/// * `name` (null-terminated UTF-8 string)
/// * for each `decls_len`:
/// * `name` (null-terminated UTF-8 string)
/// * `file: u32` (index of file this decl is in)
/// * `sema_ns: u64` (nanoseconds spent semantically analyzing this decl)
/// * `codegen_ns: u64` (nanoseconds spent generating code for this decl)
/// * `link_ns: u64` (nanoseconds spent linking this decl)
pub const CompileResult = extern struct {
tag: ToClientTag = .time_report_compile_result,
step_idx: u32 align(1),
flags: Flags,
stats: Stats align(1),
ns_total: u64 align(1),
llvm_pass_timings_len: u32 align(1),
files_len: u32 align(1),
decls_len: u32 align(1),
pub const Flags = packed struct(u8) {
use_llvm: bool,
_: u7 = 0,
};
pub const Stats = extern struct {
n_reachable_files: u32,
n_imported_files: u32,
n_generic_instances: u32,
n_inline_calls: u32,
cpu_ns_parse: u64,
cpu_ns_astgen: u64,
cpu_ns_sema: u64,
cpu_ns_codegen: u64,
cpu_ns_link: u64,
real_ns_files: u64,
real_ns_decls: u64,
real_ns_llvm_emit: u64,
real_ns_link_flush: u64,
pub const init: Stats = .{
.n_reachable_files = 0,
.n_imported_files = 0,
.n_generic_instances = 0,
.n_inline_calls = 0,
.cpu_ns_parse = 0,
.cpu_ns_astgen = 0,
.cpu_ns_sema = 0,
.cpu_ns_codegen = 0,
.cpu_ns_link = 0,
.real_ns_files = 0,
.real_ns_decls = 0,
.real_ns_llvm_emit = 0,
.real_ns_link_flush = 0,
};
};
};
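// Illustrative decoding sketch (an assumption based on the doc comment above, not a
// client implementation from this change): per-decl records are assumed to be tightly
// packed after the file names, with unaligned little-endian integers following each
// null-terminated name. This example assumes `llvm_pass_timings_len = 0`, `files_len = 1`
// and `decls_len = 1`.
test "decode one CompileResult trailing decl record" {
    const std = @import("std");
    const trailing = "main.zig\x00" ++ // file 0 name
        "main.main\x00" ++ // decl 0 name
        "\x00\x00\x00\x00" ++ // file: u32 = 0
        "\x0a\x00\x00\x00\x00\x00\x00\x00" ++ // sema_ns: u64 = 10
        "\x14\x00\x00\x00\x00\x00\x00\x00" ++ // codegen_ns: u64 = 20
        "\x1e\x00\x00\x00\x00\x00\x00\x00"; // link_ns: u64 = 30
    var offset: usize = 0;
    const file_name = std.mem.sliceTo(trailing[offset..], 0);
    offset += file_name.len + 1;
    const decl_name = std.mem.sliceTo(trailing[offset..], 0);
    offset += decl_name.len + 1;
    const file = std.mem.readInt(u32, trailing[offset..][0..4], .little);
    offset += 4;
    const sema_ns = std.mem.readInt(u64, trailing[offset..][0..8], .little);
    try std.testing.expectEqualStrings("main.zig", file_name);
    try std.testing.expectEqualStrings("main.main", decl_name);
    try std.testing.expectEqual(@as(u32, 0), file);
    try std.testing.expectEqual(@as(u64, 10), sema_ns);
}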
};

Some files were not shown because too many files have changed in this diff.