Merge pull request #20580 from ziglang/watch

introduce file system watching features to the zig build system
This commit is contained in:
Andrew Kelley 2024-07-12 16:56:17 -07:00 committed by GitHub
commit 1d20ff11d7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
27 changed files with 1551 additions and 416 deletions

View File

@ -595,7 +595,7 @@ fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {
run_opt.addArg("-o");
run_opt.addFileArg(b.path("stage1/zig1.wasm"));
const copy_zig_h = b.addWriteFiles();
const copy_zig_h = b.addUpdateSourceFiles();
copy_zig_h.addCopyFileToSource(b.path("lib/zig.h"), "stage1/zig.h");
const update_zig1_step = b.step("update-zig1", "Update stage1/zig1.wasm");
@ -1261,7 +1261,9 @@ fn generateLangRef(b: *std.Build) std.Build.LazyPath {
});
var dir = b.build_root.handle.openDir("doc/langref", .{ .iterate = true }) catch |err| {
std.debug.panic("unable to open 'doc/langref' directory: {s}", .{@errorName(err)});
std.debug.panic("unable to open '{}doc/langref' directory: {s}", .{
b.build_root, @errorName(err),
});
};
defer dir.close();
@ -1280,10 +1282,7 @@ fn generateLangRef(b: *std.Build) std.Build.LazyPath {
// in a temporary directory
"--cache-root", b.cache_root.path orelse ".",
});
if (b.zig_lib_dir) |p| {
cmd.addArg("--zig-lib-dir");
cmd.addDirectoryArg(p);
}
cmd.addArgs(&.{ "--zig-lib-dir", b.fmt("{}", .{b.graph.zig_lib_directory}) });
cmd.addArgs(&.{"-i"});
cmd.addFileArg(b.path(b.fmt("doc/langref/{s}", .{entry.name})));

View File

@ -8,6 +8,9 @@ const process = std.process;
const ArrayList = std.ArrayList;
const File = std.fs.File;
const Step = std.Build.Step;
const Watch = std.Build.Watch;
const Allocator = std.mem.Allocator;
const fatal = std.zig.fatal;
pub const root = @import("@build");
pub const dependencies = @import("@dependencies");
@ -29,21 +32,15 @@ pub fn main() !void {
// skip my own exe name
var arg_idx: usize = 1;
const zig_exe = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected path to zig compiler\n", .{});
return error.InvalidArgs;
};
const build_root = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected build root directory path\n", .{});
return error.InvalidArgs;
};
const cache_root = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected cache root directory path\n", .{});
return error.InvalidArgs;
};
const global_cache_root = nextArg(args, &arg_idx) orelse {
std.debug.print("Expected global cache root directory path\n", .{});
return error.InvalidArgs;
const zig_exe = nextArg(args, &arg_idx) orelse fatal("missing zig compiler path", .{});
const zig_lib_dir = nextArg(args, &arg_idx) orelse fatal("missing zig lib directory path", .{});
const build_root = nextArg(args, &arg_idx) orelse fatal("missing build root directory path", .{});
const cache_root = nextArg(args, &arg_idx) orelse fatal("missing cache root directory path", .{});
const global_cache_root = nextArg(args, &arg_idx) orelse fatal("missing global cache root directory path", .{});
const zig_lib_directory: std.Build.Cache.Directory = .{
.path = zig_lib_dir,
.handle = try std.fs.cwd().openDir(zig_lib_dir, .{}),
};
const build_root_directory: std.Build.Cache.Directory = .{
@ -70,6 +67,7 @@ pub fn main() !void {
.zig_exe = zig_exe,
.env_map = try process.getEnvMap(arena),
.global_cache_root = global_cache_directory,
.zig_lib_directory = zig_lib_directory,
.host = .{
.query = .{},
.result = try std.zig.system.resolveTargetQuery(.{}),
@ -97,13 +95,15 @@ pub fn main() !void {
var dir_list = std.Build.DirList{};
var summary: ?Summary = null;
var max_rss: u64 = 0;
var skip_oom_steps: bool = false;
var skip_oom_steps = false;
var color: Color = .auto;
var seed: u32 = 0;
var prominent_compile_errors: bool = false;
var help_menu: bool = false;
var steps_menu: bool = false;
var prominent_compile_errors = false;
var help_menu = false;
var steps_menu = false;
var output_tmp_nonce: ?[16]u8 = null;
var watch = false;
var debounce_interval_ms: u16 = 50;
while (nextArg(args, &arg_idx)) |arg| {
if (mem.startsWith(u8, arg, "-Z")) {
@ -185,13 +185,19 @@ pub fn main() !void {
arg, next_arg,
});
};
} else if (mem.eql(u8, arg, "--zig-lib-dir")) {
builder.zig_lib_dir = .{ .cwd_relative = nextArgOrFatal(args, &arg_idx) };
} else if (mem.eql(u8, arg, "--seed")) {
const next_arg = nextArg(args, &arg_idx) orelse
fatalWithHint("expected u32 after '{s}'", .{arg});
seed = std.fmt.parseUnsigned(u32, next_arg, 0) catch |err| {
fatal("unable to parse seed '{s}' as 32-bit integer: {s}\n", .{
fatal("unable to parse seed '{s}' as unsigned 32-bit integer: {s}\n", .{
next_arg, @errorName(err),
});
};
} else if (mem.eql(u8, arg, "--debounce")) {
const next_arg = nextArg(args, &arg_idx) orelse
fatalWithHint("expected u16 after '{s}'", .{arg});
debounce_interval_ms = std.fmt.parseUnsigned(u16, next_arg, 0) catch |err| {
fatal("unable to parse debounce interval '{s}' as unsigned 16-bit integer: {s}\n", .{
next_arg, @errorName(err),
});
};
@ -227,6 +233,8 @@ pub fn main() !void {
builder.verbose_llvm_cpu_features = true;
} else if (mem.eql(u8, arg, "--prominent-compile-errors")) {
prominent_compile_errors = true;
} else if (mem.eql(u8, arg, "--watch")) {
watch = true;
} else if (mem.eql(u8, arg, "-fwine")) {
builder.enable_wine = true;
} else if (mem.eql(u8, arg, "-fno-wine")) {
@ -292,6 +300,7 @@ pub fn main() !void {
const main_progress_node = std.Progress.start(.{
.disable_printing = (color == .off),
});
defer main_progress_node.end();
builder.debug_log_scopes = debug_log_scopes.items;
builder.resolveInstallPrefix(install_prefix, dir_list);
@ -340,13 +349,16 @@ pub fn main() !void {
.max_rss_is_default = false,
.max_rss_mutex = .{},
.skip_oom_steps = skip_oom_steps,
.watch = watch,
.memory_blocked_steps = std.ArrayList(*Step).init(arena),
.step_stack = .{},
.prominent_compile_errors = prominent_compile_errors,
.claimed_rss = 0,
.summary = summary,
.summary = summary orelse if (watch) .new else .failures,
.ttyconf = ttyconf,
.stderr = stderr,
.thread_pool = undefined,
};
if (run.max_rss == 0) {
@ -354,18 +366,78 @@ pub fn main() !void {
run.max_rss_is_default = true;
}
runStepNames(
arena,
builder,
targets.items,
main_progress_node,
thread_pool_options,
&run,
seed,
) catch |err| switch (err) {
const gpa = arena;
prepare(gpa, arena, builder, targets.items, &run, seed) catch |err| switch (err) {
error.UncleanExit => process.exit(1),
else => return err,
};
var w = if (watch) try Watch.init() else undefined;
try run.thread_pool.init(thread_pool_options);
defer run.thread_pool.deinit();
rebuild: while (true) {
runStepNames(
gpa,
builder,
targets.items,
main_progress_node,
&run,
) catch |err| switch (err) {
error.UncleanExit => {
assert(!run.watch);
process.exit(1);
},
else => return err,
};
if (!watch) return cleanExit();
switch (builtin.os.tag) {
.linux => {},
else => fatal("--watch not yet implemented for {s}", .{@tagName(builtin.os.tag)}),
}
try w.update(gpa, run.step_stack.keys());
// Wait until a file system notification arrives. Read all such events
// until the buffer is empty. Then wait for a debounce interval, resetting
// if any more events come in. After the debounce interval has passed,
// trigger a rebuild on all steps with modified inputs, as well as their
// recursive dependants.
var caption_buf: [std.Progress.Node.max_name_len]u8 = undefined;
const caption = std.fmt.bufPrint(&caption_buf, "Watching {d} Directories", .{
w.dir_table.entries.len,
}) catch &caption_buf;
var debouncing_node = main_progress_node.start(caption, 0);
var debounce_timeout: Watch.Timeout = .none;
while (true) switch (try w.wait(gpa, debounce_timeout)) {
.timeout => {
debouncing_node.end();
markFailedStepsDirty(gpa, run.step_stack.keys());
continue :rebuild;
},
.dirty => if (debounce_timeout == .none) {
debounce_timeout = .{ .ms = debounce_interval_ms };
debouncing_node.end();
debouncing_node = main_progress_node.start("Debouncing (Change Detected)", 0);
},
.clean => {},
};
}
}
/// Prepares the step graph for a watch-mode rebuild: any step that did not
/// succeed last time is recursively reset so it runs again, while steps that
/// did succeed are flagged as cached so the next summary reports them as such.
fn markFailedStepsDirty(gpa: Allocator, all_steps: []const *Step) void {
    // Pass 1: reset every failed/skipped step (and, via recursiveReset, its
    // dependency subtree) back to a runnable state.
    for (all_steps) |step| {
        switch (step.state) {
            .dependency_failure, .failure, .skipped => step.recursiveReset(gpa),
            else => {},
        }
    }
    // Pass 2: whatever still reports success was untouched by pass 1, so it
    // carried over from the previous run and counts as cached.
    for (all_steps) |step| {
        if (step.state == .success) step.result_cached = true;
    }
}
const Run = struct {
@ -373,27 +445,27 @@ const Run = struct {
max_rss_is_default: bool,
max_rss_mutex: std.Thread.Mutex,
skip_oom_steps: bool,
watch: bool,
memory_blocked_steps: std.ArrayList(*Step),
step_stack: std.AutoArrayHashMapUnmanaged(*Step, void),
prominent_compile_errors: bool,
thread_pool: std.Thread.Pool,
claimed_rss: usize,
summary: ?Summary,
summary: Summary,
ttyconf: std.io.tty.Config,
stderr: File,
};
fn runStepNames(
arena: std.mem.Allocator,
fn prepare(
gpa: Allocator,
arena: Allocator,
b: *std.Build,
step_names: []const []const u8,
parent_prog_node: std.Progress.Node,
thread_pool_options: std.Thread.Pool.Options,
run: *Run,
seed: u32,
) !void {
const gpa = b.allocator;
var step_stack: std.AutoArrayHashMapUnmanaged(*Step, void) = .{};
defer step_stack.deinit(gpa);
const step_stack = &run.step_stack;
if (step_names.len == 0) {
try step_stack.put(gpa, b.default_step, {});
@ -416,8 +488,8 @@ fn runStepNames(
rand.shuffle(*Step, starting_steps);
for (starting_steps) |s| {
constructGraphAndCheckForDependencyLoop(b, s, &step_stack, rand) catch |err| switch (err) {
error.DependencyLoopDetected => return error.UncleanExit,
constructGraphAndCheckForDependencyLoop(b, s, &run.step_stack, rand) catch |err| switch (err) {
error.DependencyLoopDetected => return uncleanExit(),
else => |e| return e,
};
}
@ -442,17 +514,22 @@ fn runStepNames(
if (run.max_rss_is_default) {
std.debug.print("note: use --maxrss to override the default", .{});
}
return error.UncleanExit;
return uncleanExit();
}
}
}
var thread_pool: std.Thread.Pool = undefined;
try thread_pool.init(thread_pool_options);
defer thread_pool.deinit();
fn runStepNames(
gpa: Allocator,
b: *std.Build,
step_names: []const []const u8,
parent_prog_node: std.Progress.Node,
run: *Run,
) !void {
const step_stack = &run.step_stack;
const thread_pool = &run.thread_pool;
{
defer parent_prog_node.end();
const step_prog = parent_prog_node.start("steps", step_stack.count());
defer step_prog.end();
@ -468,7 +545,7 @@ fn runStepNames(
if (step.state == .skipped_oom) continue;
thread_pool.spawnWg(&wait_group, workerMakeOneStep, .{
&wait_group, &thread_pool, b, step, step_prog, run,
&wait_group, b, step, step_prog, run,
});
}
}
@ -485,8 +562,6 @@ fn runStepNames(
var failure_count: usize = 0;
var pending_count: usize = 0;
var total_compile_errors: usize = 0;
var compile_error_steps: std.ArrayListUnmanaged(*Step) = .{};
defer compile_error_steps.deinit(gpa);
for (step_stack.keys()) |s| {
test_fail_count += s.test_results.fail_count;
@ -516,7 +591,6 @@ fn runStepNames(
const compile_errors_len = s.result_error_bundle.errorMessageCount();
if (compile_errors_len > 0) {
total_compile_errors += compile_errors_len;
try compile_error_steps.append(gpa, s);
}
},
}
@ -524,13 +598,22 @@ fn runStepNames(
// A proper command line application defaults to silently succeeding.
// The user may request verbose mode if they have a different preference.
const failures_only = run.summary != .all and run.summary != .new;
if (failure_count == 0 and failures_only) return cleanExit();
const failures_only = switch (run.summary) {
.failures, .none => true,
else => false,
};
if (failure_count == 0 and failures_only) {
if (!run.watch) cleanExit();
return;
}
const ttyconf = run.ttyconf;
const stderr = run.stderr;
if (run.summary != Summary.none) {
if (run.summary != .none) {
std.debug.lockStdErr();
defer std.debug.unlockStdErr();
const stderr = run.stderr;
const total_count = success_count + failure_count + pending_count + skipped_count;
ttyconf.setColor(stderr, .cyan) catch {};
stderr.writeAll("Build Summary:") catch {};
@ -544,25 +627,23 @@ fn runStepNames(
if (test_fail_count > 0) stderr.writer().print("; {d} failed", .{test_fail_count}) catch {};
if (test_leak_count > 0) stderr.writer().print("; {d} leaked", .{test_leak_count}) catch {};
if (run.summary == null) {
ttyconf.setColor(stderr, .dim) catch {};
stderr.writeAll(" (disable with --summary none)") catch {};
ttyconf.setColor(stderr, .reset) catch {};
}
stderr.writeAll("\n") catch {};
// Print a fancy tree with build results.
var step_stack_copy = try step_stack.clone(gpa);
defer step_stack_copy.deinit(gpa);
var print_node: PrintNode = .{ .parent = null };
if (step_names.len == 0) {
print_node.last = true;
printTreeStep(b, b.default_step, run, stderr, ttyconf, &print_node, &step_stack) catch {};
printTreeStep(b, b.default_step, run, stderr, ttyconf, &print_node, &step_stack_copy) catch {};
} else {
const last_index = if (run.summary == .all) b.top_level_steps.count() else blk: {
var i: usize = step_names.len;
while (i > 0) {
i -= 1;
const step = b.top_level_steps.get(step_names[i]).?.step;
const found = switch (run.summary orelse .failures) {
const found = switch (run.summary) {
.all, .none => unreachable,
.failures => step.state != .success,
.new => !step.result_cached,
@ -574,30 +655,34 @@ fn runStepNames(
for (step_names, 0..) |step_name, i| {
const tls = b.top_level_steps.get(step_name).?;
print_node.last = i + 1 == last_index;
printTreeStep(b, &tls.step, run, stderr, ttyconf, &print_node, &step_stack) catch {};
printTreeStep(b, &tls.step, run, stderr, ttyconf, &print_node, &step_stack_copy) catch {};
}
}
}
if (failure_count == 0) return cleanExit();
if (failure_count == 0) {
if (!run.watch) cleanExit();
return;
}
// Finally, render compile errors at the bottom of the terminal.
// We use a separate compile_error_steps array list because step_stack is destructively
// mutated in printTreeStep above.
if (run.prominent_compile_errors and total_compile_errors > 0) {
for (compile_error_steps.items) |s| {
for (step_stack.keys()) |s| {
if (s.result_error_bundle.errorMessageCount() > 0) {
s.result_error_bundle.renderToStdErr(renderOptions(ttyconf));
}
}
// Signal to parent process that we have printed compile errors. The
// parent process may choose to omit the "following command failed"
// line in this case.
process.exit(2);
if (!run.watch) {
// Signal to parent process that we have printed compile errors. The
// parent process may choose to omit the "following command failed"
// line in this case.
std.debug.lockStdErr();
process.exit(2);
}
}
process.exit(1);
if (!run.watch) return uncleanExit();
}
const PrintNode = struct {
@ -768,7 +853,7 @@ fn printTreeStep(
step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
) !void {
const first = step_stack.swapRemove(s);
const summary = run.summary orelse .failures;
const summary = run.summary;
const skip = switch (summary) {
.none => unreachable,
.all => false,
@ -889,12 +974,13 @@ fn constructGraphAndCheckForDependencyLoop(
fn workerMakeOneStep(
wg: *std.Thread.WaitGroup,
thread_pool: *std.Thread.Pool,
b: *std.Build,
s: *Step,
prog_node: std.Progress.Node,
run: *Run,
) void {
const thread_pool = &run.thread_pool;
// First, check the conditions for running this step. If they are not met,
// then we return without doing the step, relying on another worker to
// queue this step up again when dependencies are met.
@ -974,7 +1060,7 @@ fn workerMakeOneStep(
// Successful completion of a step, so we queue up its dependants as well.
for (s.dependants.items) |dep| {
thread_pool.spawnWg(wg, workerMakeOneStep, .{
wg, thread_pool, b, dep, prog_node, run,
wg, b, dep, prog_node, run,
});
}
}
@ -999,7 +1085,7 @@ fn workerMakeOneStep(
remaining -= dep.max_rss;
thread_pool.spawnWg(wg, workerMakeOneStep, .{
wg, thread_pool, b, dep, prog_node, run,
wg, b, dep, prog_node, run,
});
} else {
run.memory_blocked_steps.items[i] = dep;
@ -1124,6 +1210,8 @@ fn usage(b: *std.Build, out_stream: anytype) !void {
\\ --maxrss <bytes> Limit memory usage (default is to use available memory)
\\ --skip-oom-steps Instead of failing, skip steps that would exceed --maxrss
\\ --fetch Exit after fetching dependency tree
\\ --watch Continuously rebuild when source files are modified
\\ --debounce <ms> Delay before rebuilding after changed file detected
\\
\\Project-Specific Options:
\\
@ -1218,13 +1306,22 @@ fn argsRest(args: [][:0]const u8, idx: usize) ?[][:0]const u8 {
return args[idx..];
}
/// Perhaps in the future there could be an Advanced Options flag such as
/// --debug-build-runner-leaks which would make this function return instead of
/// calling exit.
/// Terminates the process with exit code 0 (success).
fn cleanExit() void {
// Perhaps in the future there could be an Advanced Options flag such as
// --debug-build-runner-leaks which would make this function return instead
// of calling exit.
// NOTE(review): taking the stderr lock before exiting presumably prevents
// terminating while another thread is mid-write to stderr — confirm.
std.debug.lockStdErr();
process.exit(0);
}
/// Terminates the process with exit code 1 (failure).
///
/// The `error{UncleanExit}` return type exists so call sites can write
/// `return uncleanExit();` even though `process.exit` never returns.
///
/// Perhaps in the future there could be an Advanced Options flag such as
/// --debug-build-runner-leaks which would make this function return instead of
/// calling exit.
fn uncleanExit() error{UncleanExit} {
// NOTE(review): taking the stderr lock before exiting presumably prevents
// terminating while another thread is mid-write to stderr — confirm.
std.debug.lockStdErr();
process.exit(1);
}
const Color = std.zig.Color;
const Summary = enum { all, new, failures, none };
@ -1249,11 +1346,6 @@ fn fatalWithHint(comptime f: []const u8, args: anytype) noreturn {
process.exit(1);
}
/// Prints the formatted message followed by a newline to stderr, then
/// terminates the process with exit code 1. Never returns.
fn fatal(comptime f: []const u8, args: anytype) noreturn {
std.debug.print(f ++ "\n", args);
process.exit(1);
}
fn validateSystemLibraryOptions(b: *std.Build) void {
var bad = false;
for (b.graph.system_library_options.keys(), b.graph.system_library_options.values()) |k, v| {

View File

@ -198,20 +198,14 @@ fn cmdObjCopy(
return std.process.cleanExit();
},
.update => {
if (seen_update) {
std.debug.print("zig objcopy only supports 1 update for now\n", .{});
std.process.exit(1);
}
if (seen_update) fatal("zig objcopy only supports 1 update for now", .{});
seen_update = true;
try server.serveEmitBinPath(output, .{
.flags = .{ .cache_hit = false },
});
},
else => {
std.debug.print("unsupported message: {s}", .{@tagName(hdr.tag)});
std.process.exit(1);
},
else => fatal("unsupported message: {s}", .{@tagName(hdr.tag)}),
}
}
}

View File

@ -20,6 +20,7 @@ const Build = @This();
pub const Cache = @import("Build/Cache.zig");
pub const Step = @import("Build/Step.zig");
pub const Module = @import("Build/Module.zig");
pub const Watch = @import("Build/Watch.zig");
/// Shared state among all Build instances.
graph: *Graph,
@ -50,11 +51,9 @@ install_path: []const u8,
sysroot: ?[]const u8 = null,
search_prefixes: std.ArrayListUnmanaged([]const u8),
libc_file: ?[]const u8 = null,
installed_files: ArrayList(InstalledFile),
/// Path to the directory containing build.zig.
build_root: Cache.Directory,
cache_root: Cache.Directory,
zig_lib_dir: ?LazyPath,
pkg_config_pkg_list: ?(PkgConfigError![]const PkgConfigPkg) = null,
args: ?[]const []const u8 = null,
debug_log_scopes: []const []const u8 = &.{},
@ -117,6 +116,7 @@ pub const Graph = struct {
zig_exe: [:0]const u8,
env_map: EnvMap,
global_cache_root: Cache.Directory,
zig_lib_directory: Cache.Directory,
needed_lazy_dependencies: std.StringArrayHashMapUnmanaged(void) = .{},
/// Information about the native target. Computed before build() is invoked.
host: ResolvedTarget,
@ -276,7 +276,6 @@ pub fn create(
.exe_dir = undefined,
.h_dir = undefined,
.dest_dir = graph.env_map.get("DESTDIR"),
.installed_files = ArrayList(InstalledFile).init(arena),
.install_tls = .{
.step = Step.init(.{
.id = TopLevelStep.base_id,
@ -294,7 +293,6 @@ pub fn create(
}),
.description = "Remove build artifacts from prefix path",
},
.zig_lib_dir = null,
.install_path = undefined,
.args = null,
.host = graph.host,
@ -378,10 +376,8 @@ fn createChildOnly(
.sysroot = parent.sysroot,
.search_prefixes = parent.search_prefixes,
.libc_file = parent.libc_file,
.installed_files = ArrayList(InstalledFile).init(allocator),
.build_root = build_root,
.cache_root = parent.cache_root,
.zig_lib_dir = parent.zig_lib_dir,
.debug_log_scopes = parent.debug_log_scopes,
.debug_compile_errors = parent.debug_compile_errors,
.debug_pkg_config = parent.debug_pkg_config,
@ -689,7 +685,7 @@ pub fn addExecutable(b: *Build, options: ExecutableOptions) *Step.Compile {
.max_rss = options.max_rss,
.use_llvm = options.use_llvm,
.use_lld = options.use_lld,
.zig_lib_dir = options.zig_lib_dir orelse b.zig_lib_dir,
.zig_lib_dir = options.zig_lib_dir,
.win32_manifest = options.win32_manifest,
});
}
@ -737,7 +733,7 @@ pub fn addObject(b: *Build, options: ObjectOptions) *Step.Compile {
.max_rss = options.max_rss,
.use_llvm = options.use_llvm,
.use_lld = options.use_lld,
.zig_lib_dir = options.zig_lib_dir orelse b.zig_lib_dir,
.zig_lib_dir = options.zig_lib_dir,
});
}
@ -793,7 +789,7 @@ pub fn addSharedLibrary(b: *Build, options: SharedLibraryOptions) *Step.Compile
.max_rss = options.max_rss,
.use_llvm = options.use_llvm,
.use_lld = options.use_lld,
.zig_lib_dir = options.zig_lib_dir orelse b.zig_lib_dir,
.zig_lib_dir = options.zig_lib_dir,
.win32_manifest = options.win32_manifest,
});
}
@ -844,7 +840,7 @@ pub fn addStaticLibrary(b: *Build, options: StaticLibraryOptions) *Step.Compile
.max_rss = options.max_rss,
.use_llvm = options.use_llvm,
.use_lld = options.use_lld,
.zig_lib_dir = options.zig_lib_dir orelse b.zig_lib_dir,
.zig_lib_dir = options.zig_lib_dir,
});
}
@ -907,7 +903,7 @@ pub fn addTest(b: *Build, options: TestOptions) *Step.Compile {
.test_runner = options.test_runner,
.use_llvm = options.use_llvm,
.use_lld = options.use_lld,
.zig_lib_dir = options.zig_lib_dir orelse b.zig_lib_dir,
.zig_lib_dir = options.zig_lib_dir,
});
}
@ -931,7 +927,7 @@ pub fn addAssembly(b: *Build, options: AssemblyOptions) *Step.Compile {
.optimize = options.optimize,
},
.max_rss = options.max_rss,
.zig_lib_dir = options.zig_lib_dir orelse b.zig_lib_dir,
.zig_lib_dir = options.zig_lib_dir,
});
obj_step.addAssemblyFile(options.source_file);
return obj_step;
@ -1054,7 +1050,11 @@ pub fn addWriteFiles(b: *Build) *Step.WriteFile {
return Step.WriteFile.create(b);
}
pub fn addRemoveDirTree(b: *Build, dir_path: []const u8) *Step.RemoveDir {
/// Creates an `UpdateSourceFiles` step, used to copy build outputs back into
/// the source tree (e.g. committed generated files).
pub fn addUpdateSourceFiles(b: *Build) *Step.UpdateSourceFiles {
return Step.UpdateSourceFiles.create(b);
}
/// Creates a step that recursively deletes the directory tree at `dir_path`.
pub fn addRemoveDirTree(b: *Build, dir_path: LazyPath) *Step.RemoveDir {
return Step.RemoveDir.create(b, dir_path);
}
@ -1083,15 +1083,8 @@ fn makeUninstall(uninstall_step: *Step, prog_node: std.Progress.Node) anyerror!v
const uninstall_tls: *TopLevelStep = @fieldParentPtr("step", uninstall_step);
const b: *Build = @fieldParentPtr("uninstall_tls", uninstall_tls);
for (b.installed_files.items) |installed_file| {
const full_path = b.getInstallPath(installed_file.dir, installed_file.path);
if (b.verbose) {
log.info("rm {s}", .{full_path});
}
fs.cwd().deleteTree(full_path) catch {};
}
// TODO remove empty directories
_ = b;
@panic("TODO implement https://github.com/ziglang/zig/issues/14943");
}
/// Creates a configuration option to be passed to the build.zig script.
@ -1664,15 +1657,6 @@ pub fn addCheckFile(
return Step.CheckFile.create(b, file_source, options);
}
/// Records `dest_rel_path` under `dir` in `b.installed_files` so the
/// uninstall step can later remove it. The path and directory are duplicated
/// into the builder's allocator via `InstalledFile.dupe`.
///
/// deprecated: https://github.com/ziglang/zig/issues/14943
pub fn pushInstalledFile(b: *Build, dir: InstallDir, dest_rel_path: []const u8) void {
const file = InstalledFile{
.dir = dir,
.path = dest_rel_path,
};
// Allocation failure is unrecoverable in the configure phase.
b.installed_files.append(file.dupe(b)) catch @panic("OOM");
}
pub fn truncateFile(b: *Build, dest_path: []const u8) !void {
if (b.verbose) {
log.info("truncate {s}", .{dest_path});
@ -2341,36 +2325,52 @@ pub const LazyPath = union(enum) {
}
}
/// Returns an absolute path.
/// Intended to be used during the make phase only.
/// Deprecated, see `getPath3`.
pub fn getPath(lazy_path: LazyPath, src_builder: *Build) []const u8 {
return getPath2(lazy_path, src_builder, null);
}
/// Returns an absolute path.
/// Deprecated, see `getPath3`.
pub fn getPath2(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) []const u8 {
const p = getPath3(lazy_path, src_builder, asking_step);
return src_builder.pathResolve(&.{ p.root_dir.path orelse ".", p.sub_path });
}
/// Intended to be used during the make phase only.
///
/// `asking_step` is only used for debugging purposes; it's the step being
/// run that is asking for the path.
pub fn getPath2(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) []const u8 {
pub fn getPath3(lazy_path: LazyPath, src_builder: *Build, asking_step: ?*Step) Cache.Path {
switch (lazy_path) {
.src_path => |sp| return sp.owner.pathFromRoot(sp.sub_path),
.cwd_relative => |p| return src_builder.pathFromCwd(p),
.src_path => |sp| return .{
.root_dir = sp.owner.build_root,
.sub_path = sp.sub_path,
},
.cwd_relative => |sub_path| return .{
.root_dir = Cache.Directory.cwd(),
.sub_path = sub_path,
},
.generated => |gen| {
var file_path: []const u8 = gen.file.step.owner.pathFromRoot(gen.file.path orelse {
std.debug.lockStdErr();
const stderr = std.io.getStdErr();
dumpBadGetPathHelp(gen.file.step, stderr, src_builder, asking_step) catch {};
std.debug.unlockStdErr();
@panic("misconfigured build script");
});
// TODO make gen.file.path not be absolute and use that as the
// basis for not traversing up too many directories.
var file_path: Cache.Path = .{
.root_dir = gen.file.step.owner.build_root,
.sub_path = gen.file.path orelse {
std.debug.lockStdErr();
const stderr = std.io.getStdErr();
dumpBadGetPathHelp(gen.file.step, stderr, src_builder, asking_step) catch {};
std.debug.unlockStdErr();
@panic("misconfigured build script");
},
};
if (gen.up > 0) {
const cache_root_path = src_builder.cache_root.path orelse
(src_builder.cache_root.join(src_builder.allocator, &.{"."}) catch @panic("OOM"));
for (0..gen.up) |_| {
if (mem.eql(u8, file_path, cache_root_path)) {
if (mem.eql(u8, file_path.sub_path, cache_root_path)) {
// If we hit the cache root and there's still more to go,
// the script attempted to go too far.
dumpBadDirnameHelp(gen.file.step, asking_step,
@ -2384,7 +2384,7 @@ pub const LazyPath = union(enum) {
// path is absolute.
// dirname will return null only if we're at root.
// Typically, we'll stop well before that at the cache root.
file_path = fs.path.dirname(file_path) orelse {
file_path.sub_path = fs.path.dirname(file_path.sub_path) orelse {
dumpBadDirnameHelp(gen.file.step, asking_step,
\\dirname() reached root.
\\No more directories left to go up.
@ -2395,9 +2395,12 @@ pub const LazyPath = union(enum) {
}
}
return src_builder.pathResolve(&.{ file_path, gen.sub_path });
return file_path.join(src_builder.allocator, gen.sub_path) catch @panic("OOM");
},
.dependency => |dep| return .{
.root_dir = dep.dependency.builder.build_root,
.sub_path = dep.sub_path,
},
.dependency => |dep| return dep.dependency.builder.pathFromRoot(dep.sub_path),
}
}
@ -2512,19 +2515,6 @@ pub const InstallDir = union(enum) {
}
};
/// A file recorded as installed, identified by its install directory and the
/// path relative to that directory. Used by the uninstall step.
pub const InstalledFile = struct {
// Which install prefix subdirectory the file was placed in.
dir: InstallDir,
// Path relative to `dir`.
path: []const u8,
/// Duplicates the installed file path and directory.
pub fn dupe(file: InstalledFile, builder: *Build) InstalledFile {
return .{
.dir = file.dir.dupe(builder),
.path = builder.dupe(file.path),
};
}
};
/// This function is intended to be called in the `configure` phase only.
/// It returns an absolute directory path, which is potentially going to be a
/// source of API breakage in the future, so keep that in mind when using this

View File

@ -354,6 +354,19 @@ pub const Manifest = struct {
/// ```
/// var file_contents = cache_hash.files.keys()[file_index].contents.?;
/// ```
/// Adds a `Path` (root directory + sub path) to the manifest's tracked file
/// set, returning the index of the file within `m.files`.
///
/// The path is first resolved against the root directory ("." stands in for
/// an empty component), then matched to one of the cache's known prefixes.
pub fn addFilePath(m: *Manifest, file_path: Path, max_file_size: ?usize) !usize {
const gpa = m.cache.gpa;
// Reserve capacity up front so addFileInner can use getOrPutAssumeCapacity.
try m.files.ensureUnusedCapacity(gpa, 1);
const resolved_path = try fs.path.resolve(gpa, &.{
file_path.root_dir.path orelse ".",
file_path.subPathOrDot(),
});
// Free only on the error path; findPrefixResolved presumably takes
// ownership of resolved_path on success — TODO confirm.
errdefer gpa.free(resolved_path);
const prefixed_path = try m.cache.findPrefixResolved(resolved_path);
return addFileInner(m, prefixed_path, max_file_size);
}
/// Deprecated; use `addFilePath`.
pub fn addFile(self: *Manifest, file_path: []const u8, max_file_size: ?usize) !usize {
assert(self.manifest_file == null);
@ -362,6 +375,10 @@ pub const Manifest = struct {
const prefixed_path = try self.cache.findPrefix(file_path);
errdefer gpa.free(prefixed_path.sub_path);
return addFileInner(self, prefixed_path, max_file_size);
}
fn addFileInner(self: *Manifest, prefixed_path: PrefixedPath, max_file_size: ?usize) !usize {
const gop = self.files.getOrPutAssumeCapacityAdapted(prefixed_path, FilesAdapter{});
if (gop.found_existing) {
gop.key_ptr.updateMaxSize(max_file_size);
@ -990,6 +1007,23 @@ pub const Manifest = struct {
}
self.files.deinit(self.cache.gpa);
}
/// Serializes the manifest's tracked files into `buf` for transmission to the
/// build runner: each entry is one prefix byte (the prefix index plus one,
/// matching `std.zig.Server.Message.PathPrefix`) followed by the sub path,
/// with entries separated (not terminated) by null bytes.
pub fn populateFileSystemInputs(man: *Manifest, buf: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
    // The wire format's prefix enum must stay in sync with the cache's
    // prefix count.
    assert(@typeInfo(std.zig.Server.Message.PathPrefix).Enum.fields.len == man.cache.prefixes_len);
    buf.clearRetainingCapacity();
    const gpa = man.cache.gpa;
    const files = man.files.keys();
    if (files.len == 0) return;
    for (files) |file| {
        const sub_path = file.prefixed_path.sub_path;
        // One byte for the prefix tag, one for the separator.
        try buf.ensureUnusedCapacity(gpa, sub_path.len + 2);
        buf.appendAssumeCapacity(file.prefixed_path.prefix + 1);
        buf.appendSliceAssumeCapacity(sub_path);
        buf.appendAssumeCapacity(0);
    }
    // The null byte is a separator, not a terminator: drop the trailing one.
    buf.items.len -= 1;
}
};
/// On operating systems that support symlinks, does a readlink. On other operating systems,

View File

@ -58,6 +58,20 @@ pub fn openFile(
return p.root_dir.handle.openFile(joined_path, flags);
}
/// Opens the directory at `sub_path` relative to this `Path`.
/// Returns `error.NameTooLong` if the joined path exceeds `fs.max_path_bytes`.
pub fn openDir(
    p: Path,
    sub_path: []const u8,
    args: fs.Dir.OpenOptions,
) fs.Dir.OpenError!fs.Dir {
    var buf: [fs.max_path_bytes]u8 = undefined;
    // When this Path has no sub component, sub_path is already relative to
    // the root directory handle; otherwise join the two with a separator.
    const joined = if (p.sub_path.len == 0)
        sub_path
    else
        std.fmt.bufPrint(&buf, "{s}" ++ fs.path.sep_str ++ "{s}", .{
            p.sub_path, sub_path,
        }) catch return error.NameTooLong;
    return p.root_dir.handle.openDir(joined, args);
}
pub fn makeOpenPath(p: Path, sub_path: []const u8, opts: fs.Dir.OpenOptions) !fs.Dir {
var buf: [fs.max_path_bytes]u8 = undefined;
const joined_path = if (p.sub_path.len == 0) sub_path else p: {
@ -137,16 +151,57 @@ pub fn format(
}
if (fmt_string.len > 0)
std.fmt.invalidFmtError(fmt_string, self);
if (std.fs.path.isAbsolute(self.sub_path)) {
try writer.writeAll(self.sub_path);
return;
}
if (self.root_dir.path) |p| {
try writer.writeAll(p);
try writer.writeAll(fs.path.sep_str);
if (self.sub_path.len > 0) {
try writer.writeAll(fs.path.sep_str);
try writer.writeAll(self.sub_path);
}
return;
}
if (self.sub_path.len > 0) {
try writer.writeAll(self.sub_path);
try writer.writeAll(fs.path.sep_str);
return;
}
try writer.writeByte('.');
}
/// Two paths are equal when both the root directory and the sub path match.
pub fn eql(self: Path, other: Path) bool {
    if (!self.root_dir.eql(other.root_dir)) return false;
    return std.mem.eql(u8, self.sub_path, other.sub_path);
}
/// Returns the sub path, or null when it is empty.
pub fn subPathOpt(self: Path) ?[]const u8 {
    if (self.sub_path.len == 0) return null;
    return self.sub_path;
}
/// Returns the sub path, substituting "." when it is empty.
pub fn subPathOrDot(self: Path) []const u8 {
    if (self.sub_path.len == 0) return ".";
    return self.sub_path;
}
/// Useful to make `Path` a key in `std.ArrayHashMap`.
pub const TableAdapter = struct {
pub const Hash = std.hash.Wyhash;
/// Hashes the sub path bytes, seeding with the root directory's handle so
/// that identical sub paths under different roots hash differently. The
/// handle is a pointer or integer fd depending on the target OS, hence the
/// comptime switch.
pub fn hash(self: TableAdapter, a: Cache.Path) u32 {
_ = self;
const seed = switch (@typeInfo(@TypeOf(a.root_dir.handle.fd))) {
.Pointer => @intFromPtr(a.root_dir.handle.fd),
.Int => @as(u32, @bitCast(a.root_dir.handle.fd)),
else => @compileError("unimplemented hash function"),
};
return @truncate(Hash.hash(seed, a.sub_path));
}
/// Delegates to `Path.eql`; the map index is unused.
pub fn eql(self: TableAdapter, a: Cache.Path, b: Cache.Path, b_index: usize) bool {
_ = self;
_ = b_index;
return a.eql(b);
}
};
const Path = @This();
const std = @import("../../std.zig");
const fs = std.fs;

View File

@ -7,6 +7,16 @@ dependencies: std.ArrayList(*Step),
/// This field is empty during execution of the user's build script, and
/// then populated during dependency loop checking in the build runner.
dependants: std.ArrayListUnmanaged(*Step),
/// Collects the set of files that retrigger this step to run.
///
/// This is used by the build system's implementation of `--watch` but it can
/// also be potentially useful for IDEs to know what effects editing a
/// particular file has.
///
/// Populated within `make`. Implementation may choose to clear and repopulate,
/// retain previous value, or update.
inputs: Inputs,
state: State,
/// Set this field to declare an upper bound on the amount of bytes of memory it will
/// take to run the step. Zero means no limit.
@ -63,6 +73,11 @@ pub const MakeFn = *const fn (step: *Step, prog_node: std.Progress.Node) anyerro
pub const State = enum {
precheck_unstarted,
precheck_started,
/// This is also used to indicate "dirty" steps that have been modified
/// after a previous build completed, in which case, the step may or may
/// not have been completed before. Either way, one or more of its direct
/// file system inputs have been modified, meaning that the step needs to
/// be re-evaluated.
precheck_done,
running,
dependency_failure,
@ -87,6 +102,7 @@ pub const Id = enum {
fmt,
translate_c,
write_file,
update_source_files,
run,
check_file,
check_object,
@ -107,6 +123,7 @@ pub const Id = enum {
.fmt => Fmt,
.translate_c => TranslateC,
.write_file => WriteFile,
.update_source_files => UpdateSourceFiles,
.run => Run,
.check_file => CheckFile,
.check_object => CheckObject,
@ -133,6 +150,28 @@ pub const RemoveDir = @import("Step/RemoveDir.zig");
pub const Run = @import("Step/Run.zig");
pub const TranslateC = @import("Step/TranslateC.zig");
pub const WriteFile = @import("Step/WriteFile.zig");
pub const UpdateSourceFiles = @import("Step/UpdateSourceFiles.zig");
/// The set of file system inputs that retrigger this step when modified.
pub const Inputs = struct {
    /// Maps each watched directory to the basenames of interest within it.
    table: Table,

    /// An empty input set; the initial state of every step.
    pub const init: Inputs = .{
        .table = .{},
    };

    /// Key: directory to watch. Value: file basenames within that directory.
    pub const Table = std.ArrayHashMapUnmanaged(Build.Cache.Path, Files, Build.Cache.Path.TableAdapter, false);

    /// The special file name "." means any changes inside the directory.
    pub const Files = std.ArrayListUnmanaged([]const u8);

    /// Returns whether any watch inputs have been registered yet.
    pub fn populated(inputs: *Inputs) bool {
        return inputs.table.count() != 0;
    }

    /// Frees every per-directory basename list and empties the table,
    /// retaining capacity so it can be repopulated without reallocating.
    pub fn clear(inputs: *Inputs, gpa: Allocator) void {
        for (inputs.table.values()) |*files| files.deinit(gpa);
        inputs.table.clearRetainingCapacity();
    }
};
pub const StepOptions = struct {
id: Id,
@ -153,6 +192,7 @@ pub fn init(options: StepOptions) Step {
.makeFn = options.makeFn,
.dependencies = std.ArrayList(*Step).init(arena),
.dependants = .{},
.inputs = Inputs.init,
.state = .precheck_unstarted,
.max_rss = options.max_rss,
.debug_stack_trace = blk: {
@ -395,6 +435,44 @@ pub fn evalZigProcess(
s.result_cached = ebp_hdr.flags.cache_hit;
result = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]);
},
.file_system_inputs => {
s.clearWatchInputs();
var it = std.mem.splitScalar(u8, body, 0);
while (it.next()) |prefixed_path| {
const prefix_index: std.zig.Server.Message.PathPrefix = @enumFromInt(prefixed_path[0] - 1);
const sub_path = try arena.dupe(u8, prefixed_path[1..]);
const sub_path_dirname = std.fs.path.dirname(sub_path) orelse "";
switch (prefix_index) {
.cwd => {
const path: Build.Cache.Path = .{
.root_dir = Build.Cache.Directory.cwd(),
.sub_path = sub_path_dirname,
};
try addWatchInputFromPath(s, path, std.fs.path.basename(sub_path));
},
.zig_lib => zl: {
if (s.cast(Step.Compile)) |compile| {
if (compile.zig_lib_dir) |lp| {
try addWatchInput(s, lp);
break :zl;
}
}
const path: Build.Cache.Path = .{
.root_dir = s.owner.graph.zig_lib_directory,
.sub_path = sub_path_dirname,
};
try addWatchInputFromPath(s, path, std.fs.path.basename(sub_path));
},
.local_cache => {
const path: Build.Cache.Path = .{
.root_dir = b.cache_root,
.sub_path = sub_path_dirname,
};
try addWatchInputFromPath(s, path, std.fs.path.basename(sub_path));
},
}
}
},
else => {}, // ignore other messages
}
@ -542,19 +620,36 @@ pub fn allocPrintCmd2(
return buf.toOwnedSlice(arena);
}
pub fn cacheHit(s: *Step, man: *std.Build.Cache.Manifest) !bool {
/// Prefer `cacheHitAndWatch` unless you already added watch inputs
/// separately from using the cache system.
///
/// Returns whether the cache manifest reports a hit; also records the
/// result in `s.result_cached`. On manifest failure, converts the error
/// into a step failure naming the offending file when known.
pub fn cacheHit(s: *Step, man: *Build.Cache.Manifest) !bool {
    s.result_cached = man.hit() catch |err| return failWithCacheError(s, man, err);
    return s.result_cached;
}
fn failWithCacheError(s: *Step, man: *const std.Build.Cache.Manifest, err: anyerror) anyerror {
/// Clears previous watch inputs, if any, and then populates watch inputs from
/// the full set of files picked up by the cache manifest.
///
/// Must be accompanied with `writeManifestAndWatch`.
pub fn cacheHitAndWatch(s: *Step, man: *Build.Cache.Manifest) !bool {
    const is_hit = man.hit() catch |err| return failWithCacheError(s, man, err);
    s.result_cached = is_hit;
    // The above call to hit() populates the manifest with files, so in case of
    // a hit, we need to populate watch inputs.
    // (On a miss, `writeManifestAndWatch` populates them after `make` runs.)
    if (is_hit) try setWatchInputsFromManifest(s, man);
    return is_hit;
}
/// Converts a cache manifest error into a step failure. When the manifest
/// recorded which file caused the failure, the message names that file;
/// otherwise the original error is returned unchanged.
fn failWithCacheError(s: *Step, man: *const Build.Cache.Manifest, err: anyerror) anyerror {
    const failed_index = man.failed_file_index orelse return err;
    const prefixed_path = man.files.keys()[failed_index].prefixed_path;
    const prefix = man.cache.prefixes()[prefixed_path.prefix].path orelse "";
    return s.fail("{s}: {s}/{s}", .{ @errorName(err), prefix, prefixed_path.sub_path });
}
pub fn writeManifest(s: *Step, man: *std.Build.Cache.Manifest) !void {
/// Prefer `writeManifestAndWatch` unless you already added watch inputs
/// separately from using the cache system.
pub fn writeManifest(s: *Step, man: *Build.Cache.Manifest) !void {
if (s.test_results.isSuccess()) {
man.writeManifest() catch |err| {
try s.addError("unable to write cache manifest: {s}", .{@errorName(err)});
@ -562,6 +657,142 @@ pub fn writeManifest(s: *Step, man: *std.Build.Cache.Manifest) !void {
}
}
/// Clears previous watch inputs, if any, and then populates watch inputs from
/// the full set of files picked up by the cache manifest.
///
/// Must be accompanied with `cacheHitAndWatch`.
pub fn writeManifestAndWatch(s: *Step, man: *Build.Cache.Manifest) !void {
    // Write first; watch inputs are derived from whatever the manifest holds.
    try writeManifest(s, man);
    try setWatchInputsFromManifest(s, man);
}
/// Replaces this step's watch inputs with one entry per file tracked by the
/// cache manifest, splitting each manifest path into (directory, basename).
fn setWatchInputsFromManifest(s: *Step, man: *Build.Cache.Manifest) !void {
    const arena = s.owner.allocator;
    const prefixes = man.cache.prefixes();
    clearWatchInputs(s);
    for (man.files.keys()) |file| {
        // The file path data is freed when the cache manifest is cleaned up at the end of `make`.
        const sub_path = try arena.dupe(u8, file.prefixed_path.sub_path);
        try addWatchInputFromPath(s, .{
            .root_dir = prefixes[file.prefixed_path.prefix],
            .sub_path = std.fs.path.dirname(sub_path) orelse "",
        }, std.fs.path.basename(sub_path));
    }
}
/// For steps that have a single input that never changes when re-running `make`.
/// Registers the input once; subsequent calls are no-ops.
pub fn singleUnchangingWatchInput(step: *Step, lazy_path: Build.LazyPath) Allocator.Error!void {
    if (step.inputs.populated()) return;
    try step.addWatchInput(lazy_path);
}
/// Drops every previously registered watch input, freeing the per-directory
/// file lists.
pub fn clearWatchInputs(step: *Step) void {
    step.inputs.clear(step.owner.allocator);
}
/// Places a *file* dependency on the path.
///
/// See `addDirectoryWatchInput` for watching whole directories.
pub fn addWatchInput(step: *Step, lazy_file: Build.LazyPath) Allocator.Error!void {
    switch (lazy_file) {
        // Paths rooted in a build root resolve through the owning builder.
        .src_path => |src_path| try addWatchInputFromBuilder(step, src_path.owner, src_path.sub_path),
        .dependency => |d| try addWatchInputFromBuilder(step, d.dependency.builder, d.sub_path),
        .cwd_relative => |path_string| {
            // Root the watch in the process cwd; split into (dirname, basename).
            try addWatchInputFromPath(step, .{
                .root_dir = .{
                    .path = null,
                    .handle = std.fs.cwd(),
                },
                .sub_path = std.fs.path.dirname(path_string) orelse "",
            }, std.fs.path.basename(path_string));
        },
        // Nothing to watch because this dependency edge is modeled instead via `dependants`.
        .generated => {},
    }
}
/// Any changes inside the directory will trigger invalidation.
///
/// See also `addDirectoryWatchInputFromPath` which takes a `Build.Cache.Path` instead.
///
/// Paths derived from this directory should also be manually added via
/// `addDirectoryWatchInputFromPath` if and only if this function returns
/// `true`.
pub fn addDirectoryWatchInput(step: *Step, lazy_directory: Build.LazyPath) Allocator.Error!bool {
    switch (lazy_directory) {
        .src_path => |src_path| try addDirectoryWatchInputFromBuilder(step, src_path.owner, src_path.sub_path),
        .dependency => |d| try addDirectoryWatchInputFromBuilder(step, d.dependency.builder, d.sub_path),
        .cwd_relative => |path_string| {
            // Root the watch in the process cwd; the whole directory is watched.
            try addDirectoryWatchInputFromPath(step, .{
                .root_dir = .{
                    .path = null,
                    .handle = std.fs.cwd(),
                },
                .sub_path = path_string,
            });
        },
        // Nothing to watch because this dependency edge is modeled instead via `dependants`.
        .generated => return false,
    }
    // A watch was registered; the caller must also register derived paths.
    return true;
}
/// Any changes inside the directory will trigger invalidation.
///
/// See also `addDirectoryWatchInput` which takes a `Build.LazyPath` instead.
///
/// This function should only be called when it has been verified that the
/// dependency on `path` is not already accounted for by a `Step` dependency.
/// In other words, before calling this function, first check that the
/// `Build.LazyPath` which this `path` is derived from is not `generated`.
pub fn addDirectoryWatchInputFromPath(step: *Step, path: Build.Cache.Path) !void {
    // "." is the special basename meaning "any change inside this directory".
    return addWatchInputFromPath(step, path, ".");
}
/// Registers a file watch for `sub_path` relative to `builder`'s build root,
/// splitting the path into its containing directory and basename.
fn addWatchInputFromBuilder(step: *Step, builder: *Build, sub_path: []const u8) !void {
    const dirname = std.fs.path.dirname(sub_path) orelse "";
    const basename = std.fs.path.basename(sub_path);
    return addWatchInputFromPath(step, .{
        .root_dir = builder.build_root,
        .sub_path = dirname,
    }, basename);
}
/// Registers a directory watch for `sub_path` relative to `builder`'s build root.
fn addDirectoryWatchInputFromBuilder(step: *Step, builder: *Build, sub_path: []const u8) !void {
    const path: Build.Cache.Path = .{
        .root_dir = builder.build_root,
        .sub_path = sub_path,
    };
    return addDirectoryWatchInputFromPath(step, path);
}
/// Appends `basename` to the watch list of directory `path`, creating the
/// directory's entry in the inputs table on first use.
fn addWatchInputFromPath(step: *Step, path: Build.Cache.Path, basename: []const u8) !void {
    const gpa = step.owner.allocator;
    const entry = try step.inputs.table.getOrPut(gpa, path);
    // First file registered under this directory: start with an empty list.
    if (!entry.found_existing) entry.value_ptr.* = .{};
    try entry.value_ptr.append(gpa, basename);
}
/// Discards the results of the previous run so the step can execute again.
/// Only legal once the step has been marked dirty (`.precheck_done`).
fn reset(step: *Step, gpa: Allocator) void {
    assert(step.state == .precheck_done);

    step.result_error_msgs.clearRetainingCapacity();
    step.result_stderr = "";
    step.result_cached = false;
    step.result_duration_ns = null;
    step.result_peak_rss = 0;
    step.test_results = .{};
    // The error bundle owns allocations; free them before replacing.
    step.result_error_bundle.deinit(gpa);
    step.result_error_bundle = std.zig.ErrorBundle.empty;
}
/// Implementation detail of file watching. Prepares the step for being re-evaluated.
/// Marks this step dirty and propagates to every step that depends on it.
pub fn recursiveReset(step: *Step, gpa: Allocator) void {
    assert(step.state != .precheck_done);
    step.state = .precheck_done;
    step.reset(gpa);
    for (step.dependants.items) |dep| {
        // Already reset (and its dependants with it); avoid redundant work.
        if (dep.state == .precheck_done) continue;
        dep.recursiveReset(gpa);
    }
}
test {
_ = CheckFile;
_ = CheckObject;
@ -577,4 +808,5 @@ test {
_ = Run;
_ = TranslateC;
_ = WriteFile;
_ = UpdateSourceFiles;
}

View File

@ -50,6 +50,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
_ = prog_node;
const b = step.owner;
const check_file: *CheckFile = @fieldParentPtr("step", step);
try step.singleUnchangingWatchInput(check_file.source);
const src_path = check_file.source.getPath2(b, step);
const contents = fs.cwd().readFileAlloc(b.allocator, src_path, check_file.max_bytes) catch |err| {

View File

@ -555,6 +555,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
const b = step.owner;
const gpa = b.allocator;
const check_object: *CheckObject = @fieldParentPtr("step", step);
try step.singleUnchangingWatchInput(check_object.source);
const src_path = check_object.source.getPath2(b, step);
const contents = fs.cwd().readFileAllocOptions(

View File

@ -168,6 +168,8 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
_ = prog_node;
const b = step.owner;
const config_header: *ConfigHeader = @fieldParentPtr("step", step);
if (config_header.style.getPath()) |lp| try step.singleUnchangingWatchInput(lp);
const gpa = b.allocator;
const arena = b.allocator;

View File

@ -41,7 +41,6 @@ pub const Options = struct {
};
pub fn create(owner: *std.Build, options: Options) *InstallDir {
owner.pushInstalledFile(options.install_dir, options.install_subdir);
const install_dir = owner.allocator.create(InstallDir) catch @panic("OOM");
install_dir.* = .{
.step = Step.init(.{
@ -60,12 +59,14 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
_ = prog_node;
const b = step.owner;
const install_dir: *InstallDir = @fieldParentPtr("step", step);
step.clearWatchInputs();
const arena = b.allocator;
const dest_prefix = b.getInstallPath(install_dir.options.install_dir, install_dir.options.install_subdir);
const src_dir_path = install_dir.options.source_dir.getPath2(b, step);
var src_dir = b.build_root.handle.openDir(src_dir_path, .{ .iterate = true }) catch |err| {
return step.fail("unable to open source directory '{}{s}': {s}", .{
b.build_root, src_dir_path, @errorName(err),
const src_dir_path = install_dir.options.source_dir.getPath3(b, step);
const need_derived_inputs = try step.addDirectoryWatchInput(install_dir.options.source_dir);
var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| {
return step.fail("unable to open source directory '{}': {s}", .{
src_dir_path, @errorName(err),
});
};
defer src_dir.close();
@ -89,12 +90,16 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
}
// relative to src build root
const src_sub_path = b.pathJoin(&.{ src_dir_path, entry.path });
const src_sub_path = try src_dir_path.join(arena, entry.path);
const dest_path = b.pathJoin(&.{ dest_prefix, entry.path });
const cwd = fs.cwd();
switch (entry.kind) {
.directory => try cwd.makePath(dest_path),
.directory => {
if (need_derived_inputs) try step.addDirectoryWatchInputFromPath(src_sub_path);
try cwd.makePath(dest_path);
// TODO: set result_cached=false if the directory did not already exist.
},
.file => {
for (install_dir.options.blank_extensions) |ext| {
if (mem.endsWith(u8, entry.path, ext)) {
@ -104,14 +109,14 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
}
const prev_status = fs.Dir.updateFile(
b.build_root.handle,
src_sub_path,
src_sub_path.root_dir.handle,
src_sub_path.sub_path,
cwd,
dest_path,
.{},
) catch |err| {
return step.fail("unable to update file from '{}{s}' to '{s}': {s}", .{
b.build_root, src_sub_path, dest_path, @errorName(err),
return step.fail("unable to update file from '{}' to '{s}': {s}", .{
src_sub_path, dest_path, @errorName(err),
});
};
all_cached = all_cached and prev_status == .fresh;

View File

@ -19,7 +19,6 @@ pub fn create(
dest_rel_path: []const u8,
) *InstallFile {
assert(dest_rel_path.len != 0);
owner.pushInstalledFile(dir, dest_rel_path);
const install_file = owner.allocator.create(InstallFile) catch @panic("OOM");
install_file.* = .{
.step = Step.init(.{
@ -40,6 +39,8 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
_ = prog_node;
const b = step.owner;
const install_file: *InstallFile = @fieldParentPtr("step", step);
try step.singleUnchangingWatchInput(install_file.source);
const full_src_path = install_file.source.getPath2(b, step);
const full_dest_path = b.getInstallPath(install_file.dir, install_file.dest_rel_path);
const cwd = std.fs.cwd();

View File

@ -93,14 +93,11 @@ pub fn getOutputSeparatedDebug(objcopy: *const ObjCopy) ?std.Build.LazyPath {
fn make(step: *Step, prog_node: std.Progress.Node) !void {
const b = step.owner;
const objcopy: *ObjCopy = @fieldParentPtr("step", step);
try step.singleUnchangingWatchInput(objcopy.input_file);
var man = b.graph.cache.obtain();
defer man.deinit();
// Random bytes to make ObjCopy unique. Refresh this with new random
// bytes when ObjCopy implementation is modified incompatibly.
man.hash.add(@as(u32, 0xe18b7baf));
const full_src_path = objcopy.input_file.getPath2(b, step);
_ = try man.addFile(full_src_path, null);
man.hash.addOptionalBytes(objcopy.only_section);

View File

@ -424,6 +424,9 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
item.path.getPath2(b, step),
);
}
if (!step.inputs.populated()) for (options.args.items) |item| {
try step.addWatchInput(item.path);
};
const basename = "options.zig";
@ -520,6 +523,7 @@ test Options {
.query = .{},
.result = try std.zig.system.resolveTargetQuery(.{}),
},
.zig_lib_directory = std.Build.Cache.Directory.cwd(),
};
var builder = try std.Build.create(

View File

@ -2,22 +2,23 @@ const std = @import("std");
const fs = std.fs;
const Step = std.Build.Step;
const RemoveDir = @This();
const LazyPath = std.Build.LazyPath;
pub const base_id: Step.Id = .remove_dir;
step: Step,
dir_path: []const u8,
doomed_path: LazyPath,
pub fn create(owner: *std.Build, dir_path: []const u8) *RemoveDir {
pub fn create(owner: *std.Build, doomed_path: LazyPath) *RemoveDir {
const remove_dir = owner.allocator.create(RemoveDir) catch @panic("OOM");
remove_dir.* = .{
.step = Step.init(.{
.id = base_id,
.name = owner.fmt("RemoveDir {s}", .{dir_path}),
.name = owner.fmt("RemoveDir {s}", .{doomed_path.getDisplayName()}),
.owner = owner,
.makeFn = make,
}),
.dir_path = owner.dupePath(dir_path),
.doomed_path = doomed_path.dupe(owner),
};
return remove_dir;
}
@ -30,14 +31,19 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
const b = step.owner;
const remove_dir: *RemoveDir = @fieldParentPtr("step", step);
b.build_root.handle.deleteTree(remove_dir.dir_path) catch |err| {
step.clearWatchInputs();
try step.addWatchInput(remove_dir.doomed_path);
const full_doomed_path = remove_dir.doomed_path.getPath2(b, step);
b.build_root.handle.deleteTree(full_doomed_path) catch |err| {
if (b.build_root.path) |base| {
return step.fail("unable to recursively delete path '{s}/{s}': {s}", .{
base, remove_dir.dir_path, @errorName(err),
base, full_doomed_path, @errorName(err),
});
} else {
return step.fail("unable to recursively delete path '{s}': {s}", .{
remove_dir.dir_path, @errorName(err),
full_doomed_path, @errorName(err),
});
}
};

View File

@ -632,7 +632,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
// On Windows we don't have rpaths so we have to add .dll search paths to PATH
run.addPathForDynLibs(artifact);
}
const file_path = artifact.installed_path orelse artifact.generated_bin.?.path.?; // the path is guaranteed to be set
const file_path = artifact.installed_path orelse artifact.generated_bin.?.path.?;
try argv_list.append(b.fmt("{s}{s}", .{ pa.prefix, file_path }));
@ -682,7 +682,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
_ = try man.addFile(lazy_path.getPath2(b, step), null);
}
if (!has_side_effects and try step.cacheHit(&man)) {
if (!has_side_effects and try step.cacheHitAndWatch(&man)) {
// cache hit, skip running command
const digest = man.final();
@ -736,7 +736,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
}
try runCommand(run, argv_list.items, has_side_effects, output_dir_path, prog_node);
if (!has_side_effects) try step.writeManifest(&man);
if (!has_side_effects) try step.writeManifestAndWatch(&man);
return;
};
@ -812,7 +812,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
};
}
if (!has_side_effects) try step.writeManifest(&man);
if (!has_side_effects) try step.writeManifestAndWatch(&man);
try populateGeneratedPaths(
arena,

View File

@ -0,0 +1,114 @@
//! Writes data to paths relative to the package root, effectively mutating the
//! package's source files. Be careful with the latter functionality; it should
//! not be used during the normal build process, but as a utility run by a
//! developer with intention to update source files, which will then be
//! committed to version control.
const std = @import("std");
const Step = std.Build.Step;
const fs = std.fs;
const ArrayList = std.ArrayList;
const UpdateSourceFiles = @This();
step: Step,
output_source_files: std.ArrayListUnmanaged(OutputSourceFile),
pub const base_id: Step.Id = .update_source_files;
/// One pending write into the package's source tree.
pub const OutputSourceFile = struct {
    /// What to write to the destination.
    contents: Contents,
    /// Destination path relative to the package root.
    sub_path: []const u8,
};
/// The data source for an output file.
pub const Contents = union(enum) {
    /// Literal bytes provided when the build graph is constructed.
    bytes: []const u8,
    /// Copy from another file, possibly one generated by the build.
    copy: std.Build.LazyPath,
};
/// Allocates and initializes an `UpdateSourceFiles` step owned by `owner`,
/// with no output files registered yet.
pub fn create(owner: *std.Build) *UpdateSourceFiles {
    const gpa = owner.allocator;
    const usf = gpa.create(UpdateSourceFiles) catch @panic("OOM");
    usf.* = .{
        .step = Step.init(.{
            .id = base_id,
            .name = "UpdateSourceFiles",
            .owner = owner,
            .makeFn = make,
        }),
        .output_source_files = .{},
    };
    return usf;
}
/// A path relative to the package root.
///
/// Be careful with this because it updates source files. This should not be
/// used as part of the normal build process, but as a utility occasionally
/// run by a developer with intent to modify source files and then commit
/// those changes to version control.
pub fn addCopyFileToSource(usf: *UpdateSourceFiles, source: std.Build.LazyPath, sub_path: []const u8) void {
    const b = usf.step.owner;
    usf.output_source_files.append(b.allocator, .{
        .contents = .{ .copy = source },
        .sub_path = sub_path,
    }) catch @panic("OOM");
    // Ensure any step producing `source` runs before this one.
    source.addStepDependencies(&usf.step);
}
/// A path relative to the package root.
///
/// Be careful with this because it updates source files. This should not be
/// used as part of the normal build process, but as a utility occasionally
/// run by a developer with intent to modify source files and then commit
/// those changes to version control.
///
/// Unlike `addCopyFileToSource`, no step dependency is needed: the bytes are
/// already available at configure time.
pub fn addBytesToSource(usf: *UpdateSourceFiles, bytes: []const u8, sub_path: []const u8) void {
    const b = usf.step.owner;
    usf.output_source_files.append(b.allocator, .{
        .contents = .{ .bytes = bytes },
        .sub_path = sub_path,
    }) catch @panic("OOM");
}
/// Writes or copies each registered output into the package's source tree,
/// creating parent directories as needed. Reports the step as cached when
/// nothing needed updating.
fn make(step: *Step, prog_node: std.Progress.Node) !void {
    _ = prog_node;
    const b = step.owner;
    const usf: *UpdateSourceFiles = @fieldParentPtr("step", step);

    // Tracks whether any destination actually changed.
    var any_miss = false;
    for (usf.output_source_files.items) |output_source_file| {
        // Make sure the destination's parent directories exist.
        if (fs.path.dirname(output_source_file.sub_path)) |dirname| {
            b.build_root.handle.makePath(dirname) catch |err| {
                return step.fail("unable to make path '{}{s}': {s}", .{
                    b.build_root, dirname, @errorName(err),
                });
            };
        }
        switch (output_source_file.contents) {
            .bytes => |bytes| {
                b.build_root.handle.writeFile(.{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| {
                    return step.fail("unable to write file '{}{s}': {s}", .{
                        b.build_root, output_source_file.sub_path, @errorName(err),
                    });
                };
                // Bytes are rewritten unconditionally, so this always counts
                // as a miss.
                any_miss = true;
            },
            .copy => |file_source| {
                // Watch the source file so edits retrigger this step.
                if (!step.inputs.populated()) try step.addWatchInput(file_source);

                const source_path = file_source.getPath2(b, step);
                // updateFile only rewrites the destination when it is stale.
                const prev_status = fs.Dir.updateFile(
                    fs.cwd(),
                    source_path,
                    b.build_root.handle,
                    output_source_file.sub_path,
                    .{},
                ) catch |err| {
                    return step.fail("unable to update file from '{s}' to '{}{s}': {s}", .{
                        source_path, b.build_root, output_source_file.sub_path, @errorName(err),
                    });
                };
                any_miss = any_miss or prev_status == .stale;
            },
        }
    }

    step.result_cached = !any_miss;
}

View File

@ -1,13 +1,6 @@
//! WriteFile is primarily used to create a directory in an appropriate
//! location inside the local cache which has a set of files that have either
//! been generated during the build, or are copied from the source package.
//!
//! However, this step has an additional capability of writing data to paths
//! relative to the package root, effectively mutating the package's source
//! files. Be careful with the latter functionality; it should not be used
//! during the normal build process, but as a utility run by a developer with
//! intention to update source files, which will then be committed to version
//! control.
//! WriteFile is used to create a directory in an appropriate location inside
//! the local cache which has a set of files that have either been generated
//! during the build, or are copied from the source package.
const std = @import("std");
const Step = std.Build.Step;
const fs = std.fs;
@ -19,8 +12,6 @@ step: Step,
// The elements here are pointers because we need stable pointers for the GeneratedFile field.
files: std.ArrayListUnmanaged(File),
directories: std.ArrayListUnmanaged(Directory),
output_source_files: std.ArrayListUnmanaged(OutputSourceFile),
generated_directory: std.Build.GeneratedFile,
pub const base_id: Step.Id = .write_file;
@ -49,12 +40,23 @@ pub const Directory = struct {
.include_extensions = if (opts.include_extensions) |incs| b.dupeStrings(incs) else null,
};
}
};
};
pub const OutputSourceFile = struct {
contents: Contents,
sub_path: []const u8,
/// Returns whether `path` passes the exclude/include extension filters:
/// excluded extensions always reject; when an include list is present,
/// only paths ending in one of its extensions are accepted.
pub fn pathIncluded(opts: Options, path: []const u8) bool {
    for (opts.exclude_extensions) |ext| {
        if (std.mem.endsWith(u8, path, ext)) return false;
    }
    // No include list means everything not excluded is accepted.
    const incs = opts.include_extensions orelse return true;
    for (incs) |inc| {
        if (std.mem.endsWith(u8, path, inc)) return true;
    }
    return false;
}
};
};
pub const Contents = union(enum) {
@ -73,7 +75,6 @@ pub fn create(owner: *std.Build) *WriteFile {
}),
.files = .{},
.directories = .{},
.output_source_files = .{},
.generated_directory = .{ .step = &write_file.step },
};
return write_file;
@ -150,33 +151,6 @@ pub fn addCopyDirectory(
};
}
/// A path relative to the package root.
/// Be careful with this because it updates source files. This should not be
/// used as part of the normal build process, but as a utility occasionally
/// run by a developer with intent to modify source files and then commit
/// those changes to version control.
pub fn addCopyFileToSource(write_file: *WriteFile, source: std.Build.LazyPath, sub_path: []const u8) void {
const b = write_file.step.owner;
write_file.output_source_files.append(b.allocator, .{
.contents = .{ .copy = source },
.sub_path = sub_path,
}) catch @panic("OOM");
source.addStepDependencies(&write_file.step);
}
/// A path relative to the package root.
/// Be careful with this because it updates source files. This should not be
/// used as part of the normal build process, but as a utility occasionally
/// run by a developer with intent to modify source files and then commit
/// those changes to version control.
pub fn addBytesToSource(write_file: *WriteFile, bytes: []const u8, sub_path: []const u8) void {
const b = write_file.step.owner;
write_file.output_source_files.append(b.allocator, .{
.contents = .{ .bytes = bytes },
.sub_path = sub_path,
}) catch @panic("OOM");
}
/// Returns a `LazyPath` representing the base directory that contains all the
/// files from this `WriteFile`.
pub fn getDirectory(write_file: *WriteFile) std.Build.LazyPath {
@ -200,47 +174,10 @@ fn maybeUpdateName(write_file: *WriteFile) void {
fn make(step: *Step, prog_node: std.Progress.Node) !void {
_ = prog_node;
const b = step.owner;
const arena = b.allocator;
const gpa = arena;
const write_file: *WriteFile = @fieldParentPtr("step", step);
// Writing to source files is kind of an extra capability of this
// WriteFile - arguably it should be a different step. But anyway here
// it is, it happens unconditionally and does not interact with the other
// files here.
var any_miss = false;
for (write_file.output_source_files.items) |output_source_file| {
if (fs.path.dirname(output_source_file.sub_path)) |dirname| {
b.build_root.handle.makePath(dirname) catch |err| {
return step.fail("unable to make path '{}{s}': {s}", .{
b.build_root, dirname, @errorName(err),
});
};
}
switch (output_source_file.contents) {
.bytes => |bytes| {
b.build_root.handle.writeFile(.{ .sub_path = output_source_file.sub_path, .data = bytes }) catch |err| {
return step.fail("unable to write file '{}{s}': {s}", .{
b.build_root, output_source_file.sub_path, @errorName(err),
});
};
any_miss = true;
},
.copy => |file_source| {
const source_path = file_source.getPath2(b, step);
const prev_status = fs.Dir.updateFile(
fs.cwd(),
source_path,
b.build_root.handle,
output_source_file.sub_path,
.{},
) catch |err| {
return step.fail("unable to update file from '{s}' to '{}{s}': {s}", .{
source_path, b.build_root, output_source_file.sub_path, @errorName(err),
});
};
any_miss = any_miss or prev_status == .stale;
},
}
}
step.clearWatchInputs();
// The cache is used here not really as a way to speed things up - because writing
// the data to a file would probably be very fast - but as a way to find a canonical
@ -252,39 +189,73 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
var man = b.graph.cache.obtain();
defer man.deinit();
// Random bytes to make WriteFile unique. Refresh this with
// new random bytes when WriteFile implementation is modified
// in a non-backwards-compatible way.
man.hash.add(@as(u32, 0xd767ee59));
for (write_file.files.items) |file| {
man.hash.addBytes(file.sub_path);
switch (file.contents) {
.bytes => |bytes| {
man.hash.addBytes(bytes);
},
.copy => |file_source| {
_ = try man.addFile(file_source.getPath2(b, step), null);
.copy => |lazy_path| {
const path = lazy_path.getPath3(b, step);
_ = try man.addFilePath(path, null);
try step.addWatchInput(lazy_path);
},
}
}
for (write_file.directories.items) |dir| {
man.hash.addBytes(dir.source.getPath2(b, step));
const open_dir_cache = try arena.alloc(fs.Dir, write_file.directories.items.len);
var open_dirs_count: usize = 0;
defer closeDirs(open_dir_cache[0..open_dirs_count]);
for (write_file.directories.items, open_dir_cache) |dir, *open_dir_cache_elem| {
man.hash.addBytes(dir.sub_path);
for (dir.options.exclude_extensions) |ext| man.hash.addBytes(ext);
if (dir.options.include_extensions) |incs| for (incs) |inc| man.hash.addBytes(inc);
const need_derived_inputs = try step.addDirectoryWatchInput(dir.source);
const src_dir_path = dir.source.getPath3(b, step);
var src_dir = src_dir_path.root_dir.handle.openDir(src_dir_path.subPathOrDot(), .{ .iterate = true }) catch |err| {
return step.fail("unable to open source directory '{}': {s}", .{
src_dir_path, @errorName(err),
});
};
open_dir_cache_elem.* = src_dir;
open_dirs_count += 1;
var it = try src_dir.walk(gpa);
defer it.deinit();
while (try it.next()) |entry| {
if (!dir.options.pathIncluded(entry.path)) continue;
switch (entry.kind) {
.directory => {
if (need_derived_inputs) {
const entry_path = try src_dir_path.join(arena, entry.path);
try step.addDirectoryWatchInputFromPath(entry_path);
}
},
.file => {
const entry_path = try src_dir_path.join(arena, entry.path);
_ = try man.addFilePath(entry_path, null);
},
else => continue,
}
}
}
if (try step.cacheHit(&man)) {
const digest = man.final();
write_file.generated_directory.path = try b.cache_root.join(b.allocator, &.{ "o", &digest });
write_file.generated_directory.path = try b.cache_root.join(arena, &.{ "o", &digest });
step.result_cached = true;
return;
}
const digest = man.final();
const cache_path = "o" ++ fs.path.sep_str ++ digest;
write_file.generated_directory.path = try b.cache_root.join(b.allocator, &.{ "o", &digest });
write_file.generated_directory.path = try b.cache_root.join(arena, &.{ "o", &digest });
var cache_dir = b.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| {
return step.fail("unable to make path '{}{s}': {s}", .{
@ -337,8 +308,9 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
},
}
}
for (write_file.directories.items) |dir| {
const full_src_dir_path = dir.source.getPath2(b, step);
for (write_file.directories.items, open_dir_cache) |dir, already_open_dir| {
const src_dir_path = dir.source.getPath3(b, step);
const dest_dirname = dir.sub_path;
if (dest_dirname.len != 0) {
@ -349,44 +321,25 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
};
}
var src_dir = b.build_root.handle.openDir(full_src_dir_path, .{ .iterate = true }) catch |err| {
return step.fail("unable to open source directory '{s}': {s}", .{
full_src_dir_path, @errorName(err),
});
};
defer src_dir.close();
var it = try already_open_dir.walk(gpa);
defer it.deinit();
while (try it.next()) |entry| {
if (!dir.options.pathIncluded(entry.path)) continue;
var it = try src_dir.walk(b.allocator);
next_entry: while (try it.next()) |entry| {
for (dir.options.exclude_extensions) |ext| {
if (std.mem.endsWith(u8, entry.path, ext)) continue :next_entry;
}
if (dir.options.include_extensions) |incs| {
for (incs) |inc| {
if (std.mem.endsWith(u8, entry.path, inc)) break;
} else {
continue :next_entry;
}
}
const full_src_entry_path = b.pathJoin(&.{ full_src_dir_path, entry.path });
const src_entry_path = try src_dir_path.join(arena, entry.path);
const dest_path = b.pathJoin(&.{ dest_dirname, entry.path });
switch (entry.kind) {
.directory => try cache_dir.makePath(dest_path),
.file => {
const prev_status = fs.Dir.updateFile(
cwd,
full_src_entry_path,
src_entry_path.root_dir.handle,
src_entry_path.sub_path,
cache_dir,
dest_path,
.{},
) catch |err| {
return step.fail("unable to update file from '{s}' to '{}{s}{c}{s}': {s}", .{
full_src_entry_path,
b.cache_root,
cache_path,
fs.path.sep,
dest_path,
@errorName(err),
return step.fail("unable to update file from '{}' to '{}{s}{c}{s}': {s}", .{
src_entry_path, b.cache_root, cache_path, fs.path.sep, dest_path, @errorName(err),
});
};
_ = prev_status;
@ -398,3 +351,7 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
try step.writeManifest(&man);
}
/// Close every directory handle in `dirs`.
fn closeDirs(dirs: []fs.Dir) void {
    for (dirs) |*dir| {
        dir.close();
    }
}

363
lib/std/Build/Watch.zig Normal file
View File

@ -0,0 +1,363 @@
const builtin = @import("builtin");
const std = @import("../std.zig");
const Watch = @This();
const Step = std.Build.Step;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fatal = std.zig.fatal;
/// One entry per directory being watched; parallels `os.handle_table`.
dir_table: DirTable,
/// Platform-specific watch state; `void` on unsupported targets.
os: Os,
/// Wrapping counter bumped by each `update` pass; step registrations whose
/// generation does not match are swept out afterwards.
generation: Generation,

/// Key is the directory to watch which contains one or more files we are
/// interested in noticing changes to.
///
/// Value is generation.
/// NOTE(review): the value type below is `void`; the "Value is generation"
/// sentence above looks stale — generations live in `StepSet` values. Confirm.
const DirTable = std.ArrayHashMapUnmanaged(Cache.Path, void, Cache.Path.TableAdapter, false);

/// Special key of "." means any changes in this directory trigger the steps.
const ReactionSet = std.StringArrayHashMapUnmanaged(StepSet);
/// Maps each interested step to the generation at which its interest was
/// last (re-)registered.
const StepSet = std.AutoArrayHashMapUnmanaged(*Step, Generation);

const Generation = u8;

const Hash = std.hash.Wyhash;
const Cache = std.Build.Cache;
/// Platform-specific implementation of the watcher. Only Linux (fanotify)
/// is implemented here; other targets get `void` and are rejected in the
/// public wrappers.
const Os = switch (builtin.os.tag) {
    .linux => struct {
        const posix = std.posix;

        /// Keyed differently but indexes correspond 1:1 with `dir_table`.
        handle_table: HandleTable,
        /// Single pollfd entry holding the fanotify group file descriptor.
        poll_fds: [1]posix.pollfd,

        const HandleTable = std.ArrayHashMapUnmanaged(FileHandle, ReactionSet, FileHandle.Adapter, false);

        /// Event mask applied to every fanotify mark: events that can alter
        /// a directory's contents or the files within it.
        const fan_mask: std.os.linux.fanotify.MarkMask = .{
            .CLOSE_WRITE = true,
            .CREATE = true,
            .DELETE = true,
            .DELETE_SELF = true,
            .EVENT_ON_CHILD = true,
            .MOVED_FROM = true,
            .MOVED_TO = true,
            .MOVE_SELF = true,
            .ONDIR = true,
        };

        /// A heap-owned copy of a kernel `file_handle`, used as a hash key
        /// that identifies a watched directory regardless of how its path
        /// was spelled.
        const FileHandle = struct {
            handle: *align(1) std.os.linux.file_handle,

            /// Duplicate the variable-length handle into gpa-owned memory.
            /// Caller owns the result; free with `destroy`.
            fn clone(lfh: FileHandle, gpa: Allocator) Allocator.Error!FileHandle {
                const bytes = lfh.slice();
                // Allocation covers the fixed header plus the trailing
                // variable-length f_handle bytes.
                const new_ptr = try gpa.alignedAlloc(
                    u8,
                    @alignOf(std.os.linux.file_handle),
                    @sizeOf(std.os.linux.file_handle) + bytes.len,
                );
                const new_header: *std.os.linux.file_handle = @ptrCast(new_ptr);
                new_header.* = lfh.handle.*;
                const new: FileHandle = .{ .handle = new_header };
                @memcpy(new.slice(), lfh.slice());
                return new;
            }

            /// Free a handle previously produced by `clone`.
            fn destroy(lfh: FileHandle, gpa: Allocator) void {
                const ptr: [*]u8 = @ptrCast(lfh.handle);
                const allocated_slice = ptr[0 .. @sizeOf(std.os.linux.file_handle) + lfh.handle.handle_bytes];
                return gpa.free(allocated_slice);
            }

            /// The variable-length opaque bytes following the header.
            fn slice(lfh: FileHandle) []u8 {
                const ptr: [*]u8 = &lfh.handle.f_handle;
                return ptr[0..lfh.handle.handle_bytes];
            }

            /// Hash/equality over (handle_type, handle bytes) for `HandleTable`.
            const Adapter = struct {
                pub fn hash(self: Adapter, a: FileHandle) u32 {
                    _ = self;
                    // handle_type participates via the hash seed.
                    const unsigned_type: u32 = @bitCast(a.handle.handle_type);
                    return @truncate(Hash.hash(unsigned_type, a.slice()));
                }
                pub fn eql(self: Adapter, a: FileHandle, b: FileHandle, b_index: usize) bool {
                    _ = self;
                    _ = b_index;
                    return a.handle.handle_type == b.handle.handle_type and std.mem.eql(u8, a.slice(), b.slice());
                }
            };
        };

        /// Obtain a gpa-owned kernel file handle for the directory `path`.
        /// Caller owns the returned `FileHandle` (see `FileHandle.destroy`).
        fn getDirHandle(gpa: Allocator, path: std.Build.Cache.Path) !FileHandle {
            var file_handle_buffer: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined;
            var mount_id: i32 = undefined;
            var buf: [std.fs.max_path_bytes]u8 = undefined;
            // A trailing slash is appended to the sub path; presumably so the
            // final component must resolve as a directory — TODO confirm.
            const adjusted_path = if (path.sub_path.len == 0) "./" else std.fmt.bufPrint(&buf, "{s}/", .{
                path.sub_path,
            }) catch return error.NameTooLong;
            const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer);
            stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle);
            try posix.name_to_handle_at(path.root_dir.handle.fd, adjusted_path, stack_ptr, &mount_id, std.os.linux.AT.HANDLE_FID);
            const stack_lfh: FileHandle = .{ .handle = stack_ptr };
            return stack_lfh.clone(gpa);
        }

        /// Drain all queued fanotify events (the fd is non-blocking) and mark
        /// the steps interested in the affected files dirty. Returns whether
        /// any step was marked dirty.
        fn markDirtySteps(w: *Watch, gpa: Allocator) !bool {
            const fan_fd = w.os.getFanFd();
            const fanotify = std.os.linux.fanotify;
            const M = fanotify.event_metadata;
            // Room for a batch of variable-length events per read() call.
            var events_buf: [256 + 4096]u8 = undefined;
            var any_dirty = false;
            while (true) {
                var len = posix.read(fan_fd, &events_buf) catch |err| switch (err) {
                    // Queue fully drained.
                    error.WouldBlock => return any_dirty,
                    else => |e| return e,
                };
                var meta: [*]align(1) M = @ptrCast(&events_buf);
                // Walk the variable-length event records in the buffer.
                while (len >= @sizeOf(M) and meta[0].event_len >= @sizeOf(M) and meta[0].event_len <= len) : ({
                    len -= meta[0].event_len;
                    meta = @ptrCast(@as([*]u8, @ptrCast(meta)) + meta[0].event_len);
                }) {
                    assert(meta[0].vers == M.VERSION);
                    if (meta[0].mask.Q_OVERFLOW) {
                        // Events were lost; we can no longer tell what
                        // changed, so conservatively dirty everything.
                        any_dirty = true;
                        std.log.warn("file system watch queue overflowed; falling back to fstat", .{});
                        markAllFilesDirty(w, gpa);
                        return true;
                    }
                    // With REPORT_DIR_FID | REPORT_NAME, the metadata is
                    // followed by a fid info record for the containing
                    // directory plus the changed entry's NUL-terminated name.
                    const fid: *align(1) fanotify.event_info_fid = @ptrCast(meta + 1);
                    switch (fid.hdr.info_type) {
                        .DFID_NAME => {
                            const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle);
                            const file_name_z: [*:0]u8 = @ptrCast((&file_handle.f_handle).ptr + file_handle.handle_bytes);
                            const file_name = std.mem.span(file_name_z);
                            const lfh: FileHandle = .{ .handle = file_handle };
                            if (w.os.handle_table.getPtr(lfh)) |reaction_set| {
                                // "." subscribes to any change in the directory.
                                if (reaction_set.getPtr(".")) |glob_set|
                                    any_dirty = markStepSetDirty(gpa, glob_set, any_dirty);
                                if (reaction_set.getPtr(file_name)) |step_set|
                                    any_dirty = markStepSetDirty(gpa, step_set, any_dirty);
                            }
                        },
                        else => |t| std.log.warn("unexpected fanotify event '{s}'", .{@tagName(t)}),
                    }
                }
            }
        }

        /// The fanotify group fd is stored as the sole pollfd entry.
        fn getFanFd(os: *const @This()) posix.fd_t {
            return os.poll_fds[0].fd;
        }

        /// Reconcile fanotify marks with the file inputs of `steps`: mark
        /// newly seen directories, stamp each step's interest with the
        /// current generation, then sweep out (and unmark) anything not
        /// re-registered during this pass.
        fn update(w: *Watch, gpa: Allocator, steps: []const *Step) !void {
            const fan_fd = w.os.getFanFd();
            // Add missing marks and note persisted ones.
            for (steps) |step| {
                for (step.inputs.table.keys(), step.inputs.table.values()) |path, *files| {
                    const reaction_set = rs: {
                        const gop = try w.dir_table.getOrPut(gpa, path);
                        if (!gop.found_existing) {
                            const dir_handle = try Os.getDirHandle(gpa, path);
                            // `dir_handle` may already be present in the table in
                            // the case that we have multiple Cache.Path instances
                            // that compare inequal but ultimately point to the same
                            // directory on the file system.
                            // In such case, we must revert adding this directory, but keep
                            // the additions to the step set.
                            const dh_gop = try w.os.handle_table.getOrPut(gpa, dir_handle);
                            if (dh_gop.found_existing) {
                                _ = w.dir_table.pop();
                            } else {
                                // Invariant: the two tables stay index-aligned.
                                assert(dh_gop.index == gop.index);
                                dh_gop.value_ptr.* = .{};
                                posix.fanotify_mark(fan_fd, .{
                                    .ADD = true,
                                    .ONLYDIR = true,
                                }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| {
                                    fatal("unable to watch {}: {s}", .{ path, @errorName(err) });
                                };
                            }
                            break :rs dh_gop.value_ptr;
                        }
                        break :rs &w.os.handle_table.values()[gop.index];
                    };
                    for (files.items) |basename| {
                        const gop = try reaction_set.getOrPut(gpa, basename);
                        if (!gop.found_existing) gop.value_ptr.* = .{};
                        // Stamp with the current generation so the sweep
                        // below keeps this registration.
                        try gop.value_ptr.put(gpa, step, w.generation);
                    }
                }
            }
            {
                // Remove marks for files that are no longer inputs.
                var i: usize = 0;
                while (i < w.os.handle_table.entries.len) {
                    {
                        const reaction_set = &w.os.handle_table.values()[i];
                        var step_set_i: usize = 0;
                        while (step_set_i < reaction_set.entries.len) {
                            const step_set = &reaction_set.values()[step_set_i];
                            var dirent_i: usize = 0;
                            while (dirent_i < step_set.entries.len) {
                                const generations = step_set.values();
                                if (generations[dirent_i] == w.generation) {
                                    dirent_i += 1;
                                    continue;
                                }
                                // Stale registration; swapRemoveAt shifts the
                                // last element into `dirent_i`, so no increment.
                                step_set.swapRemoveAt(dirent_i);
                            }
                            if (step_set.entries.len > 0) {
                                step_set_i += 1;
                                continue;
                            }
                            reaction_set.swapRemoveAt(step_set_i);
                        }
                        if (reaction_set.entries.len > 0) {
                            i += 1;
                            continue;
                        }
                    }
                    // Nothing watches this directory anymore; drop its mark
                    // and its entries from both index-aligned tables.
                    const path = w.dir_table.keys()[i];
                    posix.fanotify_mark(fan_fd, .{
                        .REMOVE = true,
                        .ONLYDIR = true,
                    }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| switch (err) {
                        error.FileNotFound => {}, // Expected, harmless.
                        else => |e| std.log.warn("unable to unwatch '{}': {s}", .{ path, @errorName(e) }),
                    };
                    w.dir_table.swapRemoveAt(i);
                    w.os.handle_table.swapRemoveAt(i);
                }
                // Wrapping add is fine: only equality with the current
                // generation matters.
                w.generation +%= 1;
            }
        }
    },
    else => void,
};
/// Initialize platform-specific file watching state.
///
/// On Linux this opens a fanotify notification group configured for
/// directory file-identifier events with entry names; other platforms are
/// not yet implemented.
pub fn init() !Watch {
    switch (builtin.os.tag) {
        .linux => {
            // NONBLOCK so the event queue can be drained with read() until
            // WouldBlock; CLOEXEC so processes spawned by the build do not
            // inherit the notification group.
            const fan_fd = try std.posix.fanotify_init(.{
                .CLASS = .NOTIF,
                .CLOEXEC = true,
                .NONBLOCK = true,
                .REPORT_NAME = true,
                .REPORT_DIR_FID = true,
                .REPORT_FID = true,
                .REPORT_TARGET_FID = true,
            }, 0);
            return .{
                .dir_table = .{},
                // Inside this comptime branch the target is known to be
                // Linux, so `Os` is the fanotify struct and can be
                // initialized directly (the original nested a redundant
                // switch on builtin.os.tag here).
                .os = .{
                    .handle_table = .{},
                    .poll_fds = .{
                        .{
                            .fd = fan_fd,
                            .events = std.posix.POLL.IN,
                            .revents = undefined,
                        },
                    },
                },
                .generation = 0,
            };
        },
        else => @panic("unimplemented"),
    }
}
/// A (directory-relative file path, step) pair describing one thing the
/// watcher should react to, with hash/equality support for use as an
/// array-hash-map key.
pub const Match = struct {
    /// Relative to the watched directory, the file path that triggers this
    /// match.
    basename: []const u8,
    /// The step to re-run when file corresponding to `basename` is changed.
    step: *Step,

    /// Hash/equality over (step pointer, basename contents).
    pub const Context = struct {
        pub fn hash(self: Context, a: Match) u32 {
            _ = self;
            var hasher = Hash.init(0);
            // Hash the pointer identity of the step plus the path bytes.
            std.hash.autoHash(&hasher, a.step);
            hasher.update(a.basename);
            return @truncate(hasher.final());
        }
        pub fn eql(self: Context, a: Match, b: Match, b_index: usize) bool {
            _ = self;
            _ = b_index;
            return a.step == b.step and std.mem.eql(u8, a.basename, b.basename);
        }
    };
};
/// Reset every step registered anywhere in the watch tables, queueing all
/// of them for a re-run. Used when event information has been lost (e.g.
/// fanotify queue overflow) and we cannot tell which files changed.
fn markAllFilesDirty(w: *Watch, gpa: Allocator) void {
    const reaction_sets = w.os.handle_table.values();
    for (reaction_sets) |reactions| {
        for (reactions.values()) |steps| {
            for (steps.keys()) |s| s.recursiveReset(gpa);
        }
    }
}
/// Reset (mark dirty) every step in `step_set` that is not already in the
/// `.precheck_done` state — steps already there presumably have been reset
/// and await a re-run (confirm against Step state machine). Returns
/// `any_dirty` OR'd with whether anything was reset by this call.
fn markStepSetDirty(gpa: Allocator, step_set: *StepSet, any_dirty: bool) bool {
    var dirtied_here = false;
    for (step_set.keys()) |step| {
        if (step.state == .precheck_done) continue;
        step.recursiveReset(gpa);
        dirtied_here = true;
    }
    return any_dirty or dirtied_here;
}
/// Synchronize the OS watch state with the current set of `steps` inputs:
/// add marks for newly watched directories and drop marks no step
/// references anymore. See `Os.update` for the Linux implementation.
pub fn update(w: *Watch, gpa: Allocator, steps: []const *Step) !void {
    switch (builtin.os.tag) {
        .linux => return Os.update(w, gpa, steps),
        // Compile error (not runtime panic): file watching is a
        // compile-time capability of the target.
        else => @compileError("unimplemented"),
    }
}
/// How long `wait` should block for file system events.
pub const Timeout = union(enum) {
    /// Wait indefinitely.
    none,
    /// Wait at most this many milliseconds.
    ms: u16,

    /// Convert to the millisecond timeout argument of `poll`, where -1
    /// means no timeout.
    pub fn to_i32_ms(t: Timeout) i32 {
        switch (t) {
            .none => return -1,
            .ms => |milliseconds| return milliseconds,
        }
    }
};
/// Outcome of one `wait` call.
pub const WaitResult = enum {
    /// No file system events arrived before the timeout expired.
    timeout,
    /// File system watching triggered on files that were marked as inputs to at least one Step.
    /// Relevant steps have been marked dirty.
    dirty,
    /// File system watching triggered but none of the events were relevant to
    /// what we are listening to. There is nothing to do.
    clean,
};
/// Block until file system activity occurs or `timeout` expires, then
/// classify the outcome. On a `dirty` result the affected steps have
/// already been marked for re-run via `Os.markDirtySteps`.
pub fn wait(w: *Watch, gpa: Allocator, timeout: Timeout) !WaitResult {
    switch (builtin.os.tag) {
        .linux => {
            const n_events = try std.posix.poll(&w.os.poll_fds, timeout.to_i32_ms());
            if (n_events == 0) return .timeout;
            const any_dirty = try Os.markDirtySteps(w, gpa);
            return if (any_dirty) .dirty else .clean;
        },
        else => @compileError("unimplemented"),
    }
}

View File

@ -698,12 +698,42 @@ pub fn inotify_rm_watch(fd: i32, wd: i32) usize {
return syscall2(.inotify_rm_watch, @as(usize, @bitCast(@as(isize, fd))), @as(usize, @bitCast(@as(isize, wd))));
}
pub fn fanotify_init(flags: u32, event_f_flags: u32) usize {
return syscall2(.fanotify_init, flags, event_f_flags);
pub fn fanotify_init(flags: fanotify.InitFlags, event_f_flags: u32) usize {
return syscall2(.fanotify_init, @as(u32, @bitCast(flags)), event_f_flags);
}
pub fn fanotify_mark(fd: i32, flags: u32, mask: u64, dirfd: i32, pathname: ?[*:0]const u8) usize {
return syscall5(.fanotify_mark, @as(usize, @bitCast(@as(isize, fd))), flags, mask, @as(usize, @bitCast(@as(isize, dirfd))), @intFromPtr(pathname));
pub fn fanotify_mark(
fd: fd_t,
flags: fanotify.MarkFlags,
mask: fanotify.MarkMask,
dirfd: fd_t,
pathname: ?[*:0]const u8,
) usize {
return syscall5(
.fanotify_mark,
@bitCast(@as(isize, fd)),
@as(u32, @bitCast(flags)),
@bitCast(mask),
@bitCast(@as(isize, dirfd)),
@intFromPtr(pathname),
);
}
/// Raw wrapper for the `name_to_handle_at` syscall: resolves `pathname`
/// relative to `dirfd` into an opaque, serializable `file_handle` written
/// to `handle`, and stores the containing mount's id in `mount_id`.
/// `handle.handle_bytes` must be set by the caller to the available space.
/// Returns the raw syscall result for errno decoding by the caller.
pub fn name_to_handle_at(
    dirfd: fd_t,
    pathname: [*:0]const u8,
    handle: *std.os.linux.file_handle,
    mount_id: *i32,
    flags: u32,
) usize {
    return syscall5(
        .name_to_handle_at,
        // Sign-extend the fd through isize, consistent with the other *_at
        // wrappers in this file (e.g. fanotify_mark), so special values
        // such as AT.FDCWD keep their canonical representation; the kernel
        // reads only the low 32 bits either way.
        @bitCast(@as(isize, dirfd)),
        @intFromPtr(pathname),
        @intFromPtr(handle),
        @intFromPtr(mount_id),
        flags,
    );
}
pub fn readlink(noalias path: [*:0]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize {
@ -2916,6 +2946,8 @@ pub const AT = struct {
/// Apply to the entire subtree
pub const RECURSIVE = 0x8000;
pub const HANDLE_FID = REMOVEDIR;
};
pub const FALLOC = struct {
@ -4135,57 +4167,155 @@ pub const IN = struct {
pub const ONESHOT = 0x80000000;
};
pub const FAN = struct {
pub const ACCESS = 0x00000001;
pub const MODIFY = 0x00000002;
pub const CLOSE_WRITE = 0x00000008;
pub const CLOSE_NOWRITE = 0x00000010;
pub const OPEN = 0x00000020;
pub const Q_OVERFLOW = 0x00004000;
pub const OPEN_PERM = 0x00010000;
pub const ACCESS_PERM = 0x00020000;
pub const ONDIR = 0x40000000;
pub const EVENT_ON_CHILD = 0x08000000;
pub const CLOSE = CLOSE_WRITE | CLOSE_NOWRITE;
pub const CLOEXEC = 0x00000001;
pub const NONBLOCK = 0x00000002;
pub const CLASS_NOTIF = 0x00000000;
pub const CLASS_CONTENT = 0x00000004;
pub const CLASS_PRE_CONTENT = 0x00000008;
pub const ALL_CLASS_BITS = CLASS_NOTIF | CLASS_CONTENT | CLASS_PRE_CONTENT;
pub const UNLIMITED_QUEUE = 0x00000010;
pub const UNLIMITED_MARKS = 0x00000020;
pub const ALL_INIT_FLAGS = CLOEXEC | NONBLOCK | ALL_CLASS_BITS | UNLIMITED_QUEUE | UNLIMITED_MARKS;
pub const MARK_ADD = 0x00000001;
pub const MARK_REMOVE = 0x00000002;
pub const MARK_DONT_FOLLOW = 0x00000004;
pub const MARK_ONLYDIR = 0x00000008;
pub const MARK_MOUNT = 0x00000010;
pub const MARK_IGNORED_MASK = 0x00000020;
pub const MARK_IGNORED_SURV_MODIFY = 0x00000040;
pub const MARK_FLUSH = 0x00000080;
pub const ALL_MARK_FLAGS = MARK_ADD | MARK_REMOVE | MARK_DONT_FOLLOW | MARK_ONLYDIR | MARK_MOUNT | MARK_IGNORED_MASK | MARK_IGNORED_SURV_MODIFY | MARK_FLUSH;
pub const ALL_EVENTS = ACCESS | MODIFY | CLOSE | OPEN;
pub const ALL_PERM_EVENTS = OPEN_PERM | ACCESS_PERM;
pub const ALL_OUTGOING_EVENTS = ALL_EVENTS | ALL_PERM_EVENTS | Q_OVERFLOW;
pub const ALLOW = 0x01;
pub const DENY = 0x02;
/// Typed bindings for the Linux fanotify API: flags for `fanotify_init`,
/// flags and event masks for `fanotify_mark`, and the record layouts read
/// from the notification group fd. Bit positions mirror the kernel's FAN_*
/// constants.
pub const fanotify = struct {
    /// Flags for `fanotify_init` (kernel FAN_CLOEXEC, FAN_CLASS_*,
    /// FAN_REPORT_*, etc., packed by bit position).
    pub const InitFlags = packed struct(u32) {
        CLOEXEC: bool = false,
        NONBLOCK: bool = false,
        /// Notification class (bits 2-3): NOTIF = plain notification,
        /// CONTENT/PRE_CONTENT = permission-event classes.
        CLASS: enum(u2) {
            NOTIF = 0,
            CONTENT = 1,
            PRE_CONTENT = 2,
        } = .NOTIF,
        UNLIMITED_QUEUE: bool = false,
        UNLIMITED_MARKS: bool = false,
        ENABLE_AUDIT: bool = false,
        REPORT_PIDFD: bool = false,
        REPORT_TID: bool = false,
        REPORT_FID: bool = false,
        REPORT_DIR_FID: bool = false,
        REPORT_NAME: bool = false,
        REPORT_TARGET_FID: bool = false,
        _: u19 = 0,
    };
    /// Flags for `fanotify_mark` (kernel FAN_MARK_* constants).
    pub const MarkFlags = packed struct(u32) {
        ADD: bool = false,
        REMOVE: bool = false,
        DONT_FOLLOW: bool = false,
        ONLYDIR: bool = false,
        MOUNT: bool = false,
        /// Mutually exclusive with `IGNORE`
        IGNORED_MASK: bool = false,
        IGNORED_SURV_MODIFY: bool = false,
        FLUSH: bool = false,
        FILESYSTEM: bool = false,
        EVICTABLE: bool = false,
        /// Mutually exclusive with `IGNORED_MASK`
        IGNORE: bool = false,
        _: u21 = 0,
    };
    /// Event mask for `fanotify_mark` and for `event_metadata.mask`
    /// (kernel FAN_ACCESS .. FAN_ONDIR, packed by bit position into u64).
    pub const MarkMask = packed struct(u64) {
        /// File was accessed
        ACCESS: bool = false,
        /// File was modified
        MODIFY: bool = false,
        /// Metadata changed
        ATTRIB: bool = false,
        /// Writtable file closed
        CLOSE_WRITE: bool = false,
        /// Unwrittable file closed
        CLOSE_NOWRITE: bool = false,
        /// File was opened
        OPEN: bool = false,
        /// File was moved from X
        MOVED_FROM: bool = false,
        /// File was moved to Y
        MOVED_TO: bool = false,
        /// Subfile was created
        CREATE: bool = false,
        /// Subfile was deleted
        DELETE: bool = false,
        /// Self was deleted
        DELETE_SELF: bool = false,
        /// Self was moved
        MOVE_SELF: bool = false,
        /// File was opened for exec
        OPEN_EXEC: bool = false,
        reserved13: u1 = 0,
        /// Event queued overflowed
        Q_OVERFLOW: bool = false,
        /// Filesystem error
        FS_ERROR: bool = false,
        /// File open in perm check
        OPEN_PERM: bool = false,
        /// File accessed in perm check
        ACCESS_PERM: bool = false,
        /// File open/exec in perm check
        OPEN_EXEC_PERM: bool = false,
        reserved19: u8 = 0,
        /// Interested in child events
        EVENT_ON_CHILD: bool = false,
        /// File was renamed
        RENAME: bool = false,
        reserved30: u1 = 0,
        /// Event occurred against dir
        ONDIR: bool = false,
        reserved31: u33 = 0,
    };
    /// Fixed-size header of each event read from the fanotify fd; records
    /// are `event_len` bytes long and may be followed by info records such
    /// as `event_info_fid`.
    pub const event_metadata = extern struct {
        event_len: u32,
        vers: u8,
        reserved: u8,
        metadata_len: u16,
        mask: MarkMask align(8),
        fd: i32,
        pid: i32,

        /// Expected value of `vers` (kernel FANOTIFY_METADATA_VERSION).
        pub const VERSION = 3;
    };
    /// Userspace response for permission events, written back to the fd.
    pub const response = extern struct {
        fd: i32,
        response: u32,
    };
    /// Unique file identifier info record.
    ///
    /// This structure is used for records of types `EVENT_INFO_TYPE.FID`,
    /// `EVENT_INFO_TYPE.DFID` and `EVENT_INFO_TYPE.DFID_NAME`.
    ///
    /// For `EVENT_INFO_TYPE.DFID_NAME` there is additionally a null terminated
    /// name immediately after the file handle.
    pub const event_info_fid = extern struct {
        hdr: event_info_header,
        fsid: kernel_fsid_t,
        /// Following is an opaque struct file_handle that can be passed as
        /// an argument to open_by_handle_at(2).
        handle: [0]u8,
    };
    /// Variable length info record following event metadata.
    pub const event_info_header = extern struct {
        info_type: EVENT_INFO_TYPE,
        pad: u8,
        len: u16,
    };
    /// Discriminator for info records (kernel FAN_EVENT_INFO_TYPE_*).
    pub const EVENT_INFO_TYPE = enum(u8) {
        FID = 1,
        DFID_NAME = 2,
        DFID = 3,
        PIDFD = 4,
        ERROR = 5,
        OLD_DFID_NAME = 10,
        OLD_DFID = 11,
        NEW_DFID_NAME = 12,
        NEW_DFID = 13,
    };
};
pub const fanotify_event_metadata = extern struct {
event_len: u32,
vers: u8,
reserved: u8,
metadata_len: u16,
mask: u64 align(8),
fd: i32,
pid: i32,
/// Kernel `struct file_handle`: a variable-length, opaque identifier for a
/// file or directory, produced by `name_to_handle_at`. `f_handle` is a
/// flexible array member whose length is given by `handle_bytes`.
pub const file_handle = extern struct {
    handle_bytes: u32,
    handle_type: i32,
    f_handle: [0]u8,
};
pub const fanotify_response = extern struct {
fd: i32,
response: u32,
};
/// File system id as reported in fanotify fid records (`event_info_fid.fsid`).
pub const kernel_fsid_t = fsid_t;
pub const fsid_t = [2]i32;
pub const S = struct {
pub const IFMT = 0o170000;

View File

@ -4501,7 +4501,7 @@ pub const FanotifyInitError = error{
PermissionDenied,
} || UnexpectedError;
pub fn fanotify_init(flags: u32, event_f_flags: u32) FanotifyInitError!i32 {
pub fn fanotify_init(flags: std.os.linux.fanotify.InitFlags, event_f_flags: u32) FanotifyInitError!i32 {
const rc = system.fanotify_init(flags, event_f_flags);
switch (errno(rc)) {
.SUCCESS => return @intCast(rc),
@ -4530,16 +4530,28 @@ pub const FanotifyMarkError = error{
NameTooLong,
} || UnexpectedError;
pub fn fanotify_mark(fanotify_fd: i32, flags: u32, mask: u64, dirfd: i32, pathname: ?[]const u8) FanotifyMarkError!void {
pub fn fanotify_mark(
fanotify_fd: fd_t,
flags: std.os.linux.fanotify.MarkFlags,
mask: std.os.linux.fanotify.MarkMask,
dirfd: fd_t,
pathname: ?[]const u8,
) FanotifyMarkError!void {
if (pathname) |path| {
const path_c = try toPosixPath(path);
return fanotify_markZ(fanotify_fd, flags, mask, dirfd, &path_c);
} else {
return fanotify_markZ(fanotify_fd, flags, mask, dirfd, null);
}
return fanotify_markZ(fanotify_fd, flags, mask, dirfd, null);
}
pub fn fanotify_markZ(fanotify_fd: i32, flags: u32, mask: u64, dirfd: i32, pathname: ?[*:0]const u8) FanotifyMarkError!void {
pub fn fanotify_markZ(
fanotify_fd: fd_t,
flags: std.os.linux.fanotify.MarkFlags,
mask: std.os.linux.fanotify.MarkMask,
dirfd: fd_t,
pathname: ?[*:0]const u8,
) FanotifyMarkError!void {
const rc = system.fanotify_mark(fanotify_fd, flags, mask, dirfd, pathname);
switch (errno(rc)) {
.SUCCESS => return,
@ -7274,6 +7286,44 @@ pub fn ptrace(request: u32, pid: pid_t, addr: usize, signal: usize) PtraceError!
};
}
pub const NameToFileHandleAtError = error{
FileNotFound,
NotDir,
OperationNotSupported,
NameTooLong,
Unexpected,
};
pub fn name_to_handle_at(
dirfd: fd_t,
pathname: []const u8,
handle: *std.os.linux.file_handle,
mount_id: *i32,
flags: u32,
) NameToFileHandleAtError!void {
const pathname_c = try toPosixPath(pathname);
return name_to_handle_atZ(dirfd, &pathname_c, handle, mount_id, flags);
}
pub fn name_to_handle_atZ(
dirfd: fd_t,
pathname_z: [*:0]const u8,
handle: *std.os.linux.file_handle,
mount_id: *i32,
flags: u32,
) NameToFileHandleAtError!void {
switch (errno(system.name_to_handle_at(dirfd, pathname_z, handle, mount_id, flags))) {
.SUCCESS => {},
.FAULT => unreachable, // pathname, mount_id, or handle outside accessible address space
.INVAL => unreachable, // bad flags, or handle_bytes too big
.NOENT => return error.FileNotFound,
.NOTDIR => return error.NotDir,
.OPNOTSUPP => return error.OperationNotSupported,
.OVERFLOW => return error.NameTooLong,
else => |err| return unexpectedErrno(err),
}
}
pub const IoCtl_SIOCGIFINDEX_Error = error{
FileSystem,
InterfaceNotFound,

View File

@ -20,10 +20,24 @@ pub const Message = struct {
test_metadata,
/// Body is a TestResults
test_results,
/// Body is a series of strings, delimited by null bytes.
/// Each string is a prefixed file path.
/// The first byte indicates the file prefix path (see prefixes fields
/// of Cache). This byte is sent over the wire incremented so that null
/// bytes are not confused with string terminators.
/// The remaining bytes is the file path relative to that prefix.
/// The prefixes are hard-coded in Compilation.create (cwd, zig lib dir, local cache dir)
file_system_inputs,
_,
};
pub const PathPrefix = enum(u8) {
cwd,
zig_lib,
local_cache,
};
/// Trailing:
/// * extra: [extra_len]u32,
/// * string_bytes: [string_bytes_len]u8,
@ -58,7 +72,7 @@ pub const Message = struct {
};
/// Trailing:
/// * the file system path the emitted binary can be found
/// * file system path where the emitted binary can be found
pub const EmitBinPath = extern struct {
flags: Flags,

View File

@ -235,6 +235,8 @@ astgen_wait_group: WaitGroup = .{},
llvm_opt_bisect_limit: c_int,
file_system_inputs: ?*std.ArrayListUnmanaged(u8),
pub const Emit = struct {
/// Where the output will go.
directory: Directory,
@ -1157,6 +1159,9 @@ pub const CreateOptions = struct {
error_limit: ?Zcu.ErrorInt = null,
global_cc_argv: []const []const u8 = &.{},
/// Tracks all files that can cause the Compilation to be invalidated and need a rebuild.
file_system_inputs: ?*std.ArrayListUnmanaged(u8) = null,
pub const Entry = link.File.OpenOptions.Entry;
};
@ -1332,6 +1337,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.gpa = gpa,
.manifest_dir = try options.local_cache_directory.handle.makeOpenPath("h", .{}),
};
// These correspond to std.zig.Server.Message.PathPrefix.
cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
cache.addPrefix(options.zig_lib_directory);
cache.addPrefix(options.local_cache_directory);
@ -1508,6 +1514,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.force_undefined_symbols = options.force_undefined_symbols,
.link_eh_frame_hdr = link_eh_frame_hdr,
.global_cc_argv = options.global_cc_argv,
.file_system_inputs = options.file_system_inputs,
};
// Prevent some footguns by making the "any" fields of config reflect
@ -2044,6 +2051,9 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
);
};
if (is_hit) {
// In this case the cache hit contains the full set of file system inputs. Nice!
if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
comp.last_update_was_cache_hit = true;
log.debug("CacheMode.whole cache hit for {s}", .{comp.root_name});
const digest = man.final();
@ -2103,12 +2113,24 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
.incremental => {},
}
// From this point we add a preliminary set of file system inputs that
// affects both incremental and whole cache mode. For incremental cache
// mode, the long-lived compiler state will track additional file system
// inputs discovered after this point. For whole cache mode, we rely on
// these inputs to make it past AstGen, and once there, we can rely on
// learning file system inputs from the Cache object.
// For compiling C objects, we rely on the cache hash system to avoid duplicating work.
// Add a Job for each C object.
try comp.c_object_work_queue.ensureUnusedCapacity(comp.c_object_table.count());
for (comp.c_object_table.keys()) |key| {
comp.c_object_work_queue.writeItemAssumeCapacity(key);
}
if (comp.file_system_inputs) |fsi| {
for (comp.c_object_table.keys()) |c_object| {
try comp.appendFileSystemInput(fsi, c_object.src.owner.root, c_object.src.src_path);
}
}
// For compiling Win32 resources, we rely on the cache hash system to avoid duplicating work.
// Add a Job for each Win32 resource file.
@ -2117,6 +2139,12 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
for (comp.win32_resource_table.keys()) |key| {
comp.win32_resource_work_queue.writeItemAssumeCapacity(key);
}
if (comp.file_system_inputs) |fsi| {
for (comp.win32_resource_table.keys()) |win32_resource| switch (win32_resource.src) {
.rc => |f| try comp.appendFileSystemInput(fsi, f.owner.root, f.src_path),
.manifest => continue,
};
}
}
if (comp.module) |zcu| {
@ -2151,12 +2179,25 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
if (zcu.fileByIndex(file_index).mod.isBuiltin()) continue;
comp.astgen_work_queue.writeItemAssumeCapacity(file_index);
}
if (comp.file_system_inputs) |fsi| {
for (zcu.import_table.values()) |file_index| {
const file = zcu.fileByIndex(file_index);
try comp.appendFileSystemInput(fsi, file.mod.root, file.sub_file_path);
}
}
// Put a work item in for checking if any files used with `@embedFile` changed.
try comp.embed_file_work_queue.ensureUnusedCapacity(zcu.embed_table.count());
for (zcu.embed_table.values()) |embed_file| {
comp.embed_file_work_queue.writeItemAssumeCapacity(embed_file);
}
if (comp.file_system_inputs) |fsi| {
const ip = &zcu.intern_pool;
for (zcu.embed_table.values()) |embed_file| {
const sub_file_path = embed_file.sub_file_path.toSlice(ip);
try comp.appendFileSystemInput(fsi, embed_file.owner.root, sub_file_path);
}
}
try comp.work_queue.writeItem(.{ .analyze_mod = std_mod });
if (comp.config.is_test) {
@ -2210,6 +2251,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
switch (comp.cache_use) {
.whole => |whole| {
if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
const digest = man.final();
// Rename the temporary directory into place.
@ -2297,6 +2340,30 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
}
fn appendFileSystemInput(
comp: *Compilation,
file_system_inputs: *std.ArrayListUnmanaged(u8),
root: Cache.Path,
sub_file_path: []const u8,
) Allocator.Error!void {
const gpa = comp.gpa;
const prefixes = comp.cache_parent.prefixes();
try file_system_inputs.ensureUnusedCapacity(gpa, root.sub_path.len + sub_file_path.len + 3);
if (file_system_inputs.items.len > 0) file_system_inputs.appendAssumeCapacity(0);
for (prefixes, 1..) |prefix_directory, i| {
if (prefix_directory.eql(root.root_dir)) {
file_system_inputs.appendAssumeCapacity(@intCast(i));
if (root.sub_path.len > 0) {
file_system_inputs.appendSliceAssumeCapacity(root.sub_path);
file_system_inputs.appendAssumeCapacity(std.fs.path.sep);
}
file_system_inputs.appendSliceAssumeCapacity(sub_file_path);
return;
}
}
std.debug.panic("missing prefix directory: {}, {s}", .{ root, sub_file_path });
}
fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
if (comp.bin_file) |lf| {
// This is needed before reading the error flags.
@ -4204,6 +4271,9 @@ fn workerAstGenFile(
.token = item.data.token,
} }) catch continue;
}
if (res.is_new) if (comp.file_system_inputs) |fsi| {
comp.appendFileSystemInput(fsi, res.file.mod.root, res.file.sub_file_path) catch continue;
};
const imported_path_digest = pt.zcu.filePathDigest(res.file_index);
const imported_root_decl = pt.zcu.fileRootDecl(res.file_index);
break :blk .{ res, imported_path_digest, imported_root_decl };
@ -4574,7 +4644,7 @@ fn reportRetryableEmbedFileError(
const gpa = mod.gpa;
const src_loc = embed_file.src_loc;
const ip = &mod.intern_pool;
const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{
const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}/{s}': {s}", .{
embed_file.owner.root,
embed_file.sub_file_path.toSlice(ip),
@errorName(err),

View File

@ -728,7 +728,7 @@ pub const File = struct {
source_loaded: bool,
tree_loaded: bool,
zir_loaded: bool,
/// Relative to the owning package's root_src_dir.
/// Relative to the owning package's root source directory.
/// Memory is stored in gpa, owned by File.
sub_file_path: []const u8,
/// Whether this is populated depends on `source_loaded`.

View File

@ -2666,7 +2666,7 @@ pub fn reportRetryableAstGenError(
},
};
const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{
const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}/{s}': {s}", .{
file.mod.root, file.sub_file_path, @errorName(err),
});
errdefer err_msg.destroy(gpa);

View File

@ -3227,6 +3227,9 @@ fn buildOutputType(
process.raiseFileDescriptorLimit();
var file_system_inputs: std.ArrayListUnmanaged(u8) = .{};
defer file_system_inputs.deinit(gpa);
const comp = Compilation.create(gpa, arena, .{
.zig_lib_directory = zig_lib_directory,
.local_cache_directory = local_cache_directory,
@ -3350,6 +3353,7 @@ fn buildOutputType(
// than to any particular module. This feature can greatly reduce CLI
// noise when --search-prefix and --mod are combined.
.global_cc_argv = try cc_argv.toOwnedSlice(arena),
.file_system_inputs = &file_system_inputs,
}) catch |err| switch (err) {
error.LibCUnavailable => {
const triple_name = try target.zigTriple(arena);
@ -3433,7 +3437,7 @@ fn buildOutputType(
defer root_prog_node.end();
if (arg_mode == .translate_c) {
return cmdTranslateC(comp, arena, null, root_prog_node);
return cmdTranslateC(comp, arena, null, null, root_prog_node);
}
updateModule(comp, color, root_prog_node) catch |err| switch (err) {
@ -4059,6 +4063,7 @@ fn serve(
var child_pid: ?std.process.Child.Id = null;
const main_progress_node = std.Progress.start(.{});
const file_system_inputs = comp.file_system_inputs.?;
while (true) {
const hdr = try server.receiveMessage();
@ -4067,14 +4072,16 @@ fn serve(
.exit => return cleanExit(),
.update => {
tracy.frameMark();
file_system_inputs.clearRetainingCapacity();
if (arg_mode == .translate_c) {
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var output: Compilation.CImportResult = undefined;
try cmdTranslateC(comp, arena, &output, main_progress_node);
try cmdTranslateC(comp, arena, &output, file_system_inputs, main_progress_node);
defer output.deinit(gpa);
try server.serveStringMessage(.file_system_inputs, file_system_inputs.items);
if (output.errors.errorMessageCount() != 0) {
try server.serveErrorBundle(output.errors);
} else {
@ -4116,6 +4123,7 @@ fn serve(
},
.hot_update => {
tracy.frameMark();
file_system_inputs.clearRetainingCapacity();
if (child_pid) |pid| {
try comp.hotCodeSwap(main_progress_node, pid);
try serveUpdateResults(&server, comp);
@ -4147,6 +4155,12 @@ fn serve(
fn serveUpdateResults(s: *Server, comp: *Compilation) !void {
const gpa = comp.gpa;
if (comp.file_system_inputs) |file_system_inputs| {
assert(file_system_inputs.items.len > 0);
try s.serveStringMessage(.file_system_inputs, file_system_inputs.items);
}
var error_bundle = try comp.getAllErrorsAlloc();
defer error_bundle.deinit(gpa);
if (error_bundle.errorMessageCount() > 0) {
@ -4434,6 +4448,7 @@ fn cmdTranslateC(
comp: *Compilation,
arena: Allocator,
fancy_output: ?*Compilation.CImportResult,
file_system_inputs: ?*std.ArrayListUnmanaged(u8),
prog_node: std.Progress.Node,
) !void {
if (build_options.only_core_functionality) @panic("@translate-c is not available in a zig2.c build");
@ -4454,7 +4469,10 @@ fn cmdTranslateC(
};
if (fancy_output) |p| p.cache_hit = true;
const digest = if (try man.hit()) man.final() else digest: {
const digest = if (try man.hit()) digest: {
if (file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
break :digest man.final();
} else digest: {
if (fancy_output) |p| p.cache_hit = false;
var argv = std.ArrayList([]const u8).init(arena);
switch (comp.config.c_frontend) {
@ -4566,6 +4584,8 @@ fn cmdTranslateC(
@errorName(err),
});
if (file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
break :digest digest;
};
@ -4649,31 +4669,6 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
return cleanExit();
}
// Help text for the `zig build` subcommand. Each `\\` line of this multiline
// string literal is one line of the emitted output, so the literal's content
// is exactly what the user sees (including the trailing blank line).
// NOTE(review): presumably printed by cmdBuild below for `-h`/`--help` and on
// usage errors — the use site is outside this view; confirm against cmdBuild.
// This text is program output, not a comment: do not edit it for style.
const usage_build =
\\Usage: zig build [steps] [options]
\\
\\  Build a project from build.zig.
\\
\\Options:
\\  -freference-trace[=num]      How many lines of reference trace should be shown per compile error
\\  -fno-reference-trace         Disable reference trace
\\  --summary [mode]             Control the printing of the build summary
\\    all                        Print the build summary in its entirety
\\    failures                   (Default) Only print failed steps
\\    none                       Do not print the build summary
\\  -j<N>                        Limit concurrent jobs (default is to use all CPU cores)
\\  --build-file [file]          Override path to build.zig
\\  --cache-dir [path]           Override path to local Zig cache directory
\\  --global-cache-dir [path]    Override path to global Zig cache directory
\\  --zig-lib-dir [arg]          Override path to Zig lib directory
\\  --build-runner [file]        Override path to build runner
\\  --prominent-compile-errors   Buffer compile errors and display at end
\\  --seed [integer]             For shuffling dependency traversal order (default: random)
\\  --fetch                      Exit after fetching dependency tree
\\  -h, --help                   Print this help and exit
\\
;
fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var build_file: ?[]const u8 = null;
var override_lib_dir: ?[]const u8 = try EnvVar.ZIG_LIB_DIR.get(arena);
@ -4696,6 +4691,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var verbose_llvm_cpu_features = false;
var fetch_only = false;
var system_pkg_dir_path: ?[]const u8 = null;
var debug_target: ?[]const u8 = null;
const argv_index_exe = child_argv.items.len;
_ = try child_argv.addOne();
@ -4703,6 +4699,9 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
const self_exe_path = try introspect.findZigExePath(arena);
try child_argv.append(self_exe_path);
const argv_index_zig_lib_dir = child_argv.items.len;
_ = try child_argv.addOne();
const argv_index_build_file = child_argv.items.len;
_ = try child_argv.addOne();
@ -4752,7 +4751,6 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
i += 1;
override_lib_dir = args[i];
try child_argv.appendSlice(&.{ arg, args[i] });
continue;
} else if (mem.eql(u8, arg, "--build-runner")) {
if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
@ -4802,6 +4800,14 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
} else {
warn("Zig was compiled without debug extensions. --debug-compile-errors has no effect.", .{});
}
} else if (mem.eql(u8, arg, "--debug-target")) {
if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
i += 1;
if (build_options.enable_debug_extensions) {
debug_target = args[i];
} else {
warn("Zig was compiled without debug extensions. --debug-target has no effect.", .{});
}
} else if (mem.eql(u8, arg, "--verbose-link")) {
verbose_link = true;
} else if (mem.eql(u8, arg, "--verbose-cc")) {
@ -4860,11 +4866,27 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
});
defer root_prog_node.end();
const target_query: std.Target.Query = .{};
const resolved_target: Package.Module.ResolvedTarget = .{
.result = std.zig.resolveTargetQueryOrFatal(target_query),
.is_native_os = true,
.is_native_abi = true,
// Normally the build runner is compiled for the host target but here is
// some code to help when debugging edits to the build runner so that you
// can make sure it compiles successfully on other targets.
const resolved_target: Package.Module.ResolvedTarget = t: {
if (build_options.enable_debug_extensions) {
if (debug_target) |triple| {
const target_query = try std.Target.Query.parse(.{
.arch_os_abi = triple,
});
break :t .{
.result = std.zig.resolveTargetQueryOrFatal(target_query),
.is_native_os = false,
.is_native_abi = false,
};
}
}
break :t .{
.result = std.zig.resolveTargetQueryOrFatal(.{}),
.is_native_os = true,
.is_native_abi = true,
};
};
const exe_basename = try std.zig.binNameAlloc(arena, .{
@ -4890,6 +4912,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
defer zig_lib_directory.handle.close();
const cwd_path = try process.getCwdAlloc(arena);
child_argv.items[argv_index_zig_lib_dir] = zig_lib_directory.path orelse cwd_path;
const build_root = try findBuildRoot(arena, .{
.cwd_path = cwd_path,
.build_file = build_file,

View File

@ -771,7 +771,7 @@ pub fn addCliTests(b: *std.Build) *Step {
run_run.expectStdErrEqual("All your codebase are belong to us.\n");
run_run.step.dependOn(&init_exe.step);
const cleanup = b.addRemoveDirTree(tmp_path);
const cleanup = b.addRemoveDirTree(.{ .cwd_relative = tmp_path });
cleanup.step.dependOn(&run_test.step);
cleanup.step.dependOn(&run_run.step);
cleanup.step.dependOn(&run_bad.step);
@ -816,7 +816,7 @@ pub fn addCliTests(b: *std.Build) *Step {
});
checkfile.setName("check godbolt.org CLI usage generating valid asm");
const cleanup = b.addRemoveDirTree(tmp_path);
const cleanup = b.addRemoveDirTree(.{ .cwd_relative = tmp_path });
cleanup.step.dependOn(&checkfile.step);
step.dependOn(&cleanup.step);
@ -882,7 +882,7 @@ pub fn addCliTests(b: *std.Build) *Step {
const unformatted_code_utf16 = "\xff\xfe \x00 \x00 \x00 \x00/\x00/\x00 \x00n\x00o\x00 \x00r\x00e\x00a\x00s\x00o\x00n\x00";
const fmt6_path = std.fs.path.join(b.allocator, &.{ tmp_path, "fmt6.zig" }) catch @panic("OOM");
const write6 = b.addWriteFiles();
const write6 = b.addUpdateSourceFiles();
write6.addBytesToSource(unformatted_code_utf16, fmt6_path);
write6.step.dependOn(&run5.step);
@ -902,7 +902,7 @@ pub fn addCliTests(b: *std.Build) *Step {
});
check6.step.dependOn(&run6.step);
const cleanup = b.addRemoveDirTree(tmp_path);
const cleanup = b.addRemoveDirTree(.{ .cwd_relative = tmp_path });
cleanup.step.dependOn(&check6.step);
step.dependOn(&cleanup.step);