Mirror of https://github.com/ziglang/zig.git (synced 2025-12-15 18:53:07 +00:00)
build runner: implement --watch (work-in-progress)
I'm still learning how the fanotify API works, but after playing with it in this commit I think I finally know how to implement it, at least on Linux. This commit does not accomplish the goal, but I want to take the code in a different direction and still be able to reference this point in time by viewing a source control diff. I think the move is going to be saving the file_handle for the parent directory, which, combined with the dirent names, is how we can correlate the events back to the Step instances that have registered file system inputs. I expect implementations on other operating systems to be similar.
parent deea36250f
commit bbd90a562e
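Before the diff, here is a minimal sketch (not part of the commit) of the correlation idea described in the message above: key the lookup on the parent directory's file_handle plus the dirent name that fanotify reports. It reuses only APIs that already appear in the diff below (std.posix.name_to_handle_at, std.os.linux.file_handle); the names DirHandleKey and registerInput are hypothetical.

const std = @import("std");

// Hypothetical key: bytes of the parent directory's kernel file_handle plus
// the dirent name, matching what fanotify reports per event when marks are
// created with REPORT_DIR_FID and REPORT_NAME.
const DirHandleKey = struct {
    dir_handle_bytes: []const u8,
    basename: []const u8,
};

// Sketch: when a step registers a file input, resolve the containing
// directory to a file_handle and remember (handle bytes, basename) as the
// key; an incoming event's (directory handle, name) pair can then be mapped
// back to the steps that registered the file.
fn registerInput(gpa: std.mem.Allocator, dir_fd: std.posix.fd_t, basename: []const u8) !DirHandleKey {
    var buf: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined;
    const handle: *std.os.linux.file_handle = @ptrCast(&buf);
    handle.handle_bytes = buf.len - @sizeOf(std.os.linux.file_handle);
    var mount_id: i32 = undefined;
    try std.posix.name_to_handle_at(dir_fd, ".", handle, &mount_id, 0);
    const f_handle_ptr: [*]u8 = &handle.f_handle;
    return .{
        .dir_handle_bytes = try gpa.dupe(u8, f_handle_ptr[0..handle.handle_bytes]),
        .basename = try gpa.dupe(u8, basename),
    };
}

The commit below stops short of this: it keys its handle_table on the files' own handles and correlates events that way; the message above describes switching to parent-directory handles plus names.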
@@ -8,6 +8,7 @@ const process = std.process;
 const ArrayList = std.ArrayList;
 const File = std.fs.File;
 const Step = std.Build.Step;
+const Allocator = std.mem.Allocator;
 
 pub const root = @import("@build");
 pub const dependencies = @import("@dependencies");
@@ -74,7 +75,6 @@ pub fn main() !void {
 .query = .{},
 .result = try std.zig.system.resolveTargetQuery(.{}),
 },
-.watch = null,
 };
 
 graph.cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
@@ -105,6 +105,7 @@ pub fn main() !void {
 var help_menu = false;
 var steps_menu = false;
 var output_tmp_nonce: ?[16]u8 = null;
+var watch = false;
 
 while (nextArg(args, &arg_idx)) |arg| {
 if (mem.startsWith(u8, arg, "-Z")) {
@@ -229,9 +230,7 @@ pub fn main() !void {
 } else if (mem.eql(u8, arg, "--prominent-compile-errors")) {
 prominent_compile_errors = true;
 } else if (mem.eql(u8, arg, "--watch")) {
-const watch = try arena.create(std.Build.Watch);
-watch.* = std.Build.Watch.init;
-graph.watch = watch;
+watch = true;
 } else if (mem.eql(u8, arg, "-fwine")) {
 builder.enable_wine = true;
 } else if (mem.eql(u8, arg, "-fno-wine")) {
@@ -297,6 +296,7 @@ pub fn main() !void {
 const main_progress_node = std.Progress.start(.{
 .disable_printing = (color == .off),
 });
+defer main_progress_node.end();
 
 builder.debug_log_scopes = debug_log_scopes.items;
 builder.resolveInstallPrefix(install_prefix, dir_list);
@@ -345,13 +345,16 @@ pub fn main() !void {
 .max_rss_is_default = false,
 .max_rss_mutex = .{},
 .skip_oom_steps = skip_oom_steps,
+.watch = watch,
 .memory_blocked_steps = std.ArrayList(*Step).init(arena),
+.step_stack = .{},
 .prominent_compile_errors = prominent_compile_errors,
 
 .claimed_rss = 0,
-.summary = summary orelse if (graph.watch != null) .new else .failures,
+.summary = summary orelse if (watch) .new else .failures,
 .ttyconf = ttyconf,
 .stderr = stderr,
+.thread_pool = undefined,
 };
 
 if (run.max_rss == 0) {
@@ -359,30 +362,311 @@ pub fn main() !void {
 run.max_rss_is_default = true;
 }
 
+const gpa = arena;
+prepare(gpa, arena, builder, targets.items, &run, seed) catch |err| switch (err) {
+error.UncleanExit => process.exit(1),
+else => return err,
+};
+
+var w = Watch.init;
+if (watch) {
+w.fan_fd = try std.posix.fanotify_init(.{
+.CLASS = .NOTIF,
+.CLOEXEC = true,
+.NONBLOCK = true,
+.REPORT_NAME = true,
+.REPORT_DIR_FID = true,
+.REPORT_FID = true,
+.REPORT_TARGET_FID = true,
+}, 0);
+}
+
+try run.thread_pool.init(thread_pool_options);
+defer run.thread_pool.deinit();
+
+rebuild: while (true) {
 runStepNames(
-arena,
+gpa,
 builder,
 targets.items,
 main_progress_node,
-thread_pool_options,
 &run,
-seed,
 ) catch |err| switch (err) {
 error.UncleanExit => {
-if (graph.watch == null)
+assert(!run.watch);
 process.exit(1);
 },
 else => return err,
 };
+if (!watch) return cleanExit();
+
+// Clear all file handles.
+for (w.handle_table.keys(), w.handle_table.values()) |lfh, *step_set| {
+lfh.destroy(gpa);
+step_set.clearAndFree(gpa);
+}
+w.handle_table.clearRetainingCapacity();
+
+// Add missing marks and note persisted ones.
+for (run.step_stack.keys()) |step| {
+for (step.inputs.table.keys(), step.inputs.table.values()) |path, *files| {
+{
+const gop = try w.dir_table.getOrPut(gpa, path);
+gop.value_ptr.* = w.generation;
+if (!gop.found_existing) {
+try std.posix.fanotify_mark(w.fan_fd, .{
+.ADD = true,
+.ONLYDIR = true,
+}, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOpt());
+}
+}
+for (files.items) |basename| {
+const file_handle = try Watch.getFileHandle(gpa, path, basename);
+std.debug.print("watching file_handle '{}{s}' = {}\n", .{
+path, basename, std.fmt.fmtSliceHexLower(file_handle.slice()),
+});
+const gop = try w.handle_table.getOrPut(gpa, file_handle);
+if (!gop.found_existing) gop.value_ptr.* = .{};
+try gop.value_ptr.put(gpa, step, {});
+}
+}
+}
+
+{
+// Remove marks for files that are no longer inputs.
+var i: usize = 0;
+while (i < w.dir_table.entries.len) {
+const generations = w.dir_table.values();
+if (generations[i] == w.generation) {
+i += 1;
+continue;
+}
+
+const path = w.dir_table.keys()[i];
+
+try std.posix.fanotify_mark(w.fan_fd, .{
+.REMOVE = true,
+.ONLYDIR = true,
+}, Watch.fan_mask, path.root_dir.handle.fd, path.subPathOpt());
+
+w.dir_table.swapRemoveAt(i);
+}
+w.generation +%= 1;
+}
+
+// Wait until a file system notification arrives. Read all such events
+// until the buffer is empty. Then wait for a debounce interval, resetting
+// if any more events come in. After the debounce interval has passed,
+// trigger a rebuild on all steps with modified inputs, as well as their
+// recursive dependants.
+const debounce_interval_ms = 10;
+var poll_fds: [1]std.posix.pollfd = .{
+.{
+.fd = w.fan_fd,
+.events = std.posix.POLL.IN,
+.revents = undefined,
+},
+};
+var caption_buf: [40]u8 = undefined;
+const caption = std.fmt.bufPrint(&caption_buf, "Watching {d} Directories", .{
+w.dir_table.entries.len,
+}) catch &caption_buf;
+var debouncing_node = main_progress_node.start(caption, 0);
+var debouncing = false;
+while (true) {
+const timeout: i32 = if (debouncing) debounce_interval_ms else -1;
+const events_len = try std.posix.poll(&poll_fds, timeout);
+if (events_len == 0) {
+debouncing_node.end();
+continue :rebuild;
+}
+if (try markDirtySteps(&w)) {
+if (!debouncing) {
+debouncing = true;
+debouncing_node.end();
+debouncing_node = main_progress_node.start("Debouncing (Change Detected)", 0);
+}
+}
+}
+}
 }
 
+fn markDirtySteps(w: *Watch) !bool {
+const fanotify = std.os.linux.fanotify;
+const M = fanotify.event_metadata;
+var events_buf: [256 + 4096]u8 = undefined;
+var any_dirty = false;
+while (true) {
+var len = std.posix.read(w.fan_fd, &events_buf) catch |err| switch (err) {
+error.WouldBlock => return any_dirty,
+else => |e| return e,
+};
+//std.debug.dump_hex(events_buf[0..len]);
+var meta: [*]align(1) M = @ptrCast(&events_buf);
+while (len >= @sizeOf(M) and meta[0].event_len >= @sizeOf(M) and meta[0].event_len <= len) : ({
+len -= meta[0].event_len;
+meta = @ptrCast(@as([*]u8, @ptrCast(meta)) + meta[0].event_len);
+}) {
+assert(meta[0].vers == M.VERSION);
+std.debug.print("meta = {any}\n", .{meta[0]});
+const fid: *align(1) fanotify.event_info_fid = @ptrCast(meta + 1);
+switch (fid.hdr.info_type) {
+.DFID_NAME => {
+const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle);
+const file_name_z: [*:0]u8 = @ptrCast((&file_handle.f_handle).ptr + file_handle.handle_bytes);
+const file_name = mem.span(file_name_z);
+std.debug.print("DFID_NAME file_handle = {any}, found: '{s}'\n", .{ file_handle.*, file_name });
+const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle };
+if (w.handle_table.get(lfh)) |step_set| {
+for (step_set.keys()) |step| {
+std.debug.print("DFID_NAME marking step '{s}' dirty\n", .{step.name});
+step.state = .precheck_done;
+any_dirty = true;
+}
+} else {
+std.debug.print("DFID_NAME changed file did not match any steps: '{}'\n", .{
+std.fmt.fmtSliceHexLower(lfh.slice()),
+});
+}
+},
+.FID => {
+const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle);
+const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle };
+if (w.handle_table.get(lfh)) |step_set| {
+for (step_set.keys()) |step| {
+std.debug.print("FID marking step '{s}' dirty\n", .{step.name});
+step.state = .precheck_done;
+any_dirty = true;
+}
+} else {
+std.debug.print("FID changed file did not match any steps: '{}'\n", .{
+std.fmt.fmtSliceHexLower(lfh.slice()),
+});
+}
+},
+.DFID => {
+const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle);
+const lfh: Watch.LinuxFileHandle = .{ .handle = file_handle };
+if (w.handle_table.get(lfh)) |step_set| {
+for (step_set.keys()) |step| {
+std.debug.print("DFID marking step '{s}' dirty\n", .{step.name});
+step.state = .precheck_done;
+any_dirty = true;
+}
+} else {
+std.debug.print("DFID changed file did not match any steps\n", .{});
+}
+},
+else => |t| {
+std.debug.panic("TODO: received event type '{s}'", .{@tagName(t)});
+},
+}
+}
+}
+}
+
+const Watch = struct {
+dir_table: DirTable,
+handle_table: HandleTable,
+fan_fd: std.posix.fd_t,
+generation: u8,
+
+const fan_mask: std.os.linux.fanotify.MarkMask = .{
+.CLOSE_WRITE = true,
+.DELETE = true,
+.MOVED_FROM = true,
+.MOVED_TO = true,
+.EVENT_ON_CHILD = true,
+};
+
+const init: Watch = .{
+.dir_table = .{},
+.handle_table = .{},
+.fan_fd = -1,
+.generation = 0,
+};
+
+/// Key is the directory to watch which contains one or more files we are
+/// interested in noticing changes to.
+///
+/// Value is generation.
+const DirTable = std.ArrayHashMapUnmanaged(Cache.Path, u8, Cache.Path.TableAdapter, false);
+
+const HandleTable = std.ArrayHashMapUnmanaged(LinuxFileHandle, StepSet, LinuxFileHandle.Adapter, false);
+const StepSet = std.AutoArrayHashMapUnmanaged(*Step, void);
+
+const Hash = std.hash.Wyhash;
+const Cache = std.Build.Cache;
+
+const LinuxFileHandle = struct {
+handle: *align(1) std.os.linux.file_handle,
+
+fn clone(lfh: LinuxFileHandle, gpa: Allocator) Allocator.Error!LinuxFileHandle {
+const bytes = lfh.slice();
+const new_ptr = try gpa.alignedAlloc(
+u8,
+@alignOf(std.os.linux.file_handle),
+@sizeOf(std.os.linux.file_handle) + bytes.len,
+);
+const new_header: *std.os.linux.file_handle = @ptrCast(new_ptr);
+new_header.* = lfh.handle.*;
+const new: LinuxFileHandle = .{ .handle = new_header };
+@memcpy(new.slice(), lfh.slice());
+return new;
+}
+
+fn destroy(lfh: LinuxFileHandle, gpa: Allocator) void {
+const ptr: [*]u8 = @ptrCast(lfh.handle);
+const allocated_slice = ptr[0 .. @sizeOf(std.os.linux.file_handle) + lfh.handle.handle_bytes];
+return gpa.free(allocated_slice);
+}
+
+fn slice(lfh: LinuxFileHandle) []u8 {
+const ptr: [*]u8 = &lfh.handle.f_handle;
+return ptr[0..lfh.handle.handle_bytes];
+}
+
+const Adapter = struct {
+pub fn hash(self: Adapter, a: LinuxFileHandle) u32 {
+_ = self;
+const unsigned_type: u32 = @bitCast(a.handle.handle_type);
+return @truncate(Hash.hash(unsigned_type, a.slice()));
+}
+pub fn eql(self: Adapter, a: LinuxFileHandle, b: LinuxFileHandle, b_index: usize) bool {
+_ = self;
+_ = b_index;
+return a.handle.handle_type == b.handle.handle_type and mem.eql(u8, a.slice(), b.slice());
+}
+};
+};
+
+fn getFileHandle(gpa: Allocator, path: std.Build.Cache.Path, basename: []const u8) !LinuxFileHandle {
+var file_handle_buffer: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined;
+var mount_id: i32 = undefined;
+var buf: [std.fs.max_path_bytes]u8 = undefined;
+const joined_path = if (path.sub_path.len == 0) basename else path: {
+break :path std.fmt.bufPrint(&buf, "{s}" ++ std.fs.path.sep_str ++ "{s}", .{
+path.sub_path, basename,
+}) catch return error.NameTooLong;
+};
+const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer);
+stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle);
+try std.posix.name_to_handle_at(path.root_dir.handle.fd, joined_path, stack_ptr, &mount_id, 0);
+const stack_lfh: LinuxFileHandle = .{ .handle = stack_ptr };
+return stack_lfh.clone(gpa);
+}
+};
+
 const Run = struct {
 max_rss: u64,
 max_rss_is_default: bool,
 max_rss_mutex: std.Thread.Mutex,
 skip_oom_steps: bool,
+watch: bool,
 memory_blocked_steps: std.ArrayList(*Step),
+step_stack: std.AutoArrayHashMapUnmanaged(*Step, void),
 prominent_compile_errors: bool,
+thread_pool: std.Thread.Pool,
 
 claimed_rss: usize,
 summary: Summary,
@@ -390,18 +674,15 @@ const Run = struct {
 stderr: File,
 };
 
-fn runStepNames(
-arena: std.mem.Allocator,
+fn prepare(
+gpa: Allocator,
+arena: Allocator,
 b: *std.Build,
 step_names: []const []const u8,
-parent_prog_node: std.Progress.Node,
-thread_pool_options: std.Thread.Pool.Options,
 run: *Run,
 seed: u32,
 ) !void {
-const gpa = b.allocator;
-var step_stack: std.AutoArrayHashMapUnmanaged(*Step, void) = .{};
-defer step_stack.deinit(gpa);
+const step_stack = &run.step_stack;
 
 if (step_names.len == 0) {
 try step_stack.put(gpa, b.default_step, {});
@@ -424,7 +705,7 @@ fn runStepNames(
 rand.shuffle(*Step, starting_steps);
 
 for (starting_steps) |s| {
-constructGraphAndCheckForDependencyLoop(b, s, &step_stack, rand) catch |err| switch (err) {
+constructGraphAndCheckForDependencyLoop(b, s, &run.step_stack, rand) catch |err| switch (err) {
 error.DependencyLoopDetected => return uncleanExit(),
 else => |e| return e,
 };
@@ -453,14 +734,19 @@ fn runStepNames(
 return uncleanExit();
 }
 }
+}
 
-var thread_pool: std.Thread.Pool = undefined;
-try thread_pool.init(thread_pool_options);
-defer thread_pool.deinit();
+fn runStepNames(
+gpa: Allocator,
+b: *std.Build,
+step_names: []const []const u8,
+parent_prog_node: std.Progress.Node,
+run: *Run,
+) !void {
+const step_stack = &run.step_stack;
+const thread_pool = &run.thread_pool;
 
 {
-defer parent_prog_node.end();
-
 const step_prog = parent_prog_node.start("steps", step_stack.count());
 defer step_prog.end();
 
@@ -476,7 +762,7 @@ fn runStepNames(
 if (step.state == .skipped_oom) continue;
 
 thread_pool.spawnWg(&wait_group, workerMakeOneStep, .{
-&wait_group, &thread_pool, b, step, step_prog, run,
+&wait_group, b, step, step_prog, run,
 });
 }
 }
@@ -493,8 +779,6 @@ fn runStepNames(
 var failure_count: usize = 0;
 var pending_count: usize = 0;
 var total_compile_errors: usize = 0;
-var compile_error_steps: std.ArrayListUnmanaged(*Step) = .{};
-defer compile_error_steps.deinit(gpa);
 
 for (step_stack.keys()) |s| {
 test_fail_count += s.test_results.fail_count;
@@ -524,7 +808,6 @@ fn runStepNames(
 const compile_errors_len = s.result_error_bundle.errorMessageCount();
 if (compile_errors_len > 0) {
 total_compile_errors += compile_errors_len;
-try compile_error_steps.append(gpa, s);
 }
 },
 }
@@ -537,8 +820,8 @@ fn runStepNames(
 else => false,
 };
 if (failure_count == 0 and failures_only) {
-if (b.graph.watch != null) return;
-return cleanExit();
+if (!run.watch) cleanExit();
+return;
 }
 
 const ttyconf = run.ttyconf;
@@ -561,10 +844,13 @@ fn runStepNames(
 stderr.writeAll("\n") catch {};
 
 // Print a fancy tree with build results.
+var step_stack_copy = try step_stack.clone(gpa);
+defer step_stack_copy.deinit(gpa);
+
 var print_node: PrintNode = .{ .parent = null };
 if (step_names.len == 0) {
 print_node.last = true;
-printTreeStep(b, b.default_step, run, stderr, ttyconf, &print_node, &step_stack) catch {};
+printTreeStep(b, b.default_step, run, stderr, ttyconf, &print_node, &step_stack_copy) catch {};
 } else {
 const last_index = if (run.summary == .all) b.top_level_steps.count() else blk: {
 var i: usize = step_names.len;
@@ -583,44 +869,34 @@ fn runStepNames(
 for (step_names, 0..) |step_name, i| {
 const tls = b.top_level_steps.get(step_name).?;
 print_node.last = i + 1 == last_index;
-printTreeStep(b, &tls.step, run, stderr, ttyconf, &print_node, &step_stack) catch {};
+printTreeStep(b, &tls.step, run, stderr, ttyconf, &print_node, &step_stack_copy) catch {};
 }
 }
 }
 
 if (failure_count == 0) {
-if (b.graph.watch != null) return;
-return cleanExit();
+if (!run.watch) cleanExit();
+return;
 }
 
 // Finally, render compile errors at the bottom of the terminal.
-// We use a separate compile_error_steps array list because step_stack is destructively
-// mutated in printTreeStep above.
 if (run.prominent_compile_errors and total_compile_errors > 0) {
-for (compile_error_steps.items) |s| {
+for (step_stack.keys()) |s| {
 if (s.result_error_bundle.errorMessageCount() > 0) {
 s.result_error_bundle.renderToStdErr(renderOptions(ttyconf));
 }
 }
 
-if (b.graph.watch != null) return uncleanExit();
-
+if (!run.watch) {
 // Signal to parent process that we have printed compile errors. The
 // parent process may choose to omit the "following command failed"
 // line in this case.
+std.debug.lockStdErr();
 process.exit(2);
 }
-
-return uncleanExit();
-}
-
-fn uncleanExit() error{UncleanExit}!void {
-if (builtin.mode == .Debug) {
-return error.UncleanExit;
-} else {
-std.debug.lockStdErr();
-process.exit(1);
 }
+
+if (!run.watch) return uncleanExit();
 }
 
 const PrintNode = struct {
@@ -912,12 +1188,13 @@ fn constructGraphAndCheckForDependencyLoop(
 
 fn workerMakeOneStep(
 wg: *std.Thread.WaitGroup,
-thread_pool: *std.Thread.Pool,
 b: *std.Build,
 s: *Step,
 prog_node: std.Progress.Node,
 run: *Run,
 ) void {
+const thread_pool = &run.thread_pool;
+
 // First, check the conditions for running this step. If they are not met,
 // then we return without doing the step, relying on another worker to
 // queue this step up again when dependencies are met.
@@ -997,7 +1274,7 @@ fn workerMakeOneStep(
 // Successful completion of a step, so we queue up its dependants as well.
 for (s.dependants.items) |dep| {
 thread_pool.spawnWg(wg, workerMakeOneStep, .{
-wg, thread_pool, b, dep, prog_node, run,
+wg, b, dep, prog_node, run,
 });
 }
 }
@@ -1022,7 +1299,7 @@ fn workerMakeOneStep(
 remaining -= dep.max_rss;
 
 thread_pool.spawnWg(wg, workerMakeOneStep, .{
-wg, thread_pool, b, dep, prog_node, run,
+wg, b, dep, prog_node, run,
 });
 } else {
 run.memory_blocked_steps.items[i] = dep;
@@ -1242,13 +1519,22 @@ fn argsRest(args: [][:0]const u8, idx: usize) ?[][:0]const u8 {
 return args[idx..];
 }
 
+/// Perhaps in the future there could be an Advanced Options flag such as
+/// --debug-build-runner-leaks which would make this function return instead of
+/// calling exit.
 fn cleanExit() void {
-// Perhaps in the future there could be an Advanced Options flag such as
-// --debug-build-runner-leaks which would make this function return instead
-// of calling exit.
+std.debug.lockStdErr();
 process.exit(0);
 }
 
+/// Perhaps in the future there could be an Advanced Options flag such as
+/// --debug-build-runner-leaks which would make this function return instead of
+/// calling exit.
+fn uncleanExit() error{UncleanExit} {
+std.debug.lockStdErr();
+process.exit(1);
+}
+
 const Color = std.zig.Color;
 const Summary = enum { all, new, failures, none };
 
@@ -119,61 +119,6 @@ pub const Graph = struct {
 needed_lazy_dependencies: std.StringArrayHashMapUnmanaged(void) = .{},
 /// Information about the native target. Computed before build() is invoked.
 host: ResolvedTarget,
-/// When `--watch` is provided, collects the set of files that should be
-/// watched and the state to required to poll the system for changes.
-watch: ?*Watch,
-};
-
-pub const Watch = struct {
-table: Table,
-
-pub const init: Watch = .{
-.table = .{},
-};
-
-/// Key is the directory to watch which contains one or more files we are
-/// interested in noticing changes to.
-pub const Table = std.ArrayHashMapUnmanaged(Cache.Path, ReactionSet, TableContext, false);
-
-const Hash = std.hash.Wyhash;
-
-pub const TableContext = struct {
-pub fn hash(self: TableContext, a: Cache.Path) u32 {
-_ = self;
-const seed: u32 = @bitCast(a.root_dir.handle.fd);
-return @truncate(Hash.hash(seed, a.sub_path));
-}
-pub fn eql(self: TableContext, a: Cache.Path, b: Cache.Path, b_index: usize) bool {
-_ = self;
-_ = b_index;
-return a.eql(b);
-}
-};
-
-pub const ReactionSet = std.ArrayHashMapUnmanaged(Match, void, Match.Context, false);
-
-pub const Match = struct {
-/// Relative to the watched directory, the file path that triggers this
-/// match.
-basename: []const u8,
-/// The step to re-run when file corresponding to `basename` is changed.
-step: *Step,
-
-pub const Context = struct {
-pub fn hash(self: Context, a: Match) u32 {
-_ = self;
-var hasher = Hash.init(0);
-std.hash.autoHash(&hasher, a.step);
-hasher.update(a.basename);
-return @truncate(hasher.final());
-}
-pub fn eql(self: Context, a: Match, b: Match, b_index: usize) bool {
-_ = self;
-_ = b_index;
-return a.step == b.step and mem.eql(u8, a.basename, b.basename);
-}
-};
-};
 };
 
 const AvailableDeps = []const struct { []const u8, []const u8 };
@@ -7,6 +7,16 @@ dependencies: std.ArrayList(*Step),
 /// This field is empty during execution of the user's build script, and
 /// then populated during dependency loop checking in the build runner.
 dependants: std.ArrayListUnmanaged(*Step),
+/// Collects the set of files that retrigger this step to run.
+///
+/// This is used by the build system's implementation of `--watch` but it can
+/// also be potentially useful for IDEs to know what effects editing a
+/// particular file has.
+///
+/// Populated within `make`. Implementation may choose to clear and repopulate,
+/// retain previous value, or update.
+inputs: Inputs,
+
 state: State,
 /// Set this field to declare an upper bound on the amount of bytes of memory it will
 /// take to run the step. Zero means no limit.
@@ -63,6 +73,11 @@ pub const MakeFn = *const fn (step: *Step, prog_node: std.Progress.Node) anyerro
 pub const State = enum {
 precheck_unstarted,
 precheck_started,
+/// This is also used to indicate "dirty" steps that have been modified
+/// after a previous build completed, in which case, the step may or may
+/// not have been completed before. Either way, one or more of its direct
+/// file system inputs have been modified, meaning that the step needs to
+/// be re-evaluated.
 precheck_done,
 running,
 dependency_failure,
@@ -134,6 +149,26 @@ pub const Run = @import("Step/Run.zig");
 pub const TranslateC = @import("Step/TranslateC.zig");
 pub const WriteFile = @import("Step/WriteFile.zig");
 
+pub const Inputs = struct {
+table: Table,
+
+pub const init: Inputs = .{
+.table = .{},
+};
+
+pub const Table = std.ArrayHashMapUnmanaged(Build.Cache.Path, Files, Build.Cache.Path.TableAdapter, false);
+pub const Files = std.ArrayListUnmanaged([]const u8);
+
+pub fn populated(inputs: *Inputs) bool {
+return inputs.table.count() != 0;
+}
+
+pub fn clear(inputs: *Inputs, gpa: Allocator) void {
+for (inputs.table.values()) |*files| files.deinit(gpa);
+inputs.table.clearRetainingCapacity();
+}
+};
+
 pub const StepOptions = struct {
 id: Id,
 name: []const u8,
@@ -153,6 +188,7 @@ pub fn init(options: StepOptions) Step {
 .makeFn = options.makeFn,
 .dependencies = std.ArrayList(*Step).init(arena),
 .dependants = .{},
+.inputs = Inputs.init,
 .state = .precheck_unstarted,
 .max_rss = options.max_rss,
 .debug_stack_trace = blk: {
@@ -542,19 +578,19 @@ pub fn allocPrintCmd2(
 return buf.toOwnedSlice(arena);
 }
 
-pub fn cacheHit(s: *Step, man: *std.Build.Cache.Manifest) !bool {
+pub fn cacheHit(s: *Step, man: *Build.Cache.Manifest) !bool {
 s.result_cached = man.hit() catch |err| return failWithCacheError(s, man, err);
 return s.result_cached;
 }
 
-fn failWithCacheError(s: *Step, man: *const std.Build.Cache.Manifest, err: anyerror) anyerror {
+fn failWithCacheError(s: *Step, man: *const Build.Cache.Manifest, err: anyerror) anyerror {
 const i = man.failed_file_index orelse return err;
 const pp = man.files.keys()[i].prefixed_path;
 const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
 return s.fail("{s}: {s}/{s}", .{ @errorName(err), prefix, pp.sub_path });
 }
 
-pub fn writeManifest(s: *Step, man: *std.Build.Cache.Manifest) !void {
+pub fn writeManifest(s: *Step, man: *Build.Cache.Manifest) !void {
 if (s.test_results.isSuccess()) {
 man.writeManifest() catch |err| {
 try s.addError("unable to write cache manifest: {s}", .{@errorName(err)});
@@ -568,44 +604,37 @@ fn oom(err: anytype) noreturn {
 }
 }
 
-pub fn addWatchInput(step: *Step, lazy_path: std.Build.LazyPath) void {
+pub fn addWatchInput(step: *Step, lazy_path: Build.LazyPath) void {
 errdefer |err| oom(err);
-const w = step.owner.graph.watch orelse return;
 switch (lazy_path) {
-.src_path => |src_path| try addWatchInputFromBuilder(step, w, src_path.owner, src_path.sub_path),
-.dependency => |d| try addWatchInputFromBuilder(step, w, d.dependency.builder, d.sub_path),
+.src_path => |src_path| try addWatchInputFromBuilder(step, src_path.owner, src_path.sub_path),
+.dependency => |d| try addWatchInputFromBuilder(step, d.dependency.builder, d.sub_path),
 .cwd_relative => |path_string| {
-try addWatchInputFromPath(w, .{
+try addWatchInputFromPath(step, .{
 .root_dir = .{
 .path = null,
 .handle = std.fs.cwd(),
 },
 .sub_path = std.fs.path.dirname(path_string) orelse "",
-}, .{
-.step = step,
-.basename = std.fs.path.basename(path_string),
-});
+}, std.fs.path.basename(path_string));
 },
 // Nothing to watch because this dependency edge is modeled instead via `dependants`.
 .generated => {},
 }
 }
 
-fn addWatchInputFromBuilder(step: *Step, w: *std.Build.Watch, builder: *std.Build, sub_path: []const u8) !void {
-return addWatchInputFromPath(w, .{
+fn addWatchInputFromBuilder(step: *Step, builder: *Build, sub_path: []const u8) !void {
+return addWatchInputFromPath(step, .{
 .root_dir = builder.build_root,
 .sub_path = std.fs.path.dirname(sub_path) orelse "",
-}, .{
-.step = step,
-.basename = std.fs.path.basename(sub_path),
-});
+}, std.fs.path.basename(sub_path));
 }
 
-fn addWatchInputFromPath(w: *std.Build.Watch, path: std.Build.Cache.Path, match: std.Build.Watch.Match) !void {
-const gpa = match.step.owner.allocator;
-const gop = try w.table.getOrPut(gpa, path);
+fn addWatchInputFromPath(step: *Step, path: Build.Cache.Path, basename: []const u8) !void {
+const gpa = step.owner.allocator;
+const gop = try step.inputs.table.getOrPut(gpa, path);
 if (!gop.found_existing) gop.value_ptr.* = .{};
-try gop.value_ptr.put(gpa, match, {});
+try gop.value_ptr.append(gpa, basename);
 }
 
 test {
@@ -39,7 +39,10 @@ fn make(step: *Step, prog_node: std.Progress.Node) !void {
 _ = prog_node;
 const b = step.owner;
 const install_file: *InstallFile = @fieldParentPtr("step", step);
-step.addWatchInput(install_file.source);
+// Inputs never change when re-running `make`.
+if (!step.inputs.populated()) step.addWatchInput(install_file.source);
+
 const full_src_path = install_file.source.getPath2(b, step);
 const full_dest_path = b.getInstallPath(install_file.dir, install_file.dest_rel_path);
 const cwd = std.fs.cwd();