diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig
index d9ff03a3fe..030f3f0a28 100644
--- a/lib/std/Progress.zig
+++ b/lib/std/Progress.zig
@@ -234,6 +234,28 @@ pub const Node = struct {
         _ = @atomicRmw(u32, &storage.completed_count, .Add, 1, .monotonic);
     }
 
+    /// Thread-safe. Bytes after the first 0 byte in `new_name` are ignored.
+    pub fn setName(n: Node, new_name: []const u8) void {
+        const index = n.index.unwrap() orelse return;
+        const storage = storageByIndex(index);
+
+        const name_len = @min(max_name_len, std.mem.indexOfScalar(u8, new_name, 0) orelse new_name.len);
+
+        copyAtomicStore(storage.name[0..name_len], new_name[0..name_len]);
+        if (name_len < storage.name.len)
+            @atomicStore(u8, &storage.name[name_len], 0, .monotonic);
+    }
+
+    /// Gets the name of this `Node`.
+    /// A pointer to the returned array can later be passed to `setName` to restore the name.
+    pub fn getName(n: Node) [max_name_len]u8 {
+        var dest: [max_name_len]u8 align(@alignOf(usize)) = undefined;
+        if (n.index.unwrap()) |index| {
+            copyAtomicLoad(&dest, &storageByIndex(index).name);
+        }
+        return dest;
+    }
+
     /// Thread-safe.
     pub fn setCompletedItems(n: Node, completed_items: usize) void {
         const index = n.index.unwrap() orelse return;
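A minimal sketch of the two new APIs in isolation, assuming only what the hunk above shows (`parent` is a hypothetical existing `std.Progress.Node`):

    const node = parent.start("original name", 0);
    const saved = node.getName();   // atomic copy of the fixed-size name buffer
    node.setName("temporary name"); // renames in place; no child node is created
    // ... do some work under the temporary name ...
    node.setName(&saved);           // restores it; bytes after the first 0 byte are ignored
    node.end();

Returning the name by value means the copy survives however the node is renamed in the meantime, which the `Zcu` changes below rely on.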
diff --git a/src/Compilation.zig b/src/Compilation.zig
index fe4671848d..74f841723e 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -255,7 +255,7 @@ test_filters: []const []const u8,
 test_name_prefix: ?[]const u8,
 
 link_task_wait_group: WaitGroup = .{},
-work_queue_progress_node: std.Progress.Node = .none,
+link_prog_node: std.Progress.Node = std.Progress.Node.none,
 
 llvm_opt_bisect_limit: c_int,
 
@@ -2795,6 +2795,17 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
         }
     }
 
+    // The linker progress node is set up here instead of in `performAllTheWork`, because
+    // we also want it around during `flush`.
+    const have_link_node = comp.bin_file != null;
+    if (have_link_node) {
+        comp.link_prog_node = main_progress_node.start("Linking", 0);
+    }
+    defer if (have_link_node) {
+        comp.link_prog_node.end();
+        comp.link_prog_node = .none;
+    };
+
     try comp.performAllTheWork(main_progress_node);
 
     if (comp.zcu) |zcu| {
@@ -2843,7 +2854,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
 
     switch (comp.cache_use) {
         .none, .incremental => {
-            try flush(comp, arena, .main, main_progress_node);
+            try flush(comp, arena, .main);
         },
         .whole => |whole| {
             if (comp.file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
@@ -2919,7 +2930,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
                 }
             }
 
-            try flush(comp, arena, .main, main_progress_node);
+            try flush(comp, arena, .main);
 
             // Calling `flush` may have produced errors, in which case the
             // cache manifest must not be written.
@@ -3009,13 +3020,12 @@ fn flush(
     comp: *Compilation,
     arena: Allocator,
     tid: Zcu.PerThread.Id,
-    prog_node: std.Progress.Node,
 ) !void {
     if (comp.zcu) |zcu| {
         if (zcu.llvm_object) |llvm_object| {
             // Emit the ZCU object from LLVM now; it's required to flush the output file.
             // If there's an output file, it wants to decide where the LLVM object goes!
-            const sub_prog_node = prog_node.start("LLVM Emit Object", 0);
+            const sub_prog_node = comp.link_prog_node.start("LLVM Emit Object", 0);
             defer sub_prog_node.end();
             try llvm_object.emit(.{
                 .pre_ir_path = comp.verbose_llvm_ir,
@@ -3053,7 +3063,7 @@ fn flush(
     }
     if (comp.bin_file) |lf| {
         // This is needed before reading the error flags.
-        lf.flush(arena, tid, prog_node) catch |err| switch (err) {
+        lf.flush(arena, tid, comp.link_prog_node) catch |err| switch (err) {
             error.LinkFailure => {}, // Already reported.
             error.OutOfMemory => return error.OutOfMemory,
         };
@@ -4172,28 +4182,15 @@ pub fn addWholeFileError(
     }
 }
 
-pub fn performAllTheWork(
+fn performAllTheWork(
     comp: *Compilation,
     main_progress_node: std.Progress.Node,
 ) JobError!void {
-    comp.work_queue_progress_node = main_progress_node;
-    defer comp.work_queue_progress_node = .none;
-
+    // Regardless of errors, `comp.zcu` needs to update its generation number.
     defer if (comp.zcu) |zcu| {
-        zcu.sema_prog_node.end();
-        zcu.sema_prog_node = .none;
-        zcu.codegen_prog_node.end();
-        zcu.codegen_prog_node = .none;
-
         zcu.generation += 1;
     };
 
-    try comp.performAllTheWorkInner(main_progress_node);
-}
-
-fn performAllTheWorkInner(
-    comp: *Compilation,
-    main_progress_node: std.Progress.Node,
-) JobError!void {
     // Here we queue up all the AstGen tasks first, followed by C object compilation.
     // We wait until the AstGen tasks are all completed before proceeding to the
     // (at least for now) single-threaded main work queue. However, C object compilation
@@ -4513,8 +4510,24 @@ fn performAllTheWorkInner(
         }
 
         zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
-        zcu.codegen_prog_node = if (comp.bin_file != null) main_progress_node.start("Code Generation", 0) else .none;
+        if (comp.bin_file != null) {
+            zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
+        }
+        // We increment `pending_codegen_jobs` so that it doesn't reach 0 until after analysis finishes.
+        // That prevents the "Code Generation" node from constantly disappearing and reappearing when
+        // we're probably going to analyze more functions at some point.
+        assert(zcu.pending_codegen_jobs.swap(1, .monotonic) == 0); // don't let this become 0 until analysis finishes
     }
+    // When analysis ends, delete the progress nodes for "Semantic Analysis" and possibly "Code Generation".
+    defer if (comp.zcu) |zcu| {
+        zcu.sema_prog_node.end();
+        zcu.sema_prog_node = .none;
+        if (zcu.pending_codegen_jobs.rmw(.Sub, 1, .monotonic) == 1) {
+            // Decremented to 0, so all done.
+            zcu.codegen_prog_node.end();
+            zcu.codegen_prog_node = .none;
+        }
+    };
 
     if (!comp.separateCodegenThreadOk()) {
         // Waits until all input files have been parsed.
@@ -4583,6 +4596,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
                 .status = .init(.pending),
                 .value = undefined,
             };
+            assert(zcu.pending_codegen_jobs.rmw(.Add, 1, .monotonic) > 0); // the "Code Generation" node hasn't been ended
            if (comp.separateCodegenThreadOk()) {
                 // `workerZcuCodegen` takes ownership of `air`.
                 comp.thread_pool.spawnWgId(&comp.link_task_wait_group, workerZcuCodegen, .{ comp, func.func, air, shared_mir });
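The `pending_codegen_jobs` handshake, distilled into a standalone model (the function names here are illustrative, not compiler code; only the atomic operations mirror the hunks above): analysis holds one "bias" count, so the total can only reach zero after analysis itself has finished, and whichever thread performs the decrement that reaches zero ends the "Code Generation" node exactly once.

    const std = @import("std");

    var pending: std.atomic.Value(u32) = .init(0);

    fn analysisBegin() void {
        // Take the bias reference; the count must have been zero beforehand.
        std.debug.assert(pending.swap(1, .monotonic) == 0);
    }

    fn codegenJobQueued() void {
        // Must happen while the bias (or another job) is still held.
        std.debug.assert(pending.rmw(.Add, 1, .monotonic) > 0);
    }

    fn codegenJobDoneOrAnalysisEnded() void {
        if (pending.rmw(.Sub, 1, .monotonic) == 1) {
            // Decremented to zero: the last owner ends the
            // "Code Generation" progress node, exactly once.
        }
    }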
diff --git a/src/Zcu.zig b/src/Zcu.zig
index 91d2c0ffff..513492e818 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -66,8 +66,18 @@ root_mod: *Package.Module,
 /// `root_mod` is the test runner, and `main_mod` is the user's source file which has the tests.
 main_mod: *Package.Module,
 std_mod: *Package.Module,
-sema_prog_node: std.Progress.Node = std.Progress.Node.none,
-codegen_prog_node: std.Progress.Node = std.Progress.Node.none,
+sema_prog_node: std.Progress.Node = .none,
+codegen_prog_node: std.Progress.Node = .none,
+/// The number of codegen jobs which are pending or in-progress. Whichever thread drops this value
+/// to 0 is responsible for ending `codegen_prog_node`. While semantic analysis is happening, this
+/// value bottoms out at 1 instead of 0, to ensure that it can only drop to 0 after analysis is
+/// completed (since semantic analysis could trigger more codegen work).
+pending_codegen_jobs: std.atomic.Value(u32) = .init(0),
+
+/// This is the progress node *under* `sema_prog_node` which is currently running.
+/// When we have to pause to analyze something else, we just temporarily rename this node.
+/// Eventually, when we thread semantic analysis, we will want one of these per thread.
+cur_sema_prog_node: std.Progress.Node = .none,
 
 /// Used by AstGen worker to load and store ZIR cache.
 global_zir_cache: Cache.Directory,
@@ -4753,3 +4763,27 @@ fn explainWhyFileIsInModule(
         import = importer_ref.import;
     }
 }
+
+const SemaProgNode = struct {
+    /// `null` means we created the node, so we should end it.
+    old_name: ?[std.Progress.Node.max_name_len]u8,
+    pub fn end(spn: SemaProgNode, zcu: *Zcu) void {
+        if (spn.old_name) |old_name| {
+            zcu.sema_prog_node.completeOne(); // we're just renaming, but it's effectively completion
+            zcu.cur_sema_prog_node.setName(&old_name);
+        } else {
+            zcu.cur_sema_prog_node.end();
+            zcu.cur_sema_prog_node = .none;
+        }
+    }
+};
+pub fn startSemaProgNode(zcu: *Zcu, name: []const u8) SemaProgNode {
+    if (zcu.cur_sema_prog_node.index != .none) {
+        const old_name = zcu.cur_sema_prog_node.getName();
+        zcu.cur_sema_prog_node.setName(name);
+        return .{ .old_name = old_name };
+    } else {
+        zcu.cur_sema_prog_node = zcu.sema_prog_node.start(name, 0);
+        return .{ .old_name = null };
+    }
+}
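How the rename scheme composes at call sites, as the `Zcu/PerThread.zig` hunks below use it (the unit names here are hypothetical):

    // Outermost unit: nothing is running yet, so a real child node is
    // created under `sema_prog_node`.
    const outer = zcu.startSemaProgNode("example.Outer");
    {
        // Mid-analysis dependency: rather than pushing a nested child node,
        // the running node is renamed; `getName` saved "example.Outer".
        const inner = zcu.startSemaProgNode("example.dependency");
        defer inner.end(zcu); // completeOne() + restore the name "example.Outer"
        // ... analyze the dependency ...
    }
    outer.end(zcu); // actually ends `cur_sema_prog_node`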
diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index f8efa40dc0..8bc723f2e8 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -796,8 +796,8 @@ pub fn ensureComptimeUnitUpToDate(pt: Zcu.PerThread, cu_id: InternPool.ComptimeU
         info.deps.clearRetainingCapacity();
     }
 
-    const unit_prog_node = zcu.sema_prog_node.start("comptime", 0);
-    defer unit_prog_node.end();
+    const unit_prog_node = zcu.startSemaProgNode("comptime");
+    defer unit_prog_node.end(zcu);
 
     return pt.analyzeComptimeUnit(cu_id) catch |err| switch (err) {
         error.AnalysisFail => {
@@ -976,8 +976,8 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu
         info.deps.clearRetainingCapacity();
     }
 
-    const unit_prog_node = zcu.sema_prog_node.start(nav.fqn.toSlice(ip), 0);
-    defer unit_prog_node.end();
+    const unit_prog_node = zcu.startSemaProgNode(nav.fqn.toSlice(ip));
+    defer unit_prog_node.end(zcu);
 
     const invalidate_value: bool, const new_failed: bool = if (pt.analyzeNavVal(nav_id)) |result| res: {
         break :res .{
@@ -1396,8 +1396,8 @@ pub fn ensureNavTypeUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zc
         info.deps.clearRetainingCapacity();
     }
 
-    const unit_prog_node = zcu.sema_prog_node.start(nav.fqn.toSlice(ip), 0);
-    defer unit_prog_node.end();
+    const unit_prog_node = zcu.startSemaProgNode(nav.fqn.toSlice(ip));
+    defer unit_prog_node.end(zcu);
 
     const invalidate_type: bool, const new_failed: bool = if (pt.analyzeNavType(nav_id)) |result| res: {
         break :res .{
@@ -1617,8 +1617,8 @@ pub fn ensureFuncBodyUpToDate(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
         info.deps.clearRetainingCapacity();
     }
 
-    const func_prog_node = zcu.sema_prog_node.start(ip.getNav(func.owner_nav).fqn.toSlice(ip), 0);
-    defer func_prog_node.end();
+    const func_prog_node = zcu.startSemaProgNode(ip.getNav(func.owner_nav).fqn.toSlice(ip));
+    defer func_prog_node.end(zcu);
 
     const ies_outdated, const new_failed = if (pt.analyzeFuncBody(func_index)) |result|
         .{ prev_failed or result.ies_outdated, false }
@@ -3360,6 +3360,7 @@ pub fn populateTestFunctions(
         ip.mutateVarInit(test_fns_val.toIntern(), new_init);
     }
     {
+        assert(zcu.codegen_prog_node.index == .none);
         zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
         defer {
             zcu.codegen_prog_node.end();
@@ -4393,6 +4394,11 @@ pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air, ou
         },
     }
     zcu.comp.link_task_queue.mirReady(zcu.comp, out);
+    if (zcu.pending_codegen_jobs.rmw(.Sub, 1, .monotonic) == 1) {
+        // Decremented to 0, so all done.
+        zcu.codegen_prog_node.end();
+        zcu.codegen_prog_node = .none;
+    }
 }
 
 fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) error{
     OutOfMemory,
diff --git a/src/link.zig b/src/link.zig
index 844ea7a85c..7d522b94d3 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -1074,7 +1074,7 @@ pub const File = struct {
 
     /// Called when all linker inputs have been sent via `loadInput`. After
     /// this, `loadInput` will not be called anymore.
-    pub fn prelink(base: *File, prog_node: std.Progress.Node) FlushError!void {
+    pub fn prelink(base: *File) FlushError!void {
         assert(!base.post_prelink);
 
         // In this case, an object file is created by the LLVM backend, so
@@ -1085,7 +1085,7 @@ pub const File = struct {
         switch (base.tag) {
             inline .wasm => |tag| {
                 dev.check(tag.devFeature());
-                return @as(*tag.Type(), @fieldParentPtr("base", base)).prelink(prog_node);
+                return @as(*tag.Type(), @fieldParentPtr("base", base)).prelink(base.comp.link_prog_node);
             },
             else => {},
         }
@@ -1293,7 +1293,7 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
     const base = comp.bin_file orelse return;
     switch (task) {
         .load_explicitly_provided => {
-            const prog_node = comp.work_queue_progress_node.start("Parse Linker Inputs", comp.link_inputs.len);
+            const prog_node = comp.link_prog_node.start("Parse Inputs", comp.link_inputs.len);
             defer prog_node.end();
             for (comp.link_inputs) |input| {
                 base.loadInput(input) catch |err| switch (err) {
@@ -1310,7 +1310,7 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
             }
         },
         .load_host_libc => {
-            const prog_node = comp.work_queue_progress_node.start("Linker Parse Host libc", 0);
+            const prog_node = comp.link_prog_node.start("Parse Host libc", 0);
             defer prog_node.end();
             const target = comp.root_mod.resolved_target.result;
@@ -1369,7 +1369,7 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
             }
         },
         .load_object => |path| {
-            const prog_node = comp.work_queue_progress_node.start("Linker Parse Object", 0);
+            const prog_node = comp.link_prog_node.start("Parse Object", 0);
             defer prog_node.end();
             base.openLoadObject(path) catch |err| switch (err) {
                 error.LinkFailure => return, // error reported via diags
@@ -1377,7 +1377,7 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
             };
         },
         .load_archive => |path| {
-            const prog_node = comp.work_queue_progress_node.start("Linker Parse Archive", 0);
+            const prog_node = comp.link_prog_node.start("Parse Archive", 0);
             defer prog_node.end();
             base.openLoadArchive(path, null) catch |err| switch (err) {
                 error.LinkFailure => return, // error reported via link_diags
@@ -1385,7 +1385,7 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
             };
         },
         .load_dso => |path| {
-            const prog_node = comp.work_queue_progress_node.start("Linker Parse Shared Library", 0);
+            const prog_node = comp.link_prog_node.start("Parse Shared Library", 0);
             defer prog_node.end();
             base.openLoadDso(path, .{
                 .preferred_mode = .dynamic,
@@ -1396,7 +1396,7 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
             };
         },
         .load_input => |input| {
-            const prog_node = comp.work_queue_progress_node.start("Linker Parse Input", 0);
+            const prog_node = comp.link_prog_node.start("Parse Input", 0);
             defer prog_node.end();
             base.loadInput(input) catch |err| switch (err) {
                 error.LinkFailure => return, // error reported via link_diags
@@ -1418,6 +1418,9 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
             const zcu = comp.zcu.?;
             const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
             defer pt.deactivate();
+            const fqn_slice = zcu.intern_pool.getNav(nav_index).fqn.toSlice(&zcu.intern_pool);
+            const nav_prog_node = comp.link_prog_node.start(fqn_slice, 0);
+            defer nav_prog_node.end();
             if (zcu.llvm_object) |llvm_object| {
                 llvm_object.updateNav(pt, nav_index) catch |err| switch (err) {
                     error.OutOfMemory => diags.setAllocFailure(),
@@ -1441,6 +1444,9 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
             const nav = zcu.funcInfo(func.func).owner_nav;
             const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
             defer pt.deactivate();
+            const fqn_slice = zcu.intern_pool.getNav(nav).fqn.toSlice(&zcu.intern_pool);
+            const nav_prog_node = comp.link_prog_node.start(fqn_slice, 0);
+            defer nav_prog_node.end();
             switch (func.mir.status.load(.monotonic)) {
                 .pending => unreachable,
                 .ready => {},
diff --git a/src/link/Lld.zig b/src/link/Lld.zig
index dd50bd2a2f..4ea809428e 100644
--- a/src/link/Lld.zig
+++ b/src/link/Lld.zig
@@ -267,6 +267,9 @@ pub fn flush(
     const comp = lld.base.comp;
 
     const result = if (comp.config.output_mode == .Lib and comp.config.link_mode == .static) r: {
+        if (!@import("build_options").have_llvm or !comp.config.use_lib_llvm) {
+            return lld.base.comp.link_diags.fail("using lld without libllvm not implemented", .{});
+        }
         break :r linkAsArchive(lld, arena);
     } else switch (lld.ofmt) {
         .coff => coffLink(lld, arena),
diff --git a/src/link/Queue.zig b/src/link/Queue.zig
index 3436be5921..ab5fd89699 100644
--- a/src/link/Queue.zig
+++ b/src/link/Queue.zig
@@ -180,7 +180,7 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
     // We've finished the prelink tasks, so run prelink if necessary.
     if (comp.bin_file) |lf| {
         if (!lf.post_prelink) {
-            if (lf.prelink(comp.work_queue_progress_node)) |_| {
+            if (lf.prelink()) |_| {
                 lf.post_prelink = true;
             } else |err| switch (err) {
                 error.OutOfMemory => comp.link_diags.setAllocFailure(),
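The patch leans on two `std.Progress.Node` conventions throughout: `.none` is a valid node on which every operation is a no-op (hence the `.none` defaults and resets above), and nodes form a tree via paired `start`/`end` calls. A minimal self-contained sketch of both, independent of the compiler:

    const std = @import("std");

    pub fn main() void {
        const root = std.Progress.start(.{ .root_name = "compile" });
        defer root.end();

        // Mirrors `comp.link_prog_node`: children such as "Parse Inputs" or a
        // nav's fully-qualified name appear beneath "Linking" while they run.
        const link_node = root.start("Linking", 0);
        defer link_node.end();

        // Mirrors the `.none` defaults: every operation on it is a no-op, so
        // code paths without a binary file can use the field unconditionally.
        const idle: std.Progress.Node = .none;
        idle.completeOne();
    }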