diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig
index a7e5b8f289..2abc7ed59f 100644
--- a/src/Zcu/PerThread.zig
+++ b/src/Zcu/PerThread.zig
@@ -4395,7 +4395,7 @@ pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air, ou
     };
     // release `out.value` with this store; synchronizes with acquire loads in `link`
     out.status.store(if (success) .ready else .failed, .release);
-    zcu.comp.link_task_queue.mirReady(zcu.comp, out);
+    zcu.comp.link_task_queue.mirReady(zcu.comp, func_index, out);
     if (zcu.pending_codegen_jobs.rmw(.Sub, 1, .monotonic) == 1) {
         // Decremented to 0, so all done.
         zcu.codegen_prog_node.end();
diff --git a/src/link/Queue.zig b/src/link/Queue.zig
index 90f34804f6..d1595636ac 100644
--- a/src/link/Queue.zig
+++ b/src/link/Queue.zig
@@ -64,7 +64,7 @@ state: union(enum) {
     finished,
     /// The link thread is not running or queued, because it is waiting for this MIR to be populated.
     /// Once codegen completes, it must call `mirReady` which will restart the link thread.
-    wait_for_mir: *ZcuTask.LinkFunc.SharedMir,
+    wait_for_mir: InternPool.Index,
 },
 
 /// In the worst observed case, MIR is around 50 times as large as AIR. More typically, the ratio is
@@ -113,7 +113,7 @@ pub fn start(q: *Queue, comp: *Compilation) void {
 
 /// Called by codegen workers after they have populated a `ZcuTask.LinkFunc.SharedMir`. If the link
 /// thread was waiting for this MIR, it can resume.
-pub fn mirReady(q: *Queue, comp: *Compilation, mir: *ZcuTask.LinkFunc.SharedMir) void {
+pub fn mirReady(q: *Queue, comp: *Compilation, func_index: InternPool.Index, mir: *ZcuTask.LinkFunc.SharedMir) void {
     // We would like to assert that `mir` is not pending, but that would race with a worker thread
     // potentially freeing it.
     {
@@ -121,7 +121,7 @@ pub fn mirReady(q: *Queue, comp: *Compilation, mir: *ZcuTask.LinkFunc.SharedMir)
         defer q.mutex.unlock();
         switch (q.state) {
             .finished, .running => return,
-            .wait_for_mir => |wait_for| if (wait_for != mir) return,
+            .wait_for_mir => |wait_for| if (wait_for != func_index) return,
         }
         // We were waiting for `mir`, so we will restart the linker thread.
         q.state = .running;
@@ -171,7 +171,7 @@ pub fn enqueueZcu(q: *Queue, comp: *Compilation, task: ZcuTask) Allocator.Error!
     }
     // Restart the linker thread, unless it would immediately be blocked
     if (task == .link_func and task.link_func.mir.status.load(.acquire) == .pending) {
-        q.state = .{ .wait_for_mir = task.link_func.mir };
+        q.state = .{ .wait_for_mir = task.link_func.func };
         return;
     }
     q.state = .running;
@@ -248,7 +248,7 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
                 defer q.mutex.unlock();
                 if (status_ptr.load(.acquire) != .pending) break :pending;
                 // We will stop for now, and get restarted once this MIR is ready.
-                q.state = .{ .wait_for_mir = task.link_func.mir };
+                q.state = .{ .wait_for_mir = task.link_func.func };
                 q.flush_safety.unlock();
                 return;
             }
@@ -273,6 +273,7 @@ const std = @import("std");
 const assert = std.debug.assert;
 const Allocator = std.mem.Allocator;
 const Compilation = @import("../Compilation.zig");
+const InternPool = @import("../InternPool.zig");
 const link = @import("../link.zig");
 const PrelinkTask = link.PrelinkTask;
 const ZcuTask = link.ZcuTask;