compiler: fix atomic orderings
I messed up atomic orderings on this variable because they changed in a local refactor at some point. We need to always release on the store and acquire on the loads so that a linker thread observing `.ready` sees the stored MIR.
This commit is contained in:
parent dd75e7bcb1
commit 121d620443
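As context for the fix, here is a minimal, self-contained sketch of the release/acquire pairing the commit message describes. The names (`payload`, `codegenThread`, `linkerThread`) are illustrative, not the compiler's actual types:

const std = @import("std");

const Status = enum(u8) { pending, ready };

var payload: u64 = 0; // non-atomic data, standing in for the stored MIR
var status = std.atomic.Value(Status).init(.pending);

fn codegenThread() void {
    payload = 42; // 1. write the data
    // 2. publish: .release orders the payload write before this store,
    //    for any thread whose .acquire load observes .ready
    status.store(.ready, .release);
}

fn linkerThread() void {
    // .acquire pairs with the .release store above; a .monotonic load
    // could observe .ready before the payload write becomes visible
    while (status.load(.acquire) != .ready) std.Thread.yield() catch {};
    std.debug.assert(payload == 42);
}

pub fn main() !void {
    const t = try std.Thread.spawn(.{}, codegenThread, .{});
    linkerThread();
    t.join();
}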
@@ -4373,33 +4373,28 @@ pub fn addDependency(pt: Zcu.PerThread, unit: AnalUnit, dependee: InternPool.Dep
 /// codegen thread, depending on whether the backend supports `Zcu.Feature.separate_thread`.
 pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air, out: *@import("../link.zig").ZcuTask.LinkFunc.SharedMir) void {
     const zcu = pt.zcu;
-    if (runCodegenInner(pt, func_index, air)) |mir| {
+    const success: bool = if (runCodegenInner(pt, func_index, air)) |mir| success: {
         out.value = mir;
-        out.status.store(.ready, .release);
-    } else |err| switch (err) {
-        error.OutOfMemory => {
-            zcu.comp.setAllocFailure();
-            out.status.store(.failed, .monotonic);
-        },
-        error.CodegenFail => {
-            zcu.assertCodegenFailed(zcu.funcInfo(func_index).owner_nav);
-            out.status.store(.failed, .monotonic);
-        },
-        error.NoLinkFile => {
-            assert(zcu.comp.bin_file == null);
-            out.status.store(.failed, .monotonic);
-        },
-        error.BackendDoesNotProduceMir => {
-            const backend = target_util.zigBackend(zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
-            switch (backend) {
+        break :success true;
+    } else |err| success: {
+        switch (err) {
+            error.OutOfMemory => zcu.comp.setAllocFailure(),
+            error.CodegenFail => zcu.assertCodegenFailed(zcu.funcInfo(func_index).owner_nav),
+            error.NoLinkFile => assert(zcu.comp.bin_file == null),
+            error.BackendDoesNotProduceMir => switch (target_util.zigBackend(
+                zcu.root_mod.resolved_target.result,
+                zcu.comp.config.use_llvm,
+            )) {
                 else => unreachable, // assertion failure
                 .stage2_spirv64,
                 .stage2_llvm,
                 => {},
-            }
-            out.status.store(.failed, .monotonic);
-        },
-    }
+            },
+        }
+        break :success false;
+    };
+    // release `out.value` with this store; synchronizes with acquire loads in `link`
+    out.status.store(if (success) .ready else .failed, .release);
     zcu.comp.link_task_queue.mirReady(zcu.comp, out);
     if (zcu.pending_codegen_jobs.rmw(.Sub, 1, .monotonic) == 1) {
         // Decremented to 0, so all done.
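The restructure above funnels success and failure into a single release store instead of per-branch stores, which is what made the ordering easy to get wrong. A hedged sketch of the same publish shape, with illustrative types rather than the compiler's:

const std = @import("std");

const Status = enum(u8) { pending, ready, failed };

const Shared = struct {
    status: std.atomic.Value(Status) = std.atomic.Value(Status).init(.pending),
    value: u32 = undefined, // meaningful only once `status` is .ready
};

fn publish(out: *Shared, result: anyerror!u32) void {
    const success: bool = if (result) |v| blk: {
        out.value = v;
        break :blk true;
    } else |_| false;
    // Exactly one store on every path. Releasing even on failure costs
    // little and keeps the invariant uniform: observing any non-.pending
    // status through an .acquire load is always a synchronizing edge.
    out.status.store(if (success) .ready else .failed, .release);
}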
@@ -1249,7 +1249,7 @@ pub const ZcuTask = union(enum) {
         .update_line_number,
         => {},
         .link_func => |link_func| {
-            switch (link_func.mir.status.load(.monotonic)) {
+            switch (link_func.mir.status.load(.acquire)) {
                 .pending => unreachable, // cannot deinit until MIR done
                 .failed => {}, // MIR not populated so doesn't need freeing
                 .ready => link_func.mir.value.deinit(zcu),
@@ -1453,7 +1453,7 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
             const fqn_slice = ip.getNav(nav).fqn.toSlice(ip);
             const nav_prog_node = comp.link_prog_node.start(fqn_slice, 0);
             defer nav_prog_node.end();
-            switch (func.mir.status.load(.monotonic)) {
+            switch (func.mir.status.load(.acquire)) {
                 .pending => unreachable,
                 .ready => {},
                 .failed => return,
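The two hunks above are the consumer half of the same edge: one gates freeing the MIR, the other gates linking it. A minimal sketch of that load-then-use shape, assuming the illustrative `Shared` type from the earlier sketch:

fn consume(mir: *Shared) ?u32 {
    // The load must be .acquire: both the .ready arm here and any later
    // deinit of `value` read memory written by the codegen thread before
    // its .release store. With .monotonic, seeing .ready would not
    // guarantee that those writes are visible.
    switch (mir.status.load(.acquire)) {
        .pending => unreachable, // caller guarantees codegen has finished
        .failed => return null, // `value` was never populated
        .ready => return mir.value,
    }
}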
@@ -126,7 +126,7 @@ pub fn mirReady(q: *Queue, comp: *Compilation, mir: *ZcuTask.LinkFunc.SharedMir)
         // We were waiting for `mir`, so we will restart the linker thread.
         q.state = .running;
     }
-    assert(mir.status.load(.monotonic) != .pending);
+    assert(mir.status.load(.acquire) != .pending);
     comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
 }
@@ -170,7 +170,7 @@ pub fn enqueueZcu(q: *Queue, comp: *Compilation, task: ZcuTask) Allocator.Error!
         .finished => if (q.pending_prelink_tasks != 0) return,
     }
     // Restart the linker thread, unless it would immediately be blocked
-    if (task == .link_func and task.link_func.mir.status.load(.monotonic) == .pending) {
+    if (task == .link_func and task.link_func.mir.status.load(.acquire) == .pending) {
         q.state = .{ .wait_for_mir = task.link_func.mir };
         return;
     }
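These two hunks are the park/wake handshake around that edge: `enqueueZcu` parks the linker when MIR is still pending, and `mirReady` (called right after the release store in `runCodegen`) restarts it. A heavily simplified, hedged sketch of the producer side, with illustrative state handling rather than the real queue internals:

const Queue = struct {
    mutex: std.Thread.Mutex = .{},
    state: union(enum) { running, wait_for_mir: *Shared } = .running,

    fn mirReady(q: *Queue, mir: *Shared) void {
        // Runs on the codegen thread after the .release store, so this
        // .acquire load must observe .ready or .failed, never .pending.
        std.debug.assert(mir.status.load(.acquire) != .pending);
        q.mutex.lock();
        defer q.mutex.unlock();
        switch (q.state) {
            .wait_for_mir => |waiting| if (waiting == mir) {
                // The linker parked on exactly this MIR; wake it up
                // (in the compiler, by respawning the flush task).
                q.state = .running;
            },
            .running => {},
        }
    }
};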
@@ -243,10 +243,10 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
             if (task != .link_func) break :pending;
             const status_ptr = &task.link_func.mir.status;
             // First check without the mutex to optimize for the common case where MIR is ready.
-            if (status_ptr.load(.monotonic) != .pending) break :pending;
+            if (status_ptr.load(.acquire) != .pending) break :pending;
             q.mutex.lock();
             defer q.mutex.unlock();
-            if (status_ptr.load(.monotonic) != .pending) break :pending;
+            if (status_ptr.load(.acquire) != .pending) break :pending;
             // We will stop for now, and get restarted once this MIR is ready.
             q.state = .{ .wait_for_mir = task.link_func.mir };
             q.flush_safety.unlock();
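The final hunk is a double-checked wait: one load outside the mutex as a fast path, and a second under the mutex before deciding to park. A sketch of that shape under the same illustrative types as above:

fn shouldPark(q: *Queue, status_ptr: *std.atomic.Value(Status)) bool {
    // Fast path: skip the lock entirely when MIR is already done,
    // the common case.
    if (status_ptr.load(.acquire) != .pending) return false;
    q.mutex.lock();
    defer q.mutex.unlock();
    // Re-check under the mutex: the codegen thread may have published
    // between the first load and acquiring the lock. Both loads are
    // .acquire, so whichever load observes non-.pending also makes the
    // MIR itself visible to this thread.
    if (status_ptr.load(.acquire) != .pending) return false;
    // Still pending: park; mirReady will restart the flush task.
    return true;
}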