frontend: move updateZirRefs to be single-threaded

for simplicity's sake. This makes it O(M) instead of O(N*M) where N is
tracked insts and M is number of changed source files.
Andrew Kelley 2024-07-15 18:54:41 -07:00
parent 888708ec8a
commit 60318a1e39
3 changed files with 147 additions and 124 deletions
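Below is a minimal sketch of the restructuring this commit makes, not the compiler's actual code: the types (`FileIndex`, `InstMap`, `TrackedInst`, `UpdatedFile`) are hypothetical simplified stand-ins for the real `Zcu`/`InternPool` types. Before, each AstGen worker called `updateZirRefs` for its own changed file, so every changed file triggered a fresh scan of the tracked instructions, with mutexes taken around each mutation. After, the main thread performs one pass over the tracked instructions once all AstGen workers have finished, consulting per-file instruction maps built up front.

    const std = @import("std");

    // Hypothetical simplified stand-ins for the real compiler types.
    const FileIndex = u32;
    const InstIndex = u32;
    const InstMap = std.AutoHashMapUnmanaged(InstIndex, InstIndex); // old ZIR inst -> new ZIR inst

    const TrackedInst = struct { file: FileIndex, inst: InstIndex };
    const UpdatedFile = struct { file_index: FileIndex, inst_map: InstMap };

    // Old shape: invoked once per changed file from an AstGen worker thread, so
    // every changed file costs a full scan of the tracked instructions (locking
    // around shared state is omitted here).
    fn updateRefsForOneFile(tracked: []TrackedInst, file_index: FileIndex, inst_map: *const InstMap) void {
        for (tracked) |*ti| {
            if (ti.file != file_index) continue;
            if (inst_map.get(ti.inst)) |new_inst| ti.inst = new_inst;
        }
    }

    // New shape: invoked once on the main thread after all AstGen workers finish,
    // so the tracked instructions are walked a single time and no locking is needed.
    fn updateRefsSingleThreaded(tracked: []TrackedInst, updated_files: []const UpdatedFile) void {
        for (tracked) |*ti| {
            for (updated_files) |*uf| {
                if (ti.file != uf.file_index) continue;
                if (uf.inst_map.get(ti.inst)) |new_inst| ti.inst = new_inst;
                break;
            }
        }
    }

In the actual diff, the per-file maps are built with `Zcu.mapOldZirToNew` into a list of `UpdatedFile` entries before the pass, the pass asserts `pt.tid == .main`, and the `zcu.comp.mutex` and `tracked_insts` lock acquisitions are dropped because nothing else runs concurrently at that point.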

View File

@@ -3596,6 +3596,11 @@ fn performAllTheWorkInner(
     if (comp.module) |zcu| {
         const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = .main };
+        if (comp.incremental) {
+            const update_zir_refs_node = main_progress_node.start("Update ZIR References", 0);
+            defer update_zir_refs_node.end();
+            try pt.updateZirRefs();
+        }
         try reportMultiModuleErrors(pt);
         try zcu.flushRetryableFailures();
         zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
@@ -4306,7 +4311,7 @@ fn workerAstGenFile(
     defer child_prog_node.end();
     const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
-    pt.astGenFile(file, file_index, path_digest, root_decl) catch |err| switch (err) {
+    pt.astGenFile(file, path_digest, root_decl) catch |err| switch (err) {
        error.AnalysisFail => return,
        else => {
            file.status = .retryable_failure;

View File

@@ -6065,7 +6065,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
     const path_digest = zcu.filePathDigest(result.file_index);
     const root_decl = zcu.fileRootDecl(result.file_index);
-    pt.astGenFile(result.file, result.file_index, path_digest, root_decl) catch |err|
+    pt.astGenFile(result.file, path_digest, root_decl) catch |err|
         return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
     try pt.ensureFileAnalyzed(result.file_index);

View File

@@ -60,10 +60,6 @@ pub fn destroyFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void {
 pub fn astGenFile(
     pt: Zcu.PerThread,
     file: *Zcu.File,
-    /// This parameter is provided separately from `file` because it is not
-    /// safe to access `import_table` without a lock, and this index is needed
-    /// in the call to `updateZirRefs`.
-    file_index: Zcu.File.Index,
     path_digest: Cache.BinDigest,
     opt_root_decl: Zcu.Decl.OptionalIndex,
 ) !void {
@@ -210,13 +206,18 @@ pub fn astGenFile(
     pt.lockAndClearFileCompileError(file);
-    // If the previous ZIR does not have compile errors, keep it around
-    // in case parsing or new ZIR fails. In case of successful ZIR update
-    // at the end of this function we will free it.
-    // We keep the previous ZIR loaded so that we can use it
-    // for the update next time it does not have any compile errors. This avoids
-    // needlessly tossing out semantic analysis work when an error is
-    // temporarily introduced.
+    // Previous ZIR is kept for two reasons:
+    //
+    // 1. In case an update to the file causes a Parse or AstGen failure, we
+    //    need to compare two successful ZIR files in order to proceed with an
+    //    incremental update. This avoids needlessly tossing out semantic
+    //    analysis work when an error is temporarily introduced.
+    //
+    // 2. In order to detect updates, we need to iterate over the intern pool
+    //    values while comparing old ZIR to new ZIR. This is better done in a
+    //    single-threaded context, so we need to keep both versions around
+    //    until that point in the pipeline. Previous ZIR data is freed after
+    //    that.
     if (file.zir_loaded and !file.zir.hasCompileErrors()) {
         assert(file.prev_zir == null);
         const prev_zir_ptr = try gpa.create(Zir);
@@ -320,14 +321,6 @@ pub fn astGenFile(
         return error.AnalysisFail;
     }
-    if (file.prev_zir) |prev_zir| {
-        try pt.updateZirRefs(file, file_index, prev_zir.*);
-        // No need to keep previous ZIR.
-        prev_zir.deinit(gpa);
-        gpa.destroy(prev_zir);
-        file.prev_zir = null;
-    }
     if (opt_root_decl.unwrap()) |root_decl| {
         // The root of this file must be re-analyzed, since the file has changed.
         comp.mutex.lock();
@@ -338,30 +331,56 @@ pub fn astGenFile(
     }
 }
-/// This is called from the AstGen thread pool, so must acquire
-/// the Compilation mutex when acting on shared state.
-fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index, old_zir: Zir) !void {
+const UpdatedFile = struct {
+    file_index: Zcu.File.Index,
+    file: *Zcu.File,
+    inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index),
+};
+
+fn cleanupUpdatedFiles(gpa: Allocator, updated_files: *std.ArrayListUnmanaged(UpdatedFile)) void {
+    for (updated_files.items) |*elem| elem.inst_map.deinit(gpa);
+    updated_files.deinit(gpa);
+}
+
+pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
+    assert(pt.tid == .main);
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
     const gpa = zcu.gpa;
-    const new_zir = file.zir;
-    var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{};
-    defer inst_map.deinit(gpa);
-    try Zcu.mapOldZirToNew(gpa, old_zir, new_zir, &inst_map);
-    const old_tag = old_zir.instructions.items(.tag);
-    const old_data = old_zir.instructions.items(.data);
-    // TODO: this should be done after all AstGen workers complete, to avoid
-    // iterating over this full set for every updated file.
+    // We need to visit every updated File for every TrackedInst in InternPool.
+    var updated_files: std.ArrayListUnmanaged(UpdatedFile) = .{};
+    defer cleanupUpdatedFiles(gpa, &updated_files);
+    for (zcu.import_table.values()) |file_index| {
+        const file = zcu.fileByIndex(file_index);
+        const old_zir = file.prev_zir orelse continue;
+        const new_zir = file.zir;
+        try updated_files.append(gpa, .{
+            .file_index = file_index,
+            .file = file,
+            .inst_map = .{},
+        });
+        const inst_map = &updated_files.items[updated_files.items.len - 1].inst_map;
+        try Zcu.mapOldZirToNew(gpa, old_zir.*, new_zir, inst_map);
+    }
+
+    if (updated_files.items.len == 0)
+        return;
+
     for (ip.locals, 0..) |*local, tid| {
-        local.mutate.tracked_insts.mutex.lock();
-        defer local.mutate.tracked_insts.mutex.unlock();
        const tracked_insts_list = local.getMutableTrackedInsts(gpa);
        for (tracked_insts_list.view().items(.@"0"), 0..) |*tracked_inst, tracked_inst_unwrapped_index| {
-            if (tracked_inst.file != file_index) continue;
+            for (updated_files.items) |updated_file| {
+                const file_index = updated_file.file_index;
+                if (tracked_inst.file != file_index) continue;
+
+                const file = updated_file.file;
+                const old_zir = file.prev_zir.?.*;
+                const new_zir = file.zir;
+                const old_tag = old_zir.instructions.items(.tag);
+                const old_data = old_zir.instructions.items(.data);
+                const inst_map = &updated_file.inst_map;
+
                const old_inst = tracked_inst.inst;
                const tracked_inst_index = (InternPool.TrackedInst.Index.Unwrapped{
                    .tid = @enumFromInt(tid),
@@ -369,8 +388,6 @@ fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index,
                }).wrap(ip);
                tracked_inst.inst = inst_map.get(old_inst) orelse {
                    // Tracking failed for this instruction. Invalidate associated `src_hash` deps.
-                    zcu.comp.mutex.lock();
-                    defer zcu.comp.mutex.unlock();
                    log.debug("tracking failed for %{d}", .{old_inst});
                    try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index });
                    continue;
@@ -389,8 +406,6 @@ fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index,
                    });
                }
                // The source hash associated with this instruction changed - invalidate relevant dependencies.
-                zcu.comp.mutex.lock();
-                defer zcu.comp.mutex.unlock();
                try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index });
            }
@@ -443,8 +458,6 @@ fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index,
                    if (!old_names.swapRemove(name_ip)) continue;
                    // Name added
                    any_change = true;
-                    zcu.comp.mutex.lock();
-                    defer zcu.comp.mutex.unlock();
                    try zcu.markDependeeOutdated(.{ .namespace_name = .{
                        .namespace = tracked_inst_index,
                        .name = name_ip,
@@ -454,8 +467,6 @@ fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index,
                // The only elements remaining in `old_names` now are any names which were removed.
                for (old_names.keys()) |name_ip| {
                    any_change = true;
-                    zcu.comp.mutex.lock();
-                    defer zcu.comp.mutex.unlock();
                    try zcu.markDependeeOutdated(.{ .namespace_name = .{
                        .namespace = tracked_inst_index,
                        .name = name_ip,
@@ -463,12 +474,19 @@ fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index,
                }
                if (any_change) {
-                    zcu.comp.mutex.lock();
-                    defer zcu.comp.mutex.unlock();
                    try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index });
                }
            }
        }
+    }
+
+    for (updated_files.items) |updated_file| {
+        const file = updated_file.file;
+        const prev_zir = file.prev_zir.?;
+        file.prev_zir = null;
+        prev_zir.deinit(gpa);
+        gpa.destroy(prev_zir);
+    }
 }
 /// Like `ensureDeclAnalyzed`, but the Decl is a file's root Decl.
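As a footnote to the new comment in `astGenFile` about keeping the previous ZIR, here is a minimal sketch of the hand-off the diff sets up, using a hypothetical simplified `File` type rather than the compiler's own: AstGen (on worker threads) stashes the old ZIR on the file, and the single-threaded `updateZirRefs` pass consumes it and frees it afterwards, mirroring the cleanup loop over `updated_files` at the end of the new function.

    const std = @import("std");

    // Hypothetical simplified stand-ins; the real types live in Zcu and Zir.
    const Zir = struct {
        pub fn deinit(zir: *Zir, gpa: std.mem.Allocator) void {
            _ = zir;
            _ = gpa;
        }
    };

    const File = struct {
        zir: Zir = .{},
        prev_zir: ?*Zir = null,
    };

    // AstGen stage (worker threads): before replacing `file.zir`, stash the old
    // copy so the later single-threaded pass can diff old against new. In the
    // real code this only happens when the old ZIR has no compile errors.
    fn stashPrevZir(gpa: std.mem.Allocator, file: *File, old: Zir) !void {
        const prev = try gpa.create(Zir);
        prev.* = old;
        file.prev_zir = prev;
    }

    // End of the single-threaded updateZirRefs pass: the old ZIR has served its
    // purpose, so free it (mirrors the cleanup loop over `updated_files`).
    fn releasePrevZir(gpa: std.mem.Allocator, file: *File) void {
        const prev = file.prev_zir orelse return;
        file.prev_zir = null;
        prev.deinit(gpa);
        gpa.destroy(prev);
    }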