From a7dd34bfc57f4f84bb5290177f26e2d1f0bdc27e Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 16 Oct 2024 14:21:13 +0100 Subject: [PATCH 1/4] incremental: add new test case This isn't exactly the case provided in #11290, but is a slightly simpler case which I know would have triggered the same bug in the old implementation of incremental compilation. Resolves: #11290 --- .../remove_invalid_union_backing_enum | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 test/incremental/remove_invalid_union_backing_enum diff --git a/test/incremental/remove_invalid_union_backing_enum b/test/incremental/remove_invalid_union_backing_enum new file mode 100644 index 0000000000..ded6304531 --- /dev/null +++ b/test/incremental/remove_invalid_union_backing_enum @@ -0,0 +1,30 @@ +#target=x86_64-linux-selfhosted +#target=x86_64-linux-cbe +#target=x86_64-windows-cbe +#update=initial version +#file=main.zig +const E = enum { a, b, c }; +const U = union(E) { + a: i32, + b: f64, + c: f64, + d: f64, +}; +pub fn main() void { + const u: U = .{ .a = 123 }; + _ = u; +} +#expect_error=ignored +#update=remove invalid backing enum +#file=main.zig +const U = union { + a: i32, + b: f64, + c: f64, + d: f64, +}; +pub fn main() void { + const u: U = .{ .a = 123 }; + _ = u; +} +#expect_stdout="" From c6842b58d488c236aca74dea82082eec365eb117 Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 16 Oct 2024 15:59:27 +0100 Subject: [PATCH 2/4] Zcu: cache output of `resolveReferences` between calls This not only simplifies the error bundling logic, but also improves efficiency by allowing the result to be cached between, for instance, multiple calls to `totalErrorCount`. 
--- src/Compilation.zig | 60 +++++++++++++++++++-------------------------- src/Sema.zig | 3 +-- src/Zcu.zig | 27 +++++++++++++++++++- 3 files changed, 52 insertions(+), 38 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 1451c2adc3..a61bc215e1 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3076,15 +3076,12 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { }); } - var all_references: ?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference) = null; - defer if (all_references) |*a| a.deinit(gpa); - if (comp.zcu) |zcu| { const ip = &zcu.intern_pool; for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| { if (error_msg) |msg| { - try addModuleErrorMsg(zcu, &bundle, msg.*, &all_references); + try addModuleErrorMsg(zcu, &bundle, msg.*); } else { // Must be ZIR errors. Note that this may include AST errors. // addZirErrorMessages asserts that the tree is loaded. @@ -3093,7 +3090,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } } for (zcu.failed_embed_files.values()) |error_msg| { - try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references); + try addModuleErrorMsg(zcu, &bundle, error_msg.*); } { const SortOrder = struct { @@ -3136,10 +3133,8 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| { if (comp.incremental) { - if (all_references == null) { - all_references = try zcu.resolveReferences(); - } - if (!all_references.?.contains(anal_unit)) continue; + const refs = try zcu.resolveReferences(); + if (!refs.contains(anal_unit)) continue; } const file_index = switch (anal_unit.unwrap()) { @@ -3151,7 +3146,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { // We'll try again once parsing succeeds. 
if (!zcu.fileByIndex(file_index).okToReportErrors()) continue; - try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references); + try addModuleErrorMsg(zcu, &bundle, error_msg.*); if (zcu.cimport_errors.get(anal_unit)) |errors| { for (errors.getMessages()) |err_msg_index| { const err_msg = errors.getErrorMessage(err_msg_index); @@ -3175,10 +3170,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { } for (zcu.failed_codegen.keys(), zcu.failed_codegen.values()) |nav, error_msg| { if (!zcu.navFileScope(nav).okToReportErrors()) continue; - try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references); + try addModuleErrorMsg(zcu, &bundle, error_msg.*); } for (zcu.failed_exports.values()) |value| { - try addModuleErrorMsg(zcu, &bundle, value.*, &all_references); + try addModuleErrorMsg(zcu, &bundle, value.*); } const actual_error_count = zcu.intern_pool.global_error_set.getNamesFromMainThread().len; @@ -3252,17 +3247,15 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle { }; } - try addModuleErrorMsg(zcu, &bundle, err_msg, &all_references); + try addModuleErrorMsg(zcu, &bundle, err_msg); } } if (comp.zcu) |zcu| { if (comp.incremental and bundle.root_list.items.len == 0) { const should_have_error = for (zcu.transitive_failed_analysis.keys()) |failed_unit| { - if (all_references == null) { - all_references = try zcu.resolveReferences(); - } - if (all_references.?.contains(failed_unit)) break true; + const refs = try zcu.resolveReferences(); + if (refs.contains(failed_unit)) break true; } else false; if (should_have_error) { @panic("referenced transitive analysis errors, but none actually emitted"); @@ -3331,14 +3324,13 @@ pub const ErrorNoteHashContext = struct { }; pub fn addModuleErrorMsg( - mod: *Zcu, + zcu: *Zcu, eb: *ErrorBundle.Wip, module_err_msg: Zcu.ErrorMsg, - all_references: *?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference), ) !void { const gpa = eb.gpa; - const ip = &mod.intern_pool; - const err_src_loc = 
module_err_msg.src_loc.upgrade(mod); + const ip = &zcu.intern_pool; + const err_src_loc = module_err_msg.src_loc.upgrade(zcu); const err_source = err_src_loc.file_scope.getSource(gpa) catch |err| { const file_path = try err_src_loc.file_scope.fullPath(gpa); defer gpa.free(file_path); @@ -3358,22 +3350,20 @@ pub fn addModuleErrorMsg( defer ref_traces.deinit(gpa); if (module_err_msg.reference_trace_root.unwrap()) |rt_root| { - if (all_references.* == null) { - all_references.* = try mod.resolveReferences(); - } + const all_references = try zcu.resolveReferences(); var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .empty; defer seen.deinit(gpa); - const max_references = mod.comp.reference_trace orelse Sema.default_reference_trace_len; + const max_references = zcu.comp.reference_trace orelse Sema.default_reference_trace_len; var referenced_by = rt_root; - while (all_references.*.?.get(referenced_by)) |maybe_ref| { + while (all_references.get(referenced_by)) |maybe_ref| { const ref = maybe_ref orelse break; const gop = try seen.getOrPut(gpa, ref.referencer); if (gop.found_existing) break; if (ref_traces.items.len < max_references) { - const src = ref.src.upgrade(mod); + const src = ref.src.upgrade(zcu); const source = try src.file_scope.getSource(gpa); const span = try src.span(gpa); const loc = std.zig.findLineColumn(source.bytes, span.main); @@ -3385,7 +3375,7 @@ pub fn addModuleErrorMsg( .type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), .none => "comptime", }, - .func => |f| ip.getNav(mod.funcInfo(f).owner_nav).name.toSlice(ip), + .func => |f| ip.getNav(zcu.funcInfo(f).owner_nav).name.toSlice(ip), }; try ref_traces.append(gpa, .{ .decl_name = try eb.addString(name), @@ -3435,7 +3425,7 @@ pub fn addModuleErrorMsg( defer notes.deinit(gpa); for (module_err_msg.notes) |module_note| { - const note_src_loc = module_note.src_loc.upgrade(mod); + const note_src_loc = module_note.src_loc.upgrade(zcu); const source = try 
note_src_loc.file_scope.getSource(gpa); const span = try note_src_loc.span(gpa); const loc = std.zig.findLineColumn(source.bytes, span.main); @@ -3488,13 +3478,13 @@ pub fn performAllTheWork( comp: *Compilation, main_progress_node: std.Progress.Node, ) JobError!void { - defer if (comp.zcu) |mod| { - mod.sema_prog_node.end(); - mod.sema_prog_node = std.Progress.Node.none; - mod.codegen_prog_node.end(); - mod.codegen_prog_node = std.Progress.Node.none; + defer if (comp.zcu) |zcu| { + zcu.sema_prog_node.end(); + zcu.sema_prog_node = std.Progress.Node.none; + zcu.codegen_prog_node.end(); + zcu.codegen_prog_node = std.Progress.Node.none; - mod.generation += 1; + zcu.generation += 1; }; try comp.performAllTheWorkInner(main_progress_node); if (!InternPool.single_threaded) if (comp.codegen_work.job_error) |job_error| return job_error; diff --git a/src/Sema.zig b/src/Sema.zig index 1250a06a9c..d4a6d03878 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2559,10 +2559,9 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Zcu.ErrorMsg const zcu = sema.pt.zcu; if (build_options.enable_debug_extensions and zcu.comp.debug_compile_errors) { - var all_references: ?std.AutoHashMapUnmanaged(AnalUnit, ?Zcu.ResolvedReference) = null; var wip_errors: std.zig.ErrorBundle.Wip = undefined; wip_errors.init(gpa) catch @panic("out of memory"); - Compilation.addModuleErrorMsg(zcu, &wip_errors, err_msg.*, &all_references) catch @panic("out of memory"); + Compilation.addModuleErrorMsg(zcu, &wip_errors, err_msg.*) catch @panic("out of memory"); std.debug.print("compile error during Sema:\n", .{}); var error_bundle = wip_errors.toOwnedBundle("") catch @panic("out of memory"); error_bundle.renderToStdErr(.{ .ttyconf = .no_color }); diff --git a/src/Zcu.zig b/src/Zcu.zig index 641c7fc880..5e9aee0771 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -173,6 +173,10 @@ retryable_failures: std.ArrayListUnmanaged(AnalUnit) = .empty, /// These are the modules which we initially queue for 
analysis in `Compilation.update`. /// `resolveReferences` will use these as the root of its reachability traversal. analysis_roots: std.BoundedArray(*Package.Module, 3) = .{}, +/// This is the cached result of `Zcu.resolveReferences`. It is computed on-demand, and +/// reset to `null` when any semantic analysis occurs (since this invalidates the data). +/// Allocated into `gpa`. +resolved_references: ?std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = null, stage1_flags: packed struct { have_winmain: bool = false, @@ -2192,6 +2196,8 @@ pub fn deinit(zcu: *Zcu) void { zcu.all_type_references.deinit(gpa); zcu.free_type_references.deinit(gpa); + if (zcu.resolved_references) |*r| r.deinit(gpa); + zcu.intern_pool.deinit(gpa); } @@ -2760,6 +2766,8 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void { pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void { const gpa = zcu.gpa; + zcu.clearCachedResolvedReferences(); + unit_refs: { const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse break :unit_refs; var idx = kv.value; @@ -2792,6 +2800,8 @@ pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void { pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit, ref_src: LazySrcLoc) Allocator.Error!void { const gpa = zcu.gpa; + zcu.clearCachedResolvedReferences(); + try zcu.reference_table.ensureUnusedCapacity(gpa, 1); const ref_idx = zcu.free_references.popOrNull() orelse idx: { @@ -2815,6 +2825,8 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit pub fn addTypeReference(zcu: *Zcu, src_unit: AnalUnit, referenced_type: InternPool.Index, ref_src: LazySrcLoc) Allocator.Error!void { const gpa = zcu.gpa; + zcu.clearCachedResolvedReferences(); + try zcu.type_reference_table.ensureUnusedCapacity(gpa, 1); const ref_idx = zcu.free_type_references.popOrNull() orelse idx: { @@ -2835,6 +2847,11 @@ pub fn addTypeReference(zcu: *Zcu, src_unit: AnalUnit, referenced_type: InternPo 
gop.value_ptr.* = @intCast(ref_idx); } +fn clearCachedResolvedReferences(zcu: *Zcu) void { + if (zcu.resolved_references) |*r| r.deinit(zcu.gpa); + zcu.resolved_references = null; +} + pub fn errorSetBits(zcu: *const Zcu) u16 { if (zcu.error_limit == 0) return 0; return @as(u16, std.math.log2_int(ErrorInt, zcu.error_limit)) + 1; @@ -3138,7 +3155,15 @@ pub const ResolvedReference = struct { /// Returns a mapping from an `AnalUnit` to where it is referenced. /// If the value is `null`, the `AnalUnit` is a root of analysis. /// If an `AnalUnit` is not in the returned map, it is unreferenced. -pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) { +/// The returned hashmap is owned by the `Zcu`, so should not be freed by the caller. +/// This hashmap is cached, so repeated calls to this function are cheap. +pub fn resolveReferences(zcu: *Zcu) !*const std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) { + if (zcu.resolved_references == null) { + zcu.resolved_references = try zcu.resolveReferencesInner(); + } + return &zcu.resolved_references.?; +} +fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) { const gpa = zcu.gpa; const comp = zcu.comp; const ip = &zcu.intern_pool; From 22539783ad15be3028ed07983eb8e23910171f11 Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 16 Oct 2024 15:56:48 +0100 Subject: [PATCH 3/4] incremental: introduce `file` dependencies to handle AstGen failures The re-analysis here is a little coarse; it'd be nice in the future to have a way for an AstGen failure to preserve *all* analysis which depends on the last success, and just hide the compile errors which depend on it somehow. But I'm not sure how we'd achieve that, so this works fine for now. 
Resolves: #21223 --- src/Compilation.zig | 4 ++++ src/InternPool.zig | 12 ++++++++++ src/Package/Module.zig | 1 + src/Sema.zig | 8 ++----- src/Zcu.zig | 21 +++++++++++------ src/Zcu/PerThread.zig | 36 +++++++++++++++++++++-------- src/main.zig | 3 +++ test/incremental/fix_astgen_failure | 35 ++++++++++++++++++++++++++++ 8 files changed, 97 insertions(+), 23 deletions(-) create mode 100644 test/incremental/fix_astgen_failure diff --git a/src/Compilation.zig b/src/Compilation.zig index a61bc215e1..c8ade90438 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2901,6 +2901,7 @@ pub fn makeBinFileWritable(comp: *Compilation) !void { const Header = extern struct { intern_pool: extern struct { thread_count: u32, + file_deps_len: u32, src_hash_deps_len: u32, nav_val_deps_len: u32, namespace_deps_len: u32, @@ -2943,6 +2944,7 @@ pub fn saveState(comp: *Compilation) !void { const header: Header = .{ .intern_pool = .{ .thread_count = @intCast(ip.locals.len), + .file_deps_len = @intCast(ip.file_deps.count()), .src_hash_deps_len = @intCast(ip.src_hash_deps.count()), .nav_val_deps_len = @intCast(ip.nav_val_deps.count()), .namespace_deps_len = @intCast(ip.namespace_deps.count()), @@ -2969,6 +2971,8 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs, mem.asBytes(&header)); addBuf(&bufs, mem.sliceAsBytes(pt_headers.items)); + addBuf(&bufs, mem.sliceAsBytes(ip.file_deps.keys())); + addBuf(&bufs, mem.sliceAsBytes(ip.file_deps.values())); addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.keys())); addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.values())); addBuf(&bufs, mem.sliceAsBytes(ip.nav_val_deps.keys())); diff --git a/src/InternPool.zig b/src/InternPool.zig index f41f780e0a..4315855d83 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -17,6 +17,13 @@ tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32), /// Cached shift amount to put a `tid` in the top bits of a 32-bit value. 
tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32), +/// Dependencies on whether an entire file gets past AstGen. +/// These are triggered by `@import`, so that: +/// * if a file initially fails AstGen, triggering a transitive failure, when a future update +/// causes it to succeed AstGen, the `@import` is re-analyzed, allowing analysis to proceed +/// * if a file initially succeeds AstGen, but a future update causes the file to fail it, +/// the `@import` is re-analyzed, registering a transitive failure +file_deps: std.AutoArrayHashMapUnmanaged(FileIndex, DepEntry.Index), /// Dependencies on the source code hash associated with a ZIR instruction. /// * For a `declaration`, this is the entire declaration body. /// * For a `struct_decl`, `union_decl`, etc, this is the source of the fields (but not declarations). @@ -70,6 +77,7 @@ pub const empty: InternPool = .{ .tid_shift_30 = if (single_threaded) 0 else 31, .tid_shift_31 = if (single_threaded) 0 else 31, .tid_shift_32 = if (single_threaded) 0 else 31, + .file_deps = .empty, .src_hash_deps = .empty, .interned_deps = .empty, @@ -656,6 +664,7 @@ pub const Nav = struct { }; pub const Dependee = union(enum) { + file: FileIndex, src_hash: TrackedInst.Index, nav_val: Nav.Index, interned: Index, @@ -704,6 +713,7 @@ pub const DependencyIterator = struct { pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyIterator { const first_entry = switch (dependee) { + .file => |x| ip.file_deps.get(x), .src_hash => |x| ip.src_hash_deps.get(x), .nav_val => |x| ip.nav_val_deps.get(x), .interned => |x| ip.interned_deps.get(x), @@ -740,6 +750,7 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend const new_index: DepEntry.Index = switch (dependee) { inline else => |dependee_payload, tag| new_index: { const gop = try switch (tag) { + .file => ip.file_deps, .src_hash => ip.src_hash_deps, .nav_val => ip.nav_val_deps, .interned => ip.interned_deps, 
@@ -6268,6 +6279,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { } pub fn deinit(ip: *InternPool, gpa: Allocator) void { + ip.file_deps.deinit(gpa); ip.src_hash_deps.deinit(gpa); ip.nav_val_deps.deinit(gpa); ip.interned_deps.deinit(gpa); diff --git a/src/Package/Module.zig b/src/Package/Module.zig index 02d9921016..0a3d754e7a 100644 --- a/src/Package/Module.zig +++ b/src/Package/Module.zig @@ -454,6 +454,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module { .tree = undefined, .zir = undefined, .status = .never_loaded, + .prev_status = .never_loaded, .mod = new, }; break :b new; diff --git a/src/Sema.zig b/src/Sema.zig index d4a6d03878..0341c3a8d8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6024,9 +6024,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr pt.astGenFile(result.file, path_digest) catch |err| return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)}); - // TODO: register some kind of dependency on the file. - // That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to - // trigger re-analysis later. + try sema.declareDependency(.{ .file = result.file_index }); try pt.ensureFileAnalyzed(result.file_index); const ty = zcu.fileRootType(result.file_index); try sema.declareDependency(.{ .interned = ty }); @@ -14347,9 +14345,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) }); }, }; - // TODO: register some kind of dependency on the file. - // That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to - // trigger re-analysis later. 
+ try sema.declareDependency(.{ .file = result.file_index }); try pt.ensureFileAnalyzed(result.file_index); const ty = zcu.fileRootType(result.file_index); try sema.declareDependency(.{ .interned = ty }); diff --git a/src/Zcu.zig b/src/Zcu.zig index 5e9aee0771..eeff80ee30 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -424,13 +424,8 @@ pub const Namespace = struct { }; pub const File = struct { - status: enum { - never_loaded, - retryable_failure, - parse_failure, - astgen_failure, - success_zir, - }, + status: Status, + prev_status: Status, source_loaded: bool, tree_loaded: bool, zir_loaded: bool, @@ -458,6 +453,14 @@ pub const File = struct { /// successful, this field is unloaded. prev_zir: ?*Zir = null, + pub const Status = enum { + never_loaded, + retryable_failure, + parse_failure, + astgen_failure, + success_zir, + }; + /// A single reference to a file. pub const Reference = union(enum) { /// The file is imported directly (i.e. not as a package) with @import. @@ -3474,6 +3477,10 @@ fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, com const zcu = data.zcu; const ip = &zcu.intern_pool; switch (data.dependee) { + .file => |file| { + const file_path = zcu.fileByIndex(file).sub_file_path; + return writer.print("file('{s}')", .{file_path}); + }, .src_hash => |ti| { const info = ti.resolveFull(ip) orelse { return writer.writeAll("inst()"); diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 8cc72dc4d9..32928e17e1 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -179,10 +179,10 @@ pub fn astGenFile( .inode = header.stat_inode, .mtime = header.stat_mtime, }; + file.prev_status = file.status; file.status = .success_zir; log.debug("AstGen cached success: {s}", .{file.sub_file_path}); - // TODO don't report compile errors until Sema @importFile if (file.zir.hasCompileErrors()) { { comp.mutex.lock(); @@ -258,6 +258,7 @@ pub fn astGenFile( // Any potential AST errors are converted to ZIR errors here. 
file.zir = try AstGen.generate(gpa, file.tree); file.zir_loaded = true; + file.prev_status = file.status; file.status = .success_zir; log.debug("AstGen fresh success: {s}", .{file.sub_file_path}); @@ -350,6 +351,9 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { defer cleanupUpdatedFiles(gpa, &updated_files); for (zcu.import_table.values()) |file_index| { const file = zcu.fileByIndex(file_index); + if (file.prev_status != file.status and file.prev_status != .never_loaded) { + try zcu.markDependeeOutdated(.not_marked_po, .{ .file = file_index }); + } const old_zir = file.prev_zir orelse continue; const new_zir = file.zir; const gop = try updated_files.getOrPut(gpa, file_index); @@ -551,11 +555,13 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu const cau_outdated = zcu.outdated.swapRemove(anal_unit) or zcu.potentially_outdated.swapRemove(anal_unit); + const prev_failed = zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit); + if (cau_outdated) { _ = zcu.outdated_ready.swapRemove(anal_unit); } else { // We can trust the current information about this `Cau`. - if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { + if (prev_failed) { return error.AnalysisFail; } // If it wasn't failed and wasn't marked outdated, then either... @@ -578,9 +584,13 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu // Since it does not, this must be a transitive failure. try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); } - // We treat errors as up-to-date, since those uses would just trigger a transitive error. - // The exception is types, since type declarations may require re-analysis if the type, e.g. its captures, changed. 
- const outdated = cau.owner.unwrap() == .type; + // We consider this `Cau` to be outdated if: + // * Previous analysis succeeded; in this case, we need to re-analyze dependants to ensure + // they hit a transitive error here, rather than reporting a different error later (which + // may now be invalid). + // * The `Cau` is a type; in this case, the declaration site may require re-analysis to + // construct a valid type. + const outdated = !prev_failed or cau.owner.unwrap() == .type; break :res .{ .{ .invalidate_decl_val = outdated, .invalidate_decl_ref = outdated, @@ -597,10 +607,9 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu ); zcu.retryable_failures.appendAssumeCapacity(anal_unit); zcu.failed_analysis.putAssumeCapacityNoClobber(anal_unit, msg); - // We treat errors as up-to-date, since those uses would just trigger a transitive error break :res .{ .{ - .invalidate_decl_val = false, - .invalidate_decl_ref = false, + .invalidate_decl_val = true, + .invalidate_decl_ref = true, }, true }; }, }; @@ -707,11 +716,13 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter const func_outdated = zcu.outdated.swapRemove(anal_unit) or zcu.potentially_outdated.swapRemove(anal_unit); + const prev_failed = zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit); + if (func_outdated) { _ = zcu.outdated_ready.swapRemove(anal_unit); } else { // We can trust the current information about this function. - if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) { + if (prev_failed) { return error.AnalysisFail; } switch (func.analysisUnordered(ip).state) { @@ -730,7 +741,10 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter // Since it does not, this must be a transitive failure. 
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {}); } - break :res .{ false, true }; // we treat errors as up-to-date IES, since those uses would just trigger a transitive error + // We consider the IES to be outdated if the function previously succeeded analysis; in this case, + // we need to re-analyze dependants to ensure they hit a transitive error here, rather than reporting + // a different error later (which may now be invalid). + break :res .{ !prev_failed, true }; }, error.OutOfMemory => return error.OutOfMemory, // TODO: graceful handling like `ensureCauAnalyzed` }; @@ -1445,6 +1459,7 @@ pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult { .tree = undefined, .zir = undefined, .status = .never_loaded, + .prev_status = .never_loaded, .mod = mod, }; @@ -1555,6 +1570,7 @@ pub fn importFile( .tree = undefined, .zir = undefined, .status = .never_loaded, + .prev_status = .never_loaded, .mod = mod, }; diff --git a/src/main.zig b/src/main.zig index 0ebb09fb3c..75a273b560 100644 --- a/src/main.zig +++ b/src/main.zig @@ -6118,6 +6118,7 @@ fn cmdAstCheck( var file: Zcu.File = .{ .status = .never_loaded, + .prev_status = .never_loaded, .source_loaded = false, .tree_loaded = false, .zir_loaded = false, @@ -6441,6 +6442,7 @@ fn cmdDumpZir( var file: Zcu.File = .{ .status = .never_loaded, + .prev_status = .never_loaded, .source_loaded = false, .tree_loaded = false, .zir_loaded = true, @@ -6508,6 +6510,7 @@ fn cmdChangelist( var file: Zcu.File = .{ .status = .never_loaded, + .prev_status = .never_loaded, .source_loaded = false, .tree_loaded = false, .zir_loaded = false, diff --git a/test/incremental/fix_astgen_failure b/test/incremental/fix_astgen_failure new file mode 100644 index 0000000000..2298bc5248 --- /dev/null +++ b/test/incremental/fix_astgen_failure @@ -0,0 +1,35 @@ +#target=x86_64-linux-selfhosted +#target=x86_64-linux-cbe +#target=x86_64-windows-cbe +#update=initial version with error +#file=main.zig +pub fn main() !void { + try 
@import("foo.zig").hello(); +} +#file=foo.zig +pub fn hello() !void { + try std.io.getStdOut().writeAll("Hello, World!\n"); +} +#expect_error=ignored +#update=fix the error +#file=foo.zig +const std = @import("std"); +pub fn hello() !void { + try std.io.getStdOut().writeAll("Hello, World!\n"); +} +#expect_stdout="Hello, World!\n" +#update=add new error +#file=foo.zig +const std = @import("std"); +pub fn hello() !void { + try std.io.getStdOut().writeAll(hello_str); +} +#expect_error=ignored +#update=fix the new error +#file=foo.zig +const std = @import("std"); +const hello_str = "Hello, World! Again!\n"; +pub fn hello() !void { + try std.io.getStdOut().writeAll(hello_str); +} +#expect_stdout="Hello, World! Again!\n" From 3ba4f86198267fee0f6a8d50496908794e743cbd Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 16 Oct 2024 16:29:47 +0100 Subject: [PATCH 4/4] incremental: disable failing test The previous commit exposed a linker bug. --- test/incremental/add_decl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/incremental/add_decl b/test/incremental/add_decl index 6b3a0dad84..a2863d6bde 100644 --- a/test/incremental/add_decl +++ b/test/incremental/add_decl @@ -1,4 +1,5 @@ -#target=x86_64-linux-selfhosted +// Disabled on self-hosted due to linker crash +// #target=x86_64-linux-selfhosted #target=x86_64-linux-cbe #target=x86_64-windows-cbe #update=initial version