diff --git a/src/Compilation.zig b/src/Compilation.zig
index 4c693ffb28..3c97cd3145 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -31,6 +31,7 @@ const clangMain = @import("main.zig").clangMain;
 const Zcu = @import("Zcu.zig");
 /// Deprecated; use `Zcu`.
 const Module = Zcu;
+const Sema = @import("Sema.zig");
 const InternPool = @import("InternPool.zig");
 const Cache = std.Build.Cache;
 const c_codegen = @import("codegen/c.zig");
@@ -2939,9 +2940,12 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
         });
     }
     if (comp.module) |zcu| {
+        var all_references = try zcu.resolveReferences();
+        defer all_references.deinit(gpa);
+
         for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| {
             if (error_msg) |msg| {
-                try addModuleErrorMsg(zcu, &bundle, msg.*);
+                try addModuleErrorMsg(zcu, &bundle, msg.*, &all_references);
             } else {
                 // Must be ZIR errors. Note that this may include AST errors.
                 // addZirErrorMessages asserts that the tree is loaded.
@@ -2950,7 +2954,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
             }
         }
         for (zcu.failed_embed_files.values()) |error_msg| {
-            try addModuleErrorMsg(zcu, &bundle, error_msg.*);
+            try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references);
         }
         for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| {
             const decl_index = switch (anal_unit.unwrap()) {
@@ -2962,7 +2966,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
             // We'll try again once parsing succeeds.
             if (!zcu.declFileScope(decl_index).okToReportErrors()) continue;
 
-            try addModuleErrorMsg(zcu, &bundle, error_msg.*);
+            try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references);
             if (zcu.cimport_errors.get(anal_unit)) |errors| {
                 for (errors.getMessages()) |err_msg_index| {
                     const err_msg = errors.getErrorMessage(err_msg_index);
@@ -2989,12 +2993,12 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
                 // Skip errors for Decls within files that had a parse failure.
                 // We'll try again once parsing succeeds.
                 if (zcu.declFileScope(decl_index).okToReportErrors()) {
-                    try addModuleErrorMsg(zcu, &bundle, error_msg.*);
+                    try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references);
                 }
             }
         }
         for (zcu.failed_exports.values()) |value| {
-            try addModuleErrorMsg(zcu, &bundle, value.*);
+            try addModuleErrorMsg(zcu, &bundle, value.*, &all_references);
         }
 
         const actual_error_count = zcu.global_error_set.entries.len - 1;
@@ -3051,6 +3055,9 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
 
     if (comp.module) |zcu| {
         if (bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) {
+            var all_references = try zcu.resolveReferences();
+            defer all_references.deinit(gpa);
+
             const values = zcu.compile_log_sources.values();
             // First one will be the error; subsequent ones will be notes.
             const src_loc = values[0].src().upgrade(zcu);
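The shape of this change: `resolveReferences` builds the unit-to-referencer map once per reporting pass, and every `addModuleErrorMsg` call shares it by const pointer, instead of each `ErrorMsg` carrying a pre-rendered trace. A minimal sketch of that resolve-once/share pattern, using toy `u32` stand-ins for `AnalUnit` and `Zcu.ResolvedReference` (the names here are illustrative, not compiler API):

```zig
const std = @import("std");

// Toy stand-ins for the compiler's `AnalUnit` and `Zcu.ResolvedReference`.
const Unit = u32;
const Resolved = struct { referencer: Unit };

fn reportAll(map: *const std.AutoHashMapUnmanaged(Unit, Resolved), units: []const Unit) void {
    // Every "error message" consults the same shared map, mirroring how each
    // addModuleErrorMsg call above receives `&all_references`.
    for (units) |u| {
        if (map.get(u)) |r| std.debug.print("unit {d} referenced by {d}\n", .{ u, r.referencer });
    }
}

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    // Resolve once per reporting pass...
    var all_references: std.AutoHashMapUnmanaged(Unit, Resolved) = .{};
    defer all_references.deinit(gpa);
    try all_references.put(gpa, 2, .{ .referencer = 1 });

    // ...then share by const pointer across every report.
    reportAll(&all_references, &.{2});
}
```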
@@ -3068,7 +3075,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
                 };
             }
 
-            try addModuleErrorMsg(zcu, &bundle, err_msg);
+            try addModuleErrorMsg(zcu, &bundle, err_msg, &all_references);
         }
     }
 
@@ -3124,7 +3131,12 @@ pub const ErrorNoteHashContext = struct {
     }
 };
 
-pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void {
+pub fn addModuleErrorMsg(
+    mod: *Module,
+    eb: *ErrorBundle.Wip,
+    module_err_msg: Module.ErrorMsg,
+    all_references: *const std.AutoHashMapUnmanaged(InternPool.AnalUnit, Zcu.ResolvedReference),
+) !void {
     const gpa = eb.gpa;
     const ip = &mod.intern_pool;
     const err_source = module_err_msg.src_loc.file_scope.getSource(gpa) catch |err| {
@@ -3145,39 +3157,49 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod
     var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .{};
     defer ref_traces.deinit(gpa);
 
-    const remaining_references: ?u32 = remaining: {
-        if (mod.comp.reference_trace) |_| {
-            if (module_err_msg.hidden_references > 0) break :remaining module_err_msg.hidden_references;
-        } else {
-            if (module_err_msg.reference_trace.len > 0) break :remaining 0;
-        }
-        break :remaining null;
-    };
-    try ref_traces.ensureTotalCapacityPrecise(gpa, module_err_msg.reference_trace.len +
-        @intFromBool(remaining_references != null));
+    if (module_err_msg.reference_trace_root.unwrap()) |rt_root| {
+        var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .{};
+        defer seen.deinit(gpa);
 
-    for (module_err_msg.reference_trace) |module_reference| {
-        const source = try module_reference.src_loc.file_scope.getSource(gpa);
-        const span = try module_reference.src_loc.span(gpa);
-        const loc = std.zig.findLineColumn(source.bytes, span.main);
-        const rt_file_path = try module_reference.src_loc.file_scope.fullPath(gpa);
-        defer gpa.free(rt_file_path);
-        ref_traces.appendAssumeCapacity(.{
-            .decl_name = try eb.addString(module_reference.decl.toSlice(ip)),
-            .src_loc = try eb.addSourceLocation(.{
-                .src_path = try eb.addString(rt_file_path),
-                .span_start = span.start,
-                .span_main = span.main,
-                .span_end = span.end,
-                .line = @intCast(loc.line),
-                .column = @intCast(loc.column),
-                .source_line = 0,
-            }),
-        });
+        const max_references = mod.comp.reference_trace orelse Sema.default_reference_trace_len;
+
+        var referenced_by = rt_root;
+        while (all_references.get(referenced_by)) |ref| {
+            const gop = try seen.getOrPut(gpa, ref.referencer);
+            if (gop.found_existing) break;
+            if (ref_traces.items.len < max_references) {
+                const src = ref.src.upgrade(mod);
+                const source = try src.file_scope.getSource(gpa);
+                const span = try src.span(gpa);
+                const loc = std.zig.findLineColumn(source.bytes, span.main);
+                const rt_file_path = try src.file_scope.fullPath(gpa);
+                defer gpa.free(rt_file_path);
+                const name = switch (ref.referencer.unwrap()) {
+                    .decl => |d| mod.declPtr(d).name,
+                    .func => |f| mod.funcOwnerDeclPtr(f).name,
+                };
+                try ref_traces.append(gpa, .{
+                    .decl_name = try eb.addString(name.toSlice(ip)),
+                    .src_loc = try eb.addSourceLocation(.{
+                        .src_path = try eb.addString(rt_file_path),
+                        .span_start = span.start,
+                        .span_main = span.main,
+                        .span_end = span.end,
+                        .line = @intCast(loc.line),
+                        .column = @intCast(loc.column),
+                        .source_line = 0,
+                    }),
+                });
+            }
+            referenced_by = ref.referencer;
+        }
+
+        if (seen.count() > ref_traces.items.len) {
+            try ref_traces.append(gpa, .{
+                .decl_name = @intCast(seen.count() - ref_traces.items.len),
+                .src_loc = .none,
+            });
+        }
     }
 
-    if (remaining_references) |remaining| ref_traces.appendAssumeCapacity(
-        .{ .decl_name = remaining, .src_loc = .none },
-    );
 
     const src_loc = try eb.addSourceLocation(.{
         .src_path = try eb.addString(file_path),
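With the Compilation.zig side in place, the trace is rendered at report time by walking `all_references` backwards from the failing unit: a `seen` set terminates reference cycles, output is capped at `max_references`, and a final sentinel entry carries the hidden count. A self-contained sketch of that walk with toy integer units (the names and printing are illustrative, not the compiler's):

```zig
const std = @import("std");

const Unit = u32; // toy stand-in for `AnalUnit`
const Ref = struct { referencer: Unit };

/// Walk `refs` from `root` toward the analysis roots, printing at most
/// `max` entries and summarizing the rest, like the loop above.
fn printTrace(
    gpa: std.mem.Allocator,
    refs: *const std.AutoHashMapUnmanaged(Unit, Ref),
    root: Unit,
    max: usize,
) !void {
    var seen: std.AutoHashMapUnmanaged(Unit, void) = .{};
    defer seen.deinit(gpa);

    var printed: usize = 0;
    var referenced_by = root;
    while (refs.get(referenced_by)) |ref| {
        const gop = try seen.getOrPut(gpa, ref.referencer);
        if (gop.found_existing) break; // cycle: stop walking
        if (printed < max) {
            std.debug.print("referenced by unit {d}\n", .{ref.referencer});
            printed += 1;
        }
        referenced_by = ref.referencer;
    }
    if (seen.count() > printed) {
        std.debug.print("{d} more references hidden\n", .{seen.count() - printed});
    }
}

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var refs: std.AutoHashMapUnmanaged(Unit, Ref) = .{};
    defer refs.deinit(gpa);
    try refs.put(gpa, 3, .{ .referencer = 2 }); // 3 is referenced by 2
    try refs.put(gpa, 2, .{ .referencer = 1 }); // 2 is referenced by 1

    try printTrace(gpa, &refs, 3, 1); // prints one entry, hides one
}
```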
diff --git a/src/Sema.zig b/src/Sema.zig
index 4337ce8926..105fedbec7 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -121,6 +121,11 @@ comptime_allocs: std.ArrayListUnmanaged(ComptimeAlloc) = .{},
 /// these are flushed to `Zcu.single_exports` or `Zcu.multi_exports`.
 exports: std.ArrayListUnmanaged(Zcu.Export) = .{},
 
+/// All references registered so far by this `Sema`. This is a temporary duplicate
+/// of data stored in `Zcu.all_references`. It exists to avoid adding references to
+/// a given `AnalUnit` multiple times.
+references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{},
+
 const MaybeComptimeAlloc = struct {
     /// The runtime index of the `alloc` instruction.
     runtime_index: Value.RuntimeIndex,
@@ -2472,79 +2477,38 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error
     @setCold(true);
     const gpa = sema.gpa;
     const mod = sema.mod;
-
-    ref: {
-        errdefer err_msg.destroy(gpa);
-
-        if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) {
-            var wip_errors: std.zig.ErrorBundle.Wip = undefined;
-            wip_errors.init(gpa) catch unreachable;
-            Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*) catch unreachable;
-            std.debug.print("compile error during Sema:\n", .{});
-            var error_bundle = wip_errors.toOwnedBundle("") catch unreachable;
-            error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
-            crash_report.compilerPanic("unexpected compile error occurred", null, null);
-        }
-
-        try mod.failed_analysis.ensureUnusedCapacity(gpa, 1);
-        try mod.failed_files.ensureUnusedCapacity(gpa, 1);
-
-        if (block) |start_block| {
-            var block_it = start_block;
-            while (block_it.inlining) |inlining| {
-                try sema.errNote(
-                    inlining.call_src,
-                    err_msg,
-                    "called from here",
-                    .{},
-                );
-                block_it = inlining.call_block;
-            }
-
-            const max_references = refs: {
-                if (mod.comp.reference_trace) |num| break :refs num;
-                // Do not add multiple traces without explicit request.
-                if (mod.failed_analysis.count() > 0) break :ref;
-                break :refs default_reference_trace_len;
-            };
-
-            var referenced_by = if (sema.owner_func_index != .none)
-                mod.funcOwnerDeclIndex(sema.owner_func_index)
-            else
-                sema.owner_decl_index;
-            var reference_stack = std.ArrayList(Module.ErrorMsg.Trace).init(gpa);
-            defer reference_stack.deinit();
-
-            // Avoid infinite loops.
-            var seen = std.AutoHashMap(InternPool.DeclIndex, void).init(gpa);
-            defer seen.deinit();
-
-            while (mod.reference_table.get(referenced_by)) |ref| {
-                const gop = try seen.getOrPut(ref.referencer);
-                if (gop.found_existing) break;
-                if (reference_stack.items.len < max_references) {
-                    const decl = mod.declPtr(ref.referencer);
-                    try reference_stack.append(.{
-                        .decl = decl.name,
-                        .src_loc = ref.src.upgrade(mod),
-                    });
-                }
-                referenced_by = ref.referencer;
-            }
-            err_msg.reference_trace = try reference_stack.toOwnedSlice();
-            err_msg.hidden_references = @intCast(seen.count() -| max_references);
-        }
-    }
     const ip = &mod.intern_pool;
-    if (sema.owner_func_index != .none) {
-        ip.funcAnalysis(sema.owner_func_index).state = .sema_failure;
-    } else {
-        sema.owner_decl.analysis = .sema_failure;
+
+    if (build_options.enable_debug_extensions and mod.comp.debug_compile_errors) {
+        var all_references = mod.resolveReferences() catch @panic("out of memory");
+        var wip_errors: std.zig.ErrorBundle.Wip = undefined;
+        wip_errors.init(gpa) catch @panic("out of memory");
+        Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*, &all_references) catch unreachable;
+        std.debug.print("compile error during Sema:\n", .{});
+        var error_bundle = wip_errors.toOwnedBundle("") catch unreachable;
+        error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
+        crash_report.compilerPanic("unexpected compile error occurred", null, null);
     }
-    if (sema.func_index != .none) {
-        ip.funcAnalysis(sema.func_index).state = .sema_failure;
+
+    if (block) |start_block| {
+        var block_it = start_block;
+        while (block_it.inlining) |inlining| {
+            try sema.errNote(
+                inlining.call_src,
+                err_msg,
+                "called from here",
+                .{},
+            );
+            block_it = inlining.call_block;
+        }
     }
-    const gop = mod.failed_analysis.getOrPutAssumeCapacity(sema.ownerUnit());
+
+    const use_ref_trace = if (mod.comp.reference_trace) |n| n > 0 else mod.failed_analysis.count() == 0;
+    if (use_ref_trace) {
+        err_msg.reference_trace_root = sema.ownerUnit().toOptional();
+    }
+
+    const gop = try mod.failed_analysis.getOrPut(gpa, sema.ownerUnit());
     if (gop.found_existing) {
         // If there are multiple errors for the same Decl, prefer the first one added.
         sema.err = null;
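One behavioral detail of the rewritten `failWithOwnedErrorMsg`: `failed_analysis` records at most one error per analysis unit via `getOrPut`, preferring whichever error was added first. A small standalone sketch of that keep-first idiom (toy types and data, not the compiler's):

```zig
const std = @import("std");

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    // Toy: unit index -> first error message recorded for it.
    var failed: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{};
    defer failed.deinit(gpa);

    const reports = [_]struct { unit: u32, msg: []const u8 }{
        .{ .unit = 7, .msg = "first error" },
        .{ .unit = 7, .msg = "second error" }, // dropped: unit 7 already failed
    };
    for (reports) |r| {
        const gop = try failed.getOrPut(gpa, r.unit);
        // Like `mod.failed_analysis` above: if an entry exists, keep the first error.
        if (!gop.found_existing) gop.value_ptr.* = r.msg;
    }
    std.debug.print("unit 7: {s}\n", .{failed.get(7).?}); // "first error"
}
```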
@@ -2553,6 +2517,17 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Module.Error
         sema.err = err_msg;
         gop.value_ptr.* = err_msg;
     }
+
+    if (sema.owner_func_index != .none) {
+        ip.funcAnalysis(sema.owner_func_index).state = .sema_failure;
+    } else {
+        sema.owner_decl.analysis = .sema_failure;
+    }
+
+    if (sema.func_index != .none) {
+        ip.funcAnalysis(sema.func_index).state = .sema_failure;
+    }
+
     return error.AnalysisFail;
 }
 
@@ -4235,6 +4210,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
     if (mod.intern_pool.isFuncBody(val)) {
         const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
         if (try sema.fnHasRuntimeBits(ty)) {
+            try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = val }));
             try mod.ensureFuncBodyAnalysisQueued(val);
         }
     }
@@ -6395,6 +6371,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     } else try sema.lookupIdentifier(block, operand_src, decl_name);
     const options = try sema.resolveExportOptions(block, options_src, extra.options);
     {
+        try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = decl_index }));
         try sema.ensureDeclAnalyzed(decl_index);
         const exported_decl = mod.declPtr(decl_index);
         if (exported_decl.val.getFunction(mod)) |function| {
@@ -6446,6 +6423,7 @@ pub fn analyzeExport(
 
     if (options.linkage == .internal)
         return;
 
+    try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = exported_decl_index }));
     try sema.ensureDeclAnalyzed(exported_decl_index);
     const exported_decl = mod.declPtr(exported_decl_index);
     const export_ty = exported_decl.typeOf(mod);
@@ -6468,7 +6446,7 @@ pub fn analyzeExport(
         return sema.fail(block, src, "export target cannot be extern", .{});
     }
 
-    try sema.maybeQueueFuncBodyAnalysis(exported_decl_index);
+    try sema.maybeQueueFuncBodyAnalysis(src, exported_decl_index);
 
     try sema.exports.append(gpa, .{
         .opts = options,
@@ -6699,8 +6677,7 @@ fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         .no_embedded_nulls,
     );
     const decl_index = try sema.lookupIdentifier(block, src, decl_name);
-    try sema.addReferencedBy(src, decl_index);
-    return sema.analyzeDeclRef(decl_index);
+    return sema.analyzeDeclRef(src, decl_index);
 }
 
 fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -7903,6 +7880,7 @@ fn analyzeCall(
 
     if (try sema.resolveValue(func)) |func_val| {
         if (mod.intern_pool.isFuncBody(func_val.toIntern())) {
+            try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = func_val.toIntern() }));
             try mod.ensureFuncBodyAnalysisQueued(func_val.toIntern());
         }
     }
@@ -8339,8 +8317,6 @@ fn instantiateGenericCall(
     const callee = mod.funcInfo(callee_index);
     callee.branchQuota(ip).* = @max(callee.branchQuota(ip).*, sema.branch_quota);
 
-    try sema.addReferencedBy(call_src, callee.owner_decl);
-
     // Make a runtime call to the new function, making sure to omit the comptime args.
     const func_ty = Type.fromInterned(callee.ty);
     const func_ty_info = mod.typeToFunc(func_ty).?;
@@ -8366,6 +8342,7 @@ fn instantiateGenericCall(
         ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
     }
 
+    try sema.addReferenceEntry(call_src, AnalUnit.wrap(.{ .func = callee_index }));
     try mod.ensureFuncBodyAnalysisQueued(callee_index);
 
     try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len +
         runtime_args.items.len);
@@ -17479,7 +17456,7 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
         .@"comptime" => |index| return Air.internedToRef(index),
         .runtime => |index| index,
         .decl_val => |decl_index| return sema.analyzeDeclVal(block, src, decl_index),
-        .decl_ref => |decl_index| return sema.analyzeDeclRef(decl_index),
+        .decl_ref => |decl_index| return sema.analyzeDeclRef(src, decl_index),
     };
 
     // The comptime case is handled already above. Runtime case below.
@@ -27673,7 +27650,6 @@ fn fieldCallBind(
             const decl_idx = (try sema.namespaceLookup(block, src, namespace, field_name)) orelse
                 break :found_decl null;
-            try sema.addReferencedBy(src, decl_idx);
             const decl_val = try sema.analyzeDeclVal(block, src, decl_idx);
             const decl_type = sema.typeOf(decl_val);
             if (mod.typeToFunc(decl_type)) |func_type| f: {
@@ -27829,8 +27805,7 @@ fn namespaceLookupRef(
     decl_name: InternPool.NullTerminatedString,
 ) CompileError!?Air.Inst.Ref {
     const decl = (try sema.namespaceLookup(block, src, opt_namespace, decl_name)) orelse return null;
-    try sema.addReferencedBy(src, decl);
-    return try sema.analyzeDeclRef(decl);
+    return try sema.analyzeDeclRef(src, decl);
 }
 
 fn namespaceLookupVal(
@@ -28968,7 +28943,7 @@ fn coerceExtra(
             if (inst_ty.zigTypeTag(zcu) == .Fn) {
                 const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined);
                 const fn_decl = fn_val.pointerDecl(zcu).?;
-                const inst_as_ptr = try sema.analyzeDeclRef(fn_decl);
+                const inst_as_ptr = try sema.analyzeDeclRef(inst_src, fn_decl);
                 return sema.coerce(block, dest_ty, inst_as_ptr, inst_src);
             }
 
@@ -30521,7 +30496,7 @@ fn coerceVarArgParam(
         .Fn => fn_ptr: {
             const fn_val = try sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inst, undefined);
             const fn_decl = fn_val.pointerDecl(mod).?;
-            break :fn_ptr try sema.analyzeDeclRef(fn_decl);
+            break :fn_ptr try sema.analyzeDeclRef(inst_src, fn_decl);
         },
         .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
        .Float => float: {
@@ -31748,11 +31723,10 @@ fn analyzeDeclVal(
     src: LazySrcLoc,
     decl_index: InternPool.DeclIndex,
 ) CompileError!Air.Inst.Ref {
-    try sema.addReferencedBy(src, decl_index);
     if (sema.decl_val_table.get(decl_index)) |result| {
         return result;
     }
-    const decl_ref = try sema.analyzeDeclRefInner(decl_index, false);
+    const decl_ref = try sema.analyzeDeclRefInner(src, decl_index, false);
     const result = try sema.analyzeLoad(block, src, decl_ref, src);
     if (result.toInterned() != null) {
         if (!block.is_typeof) {
@@ -31762,18 +31736,18 @@ fn analyzeDeclVal(
     return result;
 }
 
-fn addReferencedBy(
+fn addReferenceEntry(
     sema: *Sema,
     src: LazySrcLoc,
-    decl_index: InternPool.DeclIndex,
+    referenced_unit: AnalUnit,
 ) !void {
     if (sema.mod.comp.reference_trace == 0) return;
-    try sema.mod.reference_table.put(sema.gpa, decl_index, .{
-        // TODO: this can make the reference trace suboptimal. This will be fixed
-        // once the reference table is reworked for incremental compilation.
-        .referencer = sema.owner_decl_index,
-        .src = src,
-    });
+    const gop = try sema.references.getOrPut(sema.gpa, referenced_unit);
+    if (gop.found_existing) return;
+    // TODO: we need to figure out how to model inline calls here.
+    // They aren't references in the analysis sense, but ought to show up in the reference trace!
+    // Would representing inline calls in the reference table cause excessive memory usage?
+    try sema.mod.addUnitReference(sema.ownerUnit(), referenced_unit, src);
 }
 
 pub fn ensureDeclAnalyzed(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!void {
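`addReferenceEntry` makes the per-`Sema` `references` set act as a dedup filter: only the first mention of a given referenced unit is forwarded to `Zcu.addUnitReference`; later mentions are no-ops. A hedged sketch of that filter with toy `u32` units (`recordEdge` is a hypothetical stand-in for the Zcu call):

```zig
const std = @import("std");

const Unit = u32; // toy stand-in for `AnalUnit`

/// Deduplicating filter in the spirit of `Sema.references`: forward each
/// referenced unit at most once per analysis of a unit.
const RefRecorder = struct {
    seen: std.AutoArrayHashMapUnmanaged(Unit, void) = .{},

    fn addReference(self: *RefRecorder, gpa: std.mem.Allocator, referenced: Unit) !void {
        const gop = try self.seen.getOrPut(gpa, referenced);
        if (gop.found_existing) return; // already recorded during this analysis
        recordEdge(referenced); // stands in for `Zcu.addUnitReference`
    }

    fn recordEdge(referenced: Unit) void {
        std.debug.print("new reference to unit {d}\n", .{referenced});
    }
};

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var rec: RefRecorder = .{};
    defer rec.seen.deinit(gpa);

    try rec.addReference(gpa, 5); // prints
    try rec.addReference(gpa, 5); // deduplicated, silent
}
```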
@@ -31823,16 +31797,17 @@ fn optRefValue(sema: *Sema, opt_val: ?Value) !Value {
     } })));
 }
 
-fn analyzeDeclRef(sema: *Sema, decl_index: InternPool.DeclIndex) CompileError!Air.Inst.Ref {
-    return sema.analyzeDeclRefInner(decl_index, true);
+fn analyzeDeclRef(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex) CompileError!Air.Inst.Ref {
+    return sema.analyzeDeclRefInner(src, decl_index, true);
 }
 
 /// Analyze a reference to the decl at the given index. Ensures the underlying decl is analyzed, but
 /// only triggers analysis for function bodies if `analyze_fn_body` is true. If it's possible for a
 /// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps
 /// this function with `analyze_fn_body` set to true.
-fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn_body: bool) CompileError!Air.Inst.Ref {
+fn analyzeDeclRefInner(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex, analyze_fn_body: bool) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
+    try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .decl = decl_index }));
     try sema.ensureDeclAnalyzed(decl_index);
 
     const decl_val = try mod.declPtr(decl_index).valueOrFail();
@@ -31853,7 +31828,7 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn
         },
     });
     if (analyze_fn_body) {
-        try sema.maybeQueueFuncBodyAnalysis(decl_index);
+        try sema.maybeQueueFuncBodyAnalysis(src, decl_index);
     }
     return Air.internedToRef((try mod.intern(.{ .ptr = .{
         .ty = ptr_ty.toIntern(),
@@ -31862,12 +31837,13 @@ fn analyzeDeclRefInner(sema: *Sema, decl_index: InternPool.DeclIndex, analyze_fn
     } })));
 }
 
-fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: InternPool.DeclIndex) !void {
+fn maybeQueueFuncBodyAnalysis(sema: *Sema, src: LazySrcLoc, decl_index: InternPool.DeclIndex) !void {
     const mod = sema.mod;
     const decl = mod.declPtr(decl_index);
     const decl_val = try decl.valueOrFail();
     if (!mod.intern_pool.isFuncBody(decl_val.toIntern())) return;
     if (!try sema.fnHasRuntimeBits(decl_val.typeOf(mod))) return;
+    try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = decl_val.toIntern() }));
     try mod.ensureFuncBodyAnalysisQueued(decl_val.toIntern());
 }
 
@@ -31882,8 +31858,8 @@ fn analyzeRef(
 
     if (try sema.resolveValue(operand)) |val| {
         switch (mod.intern_pool.indexToKey(val.toIntern())) {
-            .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl),
-            .func => |func| return sema.analyzeDeclRef(func.owner_decl),
+            .extern_func => |extern_func| return sema.analyzeDeclRef(src, extern_func.decl),
+            .func => |func| return sema.analyzeDeclRef(src, func.owner_decl),
             else => return anonDeclRef(sema, val.toIntern()),
         }
     }
@@ -35834,6 +35810,7 @@ fn resolveInferredErrorSet(
     }
     // In this case we are dealing with the actual InferredErrorSet object that
     // corresponds to the function, not one created to track an inline/comptime call.
+    try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .func = func_index }));
     try sema.ensureFuncBodyAnalyzed(func_index);
 }
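The Zcu.zig changes below store the reference graph as an adjacency list: `all_references` holds every edge, `reference_table` maps each referencing unit to the head of its chain, `next == maxInt(u32)` terminates a chain, and `free_references` recycles slots when a unit's references are deleted before re-analysis. A standalone sketch of the same structure, assuming toy `u32` units in place of `AnalUnit`:

```zig
const std = @import("std");

const Unit = u32; // toy stand-in for `AnalUnit`
const nil = std.math.maxInt(u32); // sentinel terminating a reference chain

const Edge = struct {
    referenced: Unit,
    next: u32, // index of the next edge added by the same referencer, or `nil`
};

const Graph = struct {
    heads: std.AutoArrayHashMapUnmanaged(Unit, u32) = .{}, // referencer -> first edge
    edges: std.ArrayListUnmanaged(Edge) = .{},
    free: std.ArrayListUnmanaged(u32) = .{}, // recycled edge slots

    fn addEdge(g: *Graph, gpa: std.mem.Allocator, from: Unit, to: Unit) !void {
        const idx = g.free.popOrNull() orelse blk: {
            _ = try g.edges.addOne(gpa);
            break :blk @as(u32, @intCast(g.edges.items.len - 1));
        };
        const gop = try g.heads.getOrPut(gpa, from);
        g.edges.items[idx] = .{
            .referenced = to,
            .next = if (gop.found_existing) gop.value_ptr.* else nil,
        };
        gop.value_ptr.* = idx; // the new edge becomes the chain head
    }

    /// Drop every edge added by `from`, recycling the slots.
    fn deleteEdges(g: *Graph, gpa: std.mem.Allocator, from: Unit) !void {
        const kv = g.heads.fetchSwapRemove(from) orelse return;
        var idx = kv.value;
        while (idx != nil) {
            try g.free.append(gpa, idx);
            idx = g.edges.items[idx].next;
        }
    }
};

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var g: Graph = .{};
    defer {
        g.heads.deinit(gpa);
        g.edges.deinit(gpa);
        g.free.deinit(gpa);
    }

    try g.addEdge(gpa, 1, 2);
    try g.addEdge(gpa, 1, 3);
    try g.deleteEdges(gpa, 1); // both slots land on the freelist
    try g.addEdge(gpa, 4, 5); // reuses a recycled slot
}
```

Keying the table by the *referencer* is what makes incremental updates cheap: deleting a unit's outgoing edges is one hash lookup plus a chain walk, with no scan over other units.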
diff --git a/src/Zcu.zig b/src/Zcu.zig
index d29d2e4279..4d7508da20 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -179,10 +179,15 @@ test_functions: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
 /// TODO: the key here will be a `Cau.Index`.
 global_assembly: std.AutoArrayHashMapUnmanaged(Decl.Index, []u8) = .{},
 
-reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct {
-    referencer: Decl.Index,
-    src: LazySrcLoc,
-}) = .{},
+/// Key is the `AnalUnit` *performing* the reference. This representation allows
+/// incremental updates to quickly delete references caused by a specific `AnalUnit`.
+/// Value is an index into `all_references` of the first reference triggered by the unit.
+/// The `next` field on the `Reference` forms a linked list of all references
+/// triggered by the key `AnalUnit`.
+reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
+all_references: std.ArrayListUnmanaged(Reference) = .{},
+/// Freelist of indices in `all_references`.
+free_references: std.ArrayListUnmanaged(u32) = .{},
 
 panic_messages: [PanicId.len]Decl.OptionalIndex = .{.none} ** PanicId.len,
 /// The panic function body.
@@ -290,44 +295,14 @@ pub const Export = struct {
     }
 };
 
-const ValueArena = struct {
-    state: std.heap.ArenaAllocator.State,
-    state_acquired: ?*std.heap.ArenaAllocator.State = null,
-
-    /// If this ValueArena replaced an existing one during re-analysis, this is the previous instance
-    prev: ?*ValueArena = null,
-
-    /// Returns an allocator backed by either promoting `state`, or by the existing ArenaAllocator
-    /// that has already promoted `state`. `out_arena_allocator` provides storage for the initial promotion,
-    /// and must live until the matching call to release().
-    pub fn acquire(self: *ValueArena, child_allocator: Allocator, out_arena_allocator: *std.heap.ArenaAllocator) Allocator {
-        if (self.state_acquired) |state_acquired| {
-            return @as(*std.heap.ArenaAllocator, @fieldParentPtr("state", state_acquired)).allocator();
-        }
-
-        out_arena_allocator.* = self.state.promote(child_allocator);
-        self.state_acquired = &out_arena_allocator.state;
-        return out_arena_allocator.allocator();
-    }
-
-    /// Releases the allocator acquired by `acquire`. `arena_allocator` must match the one passed to `acquire`.
-    pub fn release(self: *ValueArena, arena_allocator: *std.heap.ArenaAllocator) void {
-        if (@as(*std.heap.ArenaAllocator, @fieldParentPtr("state", self.state_acquired.?)) == arena_allocator) {
-            self.state = self.state_acquired.?.*;
-            self.state_acquired = null;
-        }
-    }
-
-    pub fn deinit(self: ValueArena, child_allocator: Allocator) void {
-        assert(self.state_acquired == null);
-
-        const prev = self.prev;
-        self.state.promote(child_allocator).deinit();
-
-        if (prev) |p| {
-            p.deinit(child_allocator);
-        }
-    }
+pub const Reference = struct {
+    /// The `AnalUnit` whose semantic analysis was triggered by this reference.
+    referenced: AnalUnit,
+    /// Index into `all_references` of the next `Reference` triggered by the same `AnalUnit`.
+    /// `std.math.maxInt(u32)` is the sentinel.
+    next: u32,
+    /// The source location of the reference.
+    src: LazySrcLoc,
 };
 
 pub const Decl = struct {
@@ -758,7 +733,7 @@ pub const File = struct {
     /// Whether this file is a part of multiple packages. This is an error condition which will be reported after AstGen.
     multi_pkg: bool = false,
     /// List of references to this file, used for multi-package errors.
-    references: std.ArrayListUnmanaged(Reference) = .{},
+    references: std.ArrayListUnmanaged(File.Reference) = .{},
 
     /// The hash of the path to this file, used to store `InternPool.TrackedInst`.
     path_digest: Cache.BinDigest,
@@ -925,7 +900,7 @@ pub const File = struct {
     }
 
     /// Add a reference to this file during AstGen.
-    pub fn addReference(file: *File, mod: Module, ref: Reference) !void {
+    pub fn addReference(file: *File, mod: Module, ref: File.Reference) !void {
         // Don't add the same module root twice. Note that since we always add module roots at the
         // front of the references array (see below), this loop is actually O(1) on valid code.
         if (ref == .root) {
@@ -1002,8 +977,7 @@ pub const ErrorMsg = struct {
     src_loc: SrcLoc,
     msg: []const u8,
     notes: []ErrorMsg = &.{},
-    reference_trace: []Trace = &.{},
-    hidden_references: u32 = 0,
+    reference_trace_root: AnalUnit.Optional = .none,
 
     pub const Trace = struct {
         decl: InternPool.NullTerminatedString,
@@ -1048,7 +1022,6 @@ pub const ErrorMsg = struct {
         }
         gpa.free(err_msg.notes);
         gpa.free(err_msg.msg);
-        gpa.free(err_msg.reference_trace);
         err_msg.* = undefined;
     }
 };
@@ -2520,6 +2493,8 @@ pub fn deinit(zcu: *Zcu) void {
     zcu.global_assembly.deinit(gpa);
 
     zcu.reference_table.deinit(gpa);
+    zcu.all_references.deinit(gpa);
+    zcu.free_references.deinit(gpa);
 
     {
         var it = zcu.intern_pool.allocated_namespaces.iterator(0);
@@ -3462,7 +3437,8 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
         // The exports this Decl performs will be re-discovered, so we remove them here
         // prior to re-analysis.
         if (build_options.only_c) unreachable;
-        mod.deleteUnitExports(AnalUnit.wrap(.{ .decl = decl_index }));
+        mod.deleteUnitExports(decl_as_depender);
+        mod.deleteUnitReferences(decl_as_depender);
     }
 
     const sema_result: SemaDeclResult = blk: {
@@ -3591,7 +3567,8 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.In
     if (was_outdated) {
         if (build_options.only_c) unreachable;
         _ = zcu.outdated_ready.swapRemove(func_as_depender);
-        zcu.deleteUnitExports(AnalUnit.wrap(.{ .func = func_index }));
+        zcu.deleteUnitExports(func_as_depender);
+        zcu.deleteUnitReferences(func_as_depender);
     }
 
     switch (func.analysis(ip).state) {
@@ -4967,6 +4944,47 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
     }
 }
 
+/// Delete all references in `reference_table` which are caused by this `AnalUnit`.
+/// Re-analysis of the `AnalUnit` will cause appropriate references to be recreated.
+fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void {
+    const gpa = zcu.gpa;
+
+    const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse return;
+    var idx = kv.value;
+
+    while (idx != std.math.maxInt(u32)) {
+        zcu.free_references.append(gpa, idx) catch {
+            // This space will be reused eventually, so we need not propagate this error.
+            // Just stop recycling the remaining entries; their slots are reclaimed when
+            // `all_references` is freed in `deinit`.
+ return; + }; + idx = zcu.all_references.items[idx].next; + } +} + +pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit, ref_src: LazySrcLoc) Allocator.Error!void { + const gpa = zcu.gpa; + + try zcu.reference_table.ensureUnusedCapacity(gpa, 1); + + const ref_idx = zcu.free_references.popOrNull() orelse idx: { + _ = try zcu.all_references.addOne(gpa); + break :idx zcu.all_references.items.len - 1; + }; + + errdefer comptime unreachable; + + const gop = zcu.reference_table.getOrPutAssumeCapacity(src_unit); + + zcu.all_references.items[ref_idx] = .{ + .referenced = referenced_unit, + .next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32), + .src = ref_src, + }; + + gop.value_ptr.* = @intCast(ref_idx); +} + pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocator) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); @@ -6447,3 +6465,36 @@ pub fn structPackedFieldBitOffset( } unreachable; // index out of bounds } + +pub const ResolvedReference = struct { + referencer: AnalUnit, + src: LazySrcLoc, +}; + +/// Returns a mapping from an `AnalUnit` to where it is referenced. +/// TODO: in future, this must be adapted to traverse from roots of analysis. That way, we can +/// use the returned map to determine which units have become unreferenced in an incremental update. +pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ResolvedReference) { + const gpa = zcu.gpa; + + var result: std.AutoHashMapUnmanaged(AnalUnit, ResolvedReference) = .{}; + errdefer result.deinit(gpa); + + // This is not a sufficient size, but a lower bound. + try result.ensureTotalCapacity(gpa, @intCast(zcu.reference_table.count())); + + for (zcu.reference_table.keys(), zcu.reference_table.values()) |referencer, first_ref_idx| { + assert(first_ref_idx != std.math.maxInt(u32)); + var ref_idx = first_ref_idx; + while (ref_idx != std.math.maxInt(u32)) { + const ref = zcu.all_references.items[ref_idx]; + const gop = try result.getOrPut(gpa, ref.referenced); + if (!gop.found_existing) { + gop.value_ptr.* = .{ .referencer = referencer, .src = ref.src }; + } + ref_idx = ref.next; + } + } + + return result; +}