Mirror of https://github.com/ziglang/zig.git (synced 2026-02-14 13:30:45 +00:00)
Merge pull request #19273 from mlugg/incremental-some-more
compiler: more progress on incremental
Commit 39459e78ad
@ -13496,6 +13496,15 @@ fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.
|
||||
const node_tags = tree.nodes.items(.tag);
|
||||
const main_tokens = tree.nodes.items(.main_token);
|
||||
const token_tags = tree.tokens.items(.tag);
|
||||
|
||||
// We don't have shadowing for test names, so we just track those for duplicate reporting locally.
|
||||
var named_tests: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.Node.Index) = .{};
|
||||
var decltests: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.Node.Index) = .{};
|
||||
defer {
|
||||
named_tests.deinit(gpa);
|
||||
decltests.deinit(gpa);
|
||||
}
|
||||
|
||||
var decl_count: u32 = 0;
|
||||
for (members) |member_node| {
|
||||
const name_token = switch (node_tags[member_node]) {
|
||||
@ -13525,11 +13534,50 @@ fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.
|
||||
break :blk ident;
|
||||
},
|
||||
|
||||
.@"comptime", .@"usingnamespace", .test_decl => {
|
||||
.@"comptime", .@"usingnamespace" => {
|
||||
decl_count += 1;
|
||||
continue;
|
||||
},
|
||||
|
||||
.test_decl => {
|
||||
decl_count += 1;
|
||||
// We don't want shadowing detection here, and test names work a bit differently, so
|
||||
// we must do the redeclaration detection ourselves.
|
||||
const test_name_token = main_tokens[member_node] + 1;
|
||||
switch (token_tags[test_name_token]) {
|
||||
else => {}, // unnamed test
|
||||
.string_literal => {
|
||||
const name = try astgen.strLitAsString(test_name_token);
|
||||
const gop = try named_tests.getOrPut(gpa, name.index);
|
||||
if (gop.found_existing) {
|
||||
const name_slice = astgen.string_bytes.items[@intFromEnum(name.index)..][0..name.len];
|
||||
const name_duped = try gpa.dupe(u8, name_slice);
|
||||
defer gpa.free(name_duped);
|
||||
try astgen.appendErrorNodeNotes(member_node, "duplicate test name '{s}'", .{name_duped}, &.{
|
||||
try astgen.errNoteNode(gop.value_ptr.*, "other test here", .{}),
|
||||
});
|
||||
} else {
|
||||
gop.value_ptr.* = member_node;
|
||||
}
|
||||
},
|
||||
.identifier => {
|
||||
const name = try astgen.identAsString(test_name_token);
|
||||
const gop = try decltests.getOrPut(gpa, name);
|
||||
if (gop.found_existing) {
|
||||
const name_slice = mem.span(astgen.nullTerminatedString(name));
|
||||
const name_duped = try gpa.dupe(u8, name_slice);
|
||||
defer gpa.free(name_duped);
|
||||
try astgen.appendErrorNodeNotes(member_node, "duplicate decltest '{s}'", .{name_duped}, &.{
|
||||
try astgen.errNoteNode(gop.value_ptr.*, "other decltest here", .{}),
|
||||
});
|
||||
} else {
|
||||
gop.value_ptr.* = member_node;
|
||||
}
|
||||
},
|
||||
}
|
||||
continue;
|
||||
},
|
||||
|
||||
else => continue,
|
||||
};
|
||||
|
||||
|
||||
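For illustration (this mirrors the updated expected errors in the behavior test at the end of this diff), the new AstGen-level check rejects source such as the following; duplicate decltests of the form `test foo {}` are reported analogously as "duplicate decltest":

test "thingy" {}
test "thingy" {} // error: duplicate test name 'thingy'
                 // note: other test here (pointing at the first declaration)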
@ -67,6 +67,9 @@ src_hash_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index)
|
||||
/// Dependencies on the value of a Decl.
|
||||
/// Value is index into `dep_entries` of the first dependency on this Decl value.
|
||||
decl_val_deps: std.AutoArrayHashMapUnmanaged(DeclIndex, DepEntry.Index) = .{},
|
||||
/// Dependencies on the IES of a runtime function.
|
||||
/// Value is index into `dep_entries` of the first dependency on this function's IES.
|
||||
func_ies_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index) = .{},
|
||||
/// Dependencies on the full set of names in a ZIR namespace.
|
||||
/// Key refers to a `struct_decl`, `union_decl`, etc.
|
||||
/// Value is index into `dep_entries` of the first dependency on this namespace.
|
||||
@ -167,6 +170,7 @@ pub const Depender = enum(u32) {
|
||||
pub const Dependee = union(enum) {
|
||||
src_hash: TrackedInst.Index,
|
||||
decl_val: DeclIndex,
|
||||
func_ies: Index,
|
||||
namespace: TrackedInst.Index,
|
||||
namespace_name: NamespaceNameKey,
|
||||
};
|
||||
@ -212,6 +216,7 @@ pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyI
|
||||
const first_entry = switch (dependee) {
|
||||
.src_hash => |x| ip.src_hash_deps.get(x),
|
||||
.decl_val => |x| ip.decl_val_deps.get(x),
|
||||
.func_ies => |x| ip.func_ies_deps.get(x),
|
||||
.namespace => |x| ip.namespace_deps.get(x),
|
||||
.namespace_name => |x| ip.namespace_name_deps.get(x),
|
||||
} orelse return .{
|
||||
@ -251,6 +256,7 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: Depender, depend
|
||||
const gop = try switch (tag) {
|
||||
.src_hash => ip.src_hash_deps,
|
||||
.decl_val => ip.decl_val_deps,
|
||||
.func_ies => ip.func_ies_deps,
|
||||
.namespace => ip.namespace_deps,
|
||||
.namespace_name => ip.namespace_name_deps,
|
||||
}.getOrPut(gpa, dependee_payload);
|
||||
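All of these dependency maps (these hunks are from InternPool) share one shape: the map value is only the index of the first `DepEntry` for that dependee, and the entries themselves live in one shared `dep_entries` array, chained through a next link. A minimal standalone Zig sketch of that idea, with invented names and types (an illustration of the shape, not the compiler's actual layout):

const std = @import("std");

const DepEntry = struct {
    depender: u32,
    next: ?u32, // index of the next entry for the same dependee, if any
};

const DepGraph = struct {
    dep_entries: std.ArrayListUnmanaged(DepEntry) = .{},
    // dependee -> index of the first entry in `dep_entries`
    first_dep: std.AutoArrayHashMapUnmanaged(u32, u32) = .{},

    fn addDependency(g: *DepGraph, gpa: std.mem.Allocator, depender: u32, dependee: u32) !void {
        const gop = try g.first_dep.getOrPut(gpa, dependee);
        const prev_head: ?u32 = if (gop.found_existing) gop.value_ptr.* else null;
        try g.dep_entries.append(gpa, .{ .depender = depender, .next = prev_head });
        gop.value_ptr.* = @intCast(g.dep_entries.items.len - 1);
    }

    fn deinit(g: *DepGraph, gpa: std.mem.Allocator) void {
        g.dep_entries.deinit(gpa);
        g.first_dep.deinit(gpa);
    }
};

Enumerating the dependers of a dependee is then just a walk of the `next` chain starting from `first_dep.get(dependee)`, which is what `dependencyIterator` above does for the real maps.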
@ -4324,6 +4330,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
|
||||
|
||||
ip.src_hash_deps.deinit(gpa);
|
||||
ip.decl_val_deps.deinit(gpa);
|
||||
ip.func_ies_deps.deinit(gpa);
|
||||
ip.namespace_deps.deinit(gpa);
|
||||
ip.namespace_name_deps.deinit(gpa);
|
||||
|
||||
@ -7103,7 +7110,7 @@ pub fn getGeneratedTagEnumType(ip: *InternPool, gpa: Allocator, ini: GeneratedTa
|
||||
return @enumFromInt(gop.index);
|
||||
}
|
||||
|
||||
pub const OpaqueTypeIni = struct {
|
||||
pub const OpaqueTypeInit = struct {
|
||||
has_namespace: bool,
|
||||
key: union(enum) {
|
||||
declared: struct {
|
||||
@ -7117,7 +7124,7 @@ pub const OpaqueTypeIni = struct {
|
||||
},
|
||||
};
|
||||
|
||||
pub fn getOpaqueType(ip: *InternPool, gpa: Allocator, ini: OpaqueTypeIni) Allocator.Error!WipNamespaceType.Result {
|
||||
pub fn getOpaqueType(ip: *InternPool, gpa: Allocator, ini: OpaqueTypeInit) Allocator.Error!WipNamespaceType.Result {
|
||||
const adapter: KeyAdapter = .{ .intern_pool = ip };
|
||||
const gop = try ip.map.getOrPutAdapted(gpa, Key{ .opaque_type = switch (ini.key) {
|
||||
.declared => |d| .{ .declared = .{
|
||||
@ -9216,7 +9223,7 @@ pub fn funcTypeParamsLen(ip: *const InternPool, i: Index) u32 {
|
||||
return ip.extra.items[start + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?];
|
||||
}
|
||||
|
||||
fn unwrapCoercedFunc(ip: *const InternPool, i: Index) Index {
|
||||
pub fn unwrapCoercedFunc(ip: *const InternPool, i: Index) Index {
|
||||
const tags = ip.items.items(.tag);
|
||||
return switch (tags[@intFromEnum(i)]) {
|
||||
.func_coerced => {
|
||||
|
||||
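A hedged aside on why `unwrapCoercedFunc` exists (and is now public): coercing a function to a compatible but different function type can be represented in the pool as a `func_coerced` wrapper around the original function, so code that cares about the underlying function, such as the orphaned-function check added to `ensureFuncBodyAnalyzed` below, must unwrap it first. The snippet below is one plausible trigger for such a wrapper; treat the exact code path as an assumption rather than something this diff states:

fn f() !void {} // inferred error set

// Coercing &f to a pointer whose return type carries a wider, explicit error
// set is allowed; internally the function value may be stored as a
// `func_coerced` wrapper, and `unwrapCoercedFunc` recovers the underlying
// function's index.
const p: *const fn () anyerror!void = &f;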
src/Module.zig (519 changed lines)
@ -362,7 +362,7 @@ pub const Decl = struct {
|
||||
src_line: u32,
|
||||
/// Index of the ZIR `declaration` instruction from which this `Decl` was created.
|
||||
/// For the root `Decl` of a `File` and legacy anonymous decls, this is `.none`.
|
||||
zir_decl_index: Zir.Inst.OptionalIndex,
|
||||
zir_decl_index: InternPool.TrackedInst.Index.Optional,
|
||||
|
||||
/// Represents the "shallow" analysis status. For example, for decls that are functions,
|
||||
/// the function type is analyzed with this set to `in_progress`, however, the semantic
|
||||
@ -428,16 +428,9 @@ pub const Decl = struct {
|
||||
const Index = InternPool.DeclIndex;
|
||||
const OptionalIndex = InternPool.OptionalDeclIndex;
|
||||
|
||||
/// Asserts that `zir_decl_index` is not `.none`.
|
||||
fn getDeclaration(decl: Decl, zir: Zir) Zir.Inst.Declaration {
|
||||
const zir_index = decl.zir_decl_index.unwrap().?;
|
||||
const pl_node = zir.instructions.items(.data)[@intFromEnum(zir_index)].pl_node;
|
||||
return zir.extraData(Zir.Inst.Declaration, pl_node.payload_index).data;
|
||||
}
|
||||
|
||||
pub fn zirBodies(decl: Decl, zcu: *Zcu) Zir.Inst.Declaration.Bodies {
|
||||
const zir = decl.getFileScope(zcu).zir;
|
||||
const zir_index = decl.zir_decl_index.unwrap().?;
|
||||
const zir_index = decl.zir_decl_index.unwrap().?.resolve(&zcu.intern_pool);
|
||||
const pl_node = zir.instructions.items(.data)[@intFromEnum(zir_index)].pl_node;
|
||||
const extra = zir.extraData(Zir.Inst.Declaration, pl_node.payload_index);
|
||||
return extra.data.getBodies(@intCast(extra.end), zir);
|
||||
@ -769,14 +762,14 @@ pub const Namespace = struct {
|
||||
zcu: *Zcu,
|
||||
|
||||
pub fn hash(ctx: @This(), decl_index: Decl.Index) u32 {
|
||||
const decl = ctx.module.declPtr(decl_index);
|
||||
const decl = ctx.zcu.declPtr(decl_index);
|
||||
return std.hash.uint32(@intFromEnum(decl.name));
|
||||
}
|
||||
|
||||
pub fn eql(ctx: @This(), a_decl_index: Decl.Index, b_decl_index: Decl.Index, b_index: usize) bool {
|
||||
_ = b_index;
|
||||
const a_decl = ctx.module.declPtr(a_decl_index);
|
||||
const b_decl = ctx.module.declPtr(b_decl_index);
|
||||
const a_decl = ctx.zcu.declPtr(a_decl_index);
|
||||
const b_decl = ctx.zcu.declPtr(b_decl_index);
|
||||
return a_decl.name == b_decl.name;
|
||||
}
|
||||
};
|
||||
@ -2662,16 +2655,15 @@ pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void {
|
||||
if (opt_po_entry) |e| e.value else 0,
|
||||
);
|
||||
log.debug("outdated: {}", .{depender});
|
||||
if (opt_po_entry != null) {
|
||||
if (opt_po_entry == null) {
|
||||
// This is a new entry with no PO dependencies.
|
||||
try zcu.outdated_ready.put(zcu.gpa, depender, {});
|
||||
}
|
||||
// If this is a Decl and was not previously PO, we must recursively
|
||||
// mark dependencies on its tyval as PO.
|
||||
if (opt_po_entry == null) switch (depender.unwrap()) {
|
||||
.decl => |decl_index| try zcu.markDeclDependenciesPotentiallyOutdated(decl_index),
|
||||
.func => {},
|
||||
};
|
||||
if (opt_po_entry == null) {
|
||||
try zcu.markTransitiveDependersPotentiallyOutdated(depender);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2701,15 +2693,19 @@ fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
|
||||
// as no longer PO.
|
||||
switch (depender.unwrap()) {
|
||||
.decl => |decl_index| try zcu.markPoDependeeUpToDate(.{ .decl_val = decl_index }),
|
||||
.func => {},
|
||||
.func => |func_index| try zcu.markPoDependeeUpToDate(.{ .func_ies = func_index }),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Given a Decl which is newly outdated or PO, mark all dependers which depend
|
||||
/// on its tyval as PO.
|
||||
fn markDeclDependenciesPotentiallyOutdated(zcu: *Zcu, decl_index: Decl.Index) !void {
|
||||
var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index });
|
||||
/// Given a Depender which is newly outdated or PO, mark all Dependers which may
|
||||
/// in turn be PO, due to a dependency on the original Depender's tyval or IES.
|
||||
fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: InternPool.Depender) !void {
|
||||
var it = zcu.intern_pool.dependencyIterator(switch (maybe_outdated.unwrap()) {
|
||||
.decl => |decl_index| .{ .decl_val = decl_index }, // TODO: also `decl_ref` deps when introduced
|
||||
.func => |func_index| .{ .func_ies = func_index },
|
||||
});
|
||||
|
||||
while (it.next()) |po| {
|
||||
if (zcu.outdated.getPtr(po)) |po_dep_count| {
|
||||
// This dependency is already outdated, but it now has one more PO
|
||||
@ -2726,14 +2722,9 @@ fn markDeclDependenciesPotentiallyOutdated(zcu: *Zcu, decl_index: Decl.Index) !v
|
||||
continue;
|
||||
}
|
||||
try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
|
||||
// If this is a Decl, we must recursively mark dependencies
|
||||
// on its tyval as PO.
|
||||
switch (po.unwrap()) {
|
||||
.decl => |po_decl| try zcu.markDeclDependenciesPotentiallyOutdated(po_decl),
|
||||
.func => {},
|
||||
}
|
||||
// This Depender was not already PO, so we must recursively mark its dependers as also PO.
|
||||
try zcu.markTransitiveDependersPotentiallyOutdated(po);
|
||||
}
|
||||
// TODO: repeat the above for `decl_ty` dependencies when they are introduced
|
||||
}
|
||||
|
||||
pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?InternPool.Depender {
|
||||
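The recursion above is the core of incremental invalidation: when a dependee becomes outdated or potentially outdated (PO), each depender gains one PO count per affected dependee, and a depender recurses onward only the first time it becomes PO. A compressed toy model of just that counting scheme in Zig (invented names; it also collapses the separate `outdated` and `potentially_outdated` maps into a single count):

const std = @import("std");

fn markTransitivePo(
    gpa: std.mem.Allocator,
    // dependee -> slice of its dependers (a stand-in for `dependencyIterator`)
    dependers_of: *const std.AutoHashMapUnmanaged(u32, []const u32),
    // depender -> number of outdated/PO dependencies it is waiting on
    po_count: *std.AutoHashMapUnmanaged(u32, u32),
    dependee: u32,
) std.mem.Allocator.Error!void {
    const dependers = dependers_of.get(dependee) orelse return;
    for (dependers) |depender| {
        const gop = try po_count.getOrPut(gpa, depender);
        if (gop.found_existing) {
            // Already PO/outdated: it simply gains one more PO dependency.
            gop.value_ptr.* += 1;
        } else {
            // Newly PO: record the first PO dependency, then recurse so that
            // anything depending on this depender also becomes PO.
            gop.value_ptr.* = 1;
            try markTransitivePo(gpa, dependers_of, po_count, depender);
        }
    }
}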
@ -2859,10 +2850,7 @@ pub fn flushRetryableFailures(zcu: *Zcu) !void {
|
||||
// This Depender was not marked PO, but is now outdated. Mark it as
|
||||
// such, then recursively mark transitive dependencies as PO.
|
||||
try zcu.outdated.put(gpa, depender, 0);
|
||||
switch (depender.unwrap()) {
|
||||
.decl => |decl| try zcu.markDeclDependenciesPotentiallyOutdated(decl),
|
||||
.func => {},
|
||||
}
|
||||
try zcu.markTransitiveDependersPotentiallyOutdated(depender);
|
||||
}
|
||||
zcu.retryable_failures.clearRetainingCapacity();
|
||||
}
|
||||
@ -2994,6 +2982,15 @@ pub fn mapOldZirToNew(
|
||||
}
|
||||
}
|
||||
|
||||
/// Like `ensureDeclAnalyzed`, but the Decl is a file's root Decl.
|
||||
pub fn ensureFileAnalyzed(zcu: *Zcu, file: *File) SemaError!void {
|
||||
if (file.root_decl.unwrap()) |existing_root| {
|
||||
return zcu.ensureDeclAnalyzed(existing_root);
|
||||
} else {
|
||||
return zcu.semaFile(file);
|
||||
}
|
||||
}
|
||||
|
||||
/// This ensures that the Decl will have an up-to-date Type and Value populated.
|
||||
/// However the resolution status of the Type may not be fully resolved.
|
||||
/// For example an inferred error set is not resolved until after `analyzeFnBody`.
|
||||
@ -3004,6 +3001,11 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
|
||||
|
||||
const decl = mod.declPtr(decl_index);
|
||||
|
||||
log.debug("ensureDeclAnalyzed '{d}' (name '{}')", .{
|
||||
@intFromEnum(decl_index),
|
||||
decl.name.fmt(&mod.intern_pool),
|
||||
});
|
||||
|
||||
// Determine whether or not this Decl is outdated, i.e. requires re-analysis
|
||||
// even if `complete`. If a Decl is PO, we pessimistically assume that it
|
||||
// *does* require re-analysis, to ensure that the Decl is definitely
|
||||
@ -3015,13 +3017,15 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
|
||||
// dependencies are all up-to-date.
|
||||
|
||||
const decl_as_depender = InternPool.Depender.wrap(.{ .decl = decl_index });
|
||||
const was_outdated = mod.outdated.swapRemove(decl_as_depender) or
|
||||
const decl_was_outdated = mod.outdated.swapRemove(decl_as_depender) or
|
||||
mod.potentially_outdated.swapRemove(decl_as_depender);
|
||||
|
||||
if (was_outdated) {
|
||||
if (decl_was_outdated) {
|
||||
_ = mod.outdated_ready.swapRemove(decl_as_depender);
|
||||
}
|
||||
|
||||
const was_outdated = mod.outdated_file_root.swapRemove(decl_index) or decl_was_outdated;
|
||||
|
||||
switch (decl.analysis) {
|
||||
.in_progress => unreachable,
|
||||
|
||||
@ -3057,6 +3061,14 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
|
||||
};
|
||||
}
|
||||
|
||||
if (mod.declIsRoot(decl_index)) {
|
||||
const changed = try mod.semaFileUpdate(decl.getFileScope(mod), decl_was_outdated);
|
||||
break :blk .{
|
||||
.invalidate_decl_val = changed,
|
||||
.invalidate_decl_ref = changed,
|
||||
};
|
||||
}
|
||||
|
||||
break :blk mod.semaDecl(decl_index) catch |err| switch (err) {
|
||||
error.AnalysisFail => {
|
||||
if (decl.analysis == .in_progress) {
|
||||
@ -3085,13 +3097,15 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
|
||||
};
|
||||
|
||||
// TODO: we do not yet have separate dependencies for decl values vs types.
|
||||
if (was_outdated) {
|
||||
if (decl_was_outdated) {
|
||||
if (sema_result.invalidate_decl_val or sema_result.invalidate_decl_ref) {
|
||||
log.debug("Decl tv invalidated ('{d}')", .{@intFromEnum(decl_index)});
|
||||
// This dependency was marked as PO, meaning dependees were waiting
|
||||
// on its analysis result, and it has turned out to be outdated.
|
||||
// Update dependees accordingly.
|
||||
try mod.markDependeeOutdated(.{ .decl_val = decl_index });
|
||||
} else {
|
||||
log.debug("Decl tv up-to-date ('{d}')", .{@intFromEnum(decl_index)});
|
||||
// This dependency was previously PO, but turned out to be up-to-date.
|
||||
// We do not need to queue successive analysis.
|
||||
try mod.markPoDependeeUpToDate(.{ .decl_val = decl_index });
|
||||
@ -3099,15 +3113,48 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, func_index: InternPool.Index) SemaError!void {
|
||||
pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, maybe_coerced_func_index: InternPool.Index) SemaError!void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
const gpa = zcu.gpa;
|
||||
const ip = &zcu.intern_pool;
|
||||
const func = zcu.funcInfo(func_index);
|
||||
|
||||
// We only care about the uncoerced function.
|
||||
// We need to do this for the "orphaned function" check below to be valid.
|
||||
const func_index = ip.unwrapCoercedFunc(maybe_coerced_func_index);
|
||||
|
||||
const func = zcu.funcInfo(maybe_coerced_func_index);
|
||||
const decl_index = func.owner_decl;
|
||||
const decl = zcu.declPtr(decl_index);
|
||||
|
||||
log.debug("ensureFuncBodyAnalyzed '{d}' (instance of '{}')", .{
|
||||
@intFromEnum(func_index),
|
||||
decl.name.fmt(ip),
|
||||
});
|
||||
|
||||
// First, our owner decl must be up-to-date. This will always be the case
|
||||
// during the first update, but may not on successive updates if we happen
|
||||
// to get analyzed before our parent decl.
|
||||
try zcu.ensureDeclAnalyzed(decl_index);
|
||||
|
||||
// On an update, it's possible this function changed such that our owner
|
||||
// decl now refers to a different function, making this one orphaned. If
|
||||
// that's the case, we should remove this function from the binary.
|
||||
if (decl.val.ip_index != func_index) {
|
||||
try zcu.markDependeeOutdated(.{ .func_ies = func_index });
|
||||
ip.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .func = func_index }));
|
||||
ip.remove(func_index);
|
||||
@panic("TODO: remove orphaned function from binary");
|
||||
}
|
||||
|
||||
// We'll want to remember what the IES used to be before the update for
|
||||
// dependency invalidation purposes.
|
||||
const old_resolved_ies = if (func.analysis(ip).inferred_error_set)
|
||||
func.resolvedErrorSet(ip).*
|
||||
else
|
||||
.none;
|
||||
|
||||
switch (decl.analysis) {
|
||||
.unreferenced => unreachable,
|
||||
.in_progress => unreachable,
|
||||
@ -3131,7 +3178,7 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, func_index: InternPool.Index) SemaError
|
||||
}
|
||||
|
||||
switch (func.analysis(ip).state) {
|
||||
.success,
|
||||
.success => if (!was_outdated) return,
|
||||
.sema_failure,
|
||||
.dependency_failure,
|
||||
.codegen_failure,
|
||||
@ -3141,7 +3188,10 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, func_index: InternPool.Index) SemaError
|
||||
.inline_only => unreachable, // don't queue work for this
|
||||
}
|
||||
|
||||
const gpa = zcu.gpa;
|
||||
log.debug("analyze and generate fn body '{d}'; reason='{s}'", .{
|
||||
@intFromEnum(func_index),
|
||||
if (was_outdated) "outdated" else "never analyzed",
|
||||
});
|
||||
|
||||
var tmp_arena = std.heap.ArenaAllocator.init(gpa);
|
||||
defer tmp_arena.deinit();
|
||||
@ -3161,6 +3211,20 @@ pub fn ensureFuncBodyAnalyzed(zcu: *Zcu, func_index: InternPool.Index) SemaError
|
||||
};
|
||||
defer air.deinit(gpa);
|
||||
|
||||
const invalidate_ies_deps = i: {
|
||||
if (!was_outdated) break :i false;
|
||||
if (!func.analysis(ip).inferred_error_set) break :i true;
|
||||
const new_resolved_ies = func.resolvedErrorSet(ip).*;
|
||||
break :i new_resolved_ies != old_resolved_ies;
|
||||
};
|
||||
if (invalidate_ies_deps) {
|
||||
log.debug("func IES invalidated ('{d}')", .{@intFromEnum(func_index)});
|
||||
try zcu.markDependeeOutdated(.{ .func_ies = func_index });
|
||||
} else if (was_outdated) {
|
||||
log.debug("func IES up-to-date ('{d}')", .{@intFromEnum(func_index)});
|
||||
try zcu.markPoDependeeUpToDate(.{ .func_ies = func_index });
|
||||
}
|
||||
|
||||
const comp = zcu.comp;
|
||||
|
||||
const dump_air = build_options.enable_debug_extensions and comp.verbose_air;
|
||||
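To make `invalidate_ies_deps` concrete: it fires when a just-re-analyzed function resolves to a different inferred error set than it had on the previous update, or when an outdated function has no inferred error set at all. A hedged before/after illustration, written as two separate functions only so the snippet compiles on its own:

// Previous update: the body of the function looked like this, so its inferred
// error set resolved to error{A}.
fn fBefore() !void {
    return error.A;
}

// This update: the edited body resolves to error{B}. Since the newly resolved
// IES differs from `old_resolved_ies`, dependencies of kind `.func_ies` on
// this function are marked outdated; had it still resolved to error{A}, they
// would instead have been marked up-to-date.
fn fAfter() !void {
    return error.B;
}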
@ -3299,7 +3363,9 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index)
|
||||
/// https://github.com/ziglang/zig/issues/14307
|
||||
pub fn semaPkg(mod: *Module, pkg: *Package.Module) !void {
|
||||
const file = (try mod.importPkg(pkg)).file;
|
||||
return mod.semaFile(file);
|
||||
if (file.root_decl == .none) {
|
||||
return mod.semaFile(file);
|
||||
}
|
||||
}
|
||||
|
||||
fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespace.Index, file: *File) Allocator.Error!InternPool.Index {
|
||||
@ -3366,13 +3432,75 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa
|
||||
return wip_ty.finish(ip, decl_index, namespace_index.toOptional());
|
||||
}
|
||||
|
||||
/// Regardless of the file status, will create a `Decl` so that we
|
||||
/// can track dependencies and re-analyze when the file becomes outdated.
|
||||
pub fn semaFile(mod: *Module, file: *File) SemaError!void {
|
||||
/// Re-analyze the root Decl of a file on an incremental update.
|
||||
/// If `type_outdated`, the struct type itself is considered outdated and is
|
||||
/// reconstructed at a new InternPool index. Otherwise, the namespace is just
|
||||
/// re-analyzed. Returns whether the decl's tyval was invalidated.
|
||||
fn semaFileUpdate(zcu: *Zcu, file: *File, type_outdated: bool) SemaError!bool {
|
||||
const decl = zcu.declPtr(file.root_decl.unwrap().?);
|
||||
|
||||
log.debug("semaFileUpdate mod={s} sub_file_path={s} type_outdated={}", .{
|
||||
file.mod.fully_qualified_name,
|
||||
file.sub_file_path,
|
||||
type_outdated,
|
||||
});
|
||||
|
||||
if (file.status != .success_zir) {
|
||||
if (decl.analysis == .file_failure) {
|
||||
return false;
|
||||
} else {
|
||||
decl.analysis = .file_failure;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if (decl.analysis == .file_failure) {
|
||||
// No struct type currently exists. Create one!
|
||||
_ = try zcu.getFileRootStruct(file.root_decl.unwrap().?, decl.src_namespace, file);
|
||||
return true;
|
||||
}
|
||||
|
||||
assert(decl.has_tv);
|
||||
assert(decl.owns_tv);
|
||||
|
||||
if (type_outdated) {
|
||||
// Invalidate the existing type, reusing the decl and namespace.
|
||||
zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.Depender.wrap(.{ .decl = file.root_decl.unwrap().? }));
|
||||
zcu.intern_pool.remove(decl.val.toIntern());
|
||||
decl.val = undefined;
|
||||
_ = try zcu.getFileRootStruct(file.root_decl.unwrap().?, decl.src_namespace, file);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Only the struct's namespace is outdated.
|
||||
// Preserve the type - just scan the namespace again.
|
||||
|
||||
const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
|
||||
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
|
||||
|
||||
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
|
||||
extra_index += @intFromBool(small.has_fields_len);
|
||||
const decls_len = if (small.has_decls_len) blk: {
|
||||
const decls_len = file.zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
break :blk decls_len;
|
||||
} else 0;
|
||||
const decls = file.zir.bodySlice(extra_index, decls_len);
|
||||
|
||||
if (!type_outdated) {
|
||||
try zcu.scanNamespace(decl.src_namespace, decls, decl);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/// Regardless of the file status, will create a `Decl` if none exists so that we can track
|
||||
/// dependencies and re-analyze when the file becomes outdated.
|
||||
fn semaFile(mod: *Module, file: *File) SemaError!void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
if (file.root_decl != .none) return;
|
||||
assert(file.root_decl == .none);
|
||||
|
||||
const gpa = mod.gpa;
|
||||
log.debug("semaFile mod={s} sub_file_path={s}", .{
|
||||
@ -3439,9 +3567,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
|
||||
},
|
||||
.incremental => {},
|
||||
}
|
||||
|
||||
// Since this is our first time analyzing this file, there can be no dependencies on
|
||||
// its root Decl. Thus, we do not need to invalidate any dependencies.
|
||||
}
|
||||
|
||||
const SemaDeclResult = packed struct {
|
||||
@ -3462,16 +3587,16 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
|
||||
return error.AnalysisFail;
|
||||
}
|
||||
|
||||
if (mod.declIsRoot(decl_index)) {
|
||||
// This comes from an `analyze_decl` job on an incremental update where
|
||||
// this file changed.
|
||||
@panic("TODO: update root Decl of modified file");
|
||||
} else if (decl.owns_tv) {
|
||||
// We are re-analyzing an owner Decl (for a function or a namespace type).
|
||||
@panic("TODO: update owner Decl");
|
||||
assert(!mod.declIsRoot(decl_index));
|
||||
|
||||
if (decl.zir_decl_index == .none and decl.owns_tv) {
|
||||
// We are re-analyzing an anonymous owner Decl (for a function or a namespace type).
|
||||
return mod.semaAnonOwnerDecl(decl_index);
|
||||
}
|
||||
|
||||
const decl_inst = decl.zir_decl_index.unwrap().?;
|
||||
log.debug("semaDecl '{d}'", .{@intFromEnum(decl_index)});
|
||||
|
||||
const decl_inst = decl.zir_decl_index.unwrap().?.resolve(ip);
|
||||
|
||||
const gpa = mod.gpa;
|
||||
const zir = decl.getFileScope(mod).zir;
|
||||
@ -3763,6 +3888,42 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
|
||||
return result;
|
||||
}
|
||||
|
||||
fn semaAnonOwnerDecl(zcu: *Zcu, decl_index: Decl.Index) !SemaDeclResult {
|
||||
const decl = zcu.declPtr(decl_index);
|
||||
|
||||
assert(decl.has_tv);
|
||||
assert(decl.owns_tv);
|
||||
|
||||
log.debug("semaAnonOwnerDecl '{d}'", .{@intFromEnum(decl_index)});
|
||||
|
||||
switch (decl.ty.zigTypeTag(zcu)) {
|
||||
.Fn => @panic("TODO: update fn instance"),
|
||||
.Type => {},
|
||||
else => unreachable,
|
||||
}
|
||||
|
||||
// We are the owner Decl of a type, and we were marked as outdated. That means the *structure*
|
||||
// of this type changed; not just its namespace. Therefore, we need a new InternPool index.
|
||||
//
|
||||
// However, as soon as we make that, the context that created us will require re-analysis anyway
|
||||
// (as it depends on this Decl's value), meaning the `struct_decl` (or equivalent) instruction
|
||||
// will be analyzed again. Since Sema already needs to be able to reconstruct types like this,
|
||||
// why should we bother implementing it here too when the Sema logic will be hit right after?
|
||||
//
|
||||
// So instead, let's just mark this Decl as failed - so that any remaining Decls which genuinely
|
||||
// reference it (via `@This`) end up silently erroring too - and we'll let Sema make a new type
|
||||
// with a new Decl.
|
||||
//
|
||||
// Yes, this does mean that any type owner Decl has a constant value for its entire lifetime.
|
||||
zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.Depender.wrap(.{ .decl = decl_index }));
|
||||
zcu.intern_pool.remove(decl.val.toIntern());
|
||||
decl.analysis = .dependency_failure;
|
||||
return .{
|
||||
.invalidate_decl_val = true,
|
||||
.invalidate_decl_ref = true,
|
||||
};
|
||||
}
|
||||
|
||||
pub const ImportFileResult = struct {
|
||||
file: *File,
|
||||
is_new: bool,
|
||||
@ -4083,26 +4244,87 @@ pub fn scanNamespace(
|
||||
const gpa = zcu.gpa;
|
||||
const namespace = zcu.namespacePtr(namespace_index);
|
||||
|
||||
// For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather
|
||||
// than their name. We'll build an efficient mapping now, then discard the current `decls`.
|
||||
var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Decl.Index) = .{};
|
||||
defer existing_by_inst.deinit(gpa);
|
||||
|
||||
try existing_by_inst.ensureTotalCapacity(gpa, @intCast(namespace.decls.count()));
|
||||
|
||||
for (namespace.decls.keys()) |decl_index| {
|
||||
const decl = zcu.declPtr(decl_index);
|
||||
existing_by_inst.putAssumeCapacityNoClobber(decl.zir_decl_index.unwrap().?, decl_index);
|
||||
}
|
||||
|
||||
var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
|
||||
defer seen_decls.deinit(gpa);
|
||||
|
||||
try zcu.comp.work_queue.ensureUnusedCapacity(decls.len);
|
||||
|
||||
namespace.decls.clearRetainingCapacity();
|
||||
try namespace.decls.ensureTotalCapacity(gpa, decls.len);
|
||||
|
||||
namespace.usingnamespace_set.clearRetainingCapacity();
|
||||
|
||||
var scan_decl_iter: ScanDeclIter = .{
|
||||
.zcu = zcu,
|
||||
.namespace_index = namespace_index,
|
||||
.parent_decl = parent_decl,
|
||||
.seen_decls = &seen_decls,
|
||||
.existing_by_inst = &existing_by_inst,
|
||||
.pass = .named,
|
||||
};
|
||||
for (decls) |decl_inst| {
|
||||
try scanDecl(&scan_decl_iter, decl_inst);
|
||||
}
|
||||
scan_decl_iter.pass = .unnamed;
|
||||
for (decls) |decl_inst| {
|
||||
try scanDecl(&scan_decl_iter, decl_inst);
|
||||
}
|
||||
|
||||
if (seen_decls.count() != namespace.decls.count()) {
|
||||
// Do a pass over the namespace contents and remove any decls from the last update
|
||||
// which were removed in this one.
|
||||
var i: usize = 0;
|
||||
while (i < namespace.decls.count()) {
|
||||
const decl_index = namespace.decls.keys()[i];
|
||||
const decl = zcu.declPtr(decl_index);
|
||||
if (!seen_decls.contains(decl.name)) {
|
||||
// We must preserve namespace ordering for @typeInfo.
|
||||
namespace.decls.orderedRemoveAt(i);
|
||||
i -= 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const ScanDeclIter = struct {
|
||||
zcu: *Zcu,
|
||||
namespace_index: Namespace.Index,
|
||||
parent_decl: *Decl,
|
||||
seen_decls: *std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void),
|
||||
existing_by_inst: *const std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, Decl.Index),
|
||||
/// Decl scanning is run in two passes, so that we can detect when a generated
|
||||
/// name would clash with an explicit name and use a different one.
|
||||
pass: enum { named, unnamed },
|
||||
usingnamespace_index: usize = 0,
|
||||
comptime_index: usize = 0,
|
||||
unnamed_test_index: usize = 0,
|
||||
|
||||
fn avoidNameConflict(iter: *ScanDeclIter, comptime fmt: []const u8, args: anytype) !InternPool.NullTerminatedString {
|
||||
const zcu = iter.zcu;
|
||||
const gpa = zcu.gpa;
|
||||
const ip = &zcu.intern_pool;
|
||||
var name = try ip.getOrPutStringFmt(gpa, fmt, args);
|
||||
var gop = try iter.seen_decls.getOrPut(gpa, name);
|
||||
var next_suffix: u32 = 0;
|
||||
while (gop.found_existing) {
|
||||
name = try ip.getOrPutStringFmt(gpa, fmt ++ "_{d}", args ++ .{next_suffix});
|
||||
gop = try iter.seen_decls.getOrPut(gpa, name);
|
||||
next_suffix += 1;
|
||||
}
|
||||
return name;
|
||||
}
|
||||
};
|
||||
|
||||
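The new `comptime_0` behavior test added at the end of this diff exercises exactly this two-pass scheme: the named pass claims the explicit declaration `comptime_0` first, so in the unnamed pass `avoidNameConflict` bumps the generated name of the anonymous `comptime` block (to `comptime_0_0` under the `fmt ++ "_{d}"` scheme above) rather than colliding, and the block is still analyzed:

const comptime_0 = {}; // explicit decl owns the name "comptime_0"

comptime {
    // Without the two passes this block's generated name would collide with
    // the decl above; with them it gets a fresh suffixed name and is still
    // reached during analysis, as the expected error below demonstrates.
    @compileError("should be reached");
}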
fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void {
|
||||
@ -4126,134 +4348,148 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void
|
||||
// Every Decl needs a name.
|
||||
const decl_name: InternPool.NullTerminatedString, const kind: Decl.Kind, const is_named_test: bool = switch (declaration.name) {
|
||||
.@"comptime" => info: {
|
||||
if (iter.pass != .unnamed) return;
|
||||
const i = iter.comptime_index;
|
||||
iter.comptime_index += 1;
|
||||
break :info .{
|
||||
try ip.getOrPutStringFmt(gpa, "comptime_{d}", .{i}),
|
||||
try iter.avoidNameConflict("comptime_{d}", .{i}),
|
||||
.@"comptime",
|
||||
false,
|
||||
};
|
||||
},
|
||||
.@"usingnamespace" => info: {
|
||||
// TODO: this isn't right! These should be considered unnamed. Name conflicts can happen here.
|
||||
// The problem is, we need to preserve the decl ordering for `@typeInfo`.
|
||||
// I'm not bothering to fix this now, since some upcoming changes will change this code significantly anyway.
|
||||
if (iter.pass != .named) return;
|
||||
const i = iter.usingnamespace_index;
|
||||
iter.usingnamespace_index += 1;
|
||||
break :info .{
|
||||
try ip.getOrPutStringFmt(gpa, "usingnamespace_{d}", .{i}),
|
||||
try iter.avoidNameConflict("usingnamespace_{d}", .{i}),
|
||||
.@"usingnamespace",
|
||||
false,
|
||||
};
|
||||
},
|
||||
.unnamed_test => info: {
|
||||
if (iter.pass != .unnamed) return;
|
||||
const i = iter.unnamed_test_index;
|
||||
iter.unnamed_test_index += 1;
|
||||
break :info .{
|
||||
try ip.getOrPutStringFmt(gpa, "test_{d}", .{i}),
|
||||
try iter.avoidNameConflict("test_{d}", .{i}),
|
||||
.@"test",
|
||||
false,
|
||||
};
|
||||
},
|
||||
.decltest => info: {
|
||||
// We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary.
|
||||
if (iter.pass != .unnamed) return;
|
||||
assert(declaration.flags.has_doc_comment);
|
||||
const name = zir.nullTerminatedString(@enumFromInt(zir.extra[extra.end]));
|
||||
break :info .{
|
||||
try ip.getOrPutStringFmt(gpa, "decltest.{s}", .{name}),
|
||||
try iter.avoidNameConflict("decltest.{s}", .{name}),
|
||||
.@"test",
|
||||
true,
|
||||
};
|
||||
},
|
||||
_ => if (declaration.name.isNamedTest(zir)) .{
|
||||
try ip.getOrPutStringFmt(gpa, "test.{s}", .{zir.nullTerminatedString(declaration.name.toString(zir).?)}),
|
||||
.@"test",
|
||||
true,
|
||||
} else .{
|
||||
try ip.getOrPutString(gpa, zir.nullTerminatedString(declaration.name.toString(zir).?)),
|
||||
.named,
|
||||
false,
|
||||
_ => if (declaration.name.isNamedTest(zir)) info: {
|
||||
// We consider these to be unnamed since the decl name can be adjusted to avoid conflicts if necessary.
|
||||
if (iter.pass != .unnamed) return;
|
||||
break :info .{
|
||||
try iter.avoidNameConflict("test.{s}", .{zir.nullTerminatedString(declaration.name.toString(zir).?)}),
|
||||
.@"test",
|
||||
true,
|
||||
};
|
||||
} else info: {
|
||||
if (iter.pass != .named) return;
|
||||
const name = try ip.getOrPutString(gpa, zir.nullTerminatedString(declaration.name.toString(zir).?));
|
||||
try iter.seen_decls.putNoClobber(gpa, name, {});
|
||||
break :info .{
|
||||
name,
|
||||
.named,
|
||||
false,
|
||||
};
|
||||
},
|
||||
};
|
||||
|
||||
if (kind == .@"usingnamespace") try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1);
|
||||
switch (kind) {
|
||||
.@"usingnamespace" => try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1),
|
||||
.@"test" => try zcu.test_functions.ensureUnusedCapacity(gpa, 1),
|
||||
else => {},
|
||||
}
|
||||
|
||||
const tracked_inst = try ip.trackZir(gpa, iter.parent_decl.getFileScope(zcu), decl_inst);
|
||||
|
||||
// We create a Decl for it regardless of analysis status.
|
||||
const gop = try namespace.decls.getOrPutContextAdapted(
|
||||
gpa,
|
||||
decl_name,
|
||||
DeclAdapter{ .zcu = zcu },
|
||||
Namespace.DeclContext{ .zcu = zcu },
|
||||
);
|
||||
const comp = zcu.comp;
|
||||
if (!gop.found_existing) {
|
||||
|
||||
const prev_exported, const decl_index = if (iter.existing_by_inst.get(tracked_inst)) |decl_index| decl_index: {
|
||||
// We need only update this existing Decl.
|
||||
const decl = zcu.declPtr(decl_index);
|
||||
const was_exported = decl.is_exported;
|
||||
assert(decl.kind == kind); // ZIR tracking should preserve this
|
||||
assert(decl.alive);
|
||||
decl.name = decl_name;
|
||||
decl.src_node = decl_node;
|
||||
decl.src_line = line;
|
||||
decl.is_pub = declaration.flags.is_pub;
|
||||
decl.is_exported = declaration.flags.is_export;
|
||||
break :decl_index .{ was_exported, decl_index };
|
||||
} else decl_index: {
|
||||
// Create and set up a new Decl.
|
||||
const new_decl_index = try zcu.allocateNewDecl(namespace_index, decl_node);
|
||||
const new_decl = zcu.declPtr(new_decl_index);
|
||||
new_decl.kind = kind;
|
||||
new_decl.name = decl_name;
|
||||
if (kind == .@"usingnamespace") {
|
||||
namespace.usingnamespace_set.putAssumeCapacity(new_decl_index, declaration.flags.is_pub);
|
||||
}
|
||||
new_decl.src_line = line;
|
||||
gop.key_ptr.* = new_decl_index;
|
||||
// Exported decls, comptime decls, usingnamespace decls, and
|
||||
// test decls if in test mode, get analyzed.
|
||||
const decl_mod = namespace.file_scope.mod;
|
||||
const want_analysis = declaration.flags.is_export or switch (kind) {
|
||||
.anon => unreachable,
|
||||
.@"comptime", .@"usingnamespace" => true,
|
||||
.named => false,
|
||||
.@"test" => a: {
|
||||
if (!comp.config.is_test) break :a false;
|
||||
if (decl_mod != zcu.main_mod) break :a false;
|
||||
if (is_named_test and comp.test_filters.len > 0) {
|
||||
const decl_fqn = ip.stringToSlice(try namespace.fullyQualifiedName(zcu, decl_name));
|
||||
for (comp.test_filters) |test_filter| {
|
||||
if (mem.indexOf(u8, decl_fqn, test_filter)) |_| break;
|
||||
} else break :a false;
|
||||
}
|
||||
try zcu.test_functions.put(gpa, new_decl_index, {});
|
||||
break :a true;
|
||||
},
|
||||
};
|
||||
if (want_analysis) {
|
||||
log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{s}' decl_index={d}", .{
|
||||
namespace.file_scope.sub_file_path, ip.stringToSlice(decl_name), new_decl_index,
|
||||
});
|
||||
comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl_index });
|
||||
}
|
||||
new_decl.is_pub = declaration.flags.is_pub;
|
||||
new_decl.is_exported = declaration.flags.is_export;
|
||||
new_decl.zir_decl_index = decl_inst.toOptional();
|
||||
new_decl.alive = true; // This Decl corresponds to an AST node and therefore always alive.
|
||||
return;
|
||||
}
|
||||
const decl_index = gop.key_ptr.*;
|
||||
const decl = zcu.declPtr(decl_index);
|
||||
if (kind == .@"test") {
|
||||
const src_loc = SrcLoc{
|
||||
.file_scope = decl.getFileScope(zcu),
|
||||
.parent_decl_node = decl.src_node,
|
||||
.lazy = .{ .token_offset = 1 },
|
||||
};
|
||||
const msg = try ErrorMsg.create(gpa, src_loc, "duplicate test name: {}", .{
|
||||
decl_name.fmt(ip),
|
||||
});
|
||||
errdefer msg.destroy(gpa);
|
||||
try zcu.failed_decls.putNoClobber(gpa, decl_index, msg);
|
||||
const other_src_loc = SrcLoc{
|
||||
.file_scope = namespace.file_scope,
|
||||
.parent_decl_node = decl_node,
|
||||
.lazy = .{ .token_offset = 1 },
|
||||
};
|
||||
try zcu.errNoteNonLazy(other_src_loc, msg, "other test here", .{});
|
||||
}
|
||||
// Update the AST node of the decl; even if its contents are unchanged, it may
|
||||
// have been re-ordered.
|
||||
decl.src_node = decl_node;
|
||||
decl.src_line = line;
|
||||
new_decl.zir_decl_index = tracked_inst.toOptional();
|
||||
new_decl.alive = true; // This Decl corresponds to an AST node and is therefore always alive.
|
||||
break :decl_index .{ false, new_decl_index };
|
||||
};
|
||||
|
||||
const decl = zcu.declPtr(decl_index);
|
||||
|
||||
namespace.decls.putAssumeCapacityNoClobberContext(decl_index, {}, .{ .zcu = zcu });
|
||||
|
||||
const comp = zcu.comp;
|
||||
const decl_mod = namespace.file_scope.mod;
|
||||
const want_analysis = declaration.flags.is_export or switch (kind) {
|
||||
.anon => unreachable,
|
||||
.@"comptime" => true,
|
||||
.@"usingnamespace" => a: {
|
||||
namespace.usingnamespace_set.putAssumeCapacityNoClobber(decl_index, declaration.flags.is_pub);
|
||||
break :a true;
|
||||
},
|
||||
.named => false,
|
||||
.@"test" => a: {
|
||||
if (!comp.config.is_test) break :a false;
|
||||
if (decl_mod != zcu.main_mod) break :a false;
|
||||
if (is_named_test and comp.test_filters.len > 0) {
|
||||
const decl_fqn = ip.stringToSlice(try namespace.fullyQualifiedName(zcu, decl_name));
|
||||
for (comp.test_filters) |test_filter| {
|
||||
if (mem.indexOf(u8, decl_fqn, test_filter)) |_| break;
|
||||
} else break :a false;
|
||||
}
|
||||
zcu.test_functions.putAssumeCapacity(decl_index, {}); // may clobber on incremental update
|
||||
break :a true;
|
||||
},
|
||||
};
|
||||
|
||||
if (want_analysis) {
|
||||
// We will not queue analysis if the decl has been analyzed on a previous update and
|
||||
// `is_export` is unchanged. In this case, the incremental update mechanism will handle
|
||||
// re-analysis for us if necessary.
|
||||
if (prev_exported != declaration.flags.is_export or decl.analysis == .unreferenced) {
|
||||
log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{s}' decl_index={d}", .{
|
||||
namespace.file_scope.sub_file_path, ip.stringToSlice(decl_name), decl_index,
|
||||
});
|
||||
comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index });
|
||||
}
|
||||
}
|
||||
|
||||
decl.is_pub = declaration.flags.is_pub;
|
||||
decl.is_exported = declaration.flags.is_export;
|
||||
decl.kind = kind;
|
||||
decl.zir_decl_index = decl_inst.toOptional();
|
||||
if (decl.getOwnedFunction(zcu) != null) {
|
||||
// TODO this logic is insufficient; namespaces we don't re-scan may still require
|
||||
// updated line numbers. Look into this!
|
||||
// TODO Look into detecting when this would be unnecessary by storing enough state
|
||||
// in `Decl` to notice that the line number did not change.
|
||||
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
|
||||
@ -4397,6 +4633,11 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
|
||||
};
|
||||
defer sema.deinit();
|
||||
|
||||
// Every runtime function has a dependency on the source of the Decl it originates from.
|
||||
// It also depends on the value of its owner Decl.
|
||||
try sema.declareDependency(.{ .src_hash = decl.zir_decl_index.unwrap().? });
|
||||
try sema.declareDependency(.{ .decl_val = decl_index });
|
||||
|
||||
if (func.analysis(ip).inferred_error_set) {
|
||||
const ies = try arena.create(Sema.InferredErrorSet);
|
||||
ies.* = .{ .func = func_index };
|
||||
|
||||
src/Sema.zig (115 changed lines)
@ -2705,6 +2705,37 @@ fn getCaptures(sema: *Sema, block: *Block, extra_index: usize, captures_len: u32
|
||||
return captures;
|
||||
}
|
||||
|
||||
/// Given an `InternPool.WipNamespaceType` or `InternPool.WipEnumType`, apply
|
||||
/// `sema.builtin_type_target_index` to it if necessary.
|
||||
fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) {
|
||||
if (sema.builtin_type_target_index == .none) return wip_ty;
|
||||
var new = wip_ty;
|
||||
new.index = sema.builtin_type_target_index;
|
||||
sema.mod.intern_pool.resolveBuiltinType(new.index, wip_ty.index);
|
||||
return new;
|
||||
}
|
||||
|
||||
/// Given a type just looked up in the `InternPool`, check whether it is
|
||||
/// considered outdated on this update. If so, remove it from the pool
|
||||
/// and return `true`.
|
||||
fn maybeRemoveOutdatedType(sema: *Sema, ty: InternPool.Index) !bool {
|
||||
const zcu = sema.mod;
|
||||
|
||||
if (!zcu.comp.debug_incremental) return false;
|
||||
|
||||
const decl_index = Type.fromInterned(ty).getOwnerDecl(zcu);
|
||||
const decl_as_depender = InternPool.Depender.wrap(.{ .decl = decl_index });
|
||||
const was_outdated = zcu.outdated.swapRemove(decl_as_depender) or
|
||||
zcu.potentially_outdated.swapRemove(decl_as_depender);
|
||||
if (!was_outdated) return false;
|
||||
_ = zcu.outdated_ready.swapRemove(decl_as_depender);
|
||||
zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, InternPool.Depender.wrap(.{ .decl = decl_index }));
|
||||
zcu.intern_pool.remove(ty);
|
||||
zcu.declPtr(decl_index).analysis = .dependency_failure;
|
||||
try zcu.markDependeeOutdated(.{ .decl_val = decl_index });
|
||||
return true;
|
||||
}
|
||||
|
||||
fn zirStructDecl(
|
||||
sema: *Sema,
|
||||
block: *Block,
|
||||
@ -2748,7 +2779,7 @@ fn zirStructDecl(
|
||||
}
|
||||
}
|
||||
|
||||
const wip_ty = switch (try ip.getStructType(gpa, .{
|
||||
const struct_init: InternPool.StructTypeInit = .{
|
||||
.layout = small.layout,
|
||||
.fields_len = fields_len,
|
||||
.known_non_opv = small.known_non_opv,
|
||||
@ -2763,16 +2794,14 @@ fn zirStructDecl(
|
||||
.zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst),
|
||||
.captures = captures,
|
||||
} },
|
||||
})) {
|
||||
.existing => |ty| return Air.internedToRef(ty),
|
||||
.wip => |wip| wip: {
|
||||
if (sema.builtin_type_target_index == .none) break :wip wip;
|
||||
var new = wip;
|
||||
new.index = sema.builtin_type_target_index;
|
||||
ip.resolveBuiltinType(new.index, wip.index);
|
||||
break :wip new;
|
||||
},
|
||||
};
|
||||
const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, struct_init)) {
|
||||
.existing => |ty| wip: {
|
||||
if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty);
|
||||
break :wip (try ip.getStructType(gpa, struct_init)).wip;
|
||||
},
|
||||
.wip => |wip| wip,
|
||||
});
|
||||
errdefer wip_ty.cancel(ip);
|
||||
|
||||
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
|
||||
@ -2969,7 +2998,7 @@ fn zirEnumDecl(
|
||||
if (bag != 0) break true;
|
||||
} else false;
|
||||
|
||||
const wip_ty = switch (try ip.getEnumType(gpa, .{
|
||||
const enum_init: InternPool.EnumTypeInit = .{
|
||||
.has_namespace = true or decls_len > 0, // TODO: see below
|
||||
.has_values = any_values,
|
||||
.tag_mode = if (small.nonexhaustive)
|
||||
@ -2983,16 +3012,14 @@ fn zirEnumDecl(
|
||||
.zir_index = try mod.intern_pool.trackZir(sema.gpa, block.getFileScope(mod), inst),
|
||||
.captures = captures,
|
||||
} },
|
||||
})) {
|
||||
.wip => |wip| wip: {
|
||||
if (sema.builtin_type_target_index == .none) break :wip wip;
|
||||
var new = wip;
|
||||
new.index = sema.builtin_type_target_index;
|
||||
ip.resolveBuiltinType(new.index, wip.index);
|
||||
break :wip new;
|
||||
},
|
||||
.existing => |ty| return Air.internedToRef(ty),
|
||||
};
|
||||
const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, enum_init)) {
|
||||
.existing => |ty| wip: {
|
||||
if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty);
|
||||
break :wip (try ip.getEnumType(gpa, enum_init)).wip;
|
||||
},
|
||||
.wip => |wip| wip,
|
||||
});
|
||||
|
||||
// Once this is `true`, we will not delete the decl or type even upon failure, since we
|
||||
// have finished constructing the type and are in the process of analyzing it.
|
||||
@ -3230,7 +3257,7 @@ fn zirUnionDecl(
|
||||
const captures = try sema.getCaptures(block, extra_index, captures_len);
|
||||
extra_index += captures_len;
|
||||
|
||||
const wip_ty = switch (try ip.getUnionType(gpa, .{
|
||||
const union_init: InternPool.UnionTypeInit = .{
|
||||
.flags = .{
|
||||
.layout = small.layout,
|
||||
.status = .none,
|
||||
@ -3257,16 +3284,14 @@ fn zirUnionDecl(
|
||||
.zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst),
|
||||
.captures = captures,
|
||||
} },
|
||||
})) {
|
||||
.wip => |wip| wip: {
|
||||
if (sema.builtin_type_target_index == .none) break :wip wip;
|
||||
var new = wip;
|
||||
new.index = sema.builtin_type_target_index;
|
||||
ip.resolveBuiltinType(new.index, wip.index);
|
||||
break :wip new;
|
||||
},
|
||||
.existing => |ty| return Air.internedToRef(ty),
|
||||
};
|
||||
const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, union_init)) {
|
||||
.existing => |ty| wip: {
|
||||
if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty);
|
||||
break :wip (try ip.getUnionType(gpa, union_init)).wip;
|
||||
},
|
||||
.wip => |wip| wip,
|
||||
});
|
||||
errdefer wip_ty.cancel(ip);
|
||||
|
||||
const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
|
||||
@ -3336,15 +3361,20 @@ fn zirOpaqueDecl(
|
||||
const captures = try sema.getCaptures(block, extra_index, captures_len);
|
||||
extra_index += captures_len;
|
||||
|
||||
const wip_ty = switch (try ip.getOpaqueType(gpa, .{
|
||||
const opaque_init: InternPool.OpaqueTypeInit = .{
|
||||
.has_namespace = decls_len != 0,
|
||||
.key = .{ .declared = .{
|
||||
.zir_index = try ip.trackZir(gpa, block.getFileScope(mod), inst),
|
||||
.captures = captures,
|
||||
} },
|
||||
})) {
|
||||
};
|
||||
// No `wrapWipTy` needed as no std.builtin types are opaque.
|
||||
const wip_ty = switch (try ip.getOpaqueType(gpa, opaque_init)) {
|
||||
.existing => |ty| wip: {
|
||||
if (!try sema.maybeRemoveOutdatedType(ty)) return Air.internedToRef(ty);
|
||||
break :wip (try ip.getOpaqueType(gpa, opaque_init)).wip;
|
||||
},
|
||||
.wip => |wip| wip,
|
||||
.existing => |ty| return Air.internedToRef(ty),
|
||||
};
|
||||
errdefer wip_ty.cancel(ip);
|
||||
|
||||
@ -5883,7 +5913,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
|
||||
mod.astGenFile(result.file) catch |err|
|
||||
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
|
||||
|
||||
try mod.semaFile(result.file);
|
||||
try mod.ensureFileAnalyzed(result.file);
|
||||
const file_root_decl_index = result.file.root_decl.unwrap().?;
|
||||
return sema.analyzeDeclVal(parent_block, src, file_root_decl_index);
|
||||
}
|
||||
@ -13705,7 +13735,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
|
||||
return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) });
|
||||
},
|
||||
};
|
||||
try mod.semaFile(result.file);
|
||||
try mod.ensureFileAnalyzed(result.file);
|
||||
const file_root_decl_index = result.file.root_decl.unwrap().?;
|
||||
return sema.analyzeDeclVal(block, operand_src, file_root_decl_index);
|
||||
}
|
||||
@ -36432,8 +36462,14 @@ fn resolveInferredErrorSet(
|
||||
const ip = &mod.intern_pool;
|
||||
const func_index = ip.iesFuncIndex(ies_index);
|
||||
const func = mod.funcInfo(func_index);
|
||||
|
||||
try sema.declareDependency(.{ .func_ies = func_index });
|
||||
|
||||
// TODO: during an incremental update this might not be `.none`, but the
|
||||
// function might be out-of-date!
|
||||
const resolved_ty = func.resolvedErrorSet(ip).*;
|
||||
if (resolved_ty != .none) return resolved_ty;
|
||||
|
||||
if (func.analysis(ip).state == .in_progress)
|
||||
return sema.fail(block, src, "unable to resolve inferred error set", .{});
|
||||
|
||||
@ -39052,6 +39088,15 @@ fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type {
|
||||
|
||||
pub fn declareDependency(sema: *Sema, dependee: InternPool.Dependee) !void {
|
||||
if (!sema.mod.comp.debug_incremental) return;
|
||||
|
||||
// Avoid creating dependencies on ourselves. This situation can arise when we analyze the fields
|
||||
// of a type and they use `@This()`. This dependency would be unnecessary, and in fact would
|
||||
// just result in over-analysis since `Zcu.findOutdatedToAnalyze` would never be able to resolve
|
||||
// the loop.
|
||||
if (sema.owner_func_index == .none and dependee == .decl_val and dependee.decl_val == sema.owner_decl_index) {
|
||||
return;
|
||||
}
|
||||
|
||||
const depender = InternPool.Depender.wrap(
|
||||
if (sema.owner_func_index != .none)
|
||||
.{ .func = sema.owner_func_index }
|
||||
|
||||
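The self-dependency situation described above arises with ordinary self-referential types; a minimal illustration of the shape being guarded against:

const Node = struct {
    // Resolving this field type evaluates `@This()`, i.e. the value of the
    // very Decl currently being analyzed. Recording that as a `decl_val`
    // dependency on itself would only cause over-analysis and would leave
    // `Zcu.findOutdatedToAnalyze` unable to resolve the loop, so
    // `declareDependency` skips it.
    next: ?*@This(),
    data: u32,
};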
@ -0,0 +1,8 @@
|
||||
comptime {
|
||||
@compileError("should be reached");
|
||||
}
|
||||
const comptime_0 = {};
|
||||
|
||||
// error
|
||||
//
|
||||
// :2:5: error: should be reached
|
||||
@ -6,5 +6,5 @@ test "thingy" {}
|
||||
// target=native
|
||||
// is_test=true
|
||||
//
|
||||
// :1:6: error: duplicate test name: test.thingy
|
||||
// :2:6: note: other test here
|
||||
// :2:1: error: duplicate test name 'thingy'
|
||||
// :1:1: note: other test here
|
||||
|
||||