Merge pull request #21722 from mlugg/incremental

incremental compilation progress
This commit is contained in:
Matthew Lugg 2024-10-16 20:29:55 +01:00 committed by GitHub
commit 6201031e05
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 181 additions and 62 deletions

View File

@ -2901,6 +2901,7 @@ pub fn makeBinFileWritable(comp: *Compilation) !void {
const Header = extern struct {
intern_pool: extern struct {
thread_count: u32,
file_deps_len: u32,
src_hash_deps_len: u32,
nav_val_deps_len: u32,
namespace_deps_len: u32,
@ -2943,6 +2944,7 @@ pub fn saveState(comp: *Compilation) !void {
const header: Header = .{
.intern_pool = .{
.thread_count = @intCast(ip.locals.len),
.file_deps_len = @intCast(ip.file_deps.count()),
.src_hash_deps_len = @intCast(ip.src_hash_deps.count()),
.nav_val_deps_len = @intCast(ip.nav_val_deps.count()),
.namespace_deps_len = @intCast(ip.namespace_deps.count()),
@ -2969,6 +2971,8 @@ pub fn saveState(comp: *Compilation) !void {
addBuf(&bufs, mem.asBytes(&header));
addBuf(&bufs, mem.sliceAsBytes(pt_headers.items));
addBuf(&bufs, mem.sliceAsBytes(ip.file_deps.keys()));
addBuf(&bufs, mem.sliceAsBytes(ip.file_deps.values()));
addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.keys()));
addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.values()));
addBuf(&bufs, mem.sliceAsBytes(ip.nav_val_deps.keys()));
@ -3076,15 +3080,12 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
});
}
var all_references: ?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference) = null;
defer if (all_references) |*a| a.deinit(gpa);
if (comp.zcu) |zcu| {
const ip = &zcu.intern_pool;
for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| {
if (error_msg) |msg| {
try addModuleErrorMsg(zcu, &bundle, msg.*, &all_references);
try addModuleErrorMsg(zcu, &bundle, msg.*);
} else {
// Must be ZIR errors. Note that this may include AST errors.
// addZirErrorMessages asserts that the tree is loaded.
@ -3093,7 +3094,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
}
}
for (zcu.failed_embed_files.values()) |error_msg| {
try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references);
try addModuleErrorMsg(zcu, &bundle, error_msg.*);
}
{
const SortOrder = struct {
@ -3136,10 +3137,8 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
}
for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| {
if (comp.incremental) {
if (all_references == null) {
all_references = try zcu.resolveReferences();
}
if (!all_references.?.contains(anal_unit)) continue;
const refs = try zcu.resolveReferences();
if (!refs.contains(anal_unit)) continue;
}
const file_index = switch (anal_unit.unwrap()) {
@ -3151,7 +3150,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
// We'll try again once parsing succeeds.
if (!zcu.fileByIndex(file_index).okToReportErrors()) continue;
try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references);
try addModuleErrorMsg(zcu, &bundle, error_msg.*);
if (zcu.cimport_errors.get(anal_unit)) |errors| {
for (errors.getMessages()) |err_msg_index| {
const err_msg = errors.getErrorMessage(err_msg_index);
@ -3175,10 +3174,10 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
}
for (zcu.failed_codegen.keys(), zcu.failed_codegen.values()) |nav, error_msg| {
if (!zcu.navFileScope(nav).okToReportErrors()) continue;
try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references);
try addModuleErrorMsg(zcu, &bundle, error_msg.*);
}
for (zcu.failed_exports.values()) |value| {
try addModuleErrorMsg(zcu, &bundle, value.*, &all_references);
try addModuleErrorMsg(zcu, &bundle, value.*);
}
const actual_error_count = zcu.intern_pool.global_error_set.getNamesFromMainThread().len;
@ -3252,17 +3251,15 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
};
}
try addModuleErrorMsg(zcu, &bundle, err_msg, &all_references);
try addModuleErrorMsg(zcu, &bundle, err_msg);
}
}
if (comp.zcu) |zcu| {
if (comp.incremental and bundle.root_list.items.len == 0) {
const should_have_error = for (zcu.transitive_failed_analysis.keys()) |failed_unit| {
if (all_references == null) {
all_references = try zcu.resolveReferences();
}
if (all_references.?.contains(failed_unit)) break true;
const refs = try zcu.resolveReferences();
if (refs.contains(failed_unit)) break true;
} else false;
if (should_have_error) {
@panic("referenced transitive analysis errors, but none actually emitted");
@ -3331,14 +3328,13 @@ pub const ErrorNoteHashContext = struct {
};
pub fn addModuleErrorMsg(
mod: *Zcu,
zcu: *Zcu,
eb: *ErrorBundle.Wip,
module_err_msg: Zcu.ErrorMsg,
all_references: *?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference),
) !void {
const gpa = eb.gpa;
const ip = &mod.intern_pool;
const err_src_loc = module_err_msg.src_loc.upgrade(mod);
const ip = &zcu.intern_pool;
const err_src_loc = module_err_msg.src_loc.upgrade(zcu);
const err_source = err_src_loc.file_scope.getSource(gpa) catch |err| {
const file_path = try err_src_loc.file_scope.fullPath(gpa);
defer gpa.free(file_path);
@ -3358,22 +3354,20 @@ pub fn addModuleErrorMsg(
defer ref_traces.deinit(gpa);
if (module_err_msg.reference_trace_root.unwrap()) |rt_root| {
if (all_references.* == null) {
all_references.* = try mod.resolveReferences();
}
const all_references = try zcu.resolveReferences();
var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .empty;
defer seen.deinit(gpa);
const max_references = mod.comp.reference_trace orelse Sema.default_reference_trace_len;
const max_references = zcu.comp.reference_trace orelse Sema.default_reference_trace_len;
var referenced_by = rt_root;
while (all_references.*.?.get(referenced_by)) |maybe_ref| {
while (all_references.get(referenced_by)) |maybe_ref| {
const ref = maybe_ref orelse break;
const gop = try seen.getOrPut(gpa, ref.referencer);
if (gop.found_existing) break;
if (ref_traces.items.len < max_references) {
const src = ref.src.upgrade(mod);
const src = ref.src.upgrade(zcu);
const source = try src.file_scope.getSource(gpa);
const span = try src.span(gpa);
const loc = std.zig.findLineColumn(source.bytes, span.main);
@ -3385,7 +3379,7 @@ pub fn addModuleErrorMsg(
.type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip),
.none => "comptime",
},
.func => |f| ip.getNav(mod.funcInfo(f).owner_nav).name.toSlice(ip),
.func => |f| ip.getNav(zcu.funcInfo(f).owner_nav).name.toSlice(ip),
};
try ref_traces.append(gpa, .{
.decl_name = try eb.addString(name),
@ -3435,7 +3429,7 @@ pub fn addModuleErrorMsg(
defer notes.deinit(gpa);
for (module_err_msg.notes) |module_note| {
const note_src_loc = module_note.src_loc.upgrade(mod);
const note_src_loc = module_note.src_loc.upgrade(zcu);
const source = try note_src_loc.file_scope.getSource(gpa);
const span = try note_src_loc.span(gpa);
const loc = std.zig.findLineColumn(source.bytes, span.main);
@ -3488,13 +3482,13 @@ pub fn performAllTheWork(
comp: *Compilation,
main_progress_node: std.Progress.Node,
) JobError!void {
defer if (comp.zcu) |mod| {
mod.sema_prog_node.end();
mod.sema_prog_node = std.Progress.Node.none;
mod.codegen_prog_node.end();
mod.codegen_prog_node = std.Progress.Node.none;
defer if (comp.zcu) |zcu| {
zcu.sema_prog_node.end();
zcu.sema_prog_node = std.Progress.Node.none;
zcu.codegen_prog_node.end();
zcu.codegen_prog_node = std.Progress.Node.none;
mod.generation += 1;
zcu.generation += 1;
};
try comp.performAllTheWorkInner(main_progress_node);
if (!InternPool.single_threaded) if (comp.codegen_work.job_error) |job_error| return job_error;

View File

@ -17,6 +17,13 @@ tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32),
/// Cached shift amount to put a `tid` in the top bits of a 32-bit value.
tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32),
/// Dependencies on whether an entire file gets past AstGen.
/// These are triggered by `@import`, so that:
/// * if a file initially fails AstGen, triggering a transitive failure, when a future update
/// causes it to succeed AstGen, the `@import` is re-analyzed, allowing analysis to proceed
/// * if a file initially succeeds AstGen, but a future update causes the file to fail it,
/// the `@import` is re-analyzed, registering a transitive failure
file_deps: std.AutoArrayHashMapUnmanaged(FileIndex, DepEntry.Index),
/// Dependencies on the source code hash associated with a ZIR instruction.
/// * For a `declaration`, this is the entire declaration body.
/// * For a `struct_decl`, `union_decl`, etc, this is the source of the fields (but not declarations).
@ -70,6 +77,7 @@ pub const empty: InternPool = .{
.tid_shift_30 = if (single_threaded) 0 else 31,
.tid_shift_31 = if (single_threaded) 0 else 31,
.tid_shift_32 = if (single_threaded) 0 else 31,
.file_deps = .empty,
.src_hash_deps = .empty,
.nav_val_deps = .empty,
.interned_deps = .empty,
@ -656,6 +664,7 @@ pub const Nav = struct {
};
pub const Dependee = union(enum) {
file: FileIndex,
src_hash: TrackedInst.Index,
nav_val: Nav.Index,
interned: Index,
@ -704,6 +713,7 @@ pub const DependencyIterator = struct {
pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyIterator {
const first_entry = switch (dependee) {
.file => |x| ip.file_deps.get(x),
.src_hash => |x| ip.src_hash_deps.get(x),
.nav_val => |x| ip.nav_val_deps.get(x),
.interned => |x| ip.interned_deps.get(x),
@ -740,6 +750,7 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend
const new_index: DepEntry.Index = switch (dependee) {
inline else => |dependee_payload, tag| new_index: {
const gop = try switch (tag) {
.file => ip.file_deps,
.src_hash => ip.src_hash_deps,
.nav_val => ip.nav_val_deps,
.interned => ip.interned_deps,
@ -6268,6 +6279,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
}
pub fn deinit(ip: *InternPool, gpa: Allocator) void {
ip.file_deps.deinit(gpa);
ip.src_hash_deps.deinit(gpa);
ip.nav_val_deps.deinit(gpa);
ip.interned_deps.deinit(gpa);

View File

@ -454,6 +454,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
.tree = undefined,
.zir = undefined,
.status = .never_loaded,
.prev_status = .never_loaded,
.mod = new,
};
break :b new;

View File

@ -2559,10 +2559,9 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Zcu.ErrorMsg
const zcu = sema.pt.zcu;
if (build_options.enable_debug_extensions and zcu.comp.debug_compile_errors) {
var all_references: ?std.AutoHashMapUnmanaged(AnalUnit, ?Zcu.ResolvedReference) = null;
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
wip_errors.init(gpa) catch @panic("out of memory");
Compilation.addModuleErrorMsg(zcu, &wip_errors, err_msg.*, &all_references) catch @panic("out of memory");
Compilation.addModuleErrorMsg(zcu, &wip_errors, err_msg.*) catch @panic("out of memory");
std.debug.print("compile error during Sema:\n", .{});
var error_bundle = wip_errors.toOwnedBundle("") catch @panic("out of memory");
error_bundle.renderToStdErr(.{ .ttyconf = .no_color });
@ -6025,9 +6024,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
pt.astGenFile(result.file, path_digest) catch |err|
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
// TODO: register some kind of dependency on the file.
// That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to
// trigger re-analysis later.
try sema.declareDependency(.{ .file = result.file_index });
try pt.ensureFileAnalyzed(result.file_index);
const ty = zcu.fileRootType(result.file_index);
try sema.declareDependency(.{ .interned = ty });
@ -14348,9 +14345,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) });
},
};
// TODO: register some kind of dependency on the file.
// That way, if this returns `error.AnalysisFail`, we have the dependency banked ready to
// trigger re-analysis later.
try sema.declareDependency(.{ .file = result.file_index });
try pt.ensureFileAnalyzed(result.file_index);
const ty = zcu.fileRootType(result.file_index);
try sema.declareDependency(.{ .interned = ty });

View File

@ -173,6 +173,10 @@ retryable_failures: std.ArrayListUnmanaged(AnalUnit) = .empty,
/// These are the modules which we initially queue for analysis in `Compilation.update`.
/// `resolveReferences` will use these as the root of its reachability traversal.
analysis_roots: std.BoundedArray(*Package.Module, 3) = .{},
/// This is the cached result of `Zcu.resolveReferences`. It is computed on-demand, and
/// reset to `null` when any semantic analysis occurs (since this invalidates the data).
/// Allocated into `gpa`.
resolved_references: ?std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = null,
stage1_flags: packed struct {
have_winmain: bool = false,
@ -420,13 +424,8 @@ pub const Namespace = struct {
};
pub const File = struct {
status: enum {
never_loaded,
retryable_failure,
parse_failure,
astgen_failure,
success_zir,
},
status: Status,
prev_status: Status,
source_loaded: bool,
tree_loaded: bool,
zir_loaded: bool,
@ -454,6 +453,14 @@ pub const File = struct {
/// successful, this field is unloaded.
prev_zir: ?*Zir = null,
pub const Status = enum {
never_loaded,
retryable_failure,
parse_failure,
astgen_failure,
success_zir,
};
/// A single reference to a file.
pub const Reference = union(enum) {
/// The file is imported directly (i.e. not as a package) with @import.
@ -2192,6 +2199,8 @@ pub fn deinit(zcu: *Zcu) void {
zcu.all_type_references.deinit(gpa);
zcu.free_type_references.deinit(gpa);
if (zcu.resolved_references) |*r| r.deinit(gpa);
zcu.intern_pool.deinit(gpa);
}
@ -2760,6 +2769,8 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void {
const gpa = zcu.gpa;
zcu.clearCachedResolvedReferences();
unit_refs: {
const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse break :unit_refs;
var idx = kv.value;
@ -2792,6 +2803,8 @@ pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void {
pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit, ref_src: LazySrcLoc) Allocator.Error!void {
const gpa = zcu.gpa;
zcu.clearCachedResolvedReferences();
try zcu.reference_table.ensureUnusedCapacity(gpa, 1);
const ref_idx = zcu.free_references.popOrNull() orelse idx: {
@ -2815,6 +2828,8 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit
pub fn addTypeReference(zcu: *Zcu, src_unit: AnalUnit, referenced_type: InternPool.Index, ref_src: LazySrcLoc) Allocator.Error!void {
const gpa = zcu.gpa;
zcu.clearCachedResolvedReferences();
try zcu.type_reference_table.ensureUnusedCapacity(gpa, 1);
const ref_idx = zcu.free_type_references.popOrNull() orelse idx: {
@ -2835,6 +2850,11 @@ pub fn addTypeReference(zcu: *Zcu, src_unit: AnalUnit, referenced_type: InternPo
gop.value_ptr.* = @intCast(ref_idx);
}
fn clearCachedResolvedReferences(zcu: *Zcu) void {
if (zcu.resolved_references) |*r| r.deinit(zcu.gpa);
zcu.resolved_references = null;
}
pub fn errorSetBits(zcu: *const Zcu) u16 {
if (zcu.error_limit == 0) return 0;
return @as(u16, std.math.log2_int(ErrorInt, zcu.error_limit)) + 1;
@ -3138,7 +3158,15 @@ pub const ResolvedReference = struct {
/// Returns a mapping from an `AnalUnit` to where it is referenced.
/// If the value is `null`, the `AnalUnit` is a root of analysis.
/// If an `AnalUnit` is not in the returned map, it is unreferenced.
pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
/// The returned hashmap is owned by the `Zcu`, so should not be freed by the caller.
/// This hashmap is cached, so repeated calls to this function are cheap.
pub fn resolveReferences(zcu: *Zcu) !*const std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
if (zcu.resolved_references == null) {
zcu.resolved_references = try zcu.resolveReferencesInner();
}
return &zcu.resolved_references.?;
}
fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
const gpa = zcu.gpa;
const comp = zcu.comp;
const ip = &zcu.intern_pool;
@ -3449,6 +3477,10 @@ fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, com
const zcu = data.zcu;
const ip = &zcu.intern_pool;
switch (data.dependee) {
.file => |file| {
const file_path = zcu.fileByIndex(file).sub_file_path;
return writer.print("file('{s}')", .{file_path});
},
.src_hash => |ti| {
const info = ti.resolveFull(ip) orelse {
return writer.writeAll("inst(<lost>)");

View File

@ -179,10 +179,10 @@ pub fn astGenFile(
.inode = header.stat_inode,
.mtime = header.stat_mtime,
};
file.prev_status = file.status;
file.status = .success_zir;
log.debug("AstGen cached success: {s}", .{file.sub_file_path});
// TODO don't report compile errors until Sema @importFile
if (file.zir.hasCompileErrors()) {
{
comp.mutex.lock();
@ -258,6 +258,7 @@ pub fn astGenFile(
// Any potential AST errors are converted to ZIR errors here.
file.zir = try AstGen.generate(gpa, file.tree);
file.zir_loaded = true;
file.prev_status = file.status;
file.status = .success_zir;
log.debug("AstGen fresh success: {s}", .{file.sub_file_path});
@ -350,6 +351,9 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
defer cleanupUpdatedFiles(gpa, &updated_files);
for (zcu.import_table.values()) |file_index| {
const file = zcu.fileByIndex(file_index);
if (file.prev_status != file.status and file.prev_status != .never_loaded) {
try zcu.markDependeeOutdated(.not_marked_po, .{ .file = file_index });
}
const old_zir = file.prev_zir orelse continue;
const new_zir = file.zir;
const gop = try updated_files.getOrPut(gpa, file_index);
@ -551,11 +555,13 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
const cau_outdated = zcu.outdated.swapRemove(anal_unit) or
zcu.potentially_outdated.swapRemove(anal_unit);
const prev_failed = zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit);
if (cau_outdated) {
_ = zcu.outdated_ready.swapRemove(anal_unit);
} else {
// We can trust the current information about this `Cau`.
if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) {
if (prev_failed) {
return error.AnalysisFail;
}
// If it wasn't failed and wasn't marked outdated, then either...
@ -578,9 +584,13 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
// Since it does not, this must be a transitive failure.
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
}
// We treat errors as up-to-date, since those uses would just trigger a transitive error.
// The exception is types, since type declarations may require re-analysis if the type, e.g. its captures, changed.
const outdated = cau.owner.unwrap() == .type;
// We consider this `Cau` to be outdated if:
// * Previous analysis succeeded; in this case, we need to re-analyze dependants to ensure
// they hit a transitive error here, rather than reporting a different error later (which
// may now be invalid).
// * The `Cau` is a type; in this case, the declaration site may require re-analysis to
// construct a valid type.
const outdated = !prev_failed or cau.owner.unwrap() == .type;
break :res .{ .{
.invalidate_decl_val = outdated,
.invalidate_decl_ref = outdated,
@ -597,10 +607,9 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
);
zcu.retryable_failures.appendAssumeCapacity(anal_unit);
zcu.failed_analysis.putAssumeCapacityNoClobber(anal_unit, msg);
// We treat errors as up-to-date, since those uses would just trigger a transitive error
break :res .{ .{
.invalidate_decl_val = false,
.invalidate_decl_ref = false,
.invalidate_decl_val = true,
.invalidate_decl_ref = true,
}, true };
},
};
@ -707,11 +716,13 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
const func_outdated = zcu.outdated.swapRemove(anal_unit) or
zcu.potentially_outdated.swapRemove(anal_unit);
const prev_failed = zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit);
if (func_outdated) {
_ = zcu.outdated_ready.swapRemove(anal_unit);
} else {
// We can trust the current information about this function.
if (zcu.failed_analysis.contains(anal_unit) or zcu.transitive_failed_analysis.contains(anal_unit)) {
if (prev_failed) {
return error.AnalysisFail;
}
switch (func.analysisUnordered(ip).state) {
@ -730,7 +741,10 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter
// Since it does not, this must be a transitive failure.
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
}
break :res .{ false, true }; // we treat errors as up-to-date IES, since those uses would just trigger a transitive error
// We consider the IES to be outdated if the function previously succeeded analysis; in this case,
// we need to re-analyze dependants to ensure they hit a transitive error here, rather than reporting
// a different error later (which may now be invalid).
break :res .{ !prev_failed, true };
},
error.OutOfMemory => return error.OutOfMemory, // TODO: graceful handling like `ensureCauAnalyzed`
};
@ -1445,6 +1459,7 @@ pub fn importPkg(pt: Zcu.PerThread, mod: *Module) !Zcu.ImportFileResult {
.tree = undefined,
.zir = undefined,
.status = .never_loaded,
.prev_status = .never_loaded,
.mod = mod,
};
@ -1555,6 +1570,7 @@ pub fn importFile(
.tree = undefined,
.zir = undefined,
.status = .never_loaded,
.prev_status = .never_loaded,
.mod = mod,
};

View File

@ -6118,6 +6118,7 @@ fn cmdAstCheck(
var file: Zcu.File = .{
.status = .never_loaded,
.prev_status = .never_loaded,
.source_loaded = false,
.tree_loaded = false,
.zir_loaded = false,
@ -6441,6 +6442,7 @@ fn cmdDumpZir(
var file: Zcu.File = .{
.status = .never_loaded,
.prev_status = .never_loaded,
.source_loaded = false,
.tree_loaded = false,
.zir_loaded = true,
@ -6508,6 +6510,7 @@ fn cmdChangelist(
var file: Zcu.File = .{
.status = .never_loaded,
.prev_status = .never_loaded,
.source_loaded = false,
.tree_loaded = false,
.zir_loaded = false,

View File

@ -1,4 +1,5 @@
#target=x86_64-linux-selfhosted
// Disabled on self-hosted due to linker crash
// #target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version

View File

@ -0,0 +1,35 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version with error
#file=main.zig
pub fn main() !void {
try @import("foo.zig").hello();
}
#file=foo.zig
pub fn hello() !void {
try std.io.getStdOut().writeAll("Hello, World!\n");
}
#expect_error=ignored
#update=fix the error
#file=foo.zig
const std = @import("std");
pub fn hello() !void {
try std.io.getStdOut().writeAll("Hello, World!\n");
}
#expect_stdout="Hello, World!\n"
#update=add new error
#file=foo.zig
const std = @import("std");
pub fn hello() !void {
try std.io.getStdOut().writeAll(hello_str);
}
#expect_error=ignored
#update=fix the new error
#file=foo.zig
const std = @import("std");
const hello_str = "Hello, World! Again!\n";
pub fn hello() !void {
try std.io.getStdOut().writeAll(hello_str);
}
#expect_stdout="Hello, World! Again!\n"

View File

@ -0,0 +1,30 @@
#target=x86_64-linux-selfhosted
#target=x86_64-linux-cbe
#target=x86_64-windows-cbe
#update=initial version
#file=main.zig
const E = enum { a, b, c };
const U = union(E) {
a: i32,
b: f64,
c: f64,
d: f64,
};
pub fn main() void {
const u: U = .{ .a = 123 };
_ = u;
}
#expect_error=ignored
#update=remove invalid backing enum
#file=main.zig
const U = union {
a: i32,
b: f64,
c: f64,
d: f64,
};
pub fn main() void {
const u: U = .{ .a = 123 };
_ = u;
}
#expect_stdout=""