mirror of
https://github.com/ziglang/zig.git
synced 2025-12-06 06:13:07 +00:00
compiler: integrate importing ZON with incremental compilation
The changes from a few commits earlier, where semantic analysis no longer occurs if any Zig files failed to lower to ZIR, mean `file` dependencies are no longer necessary! However, we now need them for ZON files, to be invalidated whenever a ZON file changes.
This commit is contained in:
parent
55a2e535fd
commit
3ca588bcc6
@ -2903,10 +2903,12 @@ pub fn makeBinFileWritable(comp: *Compilation) !void {
|
||||
const Header = extern struct {
|
||||
intern_pool: extern struct {
|
||||
thread_count: u32,
|
||||
file_deps_len: u32,
|
||||
src_hash_deps_len: u32,
|
||||
nav_val_deps_len: u32,
|
||||
nav_ty_deps_len: u32,
|
||||
interned_deps_len: u32,
|
||||
zon_file_deps_len: u32,
|
||||
embed_file_deps_len: u32,
|
||||
namespace_deps_len: u32,
|
||||
namespace_name_deps_len: u32,
|
||||
first_dependency_len: u32,
|
||||
@ -2947,10 +2949,12 @@ pub fn saveState(comp: *Compilation) !void {
|
||||
const header: Header = .{
|
||||
.intern_pool = .{
|
||||
.thread_count = @intCast(ip.locals.len),
|
||||
.file_deps_len = @intCast(ip.file_deps.count()),
|
||||
.src_hash_deps_len = @intCast(ip.src_hash_deps.count()),
|
||||
.nav_val_deps_len = @intCast(ip.nav_val_deps.count()),
|
||||
.nav_ty_deps_len = @intCast(ip.nav_ty_deps.count()),
|
||||
.interned_deps_len = @intCast(ip.interned_deps.count()),
|
||||
.zon_file_deps_len = @intCast(ip.zon_file_deps.count()),
|
||||
.embed_file_deps_len = @intCast(ip.embed_file_deps.count()),
|
||||
.namespace_deps_len = @intCast(ip.namespace_deps.count()),
|
||||
.namespace_name_deps_len = @intCast(ip.namespace_name_deps.count()),
|
||||
.first_dependency_len = @intCast(ip.first_dependency.count()),
|
||||
@ -2975,14 +2979,18 @@ pub fn saveState(comp: *Compilation) !void {
|
||||
addBuf(&bufs, mem.asBytes(&header));
|
||||
addBuf(&bufs, mem.sliceAsBytes(pt_headers.items));
|
||||
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.file_deps.keys()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.file_deps.values()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.keys()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.src_hash_deps.values()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.nav_val_deps.keys()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.nav_val_deps.values()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.nav_ty_deps.keys()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.nav_ty_deps.values()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.interned_deps.keys()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.interned_deps.values()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.zon_file_deps.keys()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.zon_file_deps.values()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.embed_file_deps.keys()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.embed_file_deps.values()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.keys()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.namespace_deps.values()));
|
||||
addBuf(&bufs, mem.sliceAsBytes(ip.namespace_name_deps.keys()));
|
||||
|
||||
@ -17,13 +17,6 @@ tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32),
|
||||
/// Cached shift amount to put a `tid` in the top bits of a 32-bit value.
|
||||
tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32),
|
||||
|
||||
/// Dependencies on whether an entire file gets past AstGen.
|
||||
/// These are triggered by `@import`, so that:
|
||||
/// * if a file initially fails AstGen, triggering a transitive failure, when a future update
|
||||
/// causes it to succeed AstGen, the `@import` is re-analyzed, allowing analysis to proceed
|
||||
/// * if a file initially succeeds AstGen, but a future update causes the file to fail it,
|
||||
/// the `@import` is re-analyzed, registering a transitive failure
|
||||
file_deps: std.AutoArrayHashMapUnmanaged(FileIndex, DepEntry.Index),
|
||||
/// Dependencies on the source code hash associated with a ZIR instruction.
|
||||
/// * For a `declaration`, this is the entire declaration body.
|
||||
/// * For a `struct_decl`, `union_decl`, etc, this is the source of the fields (but not declarations).
|
||||
@ -42,6 +35,9 @@ nav_ty_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index),
|
||||
/// * a container type requiring resolution (invalidated when the type must be recreated at a new index)
|
||||
/// Value is index into `dep_entries` of the first dependency on this interned value.
|
||||
interned_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index),
|
||||
/// Dependencies on a ZON file. Triggered by `@import` of ZON.
|
||||
/// Value is index into `dep_entries` of the first dependency on this ZON file.
|
||||
zon_file_deps: std.AutoArrayHashMapUnmanaged(FileIndex, DepEntry.Index),
|
||||
/// Dependencies on an embedded file.
|
||||
/// Introduced by `@embedFile`; invalidated when the file changes.
|
||||
/// Value is index into `dep_entries` of the first dependency on this `Zcu.EmbedFile`.
|
||||
@ -89,11 +85,11 @@ pub const empty: InternPool = .{
|
||||
.tid_shift_30 = if (single_threaded) 0 else 31,
|
||||
.tid_shift_31 = if (single_threaded) 0 else 31,
|
||||
.tid_shift_32 = if (single_threaded) 0 else 31,
|
||||
.file_deps = .empty,
|
||||
.src_hash_deps = .empty,
|
||||
.nav_val_deps = .empty,
|
||||
.nav_ty_deps = .empty,
|
||||
.interned_deps = .empty,
|
||||
.zon_file_deps = .empty,
|
||||
.embed_file_deps = .empty,
|
||||
.namespace_deps = .empty,
|
||||
.namespace_name_deps = .empty,
|
||||
@ -824,11 +820,11 @@ pub const Nav = struct {
|
||||
};
|
||||
|
||||
pub const Dependee = union(enum) {
|
||||
file: FileIndex,
|
||||
src_hash: TrackedInst.Index,
|
||||
nav_val: Nav.Index,
|
||||
nav_ty: Nav.Index,
|
||||
interned: Index,
|
||||
zon_file: FileIndex,
|
||||
embed_file: Zcu.EmbedFile.Index,
|
||||
namespace: TrackedInst.Index,
|
||||
namespace_name: NamespaceNameKey,
|
||||
@ -876,11 +872,11 @@ pub const DependencyIterator = struct {
|
||||
|
||||
pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyIterator {
|
||||
const first_entry = switch (dependee) {
|
||||
.file => |x| ip.file_deps.get(x),
|
||||
.src_hash => |x| ip.src_hash_deps.get(x),
|
||||
.nav_val => |x| ip.nav_val_deps.get(x),
|
||||
.nav_ty => |x| ip.nav_ty_deps.get(x),
|
||||
.interned => |x| ip.interned_deps.get(x),
|
||||
.zon_file => |x| ip.zon_file_deps.get(x),
|
||||
.embed_file => |x| ip.embed_file_deps.get(x),
|
||||
.namespace => |x| ip.namespace_deps.get(x),
|
||||
.namespace_name => |x| ip.namespace_name_deps.get(x),
|
||||
@ -947,11 +943,11 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend
|
||||
},
|
||||
inline else => |dependee_payload, tag| new_index: {
|
||||
const gop = try switch (tag) {
|
||||
.file => ip.file_deps,
|
||||
.src_hash => ip.src_hash_deps,
|
||||
.nav_val => ip.nav_val_deps,
|
||||
.nav_ty => ip.nav_ty_deps,
|
||||
.interned => ip.interned_deps,
|
||||
.zon_file => ip.zon_file_deps,
|
||||
.embed_file => ip.embed_file_deps,
|
||||
.namespace => ip.namespace_deps,
|
||||
.namespace_name => ip.namespace_name_deps,
|
||||
@ -6688,11 +6684,11 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
|
||||
pub fn deinit(ip: *InternPool, gpa: Allocator) void {
|
||||
if (debug_state.enable_checks) std.debug.assert(debug_state.intern_pool == null);
|
||||
|
||||
ip.file_deps.deinit(gpa);
|
||||
ip.src_hash_deps.deinit(gpa);
|
||||
ip.nav_val_deps.deinit(gpa);
|
||||
ip.nav_ty_deps.deinit(gpa);
|
||||
ip.interned_deps.deinit(gpa);
|
||||
ip.zon_file_deps.deinit(gpa);
|
||||
ip.embed_file_deps.deinit(gpa);
|
||||
ip.namespace_deps.deinit(gpa);
|
||||
ip.namespace_name_deps.deinit(gpa);
|
||||
|
||||
@ -6143,7 +6143,6 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
|
||||
pt.updateFile(result.file, path_digest) catch |err|
|
||||
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
|
||||
|
||||
try sema.declareDependency(.{ .file = result.file_index });
|
||||
try pt.ensureFileAnalyzed(result.file_index);
|
||||
const ty = zcu.fileRootType(result.file_index);
|
||||
try sema.declareDependency(.{ .interned = ty });
|
||||
@ -13986,7 +13985,6 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
|
||||
};
|
||||
switch (result.file.getMode()) {
|
||||
.zig => {
|
||||
try sema.declareDependency(.{ .file = result.file_index });
|
||||
try pt.ensureFileAnalyzed(result.file_index);
|
||||
const ty = zcu.fileRootType(result.file_index);
|
||||
try sema.declareDependency(.{ .interned = ty });
|
||||
@ -14003,6 +14001,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
|
||||
return sema.fail(block, operand_src, "'@import' of ZON must have a known result type", .{});
|
||||
}
|
||||
|
||||
try sema.declareDependency(.{ .zon_file = result.file_index });
|
||||
const interned = try LowerZon.run(
|
||||
sema,
|
||||
result.file,
|
||||
|
||||
16
src/Zcu.zig
16
src/Zcu.zig
@ -705,6 +705,14 @@ pub const File = struct {
|
||||
/// field is populated with that old ZIR.
|
||||
prev_zir: ?*Zir = null,
|
||||
|
||||
/// This field serves a similar purpose to `prev_zir`, but for ZOIR. However, since we do not
|
||||
/// need to map old ZOIR to new ZOIR -- instead only invalidating dependencies if the ZOIR
|
||||
/// changed -- this field is just a simple boolean.
|
||||
///
|
||||
/// When `zoir` is updated, this field is set to `true`. In `updateZirRefs`, if this is `true`,
|
||||
/// we invalidate the corresponding `zon_file` dependency, and reset it to `false`.
|
||||
zoir_invalidated: bool = false,
|
||||
|
||||
/// A single reference to a file.
|
||||
pub const Reference = union(enum) {
|
||||
/// The file is imported directly (i.e. not as a package) with @import.
|
||||
@ -4074,10 +4082,6 @@ fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, com
|
||||
const zcu = data.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
switch (data.dependee) {
|
||||
.file => |file| {
|
||||
const file_path = zcu.fileByIndex(file).sub_file_path;
|
||||
return writer.print("file('{s}')", .{file_path});
|
||||
},
|
||||
.src_hash => |ti| {
|
||||
const info = ti.resolveFull(ip) orelse {
|
||||
return writer.writeAll("inst(<lost>)");
|
||||
@ -4098,6 +4102,10 @@ fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, com
|
||||
.func => |f| return writer.print("ies('{}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}),
|
||||
else => unreachable,
|
||||
},
|
||||
.zon_file => |file| {
|
||||
const file_path = zcu.fileByIndex(file).sub_file_path;
|
||||
return writer.print("zon_file('{s}')", .{file_path});
|
||||
},
|
||||
.embed_file => |ef_idx| {
|
||||
const ef = ef_idx.get(zcu);
|
||||
return writer.print("embed_file('{s}')", .{std.fs.path.fmtJoin(&.{
|
||||
|
||||
@ -145,6 +145,9 @@ pub fn updateFile(
|
||||
file.zir = null;
|
||||
}
|
||||
|
||||
// If ZOIR is changing, then we need to invalidate dependencies on it
|
||||
if (file.zoir != null) file.zoir_invalidated = true;
|
||||
|
||||
// We're going to re-load everything, so unload source, AST, ZIR, ZOIR.
|
||||
file.unload(gpa);
|
||||
|
||||
@ -380,11 +383,23 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
|
||||
const gpa = zcu.gpa;
|
||||
|
||||
// We need to visit every updated File for every TrackedInst in InternPool.
|
||||
// This only includes Zig files; ZON files are omitted.
|
||||
var updated_files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile) = .empty;
|
||||
defer cleanupUpdatedFiles(gpa, &updated_files);
|
||||
|
||||
for (zcu.import_table.values()) |file_index| {
|
||||
const file = zcu.fileByIndex(file_index);
|
||||
assert(file.status == .success);
|
||||
switch (file.getMode()) {
|
||||
.zig => {}, // logic below
|
||||
.zon => {
|
||||
if (file.zoir_invalidated) {
|
||||
try zcu.markDependeeOutdated(.not_marked_po, .{ .zon_file = file_index });
|
||||
file.zoir_invalidated = false;
|
||||
}
|
||||
continue;
|
||||
},
|
||||
}
|
||||
const old_zir = file.prev_zir orelse continue;
|
||||
const new_zir = file.zir.?;
|
||||
const gop = try updated_files.getOrPut(gpa, file_index);
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user