InternPool: make tracked_insts thread-safe

Jacob Young 2024-07-10 14:33:46 -04:00
parent f290b54f89
commit afa66fa392
5 changed files with 381 additions and 215 deletions
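
Previously, `InternPool.tracked_insts` was a single `std.AutoArrayHashMapUnmanaged(TrackedInst, void)` shared by every thread. This commit replaces it with the same scheme already used for string interning: each thread's `Local` owns an append-only `tracked_insts` list, and lookup goes through a sharded `tracked_inst_map` that readers probe lock-free with acquire loads while writers insert under a per-shard mutex. A `TrackedInst.Index` now packs the owning thread's id into its high bits, `trackZir` takes `(gpa, tid, key)` instead of `(gpa, file, inst)`, and the retryable-error reporting helpers move from `Compilation` and `Zcu` onto `Zcu.PerThread` so that callers have a `tid` in hand.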

src/Compilation.zig

@@ -2675,7 +2675,10 @@ fn reportMultiModuleErrors(pt: Zcu.PerThread) !void {
.import => |import| try Zcu.ErrorMsg.init(
gpa,
.{
.base_node_inst = try ip.trackZir(gpa, import.file, .main_struct_inst),
.base_node_inst = try ip.trackZir(gpa, pt.tid, .{
.file = import.file,
.inst = .main_struct_inst,
}),
.offset = .{ .token_abs = import.token },
},
"imported from module {s}",
@@ -2684,7 +2687,10 @@ fn reportMultiModuleErrors(pt: Zcu.PerThread) !void {
.root => |pkg| try Zcu.ErrorMsg.init(
gpa,
.{
.base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst),
.base_node_inst = try ip.trackZir(gpa, pt.tid, .{
.file = file_index,
.inst = .main_struct_inst,
}),
.offset = .entire_file,
},
"root of module {s}",
@@ -2698,7 +2704,10 @@ fn reportMultiModuleErrors(pt: Zcu.PerThread) !void {
notes[num_notes] = try Zcu.ErrorMsg.init(
gpa,
.{
.base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst),
.base_node_inst = try ip.trackZir(gpa, pt.tid, .{
.file = file_index,
.inst = .main_struct_inst,
}),
.offset = .entire_file,
},
"{} more references omitted",
@@ -2710,7 +2719,10 @@ fn reportMultiModuleErrors(pt: Zcu.PerThread) !void {
const err = try Zcu.ErrorMsg.create(
gpa,
.{
.base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst),
.base_node_inst = try ip.trackZir(gpa, pt.tid, .{
.file = file_index,
.inst = .main_struct_inst,
}),
.offset = .entire_file,
},
"file exists in multiple modules",
@@ -2776,7 +2788,7 @@ const Header = extern struct {
//extra_len: u32,
//limbs_len: u32,
//string_bytes_len: u32,
tracked_insts_len: u32,
//tracked_insts_len: u32,
src_hash_deps_len: u32,
decl_val_deps_len: u32,
namespace_deps_len: u32,
@@ -2805,7 +2817,7 @@ pub fn saveState(comp: *Compilation) !void {
//.extra_len = @intCast(ip.extra.items.len),
//.limbs_len = @intCast(ip.limbs.items.len),
//.string_bytes_len = @intCast(ip.string_bytes.items.len),
.tracked_insts_len = @intCast(ip.tracked_insts.count()),
//.tracked_insts_len = @intCast(ip.tracked_insts.count()),
.src_hash_deps_len = @intCast(ip.src_hash_deps.count()),
.decl_val_deps_len = @intCast(ip.decl_val_deps.count()),
.namespace_deps_len = @intCast(ip.namespace_deps.count()),
@@ -2822,7 +2834,7 @@ pub fn saveState(comp: *Compilation) !void {
//addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data)));
//addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag)));
//addBuf(&bufs_list, &bufs_len, ip.string_bytes.items);
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys()));
//addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.tracked_insts.keys()));
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.keys()));
addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.src_hash_deps.values()));
@@ -4134,14 +4146,6 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye
};
}
const AstGenSrc = union(enum) {
root,
import: struct {
importing_file: Zcu.File.Index,
import_tok: std.zig.Ast.TokenIndex,
},
};
fn workerAstGenFile(
tid: usize,
comp: *Compilation,
@@ -4151,7 +4155,7 @@ fn workerAstGenFile(
root_decl: Zcu.Decl.OptionalIndex,
prog_node: std.Progress.Node,
wg: *WaitGroup,
src: AstGenSrc,
src: Zcu.AstGenSrc,
) void {
const child_prog_node = prog_node.start(file.sub_file_path, 0);
defer child_prog_node.end();
@@ -4161,7 +4165,7 @@ fn workerAstGenFile(
error.AnalysisFail => return,
else => {
file.status = .retryable_failure;
comp.reportRetryableAstGenError(src, file_index, err) catch |oom| switch (oom) {
pt.reportRetryableAstGenError(src, file_index, err) catch |oom| switch (oom) {
// Swallowing this error is OK because it's implied to be OOM when
// there is a missing `failed_files` error message.
error.OutOfMemory => {},
@@ -4207,7 +4211,7 @@ fn workerAstGenFile(
log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{
file.sub_file_path, import_path, import_result.file.sub_file_path,
});
const sub_src: AstGenSrc = .{ .import = .{
const sub_src: Zcu.AstGenSrc = .{ .import = .{
.importing_file = file_index,
.import_tok = item.data.token,
} };
@@ -4560,41 +4564,6 @@ fn reportRetryableWin32ResourceError(
}
}
fn reportRetryableAstGenError(
comp: *Compilation,
src: AstGenSrc,
file_index: Zcu.File.Index,
err: anyerror,
) error{OutOfMemory}!void {
const zcu = comp.module.?;
const gpa = zcu.gpa;
const file = zcu.fileByIndex(file_index);
file.status = .retryable_failure;
const src_loc: Zcu.LazySrcLoc = switch (src) {
.root => .{
.base_node_inst = try zcu.intern_pool.trackZir(gpa, file_index, .main_struct_inst),
.offset = .entire_file,
},
.import => |info| .{
.base_node_inst = try zcu.intern_pool.trackZir(gpa, info.importing_file, .main_struct_inst),
.offset = .{ .token_abs = info.import_tok },
},
};
const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{
file.mod.root, file.sub_file_path, @errorName(err),
});
errdefer err_msg.destroy(gpa);
{
comp.mutex.lock();
defer comp.mutex.unlock();
try zcu.failed_files.putNoClobber(gpa, file, err_msg);
}
}
fn reportRetryableEmbedFileError(
comp: *Compilation,
embed_file: *Zcu.EmbedFile,

src/InternPool.zig

@@ -20,10 +20,6 @@ tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_th
/// These are not serialized; they are computed upon deserialization.
maps: std.ArrayListUnmanaged(FieldMap) = .{},
/// An index into `tracked_insts` gives a reference to a single ZIR instruction which
/// persists across incremental updates.
tracked_insts: std.AutoArrayHashMapUnmanaged(TrackedInst, void) = .{},
/// Dependencies on the source code hash associated with a ZIR instruction.
/// * For a `declaration`, this is the entire declaration body.
/// * For a `struct_decl`, `union_decl`, etc, this is the source of the fields (but not declarations).
@@ -76,12 +72,15 @@ pub const TrackedInst = extern struct {
}
pub const Index = enum(u32) {
_,
pub fn resolveFull(i: TrackedInst.Index, ip: *const InternPool) TrackedInst {
return ip.tracked_insts.keys()[@intFromEnum(i)];
pub fn resolveFull(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) TrackedInst {
const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip);
const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire();
return tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
}
pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) Zir.Inst.Index {
return i.resolveFull(ip).inst;
}
pub fn toOptional(i: TrackedInst.Index) Optional {
return @enumFromInt(@intFromEnum(i));
}
@@ -95,21 +94,124 @@ pub const TrackedInst = extern struct {
};
}
};
pub const Unwrapped = struct {
tid: Zcu.PerThread.Id,
index: u32,
pub fn wrap(unwrapped: Unwrapped, ip: *const InternPool) TrackedInst.Index {
assert(@intFromEnum(unwrapped.tid) <= ip.getTidMask());
assert(unwrapped.index <= ip.getIndexMask(u32));
return @enumFromInt(@as(u32, @intFromEnum(unwrapped.tid)) << ip.tid_shift_32 |
unwrapped.index);
}
};
pub fn unwrap(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) Unwrapped {
return .{
.tid = @enumFromInt(@intFromEnum(tracked_inst_index) >> ip.tid_shift_32 & ip.getTidMask()),
.index = @intFromEnum(tracked_inst_index) & ip.getIndexMask(u32),
};
}
};
};
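
The `wrap`/`unwrap` pair above is what lets a plain `enum(u32)` index name an entry in another thread's list: the high bits select the owning thread, the low bits index into that thread's `tracked_insts`. A minimal self-contained sketch of the encoding, assuming a fixed 4-bit tid / 28-bit index split (hypothetical constants; the real `tid_shift_32` is computed from the thread count in `InternPool.init`):

const std = @import("std");

// Illustrative split: 4 tid bits, 28 index bits. The real InternPool
// derives this shift from `available_threads` at init time.
const tid_shift_32: u5 = 28;

const Unwrapped = struct { tid: u32, index: u32 };

fn wrap(u: Unwrapped) u32 {
    std.debug.assert(u.tid < 1 << (32 - tid_shift_32));
    std.debug.assert(u.index < 1 << tid_shift_32);
    return u.tid << tid_shift_32 | u.index;
}

fn unwrap(i: u32) Unwrapped {
    return .{
        .tid = i >> tid_shift_32,
        .index = i & ((1 << tid_shift_32) - 1),
    };
}

test "round trip" {
    const i = wrap(.{ .tid = 3, .index = 12345 });
    try std.testing.expectEqual(Unwrapped{ .tid = 3, .index = 12345 }, unwrap(i));
}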
pub fn trackZir(
ip: *InternPool,
gpa: Allocator,
file: FileIndex,
inst: Zir.Inst.Index,
tid: Zcu.PerThread.Id,
key: TrackedInst,
) Allocator.Error!TrackedInst.Index {
const key: TrackedInst = .{
.file = file,
.inst = inst,
};
const gop = try ip.tracked_insts.getOrPut(gpa, key);
return @enumFromInt(gop.index);
const full_hash = Hash.hash(0, std.mem.asBytes(&key));
const hash: u32 = @truncate(full_hash >> 32);
const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
var map = shard.shared.tracked_inst_map.acquire();
const Map = @TypeOf(map);
var map_mask = map.header().mask();
var map_index = hash;
while (true) : (map_index += 1) {
map_index &= map_mask;
const entry = &map.entries[map_index];
const index = entry.acquire().unwrap() orelse break;
if (entry.hash != hash) continue;
if (std.meta.eql(index.resolveFull(ip), key)) return index;
}
shard.mutate.tracked_inst_map.mutex.lock();
defer shard.mutate.tracked_inst_map.mutex.unlock();
if (map.entries != shard.shared.tracked_inst_map.entries) {
map = shard.shared.tracked_inst_map;
map_mask = map.header().mask();
map_index = hash;
}
while (true) : (map_index += 1) {
map_index &= map_mask;
const entry = &map.entries[map_index];
const index = entry.acquire().unwrap() orelse break;
if (entry.hash != hash) continue;
if (std.meta.eql(index.resolveFull(ip), key)) return index;
}
defer shard.mutate.tracked_inst_map.len += 1;
const local = ip.getLocal(tid);
local.mutate.tracked_insts.mutex.lock();
defer local.mutate.tracked_insts.mutex.unlock();
const list = local.getMutableTrackedInsts(gpa);
try list.ensureUnusedCapacity(1);
const map_header = map.header().*;
if (shard.mutate.tracked_inst_map.len < map_header.capacity * 3 / 5) {
const entry = &map.entries[map_index];
entry.hash = hash;
const index = (TrackedInst.Index.Unwrapped{
.tid = tid,
.index = list.mutate.len,
}).wrap(ip);
list.appendAssumeCapacity(.{key});
entry.release(index.toOptional());
return index;
}
const arena_state = &local.mutate.arena;
var arena = arena_state.promote(gpa);
defer arena_state.* = arena.state;
const new_map_capacity = map_header.capacity * 2;
const new_map_buf = try arena.allocator().alignedAlloc(
u8,
Map.alignment,
Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry),
);
const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
new_map.header().* = .{ .capacity = new_map_capacity };
@memset(new_map.entries[0..new_map_capacity], .{ .value = .none, .hash = undefined });
const new_map_mask = new_map.header().mask();
map_index = 0;
while (map_index < map_header.capacity) : (map_index += 1) {
const entry = &map.entries[map_index];
const index = entry.value.unwrap() orelse continue;
const item_hash = entry.hash;
var new_map_index = item_hash;
while (true) : (new_map_index += 1) {
new_map_index &= new_map_mask;
const new_entry = &new_map.entries[new_map_index];
if (new_entry.value != .none) continue;
new_entry.* = .{
.value = index.toOptional(),
.hash = item_hash,
};
break;
}
}
map = new_map;
map_index = hash;
while (true) : (map_index += 1) {
map_index &= new_map_mask;
if (map.entries[map_index].value == .none) break;
}
const index = (TrackedInst.Index.Unwrapped{
.tid = tid,
.index = list.mutate.len,
}).wrap(ip);
list.appendAssumeCapacity(.{key});
map.entries[map_index] = .{ .value = index.toOptional(), .hash = hash };
shard.shared.tracked_inst_map.release(new_map);
return index;
}
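
`trackZir` above is the double-checked pattern this commit uses throughout: probe the shard lock-free, re-probe under the shard mutex (another thread may have inserted the key in the meantime), and only then append to the thread-local list and publish the entry with a release store, growing and rehashing the shard map once it passes 60% load (`capacity * 3 / 5`). A stripped-down sketch of just the probe/recheck/publish cycle, under assumed simplifications: fixed capacity, `0` standing in for the empty value, and hash equality standing in for the full key comparison that the real map performs via `resolveFull`.

const std = @import("std");

const Entry = struct {
    value: std.atomic.Value(u32), // 0 means "empty" in this sketch
    hash: u32,
};

const Shard = struct {
    mutex: std.Thread.Mutex = .{},
    entries: [64]Entry =
        [1]Entry{.{ .value = std.atomic.Value(u32).init(0), .hash = 0 }} ** 64,

    // Returns the value already published for `hash`, or publishes `value`.
    fn getOrPut(shard: *Shard, hash: u32, value: u32) u32 {
        const mask: u32 = shard.entries.len - 1;
        // Fast path: lock-free probe. The acquire load pairs with the
        // release store below, so a nonzero value implies `hash` is valid.
        var i = hash & mask;
        while (true) : (i = (i + 1) & mask) {
            const existing = shard.entries[i].value.load(.acquire);
            if (existing == 0) break; // empty slot reached: not present
            if (shard.entries[i].hash == hash) return existing;
        }
        // Slow path: re-probe under the mutex, since another thread may
        // have inserted this key between our probe and taking the lock.
        shard.mutex.lock();
        defer shard.mutex.unlock();
        i = hash & mask;
        while (true) : (i = (i + 1) & mask) {
            const existing = shard.entries[i].value.load(.acquire);
            if (existing == 0) {
                shard.entries[i].hash = hash; // write the hash first...
                shard.entries[i].value.store(value, .release); // ...then publish
                return value;
            }
            if (shard.entries[i].hash == hash) return existing;
        }
    }
};

test "second insert finds the first" {
    var shard: Shard = .{};
    try std.testing.expectEqual(@as(u32, 7), shard.getOrPut(0xdead, 7));
    try std.testing.expectEqual(@as(u32, 7), shard.getOrPut(0xdead, 8));
}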
/// Analysis Unit. Represents a single entity which undergoes semantic analysis.
@@ -324,6 +426,7 @@ const Local = struct {
extra: ListMutate,
limbs: ListMutate,
strings: ListMutate,
tracked_insts: MutexListMutate,
files: ListMutate,
decls: BucketListMutate,
@@ -335,6 +438,7 @@ const Local = struct {
extra: Extra,
limbs: Limbs,
strings: Strings,
tracked_insts: TrackedInsts,
files: List(File),
decls: Decls,
@@ -356,6 +460,7 @@ const Local = struct {
else => @compileError("unsupported host"),
};
const Strings = List(struct { u8 });
const TrackedInsts = List(struct { TrackedInst });
const decls_bucket_width = 8;
const decls_bucket_mask = (1 << decls_bucket_width) - 1;
@@ -375,6 +480,16 @@ const Local = struct {
};
};
const MutexListMutate = struct {
mutex: std.Thread.Mutex,
list: ListMutate,
const empty: MutexListMutate = .{
.mutex = .{},
.list = ListMutate.empty,
};
};
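
Note what this mutex does and does not protect: `resolveFull` above reads a thread's list without locking anything, so `MutexListMutate` only serializes writers, while readers rely on the list publishing its length (and any reallocated buffer) with release/acquire ordering. A minimal sketch of that pattern, assuming a hypothetical fixed-capacity `PerThreadList` (the real `Local.List` also reallocates through an arena and releases the new buffer):

const std = @import("std");

// Appends serialize through the mutex, but readers never lock. The
// acquire load of `len` pairs with the release store in `append`, so
// every element below the published length is fully written.
const PerThreadList = struct {
    mutex: std.Thread.Mutex = .{},
    items: [4096]u32 = undefined,
    len: std.atomic.Value(u32) = std.atomic.Value(u32).init(0),

    fn append(list: *PerThreadList, item: u32) void {
        list.mutex.lock();
        defer list.mutex.unlock();
        const len = list.len.load(.monotonic);
        list.items[len] = item; // write the element first...
        list.len.store(len + 1, .release); // ...then publish it
    }

    fn view(list: *const PerThreadList) []const u32 {
        return list.items[0..list.len.load(.acquire)];
    }
};

test "reader sees the published prefix" {
    var list: PerThreadList = .{};
    list.append(42);
    try std.testing.expectEqualSlices(u32, &[_]u32{42}, list.view());
}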
const BucketListMutate = struct {
last_bucket_len: u32,
buckets_list: ListMutate,
@@ -396,7 +511,7 @@ const Local = struct {
const ListSelf = @This();
const Mutable = struct {
gpa: std.mem.Allocator,
gpa: Allocator,
arena: *std.heap.ArenaAllocator.State,
mutate: *ListMutate,
list: *ListSelf,
@@ -564,7 +679,7 @@ const Local = struct {
mutable.list.release(new_list);
}
fn view(mutable: Mutable) View {
pub fn view(mutable: Mutable) View {
const capacity = mutable.list.header().capacity;
assert(capacity > 0); // optimizes `MultiArrayList.Slice.items`
return .{
@@ -614,7 +729,7 @@ const Local = struct {
};
}
pub fn getMutableItems(local: *Local, gpa: std.mem.Allocator) List(Item).Mutable {
pub fn getMutableItems(local: *Local, gpa: Allocator) List(Item).Mutable {
return .{
.gpa = gpa,
.arena = &local.mutate.arena,
@@ -623,7 +738,7 @@ const Local = struct {
};
}
pub fn getMutableExtra(local: *Local, gpa: std.mem.Allocator) Extra.Mutable {
pub fn getMutableExtra(local: *Local, gpa: Allocator) Extra.Mutable {
return .{
.gpa = gpa,
.arena = &local.mutate.arena,
@@ -636,7 +751,7 @@ const Local = struct {
/// On 64-bit systems, this array is used for big integers and associated metadata.
/// Use the helper methods instead of accessing this directly in order to not
/// violate the above mechanism.
pub fn getMutableLimbs(local: *Local, gpa: std.mem.Allocator) Limbs.Mutable {
pub fn getMutableLimbs(local: *Local, gpa: Allocator) Limbs.Mutable {
return switch (@sizeOf(Limb)) {
@sizeOf(u32) => local.getMutableExtra(gpa),
@sizeOf(u64) => .{
@@ -654,7 +769,7 @@ const Local = struct {
/// is referencing the data here whether they want to store both index and length,
/// thus allowing null bytes, or store only index, and use null-termination. The
/// `strings` array is agnostic to either usage.
pub fn getMutableStrings(local: *Local, gpa: std.mem.Allocator) Strings.Mutable {
pub fn getMutableStrings(local: *Local, gpa: Allocator) Strings.Mutable {
return .{
.gpa = gpa,
.arena = &local.mutate.arena,
@@ -663,6 +778,17 @@ const Local = struct {
};
}
/// An index into `tracked_insts` gives a reference to a single ZIR instruction which
/// persists across incremental updates.
pub fn getMutableTrackedInsts(local: *Local, gpa: Allocator) TrackedInsts.Mutable {
return .{
.gpa = gpa,
.arena = &local.mutate.arena,
.mutate = &local.mutate.tracked_insts.list,
.list = &local.shared.tracked_insts,
};
}
/// Elements are ordered identically to the `import_table` field of `Zcu`.
///
/// Unlike `import_table`, this data is serialized as part of incremental
@@ -672,7 +798,7 @@ const Local = struct {
/// `InternPool.TrackedInst`.
///
/// Value is the `Decl` of the struct that represents this `File`.
pub fn getMutableFiles(local: *Local, gpa: std.mem.Allocator) List(File).Mutable {
pub fn getMutableFiles(local: *Local, gpa: Allocator) List(File).Mutable {
return .{
.gpa = gpa,
.arena = &local.mutate.arena,
@@ -691,7 +817,7 @@ const Local = struct {
/// serialization trivial.
/// * It provides a unique integer to be used for anonymous symbol names, avoiding
/// multi-threaded contention on an atomic counter.
pub fn getMutableDecls(local: *Local, gpa: std.mem.Allocator) Decls.Mutable {
pub fn getMutableDecls(local: *Local, gpa: Allocator) Decls.Mutable {
return .{
.gpa = gpa,
.arena = &local.mutate.arena,
@@ -701,7 +827,7 @@ const Local = struct {
}
/// Same pattern as with `getMutableDecls`.
pub fn getMutableNamespaces(local: *Local, gpa: std.mem.Allocator) Namespaces.Mutable {
pub fn getMutableNamespaces(local: *Local, gpa: Allocator) Namespaces.Mutable {
return .{
.gpa = gpa,
.arena = &local.mutate.arena,
@@ -723,11 +849,13 @@ const Shard = struct {
shared: struct {
map: Map(Index),
string_map: Map(OptionalNullTerminatedString),
tracked_inst_map: Map(TrackedInst.Index.Optional),
} align(std.atomic.cache_line),
mutate: struct {
// TODO: measure cost of sharing unrelated mutate state
map: Mutate align(std.atomic.cache_line),
string_map: Mutate align(std.atomic.cache_line),
tracked_inst_map: Mutate align(std.atomic.cache_line),
},
const Mutate = struct {
@@ -5240,6 +5368,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
.extra = Local.Extra.empty,
.limbs = Local.Limbs.empty,
.strings = Local.Strings.empty,
.tracked_insts = Local.TrackedInsts.empty,
.files = Local.List(File).empty,
.decls = Local.Decls.empty,
@@ -5252,6 +5381,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
.extra = Local.ListMutate.empty,
.limbs = Local.ListMutate.empty,
.strings = Local.ListMutate.empty,
.tracked_insts = Local.MutexListMutate.empty,
.files = Local.ListMutate.empty,
.decls = Local.BucketListMutate.empty,
@@ -5267,10 +5397,12 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
.shared = .{
.map = Shard.Map(Index).empty,
.string_map = Shard.Map(OptionalNullTerminatedString).empty,
.tracked_inst_map = Shard.Map(TrackedInst.Index.Optional).empty,
},
.mutate = .{
.map = Shard.Mutate.empty,
.string_map = Shard.Mutate.empty,
.tracked_inst_map = Shard.Mutate.empty,
},
});
@@ -5311,8 +5443,6 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
for (ip.maps.items) |*map| map.deinit(gpa);
ip.maps.deinit(gpa);
ip.tracked_insts.deinit(gpa);
ip.src_hash_deps.deinit(gpa);
ip.decl_val_deps.deinit(gpa);
ip.func_ies_deps.deinit(gpa);
@@ -9887,7 +10017,7 @@ pub fn getOrPutTrailingString(
}
const key: []const u8 = strings.view().items(.@"0")[start..];
const value: embedded_nulls.StringType() =
@enumFromInt(@as(u32, @intFromEnum(tid)) << ip.tid_shift_32 | start);
@enumFromInt(@intFromEnum((String.Unwrapped{ .tid = tid, .index = start }).wrap(ip)));
const has_embedded_null = std.mem.indexOfScalar(u8, key, 0) != null;
switch (embedded_nulls) {
.no_embedded_nulls => assert(!has_embedded_null),

src/Sema.zig

@@ -835,12 +835,11 @@ pub const Block = struct {
}
fn trackZir(block: *Block, inst: Zir.Inst.Index) Allocator.Error!InternPool.TrackedInst.Index {
const sema = block.sema;
const gpa = sema.gpa;
const zcu = sema.pt.zcu;
const ip = &zcu.intern_pool;
const file_index = block.getFileScopeIndex(zcu);
return ip.trackZir(gpa, file_index, inst);
const pt = block.sema.pt;
return pt.zcu.intern_pool.trackZir(pt.zcu.gpa, pt.tid, .{
.file = block.getFileScopeIndex(pt.zcu),
.inst = inst,
});
}
};

src/Zcu.zig

@@ -1018,6 +1018,14 @@ pub const ErrorMsg = struct {
}
};
pub const AstGenSrc = union(enum) {
root,
import: struct {
importing_file: Zcu.File.Index,
import_tok: std.zig.Ast.TokenIndex,
},
};
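
`AstGenSrc` moves here from `Compilation`, so that both `Compilation.workerAstGenFile` and the relocated `Zcu.PerThread.reportRetryableAstGenError` can name the same type.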
/// Canonical reference to a position within a source file.
pub const SrcLoc = struct {
file_scope: *File,
@@ -3186,41 +3194,6 @@ pub fn handleUpdateExports(
};
}
pub fn reportRetryableFileError(
zcu: *Zcu,
file_index: File.Index,
comptime format: []const u8,
args: anytype,
) error{OutOfMemory}!void {
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const file = zcu.fileByIndex(file_index);
file.status = .retryable_failure;
const err_msg = try ErrorMsg.create(
gpa,
.{
.base_node_inst = try ip.trackZir(gpa, file_index, .main_struct_inst),
.offset = .entire_file,
},
format,
args,
);
errdefer err_msg.destroy(gpa);
zcu.comp.mutex.lock();
defer zcu.comp.mutex.unlock();
const gop = try zcu.failed_files.getOrPut(gpa, file);
if (gop.found_existing) {
if (gop.value_ptr.*) |old_err_msg| {
old_err_msg.destroy(gpa);
}
}
gop.value_ptr.* = err_msg;
}
pub fn addGlobalAssembly(mod: *Module, decl_index: Decl.Index, source: []const u8) !void {
const gop = try mod.global_assembly.getOrPut(mod.gpa, decl_index);
if (gop.found_existing) {

src/Zcu/PerThread.zig

@@ -342,6 +342,7 @@ pub fn astGenFile(
/// the Compilation mutex when acting on shared state.
fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index, old_zir: Zir) !void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
const new_zir = file.zir;
@@ -355,109 +356,117 @@ fn updateZirRefs(pt: Zcu.PerThread, file: *Zcu.File, file_index: Zcu.File.Index,
// TODO: this should be done after all AstGen workers complete, to avoid
// iterating over this full set for every updated file.
for (zcu.intern_pool.tracked_insts.keys(), 0..) |*ti, idx_raw| {
const ti_idx: InternPool.TrackedInst.Index = @enumFromInt(idx_raw);
if (ti.file != file_index) continue;
const old_inst = ti.inst;
ti.inst = inst_map.get(ti.inst) orelse {
// Tracking failed for this instruction. Invalidate associated `src_hash` deps.
zcu.comp.mutex.lock();
defer zcu.comp.mutex.unlock();
log.debug("tracking failed for %{d}", .{old_inst});
try zcu.markDependeeOutdated(.{ .src_hash = ti_idx });
continue;
};
for (ip.locals, 0..) |*local, tid| {
local.mutate.tracked_insts.mutex.lock();
defer local.mutate.tracked_insts.mutex.unlock();
const tracked_insts_list = local.getMutableTrackedInsts(gpa);
for (tracked_insts_list.view().items(.@"0"), 0..) |*tracked_inst, tracked_inst_unwrapped_index| {
if (tracked_inst.file != file_index) continue;
const old_inst = tracked_inst.inst;
const tracked_inst_index = (InternPool.TrackedInst.Index.Unwrapped{
.tid = @enumFromInt(tid),
.index = @intCast(tracked_inst_unwrapped_index),
}).wrap(ip);
tracked_inst.inst = inst_map.get(old_inst) orelse {
// Tracking failed for this instruction. Invalidate associated `src_hash` deps.
zcu.comp.mutex.lock();
defer zcu.comp.mutex.unlock();
log.debug("tracking failed for %{d}", .{old_inst});
try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index });
continue;
};
if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: {
if (new_zir.getAssociatedSrcHash(ti.inst)) |new_hash| {
if (std.zig.srcHashEql(old_hash, new_hash)) {
break :hash_changed;
if (old_zir.getAssociatedSrcHash(old_inst)) |old_hash| hash_changed: {
if (new_zir.getAssociatedSrcHash(tracked_inst.inst)) |new_hash| {
if (std.zig.srcHashEql(old_hash, new_hash)) {
break :hash_changed;
}
log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{
old_inst,
tracked_inst.inst,
std.fmt.fmtSliceHexLower(&old_hash),
std.fmt.fmtSliceHexLower(&new_hash),
});
}
log.debug("hash for (%{d} -> %{d}) changed: {} -> {}", .{
old_inst,
ti.inst,
std.fmt.fmtSliceHexLower(&old_hash),
std.fmt.fmtSliceHexLower(&new_hash),
});
// The source hash associated with this instruction changed - invalidate relevant dependencies.
zcu.comp.mutex.lock();
defer zcu.comp.mutex.unlock();
try zcu.markDependeeOutdated(.{ .src_hash = tracked_inst_index });
}
// The source hash associated with this instruction changed - invalidate relevant dependencies.
zcu.comp.mutex.lock();
defer zcu.comp.mutex.unlock();
try zcu.markDependeeOutdated(.{ .src_hash = ti_idx });
}
// If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies.
const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) {
.extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) {
.struct_decl, .union_decl, .opaque_decl, .enum_decl => true,
// If this is a `struct_decl` etc, we must invalidate any outdated namespace dependencies.
const has_namespace = switch (old_tag[@intFromEnum(old_inst)]) {
.extended => switch (old_data[@intFromEnum(old_inst)].extended.opcode) {
.struct_decl, .union_decl, .opaque_decl, .enum_decl => true,
else => false,
},
else => false,
},
else => false,
};
if (!has_namespace) continue;
};
if (!has_namespace) continue;
var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
defer old_names.deinit(zcu.gpa);
{
var it = old_zir.declIterator(old_inst);
while (it.next()) |decl_inst| {
const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
switch (decl_name) {
.@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
_ => if (decl_name.isNamedTest(old_zir)) continue,
var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
defer old_names.deinit(zcu.gpa);
{
var it = old_zir.declIterator(old_inst);
while (it.next()) |decl_inst| {
const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
switch (decl_name) {
.@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
_ => if (decl_name.isNamedTest(old_zir)) continue,
}
const name_zir = decl_name.toString(old_zir).?;
const name_ip = try zcu.intern_pool.getOrPutString(
zcu.gpa,
pt.tid,
old_zir.nullTerminatedString(name_zir),
.no_embedded_nulls,
);
try old_names.put(zcu.gpa, name_ip, {});
}
const name_zir = decl_name.toString(old_zir).?;
const name_ip = try zcu.intern_pool.getOrPutString(
zcu.gpa,
pt.tid,
old_zir.nullTerminatedString(name_zir),
.no_embedded_nulls,
);
try old_names.put(zcu.gpa, name_ip, {});
}
}
var any_change = false;
{
var it = new_zir.declIterator(ti.inst);
while (it.next()) |decl_inst| {
const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
switch (decl_name) {
.@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
_ => if (decl_name.isNamedTest(old_zir)) continue,
var any_change = false;
{
var it = new_zir.declIterator(tracked_inst.inst);
while (it.next()) |decl_inst| {
const decl_name = old_zir.getDeclaration(decl_inst)[0].name;
switch (decl_name) {
.@"comptime", .@"usingnamespace", .unnamed_test, .decltest => continue,
_ => if (decl_name.isNamedTest(old_zir)) continue,
}
const name_zir = decl_name.toString(old_zir).?;
const name_ip = try zcu.intern_pool.getOrPutString(
zcu.gpa,
pt.tid,
old_zir.nullTerminatedString(name_zir),
.no_embedded_nulls,
);
if (!old_names.swapRemove(name_ip)) continue;
// Name added
any_change = true;
zcu.comp.mutex.lock();
defer zcu.comp.mutex.unlock();
try zcu.markDependeeOutdated(.{ .namespace_name = .{
.namespace = tracked_inst_index,
.name = name_ip,
} });
}
const name_zir = decl_name.toString(old_zir).?;
const name_ip = try zcu.intern_pool.getOrPutString(
zcu.gpa,
pt.tid,
old_zir.nullTerminatedString(name_zir),
.no_embedded_nulls,
);
if (!old_names.swapRemove(name_ip)) continue;
// Name added
}
// The only elements remaining in `old_names` now are any names which were removed.
for (old_names.keys()) |name_ip| {
any_change = true;
zcu.comp.mutex.lock();
defer zcu.comp.mutex.unlock();
try zcu.markDependeeOutdated(.{ .namespace_name = .{
.namespace = ti_idx,
.namespace = tracked_inst_index,
.name = name_ip,
} });
}
}
// The only elements remaining in `old_names` now are any names which were removed.
for (old_names.keys()) |name_ip| {
any_change = true;
zcu.comp.mutex.lock();
defer zcu.comp.mutex.unlock();
try zcu.markDependeeOutdated(.{ .namespace_name = .{
.namespace = ti_idx,
.name = name_ip,
} });
}
if (any_change) {
zcu.comp.mutex.lock();
defer zcu.comp.mutex.unlock();
try zcu.markDependeeOutdated(.{ .namespace = ti_idx });
if (any_change) {
zcu.comp.mutex.lock();
defer zcu.comp.mutex.unlock();
try zcu.markDependeeOutdated(.{ .namespace = tracked_inst_index });
}
}
}
}
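
The shape of the rewritten loop mirrors the new storage: instead of enumerating the keys of one global map, `updateZirRefs` walks each `Local`'s `tracked_insts` under that local's mutex and reconstructs every instruction's packed `TrackedInst.Index` from its `(tid, position)` pair via `Unwrapped.wrap`.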
@@ -854,7 +863,10 @@ fn getFileRootStruct(
const decls = file.zir.bodySlice(extra_index, decls_len);
extra_index += decls_len;
const tracked_inst = try ip.trackZir(gpa, file_index, .main_struct_inst);
const tracked_inst = try ip.trackZir(gpa, pt.tid, .{
.file = file_index,
.inst = .main_struct_inst,
});
const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{
.layout = .auto,
.fields_len = fields_len,
@@ -1015,7 +1027,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
switch (zcu.comp.cache_use) {
.whole => |whole| if (whole.cache_manifest) |man| {
const source = file.getSource(gpa) catch |err| {
try Zcu.reportRetryableFileError(zcu, file_index, "unable to load source: {s}", .{@errorName(err)});
try pt.reportRetryableFileError(file_index, "unable to load source: {s}", .{@errorName(err)});
return error.AnalysisFail;
};
@@ -1024,7 +1036,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
file.mod.root.sub_path,
file.sub_file_path,
}) catch |err| {
try Zcu.reportRetryableFileError(zcu, file_index, "unable to resolve path: {s}", .{@errorName(err)});
try pt.reportRetryableFileError(file_index, "unable to resolve path: {s}", .{@errorName(err)});
return error.AnalysisFail;
};
errdefer gpa.free(resolved_path);
@@ -1148,11 +1160,10 @@ fn semaDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !Zcu.SemaDeclResult {
defer sema.deinit();
// Every Decl (other than file root Decls, which do not have a ZIR index) has a dependency on its own source.
try sema.declareDependency(.{ .src_hash = try ip.trackZir(
gpa,
decl.getFileScopeIndex(zcu),
decl_inst,
) });
try sema.declareDependency(.{ .src_hash = try ip.trackZir(gpa, pt.tid, .{
.file = decl.getFileScopeIndex(zcu),
.inst = decl_inst,
}) });
var block_scope: Sema.Block = .{
.parent = null,
@@ -1890,7 +1901,10 @@ const ScanDeclIter = struct {
}
const parent_file_scope_index = iter.parent_decl.getFileScopeIndex(zcu);
const tracked_inst = try ip.trackZir(gpa, parent_file_scope_index, decl_inst);
const tracked_inst = try ip.trackZir(gpa, pt.tid, .{
.file = parent_file_scope_index,
.inst = decl_inst,
});
// We create a Decl for it regardless of analysis status.
@@ -2611,6 +2625,87 @@ pub fn linkerUpdateDecl(pt: Zcu.PerThread, decl_index: Zcu.Decl.Index) !void {
}
}
pub fn reportRetryableAstGenError(
pt: Zcu.PerThread,
src: Zcu.AstGenSrc,
file_index: Zcu.File.Index,
err: anyerror,
) error{OutOfMemory}!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const file = zcu.fileByIndex(file_index);
file.status = .retryable_failure;
const src_loc: Zcu.LazySrcLoc = switch (src) {
.root => .{
.base_node_inst = try ip.trackZir(gpa, pt.tid, .{
.file = file_index,
.inst = .main_struct_inst,
}),
.offset = .entire_file,
},
.import => |info| .{
.base_node_inst = try ip.trackZir(gpa, pt.tid, .{
.file = info.importing_file,
.inst = .main_struct_inst,
}),
.offset = .{ .token_abs = info.import_tok },
},
};
const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{
file.mod.root, file.sub_file_path, @errorName(err),
});
errdefer err_msg.destroy(gpa);
{
zcu.comp.mutex.lock();
defer zcu.comp.mutex.unlock();
try zcu.failed_files.putNoClobber(gpa, file, err_msg);
}
}
pub fn reportRetryableFileError(
pt: Zcu.PerThread,
file_index: Zcu.File.Index,
comptime format: []const u8,
args: anytype,
) error{OutOfMemory}!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const file = zcu.fileByIndex(file_index);
file.status = .retryable_failure;
const err_msg = try Zcu.ErrorMsg.create(
gpa,
.{
.base_node_inst = try ip.trackZir(gpa, pt.tid, .{
.file = file_index,
.inst = .main_struct_inst,
}),
.offset = .entire_file,
},
format,
args,
);
errdefer err_msg.destroy(gpa);
zcu.comp.mutex.lock();
defer zcu.comp.mutex.unlock();
const gop = try zcu.failed_files.getOrPut(gpa, file);
if (gop.found_existing) {
if (gop.value_ptr.*) |old_err_msg| {
old_err_msg.destroy(gpa);
}
}
gop.value_ptr.* = err_msg;
}
/// Shortcut for calling `intern_pool.get`.
pub fn intern(pt: Zcu.PerThread, key: InternPool.Key) Allocator.Error!InternPool.Index {
return pt.zcu.intern_pool.get(pt.zcu.gpa, pt.tid, key);