compiler: begin untangling anonymous decls from source decls

The idea here is to move towards a future where anonymous decls are
represented entirely by an `InternPool.Index`. This was needed to start
implementing `InternPool.getFuncDecl`, which requires moving creation and
deletion of Decl objects into InternPool.

 * remove `Namespace.anon_decls`
 * remove the concept of cleaning up resources from anonymous decls,
   relying on InternPool instead
 * move namespace and decl object allocation into InternPool

Andrew Kelley 2023-07-06 15:27:47 -07:00
parent db33ee45b7
commit 55e89255e1
4 changed files with 170 additions and 131 deletions

src/Compilation.zig

@@ -2055,15 +2055,9 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void
const decl = module.declPtr(decl_index);
assert(decl.deletion_flag);
assert(decl.dependants.count() == 0);
const is_anon = if (decl.zir_decl_index == 0) blk: {
break :blk module.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index);
} else false;
assert(decl.zir_decl_index != 0);
try module.clearDecl(decl_index, null);
if (is_anon) {
module.destroyDecl(decl_index);
}
}
try module.processExports();

src/InternPool.zig

@@ -20,6 +20,25 @@ limbs: std.ArrayListUnmanaged(u64) = .{},
/// `string_bytes` array is agnostic to either usage.
string_bytes: std.ArrayListUnmanaged(u8) = .{},
/// Rather than allocating Decl objects with an Allocator, we instead allocate
/// them with this SegmentedList. This provides four advantages:
/// * Stable memory so that one thread can access a Decl object while another
/// thread allocates additional Decl objects from this list.
/// * It allows us to use u32 indexes to reference Decl objects rather than
/// pointers, saving memory in Type, Value, and dependency sets.
/// * Using integers to reference Decl objects rather than pointers makes
/// serialization trivial.
/// * It provides a unique integer to be used for anonymous symbol names, avoiding
/// multi-threaded contention on an atomic counter.
allocated_decls: std.SegmentedList(Module.Decl, 0) = .{},
/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack.
decls_free_list: std.ArrayListUnmanaged(Module.Decl.Index) = .{},
/// Same pattern as with `allocated_decls`.
allocated_namespaces: std.SegmentedList(Module.Namespace, 0) = .{},
/// Same pattern as with `decls_free_list`.
namespaces_free_list: std.ArrayListUnmanaged(Module.Namespace.Index) = .{},
/// Struct objects are stored in this data structure because:
/// * They contain pointers such as the field maps.
/// * They need to be mutated after creation.
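
The first advantage in the doc comment above is the load-bearing one. A small self-contained test (plain `std` types standing in for `Module.Decl`; not part of the commit) demonstrates the pointer stability that `SegmentedList` guarantees and a plain `ArrayList` does not:

```zig
const std = @import("std");

test "SegmentedList pointers survive growth" {
    const gpa = std.testing.allocator;
    var list: std.SegmentedList(u64, 0) = .{};
    defer list.deinit(gpa);

    (try list.addOne(gpa)).* = 123;
    const first = list.at(0); // pointer taken before any growth

    // Grow far past the first shelf; an ArrayList would likely have
    // reallocated and invalidated `first` by now.
    var i: u64 = 0;
    while (i < 10_000) : (i += 1) {
        (try list.addOne(gpa)).* = i;
    }

    // The pre-growth pointer is still valid and unchanged.
    try std.testing.expect(first == list.at(0));
    try std.testing.expectEqual(@as(u64, 123), first.*);
}
```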
@@ -2694,6 +2713,12 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
ip.inferred_error_sets_free_list.deinit(gpa);
ip.allocated_inferred_error_sets.deinit(gpa);
ip.decls_free_list.deinit(gpa);
ip.allocated_decls.deinit(gpa);
ip.namespaces_free_list.deinit(gpa);
ip.allocated_namespaces.deinit(gpa);
for (ip.maps.items) |*map| map.deinit(gpa);
ip.maps.deinit(gpa);
@@ -4274,6 +4299,7 @@ pub fn getExternFunc(ip: *InternPool, gpa: Allocator, key: GetExternFuncKey) All
}
pub const GetFuncDeclKey = struct {
fn_owner_decl: Module.Decl.Index,
param_types: []const Index,
noalias_bits: u32,
comptime_bits: u32,
@@ -4303,9 +4329,36 @@ pub const GetFuncDeclKey = struct {
};
pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocator.Error!Index {
_ = ip;
_ = gpa;
_ = key;
const fn_owner_decl = ip.declPtr(key.fn_owner_decl);
const decl_index = try ip.createDecl(gpa, .{
.name = undefined,
.src_namespace = fn_owner_decl.src_namespace,
.src_node = fn_owner_decl.src_node,
.src_line = fn_owner_decl.src_line,
.has_tv = true,
.owns_tv = true,
.ty = @panic("TODO"),
.val = @panic("TODO"),
.alignment = .none,
.@"linksection" = fn_owner_decl.@"linksection",
.@"addrspace" = fn_owner_decl.@"addrspace",
.analysis = .complete,
.deletion_flag = false,
.zir_decl_index = fn_owner_decl.zir_decl_index,
.src_scope = fn_owner_decl.src_scope,
.generation = 0,
.is_pub = fn_owner_decl.is_pub,
.is_exported = fn_owner_decl.is_exported,
.has_linksection_or_addrspace = fn_owner_decl.has_linksection_or_addrspace,
.has_align = fn_owner_decl.has_align,
.alive = true,
.kind = .anon,
});
// TODO better names for generic function instantiations
const decl_name = try ip.getOrPutStringFmt(gpa, "{}__anon_{d}", .{
fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index),
});
ip.declPtr(decl_index).name = decl_name;
@panic("TODO");
}
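
Because a live `Decl.Index` is never shared by two decls, its integer value doubles as a collision-free suffix for the generated name, which is what the `{}__anon_{d}` format above relies on. A minimal sketch of the scheme, with a hypothetical `anonName` helper and a plain string slice in place of the InternPool's interned names:

```zig
const std = @import("std");

const DeclIndex = enum(u32) { _ };

// Hypothetical helper: the index behind the enum is unique while the decl
// lives, so no atomic counter is needed to avoid name collisions.
fn anonName(gpa: std.mem.Allocator, owner: []const u8, index: DeclIndex) ![]u8 {
    return std.fmt.allocPrint(gpa, "{s}__anon_{d}", .{ owner, @intFromEnum(index) });
}

test anonName {
    const name = try anonName(std.testing.allocator, "main", @enumFromInt(42));
    defer std.testing.allocator.free(name);
    try std.testing.expectEqualStrings("main__anon_42", name);
}
```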
@@ -5553,6 +5606,14 @@ pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.InferredErr
return ip.allocated_inferred_error_sets.at(@intFromEnum(index));
}
pub fn declPtr(ip: *InternPool, index: Module.Decl.Index) *Module.Decl {
return ip.allocated_decls.at(@intFromEnum(index));
}
pub fn namespacePtr(ip: *InternPool, index: Module.Namespace.Index) *Module.Namespace {
return ip.allocated_namespaces.at(@intFromEnum(index));
}
pub fn createStruct(
ip: *InternPool,
gpa: Allocator,
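
Both accessors use the typed-index idiom: a non-exhaustive `enum(u32)` serves as the handle, so a `Decl.Index` cannot be passed where a `Namespace.Index` is expected, and `@intFromEnum` recovers the integer for `SegmentedList.at`. In miniature (toy element type, hypothetical `DeclIndex`):

```zig
const std = @import("std");

const DeclIndex = enum(u32) { _ };

test "typed index round-trip" {
    const gpa = std.testing.allocator;
    var store: std.SegmentedList(u8, 0) = .{};
    defer store.deinit(gpa);
    (try store.addOne(gpa)).* = 'z';

    // Wrap the raw position in the handle type, then unwrap to access.
    const index: DeclIndex = @enumFromInt(store.len - 1);
    try std.testing.expectEqual(@as(u8, 'z'), store.at(@intFromEnum(index)).*);
}
```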
@@ -5619,6 +5680,50 @@ pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.In
};
}
pub fn createDecl(
ip: *InternPool,
gpa: Allocator,
initialization: Module.Decl,
) Allocator.Error!Module.Decl.Index {
if (ip.decls_free_list.popOrNull()) |index| {
ip.allocated_decls.at(@intFromEnum(index)).* = initialization;
return index;
}
const ptr = try ip.allocated_decls.addOne(gpa);
ptr.* = initialization;
return @as(Module.Decl.Index, @enumFromInt(ip.allocated_decls.len - 1));
}
pub fn destroyDecl(ip: *InternPool, gpa: Allocator, index: Module.Decl.Index) void {
ip.declPtr(index).* = undefined;
ip.decls_free_list.append(gpa, index) catch {
// In order to keep `destroyDecl` a non-fallible function, we ignore memory
// allocation failures here, instead leaking the Decl until garbage collection.
};
}
pub fn createNamespace(
ip: *InternPool,
gpa: Allocator,
initialization: Module.Namespace,
) Allocator.Error!Module.Namespace.Index {
if (ip.namespaces_free_list.popOrNull()) |index| {
ip.allocated_namespaces.at(@intFromEnum(index)).* = initialization;
return index;
}
const ptr = try ip.allocated_namespaces.addOne(gpa);
ptr.* = initialization;
return @as(Module.Namespace.Index, @enumFromInt(ip.allocated_namespaces.len - 1));
}
pub fn destroyNamespace(ip: *InternPool, gpa: Allocator, index: Module.Namespace.Index) void {
ip.namespacePtr(index).* = undefined;
ip.namespaces_free_list.append(gpa, index) catch {
// In order to keep `destroyNamespace` a non-fallible function, we ignore memory
// allocation failures here, instead leaking the Namespace until garbage collection.
};
}
pub fn getOrPutString(
ip: *InternPool,
gpa: Allocator,
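
`createDecl`/`destroyDecl` and their `Namespace` twins are one allocation pattern instantiated twice: recycle an index from the free list when possible, otherwise grow the segmented list; `destroy` stays infallible by leaking on OOM. A hedged generic sketch of that pattern (bare `u32` indexes instead of the compiler's enum index types):

```zig
const std = @import("std");

fn Pool(comptime T: type) type {
    return struct {
        items: std.SegmentedList(T, 0) = .{},
        free_list: std.ArrayListUnmanaged(u32) = .{},

        fn create(p: *@This(), gpa: std.mem.Allocator, init: T) !u32 {
            // Prefer recycling a previously destroyed slot.
            if (p.free_list.popOrNull()) |index| {
                p.items.at(index).* = init;
                return index;
            }
            const ptr = try p.items.addOne(gpa);
            ptr.* = init;
            return @intCast(p.items.len - 1);
        }

        fn destroy(p: *@This(), gpa: std.mem.Allocator, index: u32) void {
            p.items.at(index).* = undefined;
            // Infallible by design: on OOM the slot simply leaks.
            p.free_list.append(gpa, index) catch {};
        }
    };
}

test Pool {
    const gpa = std.testing.allocator;
    var pool: Pool(u64) = .{};
    defer {
        pool.items.deinit(gpa);
        pool.free_list.deinit(gpa);
    }
    const a = try pool.create(gpa, 1);
    pool.destroy(gpa, a);
    const b = try pool.create(gpa, 2); // recycles the freed slot
    try std.testing.expectEqual(a, b);
}
```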

src/Module.zig

@@ -87,7 +87,9 @@ import_table: std.StringArrayHashMapUnmanaged(*File) = .{},
/// Keys are fully resolved file paths. This table owns the keys and values.
embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{},
/// Stores all Type and Value objects; periodically garbage collected.
/// Stores all Type and Value objects.
/// The idea is that this will be periodically garbage-collected, but such logic
/// is not yet implemented.
intern_pool: InternPool = .{},
/// To be eliminated in a future commit by moving more data into InternPool.
@@ -152,25 +154,6 @@ emit_h: ?*GlobalEmitH,
test_functions: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
/// Rather than allocating Decl objects with an Allocator, we instead allocate
/// them with this SegmentedList. This provides four advantages:
/// * Stable memory so that one thread can access a Decl object while another
/// thread allocates additional Decl objects from this list.
/// * It allows us to use u32 indexes to reference Decl objects rather than
/// pointers, saving memory in Type, Value, and dependency sets.
/// * Using integers to reference Decl objects rather than pointers makes
/// serialization trivial.
/// * It provides a unique integer to be used for anonymous symbol names, avoiding
/// multi-threaded contention on an atomic counter.
allocated_decls: std.SegmentedList(Decl, 0) = .{},
/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack.
decls_free_list: ArrayListUnmanaged(Decl.Index) = .{},
/// Same pattern as with `allocated_decls`.
allocated_namespaces: std.SegmentedList(Namespace, 0) = .{},
/// Same pattern as with `decls_free_list`.
namespaces_free_list: ArrayListUnmanaged(Namespace.Index) = .{},
global_assembly: std.AutoHashMapUnmanaged(Decl.Index, []u8) = .{},
reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct {
@@ -313,6 +296,9 @@ pub const CaptureScope = struct {
}
pub fn incRef(self: *CaptureScope) void {
// TODO: wtf is reference counting doing in my beautiful codebase? 😠
// seriously though, let's change this to rely on InternPool garbage
// collection instead.
self.refs += 1;
}
@@ -1427,12 +1413,10 @@ pub const Namespace = struct {
/// Direct children of the namespace. Used during an update to detect
/// which decls have been added/removed from source.
/// Declaration order is preserved via entry order.
/// Key memory is owned by `decl.name`.
/// Anonymous decls are not stored here; they are kept in `anon_decls` instead.
/// These are only declarations named directly by the AST; anonymous
/// declarations are not stored here.
decls: std.ArrayHashMapUnmanaged(Decl.Index, void, DeclContext, true) = .{},
anon_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
/// Key is usingnamespace Decl itself. To find the namespace being included,
/// the Decl Value has to be resolved as a Type which has a Namespace.
/// Value is whether the usingnamespace decl is marked `pub`.
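
The `decls` map shown above keys on `Decl.Index` but defines identity by the *name* the index refers to, via the custom `DeclContext`. A hedged miniature of that setup, with a slice of strings standing in for the InternPool's name storage:

```zig
const std = @import("std");

// Toy stand-in for interned name storage.
const Names = struct {
    list: []const []const u8,

    // Context that hashes the name an index points at, mirroring how
    // DeclContext hashes `decl.name` rather than the index itself.
    const Ctx = struct {
        names: *const Names,
        pub fn hash(ctx: @This(), key: u32) u32 {
            return @truncate(std.hash.Wyhash.hash(0, ctx.names.list[key]));
        }
        pub fn eql(ctx: @This(), a: u32, b: u32, b_index: usize) bool {
            _ = b_index;
            return std.mem.eql(u8, ctx.names.list[a], ctx.names.list[b]);
        }
    };
};

test "index keys, name-based identity" {
    const gpa = std.testing.allocator;
    const names = Names{ .list = &.{ "foo", "bar", "foo" } };
    var decls: std.ArrayHashMapUnmanaged(u32, void, Names.Ctx, true) = .{};
    defer decls.deinit(gpa);

    const ctx = Names.Ctx{ .names = &names };
    _ = try decls.getOrPutContext(gpa, 0, ctx); // "foo"
    _ = try decls.getOrPutContext(gpa, 1, ctx); // "bar"
    const gop = try decls.getOrPutContext(gpa, 2, ctx); // also "foo"
    try std.testing.expect(gop.found_existing); // same name, same slot
    try std.testing.expectEqual(@as(usize, 2), decls.count());
}
```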
@@ -1487,18 +1471,11 @@ pub const Namespace = struct {
var decls = ns.decls;
ns.decls = .{};
var anon_decls = ns.anon_decls;
ns.anon_decls = .{};
for (decls.keys()) |decl_index| {
mod.destroyDecl(decl_index);
}
decls.deinit(gpa);
for (anon_decls.keys()) |key| {
mod.destroyDecl(key);
}
anon_decls.deinit(gpa);
ns.usingnamespace_set.deinit(gpa);
}
@@ -1512,9 +1489,6 @@ pub const Namespace = struct {
var decls = ns.decls;
ns.decls = .{};
var anon_decls = ns.anon_decls;
ns.anon_decls = .{};
// TODO rework this code to not panic on OOM.
// (might want to coordinate with the clearDecl function)
@@ -1524,12 +1498,6 @@ pub const Namespace = struct {
}
decls.deinit(gpa);
for (anon_decls.keys()) |child_decl| {
mod.clearDecl(child_decl, outdated_decls) catch @panic("out of memory");
mod.destroyDecl(child_decl);
}
anon_decls.deinit(gpa);
ns.usingnamespace_set.deinit(gpa);
}
@@ -3195,14 +3163,9 @@ pub fn deinit(mod: *Module) void {
mod.test_functions.deinit(gpa);
mod.decls_free_list.deinit(gpa);
mod.allocated_decls.deinit(gpa);
mod.global_assembly.deinit(gpa);
mod.reference_table.deinit(gpa);
mod.namespaces_free_list.deinit(gpa);
mod.allocated_namespaces.deinit(gpa);
mod.memoized_decls.deinit(gpa);
mod.intern_pool.deinit(gpa);
mod.tmp_hack_arena.deinit();
@@ -3210,6 +3173,8 @@ pub fn deinit(mod: *Module) void {
pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
{
const decl = mod.declPtr(decl_index);
_ = mod.test_functions.swapRemove(decl_index);
@@ -3228,12 +3193,10 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
if (decl.src_scope) |scope| scope.decRef(gpa);
decl.dependants.deinit(gpa);
decl.dependencies.deinit(gpa);
decl.* = undefined;
}
mod.decls_free_list.append(gpa, decl_index) catch {
// In order to keep `destroyDecl` a non-fallible function, we ignore memory
// allocation failures here, instead leaking the Decl until garbage collection.
};
ip.destroyDecl(gpa, decl_index);
if (mod.emit_h) |mod_emit_h| {
const decl_emit_h = mod_emit_h.declPtr(decl_index);
decl_emit_h.fwd_decl.deinit(gpa);
@@ -3242,11 +3205,11 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
}
pub fn declPtr(mod: *Module, index: Decl.Index) *Decl {
return mod.allocated_decls.at(@intFromEnum(index));
return mod.intern_pool.declPtr(index);
}
pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace {
return mod.allocated_namespaces.at(@intFromEnum(index));
return mod.intern_pool.namespacePtr(index);
}
pub fn unionPtr(mod: *Module, index: Union.Index) *Union {
@@ -3740,9 +3703,6 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
for (namespace.decls.keys()) |sub_decl| {
try decl_stack.append(gpa, sub_decl);
}
for (namespace.anon_decls.keys()) |sub_decl| {
try decl_stack.append(gpa, sub_decl);
}
}
}
}
@@ -5202,21 +5162,19 @@ pub fn clearDecl(
}
/// This function is exclusively called for anonymous decls.
/// All resources referenced by anonymous decls are owned by InternPool
/// so there is no cleanup to do here.
pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void {
const decl = mod.declPtr(decl_index);
const gpa = mod.gpa;
const ip = &mod.intern_pool;
assert(!mod.declIsRoot(decl_index));
assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index));
ip.destroyDecl(gpa, decl_index);
const dependants = decl.dependants.keys();
for (dependants) |dep| {
mod.declPtr(dep).removeDependency(decl_index);
if (mod.emit_h) |mod_emit_h| {
const decl_emit_h = mod_emit_h.declPtr(decl_index);
decl_emit_h.fwd_decl.deinit(gpa);
decl_emit_h.* = undefined;
}
for (decl.dependencies.keys()) |dep| {
mod.declPtr(dep).removeDependant(decl_index);
}
mod.destroyDecl(decl_index);
}
/// We don't perform a deletion here, because this Decl or another one
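
With resource cleanup gone, what remains in `deleteUnusedDecl` is unlinking the decl from the dependency graph in both directions. A toy version of that symmetric removal (hypothetical `unlink` helper, not Module's real API):

```zig
const std = @import("std");

const Node = struct {
    dependants: std.AutoArrayHashMapUnmanaged(u32, void) = .{},
    dependencies: std.AutoArrayHashMapUnmanaged(u32, void) = .{},
};

// Every node that depends on `index` forgets it, and every node `index`
// depends on drops it as a dependant.
fn unlink(nodes: []Node, index: u32) void {
    const node = &nodes[index];
    for (node.dependants.keys()) |dep|
        _ = nodes[dep].dependencies.swapRemove(index);
    for (node.dependencies.keys()) |dep|
        _ = nodes[dep].dependants.swapRemove(index);
}

test unlink {
    const gpa = std.testing.allocator;
    var nodes = [_]Node{ .{}, .{} };
    defer for (&nodes) |*n| {
        n.dependants.deinit(gpa);
        n.dependencies.deinit(gpa);
    };
    // node 1 depends on node 0
    try nodes[1].dependencies.put(gpa, 0, {});
    try nodes[0].dependants.put(gpa, 1, {});
    unlink(&nodes, 0);
    try std.testing.expectEqual(@as(usize, 0), nodes[1].dependencies.count());
}
```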
@@ -5233,7 +5191,6 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
const decl = mod.declPtr(decl_index);
assert(!mod.declIsRoot(decl_index));
assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index));
// An aborted decl must not have dependants -- they must have
// been aborted first and removed from this list.
@@ -5545,21 +5502,11 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void {
}
pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index {
if (mod.namespaces_free_list.popOrNull()) |index| {
mod.allocated_namespaces.at(@intFromEnum(index)).* = initialization;
return index;
}
const ptr = try mod.allocated_namespaces.addOne(mod.gpa);
ptr.* = initialization;
return @as(Namespace.Index, @enumFromInt(mod.allocated_namespaces.len - 1));
return mod.intern_pool.createNamespace(mod.gpa, initialization);
}
pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void {
mod.namespacePtr(index).* = undefined;
mod.namespaces_free_list.append(mod.gpa, index) catch {
// In order to keep `destroyNamespace` a non-fallible function, we ignore memory
// allocation failures here, instead leaking the Namespace until garbage collection.
};
return mod.intern_pool.destroyNamespace(mod.gpa, index);
}
pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index {
@@ -5584,29 +5531,9 @@ pub fn allocateNewDecl(
src_node: Ast.Node.Index,
src_scope: ?*CaptureScope,
) !Decl.Index {
const decl_and_index: struct {
new_decl: *Decl,
decl_index: Decl.Index,
} = if (mod.decls_free_list.popOrNull()) |decl_index| d: {
break :d .{
.new_decl = mod.declPtr(decl_index),
.decl_index = decl_index,
};
} else d: {
const decl = try mod.allocated_decls.addOne(mod.gpa);
errdefer mod.allocated_decls.shrinkRetainingCapacity(mod.allocated_decls.len - 1);
if (mod.emit_h) |mod_emit_h| {
const decl_emit_h = try mod_emit_h.allocated_emit_h.addOne(mod.gpa);
decl_emit_h.* = .{};
}
break :d .{
.new_decl = decl,
.decl_index = @as(Decl.Index, @enumFromInt(mod.allocated_decls.len - 1)),
};
};
if (src_scope) |scope| scope.incRef();
decl_and_index.new_decl.* = .{
const ip = &mod.intern_pool;
const gpa = mod.gpa;
const decl_index = try ip.createDecl(gpa, .{
.name = undefined,
.src_namespace = namespace,
.src_node = src_node,
@@ -5629,9 +5556,18 @@ pub fn allocateNewDecl(
.has_align = false,
.alive = false,
.kind = .anon,
};
});
return decl_and_index.decl_index;
if (mod.emit_h) |mod_emit_h| {
if (@intFromEnum(decl_index) >= mod_emit_h.allocated_emit_h.len) {
try mod_emit_h.allocated_emit_h.append(gpa, .{});
assert(@intFromEnum(decl_index) == mod_emit_h.allocated_emit_h.len - 1);
}
}
if (src_scope) |scope| scope.incRef();
return decl_index;
}
pub fn getErrorValue(
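
The new `emit_h` logic keeps a side table aligned with pool indexes: a slot is appended only when the index is brand new, since a recycled index already has one. The same trick in miniature (hypothetical `ensureSideEntry`, a `u8` payload standing in for the per-decl emit-h state):

```zig
const std = @import("std");

/// Grow `side` lazily so that `side.items[index]` is valid, assuming
/// indexes are handed out densely (a fresh index always equals `len`).
fn ensureSideEntry(
    gpa: std.mem.Allocator,
    side: *std.ArrayListUnmanaged(u8),
    index: u32,
) !void {
    if (index >= side.items.len) {
        try side.append(gpa, 0);
        std.debug.assert(index == side.items.len - 1);
    }
}

test ensureSideEntry {
    const gpa = std.testing.allocator;
    var side: std.ArrayListUnmanaged(u8) = .{};
    defer side.deinit(gpa);
    try ensureSideEntry(gpa, &side, 0); // fresh index: appends
    try ensureSideEntry(gpa, &side, 0); // recycled index: no-op
    try std.testing.expectEqual(@as(usize, 1), side.items.len);
}
```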
@@ -5667,7 +5603,7 @@ pub fn createAnonymousDeclFromDecl(
const name = try mod.intern_pool.getOrPutStringFmt(mod.gpa, "{}__anon_{d}", .{
src_decl.name.fmt(&mod.intern_pool), @intFromEnum(new_decl_index),
});
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, tv, name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, tv, name);
return new_decl_index;
}
@@ -5675,7 +5611,6 @@ pub fn initNewAnonDecl(
mod: *Module,
new_decl_index: Decl.Index,
src_line: u32,
namespace: Namespace.Index,
typed_value: TypedValue,
name: InternPool.NullTerminatedString,
) Allocator.Error!void {
@@ -5692,8 +5627,6 @@ pub fn initNewAnonDecl(
new_decl.has_tv = true;
new_decl.analysis = .complete;
new_decl.generation = mod.generation;
try mod.namespacePtr(namespace).anon_decls.putNoClobber(mod.gpa, new_decl_index, {});
}
pub fn errNoteNonLazy(

src/Sema.zig

@@ -2891,12 +2891,12 @@ fn createAnonymousDeclTypeNamed(
const name = mod.intern_pool.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{
src_decl.name.fmt(&mod.intern_pool), anon_prefix, @intFromEnum(new_decl_index),
}) catch unreachable;
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
return new_decl_index;
},
.parent => {
const name = mod.declPtr(block.src_decl).name;
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
return new_decl_index;
},
.func => {
@@ -2932,7 +2932,7 @@ fn createAnonymousDeclTypeNamed(
try writer.writeByte(')');
const name = try mod.intern_pool.getOrPutString(gpa, buf.items);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
return new_decl_index;
},
.dbg_var => {
@@ -2948,7 +2948,7 @@ fn createAnonymousDeclTypeNamed(
src_decl.name.fmt(&mod.intern_pool), zir_data[i].str_op.getStr(sema.code),
});
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name);
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name);
return new_decl_index;
},
else => {},
@@ -7393,11 +7393,12 @@ fn instantiateGenericCall(
const ip = &mod.intern_pool;
const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known");
const module_fn = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
.func => |x| x,
.ptr => |ptr| mod.intern_pool.indexToKey(mod.declPtr(ptr.addr.decl).val.toIntern()).func,
const generic_owner = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
.func => func_val.toIntern(),
.ptr => |ptr| mod.declPtr(ptr.addr.decl).val.toIntern(),
else => unreachable,
};
const generic_owner_func = mod.intern_pool.indexToKey(generic_owner).func;
// Even though there may already be a generic instantiation corresponding
// to this callsite, we must evaluate the expressions of the generic
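
The rewritten switch normalizes the callee before instantiation: the resolved value is either the generic function itself or a pointer to a decl whose value is that function. A toy version of the same normalization, with a tagged union standing in for InternPool keys:

```zig
const std = @import("std");

const Value = union(enum) {
    func: u32, // index of a function
    decl_ptr: u32, // index of a decl whose value is a function
};

fn genericOwner(decl_values: []const Value, callee: Value) u32 {
    return switch (callee) {
        .func => |f| f,
        // Look through the pointer to the decl's value; anything other
        // than a function here would be a bug, as in the real switch.
        .decl_ptr => |d| decl_values[d].func,
    };
}

test genericOwner {
    const decl_values = [_]Value{.{ .func = 7 }};
    try std.testing.expectEqual(@as(u32, 7), genericOwner(&decl_values, .{ .decl_ptr = 0 }));
    try std.testing.expectEqual(@as(u32, 7), genericOwner(&decl_values, .{ .func = 7 }));
}
```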
@@ -7407,11 +7408,11 @@ fn instantiateGenericCall(
// The actual monomorphization happens via adding `func_instance` to
// `InternPool`.
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
const fn_owner_decl = mod.declPtr(generic_owner_func.owner_decl);
const namespace_index = fn_owner_decl.src_namespace;
const namespace = mod.namespacePtr(namespace_index);
const fn_zir = namespace.file_scope.zir;
const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst);
const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst);
const comptime_args = try sema.arena.alloc(InternPool.Index, uncasted_args.len);
@memset(comptime_args, .none);
@@ -7434,7 +7435,7 @@ fn instantiateGenericCall(
.fn_ret_ty = Type.void,
.owner_func_index = .none,
.comptime_args = comptime_args,
.generic_owner = module_fn.generic_owner,
.generic_owner = generic_owner,
.branch_quota = sema.branch_quota,
.branch_count = sema.branch_count,
.comptime_mutable_decls = sema.comptime_mutable_decls,
@@ -7444,7 +7445,7 @@ fn instantiateGenericCall(
var child_block: Block = .{
.parent = null,
.sema = &child_sema,
.src_decl = module_fn.owner_decl,
.src_decl = generic_owner_func.owner_decl,
.namespace = namespace_index,
.wip_capture_scope = block.wip_capture_scope,
.instructions = .{},
@@ -8737,7 +8738,13 @@ fn funcCommon(
if (inferred_error_set)
try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
const fn_owner_decl = if (sema.generic_owner != .none)
mod.funcOwnerDeclIndex(sema.generic_owner)
else
sema.owner_decl_index;
break :i try ip.getFuncDecl(gpa, .{
.fn_owner_decl = fn_owner_decl,
.param_types = param_types,
.noalias_bits = noalias_bits,
.comptime_bits = comptime_bits,
@@ -34628,7 +34635,7 @@ fn generateUnionTagTypeNumbered(
errdefer mod.destroyDecl(new_decl_index);
const fqn = try union_obj.getFullyQualifiedName(mod);
const name = try mod.intern_pool.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(&mod.intern_pool)});
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
}, name);
@@ -34679,7 +34686,7 @@ fn generateUnionTagTypeSimple(
errdefer mod.destroyDecl(new_decl_index);
const fqn = try union_obj.getFullyQualifiedName(mod);
const name = try mod.intern_pool.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(&mod.intern_pool)});
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{
try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{
.ty = Type.noreturn,
.val = Value.@"unreachable",
}, name);