Module: move memoized data to the intern pool

This avoids the memory management bugs of the previous implementation, which kept memoized string-literal decls and comptime call results in separate Module-owned tables (`memoized_decls`, `memoized_calls`, `memoized_call_args`) that had to be cleaned up manually on every error path.
Jacob Young 2023-05-28 02:41:22 -04:00 committed by Andrew Kelley
parent d40b83de45
commit 3b6ca1d35b
11 changed files with 264 additions and 140 deletions
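For readers skimming the diff, the underlying technique is content-addressed interning: equal keys always map to the same stable index, and the pool owns all backing memory, so one deinit frees everything, memoized entries included. Below is a minimal toy sketch of that property (names invented for illustration, not the compiler's):

const std = @import("std");

// Toy stand-in for the InternPool: equal keys dedupe to one stable index.
const MiniPool = struct {
    const Key = struct { a: u32, b: u32 };
    map: std.AutoArrayHashMapUnmanaged(Key, void) = .{},

    fn deinit(pool: *MiniPool, gpa: std.mem.Allocator) void {
        pool.map.deinit(gpa); // one deinit frees every interned entry
    }

    fn get(pool: *MiniPool, gpa: std.mem.Allocator, key: Key) !u32 {
        const gop = try pool.map.getOrPut(gpa, key);
        return @intCast(u32, gop.index); // same key -> same index
    }
};

test "interning deduplicates equal keys" {
    const gpa = std.testing.allocator;
    var pool = MiniPool{};
    defer pool.deinit(gpa);
    const first = try pool.get(gpa, .{ .a = 1, .b = 2 });
    const second = try pool.get(gpa, .{ .a = 1, .b = 2 });
    try std.testing.expectEqual(first, second);
}

Expressing memoized decls and calls as ordinary keys lets them share this lifetime and lookup machinery instead of the dedicated Module tables removed further down.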

@ -217,6 +217,11 @@ pub const Key = union(enum) {
/// An instance of a union.
un: Union,
/// A declaration with a memoized value.
memoized_decl: MemoizedDecl,
/// A comptime function call with a memoized result.
memoized_call: Key.MemoizedCall,
pub const IntType = std.builtin.Type.Int;
pub const ErrorUnionType = struct {
@ -609,6 +614,17 @@ pub const Key = union(enum) {
};
};
pub const MemoizedDecl = struct {
val: Index,
decl: Module.Decl.Index,
};
pub const MemoizedCall = struct {
func: Module.Fn.Index,
arg_values: []const Index,
result: Index,
};
pub fn hash32(key: Key, ip: *const InternPool) u32 {
return @truncate(u32, key.hash64(ip));
}
@ -786,6 +802,13 @@ pub const Key = union(enum) {
std.hash.autoHash(hasher, func_type.is_generic);
std.hash.autoHash(hasher, func_type.is_noinline);
},
.memoized_decl => |memoized_decl| std.hash.autoHash(hasher, memoized_decl.val),
.memoized_call => |memoized_call| {
std.hash.autoHash(hasher, memoized_call.func);
for (memoized_call.arg_values) |arg| std.hash.autoHash(hasher, arg);
},
}
}
@ -1054,6 +1077,17 @@ pub const Key = union(enum) {
a_info.is_generic == b_info.is_generic and
a_info.is_noinline == b_info.is_noinline;
},
.memoized_decl => |a_info| {
const b_info = b.memoized_decl;
return a_info.val == b_info.val;
},
.memoized_call => |a_info| {
const b_info = b.memoized_call;
return a_info.func == b_info.func and
std.mem.eql(Index, a_info.arg_values, b_info.arg_values);
},
}
}
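Note that the hash above and this eql both deliberately ignore `result`: a lookup key built with `result = .none` still matches the stored entry, which is exactly how `analyzeCall` probes the pool further down. A self-contained sketch of that property, with toy types in place of the compiler's:

const std = @import("std");

const Call = struct { func: u32, args: [2]u32, result: u32 };

const CallContext = struct {
    pub fn hash(_: CallContext, c: Call) u64 {
        var hasher = std.hash.Wyhash.init(0);
        std.hash.autoHash(&hasher, c.func);
        for (c.args) |arg| std.hash.autoHash(&hasher, arg);
        return hasher.final(); // `result` deliberately not hashed
    }
    pub fn eql(_: CallContext, a: Call, b: Call) bool {
        return a.func == b.func and std.mem.eql(u32, &a.args, &b.args);
    }
};

test "probe with placeholder result finds the stored call" {
    const gpa = std.testing.allocator;
    var map = std.HashMapUnmanaged(Call, void, CallContext, std.hash_map.default_max_load_percentage){};
    defer map.deinit(gpa);
    try map.put(gpa, .{ .func = 7, .args = .{ 1, 2 }, .result = 42 }, {});
    const probe = Call{ .func = 7, .args = .{ 1, 2 }, .result = 0 }; // result unknown
    try std.testing.expectEqual(@as(u32, 42), map.getKey(probe).?.result);
}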
@ -1105,6 +1139,10 @@ pub const Key = union(enum) {
.@"unreachable" => .noreturn_type,
.generic_poison => .generic_poison_type,
},
.memoized_decl,
.memoized_call,
=> unreachable,
};
}
};
@ -1380,6 +1418,14 @@ pub const Index = enum(u32) {
bytes: struct { data: *Bytes },
aggregate: struct { data: *Aggregate },
repeated: struct { data: *Repeated },
memoized_decl: struct { data: *Key.MemoizedDecl },
memoized_call: struct {
const @"data.args_len" = opaque {};
data: *MemoizedCall,
@"trailing.arg_values.len": *@"data.args_len",
trailing: struct { arg_values: []Index },
},
}) void {
_ = self;
const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields;
@ -1875,6 +1921,13 @@ pub const Tag = enum(u8) {
/// An instance of an array or vector with every element being the same value.
/// data is extra index to `Repeated`.
repeated,
/// A memoized declaration value.
/// data is extra index to `Key.MemoizedDecl`.
memoized_decl,
/// A memoized comptime function call result.
/// data is extra index to `MemoizedCall`.
memoized_call,
};
/// Trailing:
@ -2271,6 +2324,14 @@ pub const Float128 = struct {
}
};
/// Trailing:
/// 0. arg value: Index for each args_len
pub const MemoizedCall = struct {
func: Module.Fn.Index,
args_len: u32,
result: Index,
};
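The `Trailing` convention means a `MemoizedCall` occupies a fixed number of u32 words in the shared `extra` array, immediately followed by `args_len` argument indices; `extraDataTrail` in `indexToKey` reads it back the same way. A simplified round-trip of that layout, using plain u32 words:

const std = @import("std");

test "header plus trailing words round-trip" {
    const gpa = std.testing.allocator;
    var extra = std.ArrayListUnmanaged(u32){};
    defer extra.deinit(gpa);

    // encode: header words (func, args_len, result), then the trailing args
    const args = [_]u32{ 11, 22, 33 };
    const index = @intCast(u32, extra.items.len);
    try extra.appendSlice(gpa, &[_]u32{ 5, args.len, 42 });
    try extra.appendSlice(gpa, &args);

    // decode: read the header, then slice the trailing words after it
    const header = extra.items[index..][0..3];
    const args_len = header[1];
    const trailing = extra.items[index + 3 ..][0..args_len];
    try std.testing.expectEqualSlices(u32, &args, trailing);
}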
pub fn init(ip: *InternPool, gpa: Allocator) !void {
assert(ip.items.len == 0);
@ -2758,6 +2819,16 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.enum_literal => .{ .enum_literal = @intToEnum(NullTerminatedString, data) },
.enum_tag => .{ .enum_tag = ip.extraData(Key.EnumTag, data) },
.memoized_decl => .{ .memoized_decl = ip.extraData(Key.MemoizedDecl, data) },
.memoized_call => {
const extra = ip.extraDataTrail(MemoizedCall, data);
return .{ .memoized_call = .{
.func = extra.data.func,
.arg_values = @ptrCast([]const Index, ip.extra.items[extra.end..][0..extra.data.args_len]),
.result = extra.data.result,
} };
},
};
}
@ -3724,6 +3795,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.data = try ip.addExtra(gpa, un),
});
},
.memoized_decl => |memoized_decl| {
assert(memoized_decl.val != .none);
ip.items.appendAssumeCapacity(.{
.tag = .memoized_decl,
.data = try ip.addExtra(gpa, memoized_decl),
});
},
.memoized_call => |memoized_call| {
for (memoized_call.arg_values) |arg| assert(arg != .none);
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(MemoizedCall).Struct.fields.len +
memoized_call.arg_values.len);
ip.items.appendAssumeCapacity(.{
.tag = .memoized_call,
.data = ip.addExtraAssumeCapacity(MemoizedCall{
.func = memoized_call.func,
.args_len = @intCast(u32, memoized_call.arg_values.len),
.result = memoized_call.result,
}),
});
ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, memoized_call.arg_values));
},
}
return @intToEnum(Index, ip.items.len - 1);
}
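The `memoized_call` branch above also follows the pool's reserve-then-append discipline: the only fallible step (`ensureUnusedCapacity`) happens before anything is written, so `items` and `extra` cannot be left half-updated by an allocation failure. A self-contained sketch of the same discipline:

const std = @import("std");

test "reserve first, then append infallibly" {
    const gpa = std.testing.allocator;
    var items = std.ArrayListUnmanaged(u8){};
    defer items.deinit(gpa);
    var extra = std.ArrayListUnmanaged(u32){};
    defer extra.deinit(gpa);

    const args = [_]u32{ 1, 2, 3 };
    // the only steps that can fail: reserve one tag slot plus
    // header (func, args_len, result) and trailing argument words
    try items.ensureUnusedCapacity(gpa, 1);
    try extra.ensureUnusedCapacity(gpa, 3 + args.len);
    // from here on nothing can fail, so the arrays stay in sync:
    items.appendAssumeCapacity(0xCA);
    extra.appendSliceAssumeCapacity(&[_]u32{ 7, args.len, 42 });
    extra.appendSliceAssumeCapacity(&args);
    try std.testing.expectEqual(@as(usize, 6), extra.items.len);
}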
@ -3788,7 +3882,7 @@ pub fn getIncompleteEnum(
ip: *InternPool,
gpa: Allocator,
enum_type: Key.IncompleteEnumType,
) Allocator.Error!InternPool.IncompleteEnumType {
) Allocator.Error!IncompleteEnumType {
switch (enum_type.tag_mode) {
.auto => return getIncompleteEnumAuto(ip, gpa, enum_type),
.explicit => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_explicit),
@ -3800,7 +3894,7 @@ pub fn getIncompleteEnumAuto(
ip: *InternPool,
gpa: Allocator,
enum_type: Key.IncompleteEnumType,
) Allocator.Error!InternPool.IncompleteEnumType {
) Allocator.Error!IncompleteEnumType {
// Although the integer tag type will not be stored in the `EnumAuto` struct,
// `InternPool` logic depends on it being present so that `typeOf` can be infallible.
// Ensure it is present here:
@ -3849,7 +3943,7 @@ fn getIncompleteEnumExplicit(
gpa: Allocator,
enum_type: Key.IncompleteEnumType,
tag: Tag,
) Allocator.Error!InternPool.IncompleteEnumType {
) Allocator.Error!IncompleteEnumType {
// We must keep the map in sync with `items`. The hash and equality functions
// for enum types only look at the decl field, which is present even in
// an `IncompleteEnumType`.
@ -4704,6 +4798,12 @@ fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
.func => @sizeOf(Key.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl),
.only_possible_value => 0,
.union_value => @sizeOf(Key.Union),
.memoized_decl => @sizeOf(Key.MemoizedDecl),
.memoized_call => b: {
const info = ip.extraData(MemoizedCall, data);
break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len);
},
});
}
const SortContext = struct {
@ -5215,6 +5315,9 @@ pub fn zigTypeTagOrPoison(ip: InternPool, index: Index) error{GenericPoison}!std
.bytes,
.aggregate,
.repeated,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
},
.none => unreachable, // special tag

@ -88,18 +88,10 @@ embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{},
/// Stores all Type and Value objects; periodically garbage collected.
intern_pool: InternPool = .{},
/// This is currently only used for string literals, however the end-game once the lang spec
/// is settled will be to make this behavior consistent across all types.
memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{},
/// The set of all the generic function instantiations. This is used so that when a generic
/// function is called twice with the same comptime parameter arguments, both calls dispatch
/// to the same function.
monomorphed_funcs: MonomorphedFuncsSet = .{},
/// The set of all comptime function calls that have been cached so that future calls
/// with the same parameters will get the same return value.
memoized_calls: MemoizedCallSet = .{},
memoized_call_args: MemoizedCall.Args = .{},
/// Contains the values from `@setAlignStack`. A sparse table is used here
/// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while
/// functions are many.
@ -223,42 +215,6 @@ const MonomorphedFuncsContext = struct {
}
};
pub const MemoizedCallSet = std.HashMapUnmanaged(
MemoizedCall.Key,
MemoizedCall.Result,
MemoizedCall,
std.hash_map.default_max_load_percentage,
);
pub const MemoizedCall = struct {
args: *const Args,
pub const Args = std.ArrayListUnmanaged(InternPool.Index);
pub const Key = struct {
func: Fn.Index,
args_index: u32,
args_count: u32,
pub fn args(key: Key, ctx: MemoizedCall) []InternPool.Index {
return ctx.args.items[key.args_index..][0..key.args_count];
}
};
pub const Result = InternPool.Index;
pub fn eql(ctx: MemoizedCall, a: Key, b: Key) bool {
return a.func == b.func and mem.eql(InternPool.Index, a.args(ctx), b.args(ctx));
}
pub fn hash(ctx: MemoizedCall, key: Key) u64 {
var hasher = std.hash.Wyhash.init(0);
std.hash.autoHash(&hasher, key.func);
std.hash.autoHashStrat(&hasher, key.args(ctx), .Deep);
return hasher.final();
}
};
pub const SetAlignStack = struct {
alignment: u32,
/// TODO: This needs to store a non-lazy source location for the case of an inline function
@ -605,7 +561,6 @@ pub const Decl = struct {
}
mod.destroyFunc(func);
}
_ = mod.memoized_decls.remove(decl.val.ip_index);
if (decl.value_arena) |value_arena| {
value_arena.deinit(gpa);
decl.value_arena = null;
@ -3314,8 +3269,6 @@ pub fn deinit(mod: *Module) void {
mod.test_functions.deinit(gpa);
mod.align_stack_fns.deinit(gpa);
mod.monomorphed_funcs.deinit(gpa);
mod.memoized_call_args.deinit(gpa);
mod.memoized_calls.deinit(gpa);
mod.decls_free_list.deinit(gpa);
mod.allocated_decls.deinit(gpa);
@ -3325,8 +3278,6 @@ pub fn deinit(mod: *Module) void {
mod.namespaces_free_list.deinit(gpa);
mod.allocated_namespaces.deinit(gpa);
mod.memoized_decls.deinit(gpa);
mod.intern_pool.deinit(gpa);
}
@ -5438,6 +5389,17 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
mod.destroyDecl(decl_index);
}
/// Finalize the creation of an anon decl.
pub fn finalizeAnonDecl(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
// The Decl starts off with alive=false and the codegen backend will set alive=true
// if the Decl is referenced by an instruction or another constant. Otherwise,
// the Decl will be garbage collected by the `codegen_decl` task instead of sent
// to the linker.
if (mod.declPtr(decl_index).ty.isFnOrHasRuntimeBits(mod)) {
try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = decl_index });
}
}
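This new helper completes a create/abort/finalize lifecycle: `initNewAnonDecl` no longer queues codegen work itself (see the hunk below), so every caller must end in exactly one of `abortAnonDecl` or `finalizeAnonDecl`. A toy sketch of that control flow, with invented stand-in types:

const std = @import("std");

const ToyModule = struct {
    queued: std.ArrayListUnmanaged(u32) = .{},
    aborted: std.ArrayListUnmanaged(u32) = .{},

    fn abortAnonDecl(mod: *ToyModule, gpa: std.mem.Allocator, decl: u32) void {
        mod.aborted.append(gpa, decl) catch {}; // stands in for destroying the decl
    }
    fn finalizeAnonDecl(mod: *ToyModule, gpa: std.mem.Allocator, decl: u32) !void {
        try mod.queued.append(gpa, decl); // stands in for anon_work_queue.writeItem
    }
    fn makeDecl(mod: *ToyModule, gpa: std.mem.Allocator, fail: bool) !u32 {
        const decl: u32 = 1;
        errdefer mod.abortAnonDecl(gpa, decl); // error path cleans up
        if (fail) return error.AnalysisFail;
        try mod.finalizeAnonDecl(gpa, decl); // success path queues codegen
        return decl;
    }
};

test "abort on failure, finalize on success" {
    const gpa = std.testing.allocator;
    var mod = ToyModule{};
    defer mod.queued.deinit(gpa);
    defer mod.aborted.deinit(gpa);
    try std.testing.expectError(error.AnalysisFail, mod.makeDecl(gpa, true));
    _ = try mod.makeDecl(gpa, false);
    try std.testing.expectEqual(@as(usize, 1), mod.queued.items.len);
    try std.testing.expectEqual(@as(usize, 1), mod.aborted.items.len);
}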
/// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not).
fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
@ -5875,7 +5837,7 @@ pub fn initNewAnonDecl(
namespace: Namespace.Index,
typed_value: TypedValue,
name: [:0]u8,
) !void {
) Allocator.Error!void {
assert(typed_value.ty.toIntern() == mod.intern_pool.typeOf(typed_value.val.toIntern()));
errdefer mod.gpa.free(name);
@ -5892,14 +5854,6 @@ pub fn initNewAnonDecl(
new_decl.generation = mod.generation;
try mod.namespacePtr(namespace).anon_decls.putNoClobber(mod.gpa, new_decl_index, {});
// The Decl starts off with alive=false and the codegen backend will set alive=true
// if the Decl is referenced by an instruction or another constant. Otherwise,
// the Decl will be garbage collected by the `codegen_decl` task instead of sent
// to the linker.
if (typed_value.ty.isFnOrHasRuntimeBits(mod)) {
try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index });
}
}
pub fn errNoteNonLazy(

@ -734,6 +734,7 @@ pub const Block = struct {
errdefer sema.mod.abortAnonDecl(new_decl_index);
try new_decl.finalizeNewArena(&wad.new_decl_arena);
wad.finished = true;
try sema.mod.finalizeAnonDecl(new_decl_index);
return new_decl_index;
}
};
@ -2292,7 +2293,7 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
defer reference_stack.deinit();
// Avoid infinite loops.
var seen = std.AutoHashMap(Module.Decl.Index, void).init(gpa);
var seen = std.AutoHashMap(Decl.Index, void).init(gpa);
defer seen.deinit();
var cur_reference_trace: u32 = 0;
@ -2742,7 +2743,9 @@ fn zirStructDecl(
try sema.analyzeStructDecl(new_decl, inst, struct_index);
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
}
fn createAnonymousDeclTypeNamed(
@ -2941,6 +2944,7 @@ fn zirEnumDecl(
new_namespace.ty = incomplete_enum.index.toType();
const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
done = true;
const int_tag_ty = ty: {
@ -3193,7 +3197,9 @@ fn zirUnionDecl(
_ = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
}
fn zirOpaqueDecl(
@ -3257,7 +3263,9 @@ fn zirOpaqueDecl(
extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl);
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
}
fn zirErrorSetDecl(
@ -3298,7 +3306,9 @@ fn zirErrorSetDecl(
new_decl.owns_tv = true;
errdefer mod.abortAnonDecl(new_decl_index);
return sema.analyzeDeclVal(block, src, new_decl_index);
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
}
fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
@ -5133,32 +5143,35 @@ fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
return sema.addStrLit(block, bytes);
}
fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air.Inst.Ref {
// `zir_bytes` references memory inside the ZIR module, which can get deallocated
// after semantic analysis is complete, for example in the case of the initialization
// expression of a variable declaration.
fn addStrLit(sema: *Sema, block: *Block, bytes: []const u8) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const gpa = sema.gpa;
const ty = try mod.arrayType(.{
.len = zir_bytes.len,
.child = .u8_type,
.sentinel = .zero_u8,
});
const val = try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .bytes = zir_bytes },
} });
const gop = try mod.memoized_decls.getOrPut(gpa, val);
if (!gop.found_existing) {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const memoized_decl_index = memoized: {
const ty = try mod.arrayType(.{
.len = bytes.len,
.child = .u8_type,
.sentinel = .zero_u8,
});
const val = try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .bytes = bytes },
} });
const decl_index = try anon_decl.finish(ty, val.toValue(), 0);
_ = try sema.typeHasRuntimeBits(ty);
const new_decl_index = try mod.createAnonymousDecl(block, .{ .ty = ty, .val = val.toValue() });
errdefer mod.abortAnonDecl(new_decl_index);
gop.key_ptr.* = val;
gop.value_ptr.* = decl_index;
}
return sema.analyzeDeclRef(gop.value_ptr.*);
const memoized_index = try mod.intern(.{ .memoized_decl = .{
.val = val,
.decl = new_decl_index,
} });
const memoized_decl_index = mod.intern_pool.indexToKey(memoized_index).memoized_decl.decl;
if (memoized_decl_index != new_decl_index)
mod.abortAnonDecl(new_decl_index)
else
try mod.finalizeAnonDecl(new_decl_index);
break :memoized memoized_decl_index;
};
return sema.analyzeDeclRef(memoized_decl_index);
}
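The dedup above is subtle: because `Key.MemoizedDecl` hashes and compares only `val` (see the hash/eql hunks earlier), `mod.intern` returns whichever decl was first associated with that value, and the freshly created decl is aborted if it lost that race. A toy model of the same behavior, with invented names:

const std = @import("std");

const DeclMemo = struct {
    const Entry = struct { val: u32, decl: u32 };
    const Ctx = struct {
        pub fn hash(_: Ctx, e: Entry) u64 {
            return std.hash.Wyhash.hash(0, std.mem.asBytes(&e.val));
        }
        pub fn eql(_: Ctx, x: Entry, y: Entry) bool {
            return x.val == y.val; // `decl` intentionally ignored
        }
    };
    map: std.HashMapUnmanaged(Entry, void, Ctx, std.hash_map.default_max_load_percentage) = .{},

    // Returns the winning decl for e.val; stores e only if the val is new.
    fn intern(m: *DeclMemo, gpa: std.mem.Allocator, e: Entry) !u32 {
        const gop = try m.map.getOrPut(gpa, e);
        return gop.key_ptr.decl;
    }
};

test "second decl for the same val loses the race" {
    const gpa = std.testing.allocator;
    var memo = DeclMemo{};
    defer memo.map.deinit(gpa);
    try std.testing.expectEqual(@as(u32, 10), try memo.intern(gpa, .{ .val = 1, .decl = 10 }));
    // same val, different decl: the first decl wins, so the caller would
    // abortAnonDecl(11) and reuse decl 10
    try std.testing.expectEqual(@as(u32, 10), try memo.intern(gpa, .{ .val = 1, .decl = 11 }));
}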
fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@ -6868,30 +6881,15 @@ fn analyzeCall(
defer child_block.instructions.deinit(gpa);
defer merges.deinit(gpa);
// If it's a comptime function call, we need to memoize it as long as no external
// comptime memory is mutated.
var memoized_call_key = Module.MemoizedCall.Key{
.func = module_fn_index,
.args_index = @intCast(u32, mod.memoized_call_args.items.len),
.args_count = @intCast(u32, func_ty_info.param_types.len),
};
var delete_memoized_call_key = false;
defer if (delete_memoized_call_key) {
assert(mod.memoized_call_args.items.len >= memoized_call_key.args_index and
mod.memoized_call_args.items.len < memoized_call_key.args_index + memoized_call_key.args_count);
mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index);
};
if (is_comptime_call) {
try mod.memoized_call_args.ensureUnusedCapacity(gpa, memoized_call_key.args_count);
delete_memoized_call_key = true;
}
try sema.emitBackwardBranch(block, call_src);
// Whether this call should be memoized, set to false if the call can mutate
// comptime state.
// Whether this call should be memoized, set to false if the call can mutate comptime state.
var should_memoize = true;
// If it's a comptime function call, we need to memoize it as long as no external
// comptime memory is mutated.
const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);
var new_fn_info = mod.typeToFunc(fn_owner_decl.ty).?;
new_fn_info.param_types = try sema.arena.alloc(InternPool.Index, new_fn_info.param_types.len);
new_fn_info.comptime_bits = 0;
@ -6918,6 +6916,7 @@ fn analyzeCall(
uncasted_args,
is_comptime_call,
&should_memoize,
memoized_arg_values,
mod.typeToFunc(func_ty).?.param_types,
func,
&has_comptime_args,
@ -6935,6 +6934,7 @@ fn analyzeCall(
uncasted_args,
is_comptime_call,
&should_memoize,
memoized_arg_values,
mod.typeToFunc(func_ty).?.param_types,
func,
&has_comptime_args,
@ -6988,28 +6988,18 @@ fn analyzeCall(
// bug generating invalid LLVM IR.
const res2: Air.Inst.Ref = res2: {
if (should_memoize and is_comptime_call) {
const gop = try mod.memoized_calls.getOrPutContext(
gpa,
memoized_call_key,
.{ .args = &mod.memoized_call_args },
);
if (gop.found_existing) {
assert(mod.memoized_call_args.items.len == memoized_call_key.args_index + memoized_call_key.args_count);
mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index);
delete_memoized_call_key = false;
// We need to use the original memoized error set instead of fn_ret_ty.
const result = gop.value_ptr.*;
assert(result != .none); // recursive memoization?
break :res2 try sema.addConstant(mod.intern_pool.typeOf(result).toType(), result.toValue());
if (mod.intern_pool.getIfExists(.{ .memoized_call = .{
.func = module_fn_index,
.arg_values = memoized_arg_values,
.result = .none,
} })) |memoized_call_index| {
const memoized_call = mod.intern_pool.indexToKey(memoized_call_index).memoized_call;
break :res2 try sema.addConstant(
mod.intern_pool.typeOf(memoized_call.result).toType(),
memoized_call.result.toValue(),
);
}
gop.value_ptr.* = .none;
} else if (delete_memoized_call_key) {
assert(mod.memoized_call_args.items.len == memoized_call_key.args_index + memoized_call_key.args_count);
mod.memoized_call_args.shrinkRetainingCapacity(memoized_call_key.args_index);
}
delete_memoized_call_key = false;
const new_func_resolved_ty = try mod.funcType(new_fn_info);
if (!is_comptime_call and !block.is_typeof) {
@ -7067,10 +7057,14 @@ fn analyzeCall(
if (should_memoize and is_comptime_call) {
const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, "");
mod.memoized_calls.getPtrContext(
memoized_call_key,
.{ .args = &mod.memoized_call_args },
).?.* = try result_val.intern(fn_ret_ty, mod);
// TODO: check whether any external comptime memory was mutated by the
// comptime function call. If so, then do not memoize the call here.
_ = try mod.intern(.{ .memoized_call = .{
.func = module_fn_index,
.arg_values = memoized_arg_values,
.result = try result_val.intern(fn_ret_ty, mod),
} });
}
break :res2 result;
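Putting the two hunks together: the call site first probes the pool with the argument values and `result = .none` (a hit is possible because hash/eql are result-blind), and only after a successful comptime evaluation interns the real result. A compact sketch of that flow, with a toy map standing in for `getIfExists`/`intern`:

const std = @import("std");

const CallKey = struct { func: u32, arg: u32 };

fn evalSquare(arg: u32) u32 {
    return arg * arg; // stands in for evaluating the comptime call body
}

test "comptime call evaluated once, then served from the pool" {
    const gpa = std.testing.allocator;
    var pool = std.AutoHashMapUnmanaged(CallKey, u32){};
    defer pool.deinit(gpa);

    var evals: u32 = 0;
    var i: u32 = 0;
    while (i < 2) : (i += 1) {
        const key = CallKey{ .func = 1, .arg = 9 };
        const result = pool.get(key) orelse blk: {
            evals += 1; // cache miss: evaluate and memoize
            const r = evalSquare(9);
            try pool.put(gpa, key, r);
            break :blk r;
        };
        try std.testing.expectEqual(@as(u32, 81), result);
    }
    try std.testing.expectEqual(@as(u32, 1), evals); // second call hit the pool
}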
@ -7216,6 +7210,7 @@ fn analyzeInlineCallArg(
uncasted_args: []const Air.Inst.Ref,
is_comptime_call: bool,
should_memoize: *bool,
memoized_arg_values: []InternPool.Index,
raw_param_types: []const InternPool.Index,
func_inst: Air.Inst.Ref,
has_comptime_args: *bool,
@ -7279,7 +7274,7 @@ fn analyzeInlineCallArg(
},
}
should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(mod);
mod.memoized_call_args.appendAssumeCapacity(try arg_val.intern(param_ty.toType(), mod));
memoized_arg_values[arg_i.*] = try arg_val.intern(param_ty.toType(), mod);
} else {
sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
}
@ -7315,7 +7310,7 @@ fn analyzeInlineCallArg(
},
}
should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(mod);
mod.memoized_call_args.appendAssumeCapacity(try arg_val.intern(sema.typeOf(uncasted_arg), mod));
memoized_arg_values[arg_i.*] = try arg_val.intern(sema.typeOf(uncasted_arg), mod);
} else {
if (zir_tags[inst] == .param_anytype_comptime) {
_ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
@ -19363,7 +19358,9 @@ fn zirReify(
}
}
return sema.analyzeDeclVal(block, src, new_decl_index);
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
},
.Opaque => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
@ -19407,7 +19404,9 @@ fn zirReify(
new_namespace.ty = opaque_ty.toType();
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
},
.Union => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
@ -19604,7 +19603,9 @@ fn zirReify(
}
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
},
.Fn => {
const fields = ip.typeOf(union_val.val).toType().structFields(mod);
@ -19902,7 +19903,9 @@ fn reifyStruct(
}
try new_decl.finalizeNewArena(&new_decl_arena);
return sema.analyzeDeclVal(block, src, new_decl_index);
const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
try mod.finalizeAnonDecl(new_decl_index);
return decl_val;
}
fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
@ -31865,6 +31868,9 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
},
};
@ -32997,6 +33003,8 @@ fn generateUnionTagTypeNumbered(
.ty = Type.type,
.val = undefined,
}, name);
errdefer mod.abortAnonDecl(new_decl_index);
const new_decl = mod.declPtr(new_decl_index);
new_decl.name_fully_qualified = true;
new_decl.owns_tv = true;
@ -33016,6 +33024,7 @@ fn generateUnionTagTypeNumbered(
new_decl.val = enum_ty.toValue();
try mod.finalizeAnonDecl(new_decl_index);
return enum_ty.toType();
}
@ -33049,6 +33058,7 @@ fn generateUnionTagTypeSimple(
mod.declPtr(new_decl_index).name_fully_qualified = true;
break :new_decl_index new_decl_index;
};
errdefer mod.abortAnonDecl(new_decl_index);
const enum_ty = try mod.intern(.{ .enum_type = .{
.decl = new_decl_index,
@ -33066,6 +33076,7 @@ fn generateUnionTagTypeSimple(
new_decl.owns_tv = true;
new_decl.val = enum_ty.toValue();
try mod.finalizeAnonDecl(new_decl_index);
return enum_ty.toType();
}
@ -33358,6 +33369,9 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
},
};
@ -33843,6 +33857,9 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool {
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
},
};

@ -278,6 +278,9 @@ pub fn print(
} else try writer.writeAll("...");
return writer.writeAll(" }");
},
.memoized_decl,
.memoized_call,
=> unreachable,
},
};
}

@ -3254,6 +3254,9 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
else => unreachable,
},
.un => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}),
.memoized_decl,
.memoized_call,
=> unreachable,
}
}

@ -605,6 +605,9 @@ pub fn generateSymbol(
}
}
},
.memoized_decl,
.memoized_call,
=> unreachable,
}
return .ok;
}

@ -1090,6 +1090,7 @@ pub const DeclGen = struct {
};
switch (mod.intern_pool.indexToKey(val.ip_index)) {
// types, not values
.int_type,
.ptr_type,
.array_type,
@ -1106,7 +1107,10 @@ pub const DeclGen = struct {
.func_type,
.error_set_type,
.inferred_error_set_type,
=> unreachable, // types, not values
// memoization, not values
.memoized_decl,
.memoized_call,
=> unreachable,
.undef, .runtime_value => unreachable, // handled above
.simple_value => |simple_value| switch (simple_value) {

@ -3793,6 +3793,9 @@ pub const DeclGen = struct {
return llvm_union_ty.constNamedStruct(&fields, fields_len);
}
},
.memoized_decl,
.memoized_call,
=> unreachable,
}
}

@ -830,6 +830,9 @@ pub const DeclGen = struct {
try self.addUndef(layout.padding);
},
.memoized_decl,
.memoized_call,
=> unreachable,
}
}
};

@ -400,6 +400,9 @@ pub const Type = struct {
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
}
}
@ -613,6 +616,9 @@ pub const Type = struct {
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
},
};
@ -719,6 +725,9 @@ pub const Type = struct {
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
};
}
@ -1050,6 +1059,9 @@ pub const Type = struct {
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
},
}
@ -1464,6 +1476,9 @@ pub const Type = struct {
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
},
}
@ -1695,6 +1710,9 @@ pub const Type = struct {
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
}
}
@ -2250,6 +2268,9 @@ pub const Type = struct {
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
},
};
@ -2586,6 +2607,9 @@ pub const Type = struct {
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
},
};
@ -2728,6 +2752,9 @@ pub const Type = struct {
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_decl,
.memoized_call,
=> unreachable,
},
};

@ -476,6 +476,10 @@ pub const Value = struct {
.tag = un.tag.toValue(),
.val = un.val.toValue(),
}),
.memoized_decl,
.memoized_call,
=> unreachable,
};
}