diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index 62165f0304..1e95352a02 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -1669,8 +1669,9 @@ pub fn ArrayHashMapUnmanaged( inline fn checkedHash(ctx: anytype, key: anytype) u32 { comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(key), K, u32, true); - // If you get a compile error on the next line, it means that - const hash = ctx.hash(key); // your generic hash function doesn't accept your key + // If you get a compile error on the next line, it means that your + // generic hash function doesn't accept your key. + const hash = ctx.hash(key); if (@TypeOf(hash) != u32) { @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type!\n" ++ @typeName(u32) ++ " was expected, but found " ++ @typeName(@TypeOf(hash))); @@ -1679,8 +1680,9 @@ pub fn ArrayHashMapUnmanaged( } inline fn checkedEql(ctx: anytype, a: anytype, b: K, b_index: usize) bool { comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(a), K, u32, true); - // If you get a compile error on the next line, it means that - const eql = ctx.eql(a, b, b_index); // your generic eql function doesn't accept (self, adapt key, K, index) + // If you get a compile error on the next line, it means that your + // generic eql function doesn't accept (self, adapt key, K, index). + const eql = ctx.eql(a, b, b_index); if (@TypeOf(eql) != bool) { @compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type!\n" ++ @typeName(bool) ++ " was expected, but found " ++ @typeName(@TypeOf(eql))); diff --git a/src/Air.zig b/src/Air.zig index 1cb47e3443..c4fb4ed6b8 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -946,6 +946,7 @@ pub const Inst = struct { slice_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.slice_const_u8_sentinel_0_type), optional_noreturn_type = @intFromEnum(InternPool.Index.optional_noreturn_type), anyerror_void_error_union_type = @intFromEnum(InternPool.Index.anyerror_void_error_union_type), + adhoc_inferred_error_set_type = @intFromEnum(InternPool.Index.adhoc_inferred_error_set_type), generic_poison_type = @intFromEnum(InternPool.Index.generic_poison_type), empty_struct_type = @intFromEnum(InternPool.Index.empty_struct_type), undef = @intFromEnum(InternPool.Index.undef), @@ -1003,7 +1004,7 @@ pub const Inst = struct { }, ty_fn: struct { ty: Ref, - func: Module.Fn.Index, + func: InternPool.Index, }, br: struct { block_inst: Index, @@ -1436,7 +1437,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) .call, .call_always_tail, .call_never_tail, .call_never_inline => { const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); - return ip.funcReturnType(callee_ty.toIntern()).toType(); + return ip.funcTypeReturnType(callee_ty.toIntern()).toType(); }, .slice_elem_val, .ptr_elem_val, .array_elem_val => { diff --git a/src/AstGen.zig b/src/AstGen.zig index 820531097c..1aa70d86a9 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -12095,7 +12095,10 @@ const GenZir = struct { return gz.addAsIndex(.{ .tag = .save_err_ret_index, .data = .{ .save_err_ret_index = .{ - .operand = if (cond == .if_of_error_type) cond.if_of_error_type else .none, + .operand = switch (cond) { + .if_of_error_type => |x| x, + else => .none, + }, } }, }); } diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 3c6d8f8f60..7dbe506cdd 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -281,6 +281,7 @@ pub fn generateZirData(self: 
*Autodoc) !void { // Poison and special tag .generic_poison_type, .var_args_param_type, + .adhoc_inferred_error_set_type, => .{ .Type = .{ .name = try tmpbuf.toOwnedSlice() }, }, diff --git a/src/Compilation.zig b/src/Compilation.zig index 91747e8b01..eb4b67933d 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -29,6 +29,7 @@ const wasi_libc = @import("wasi_libc.zig"); const fatal = @import("main.zig").fatal; const clangMain = @import("main.zig").clangMain; const Module = @import("Module.zig"); +const InternPool = @import("InternPool.zig"); const BuildId = std.Build.CompileStep.BuildId; const Cache = std.Build.Cache; const translate_c = @import("translate_c.zig"); @@ -227,7 +228,8 @@ const Job = union(enum) { /// Write the constant value for a Decl to the output file. codegen_decl: Module.Decl.Index, /// Write the machine code for a function to the output file. - codegen_func: Module.Fn.Index, + /// This will either be a non-generic `func_decl` or a `func_instance`. + codegen_func: InternPool.Index, /// Render the .h file snippet for the Decl. emit_h_decl: Module.Decl.Index, /// The Decl needs to be analyzed and possibly export itself. @@ -2053,15 +2055,9 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void const decl = module.declPtr(decl_index); assert(decl.deletion_flag); assert(decl.dependants.count() == 0); - const is_anon = if (decl.zir_decl_index == 0) blk: { - break :blk module.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index); - } else false; + assert(decl.zir_decl_index != 0); try module.clearDecl(decl_index, null); - - if (is_anon) { - module.destroyDecl(decl_index); - } } try module.processExports(); @@ -3216,8 +3212,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v // Tests are always emitted in test binaries. The decl_refs are created by // Module.populateTestFunctions, but this will not queue body analysis, so do // that now. - const func_index = module.intern_pool.indexToFunc(decl.val.ip_index).unwrap().?; - try module.ensureFuncBodyAnalysisQueued(func_index); + try module.ensureFuncBodyAnalysisQueued(decl.val.toIntern()); } }, .update_embed_file => |embed_file| { diff --git a/src/InternPool.zig b/src/InternPool.zig index b2d96bd5b7..b8eaafceab 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -20,6 +20,25 @@ limbs: std.ArrayListUnmanaged(u64) = .{}, /// `string_bytes` array is agnostic to either usage. string_bytes: std.ArrayListUnmanaged(u8) = .{}, +/// Rather than allocating Decl objects with an Allocator, we instead allocate +/// them with this SegmentedList. This provides four advantages: +/// * Stable memory so that one thread can access a Decl object while another +/// thread allocates additional Decl objects from this list. +/// * It allows us to use u32 indexes to reference Decl objects rather than +/// pointers, saving memory in Type, Value, and dependency sets. +/// * Using integers to reference Decl objects rather than pointers makes +/// serialization trivial. +/// * It provides a unique integer to be used for anonymous symbol names, avoiding +/// multi-threaded contention on an atomic counter. +allocated_decls: std.SegmentedList(Module.Decl, 0) = .{}, +/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack. +decls_free_list: std.ArrayListUnmanaged(Module.Decl.Index) = .{}, + +/// Same pattern as with `allocated_decls`. 
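+/// (A sketch of the shared allocate/free pattern, using hypothetical helper
+/// names rather than code from this changeset: freeing pushes the index onto
+/// the free list, and allocating pops a recycled index or grows the
+/// segmented list, so pointers to existing elements stay stable:)
+///
+///     pub fn createDecl(ip: *InternPool, gpa: Allocator) !Module.Decl.Index {
+///         if (ip.decls_free_list.popOrNull()) |index| return index;
+///         _ = try ip.allocated_decls.addOne(gpa);
+///         return @enumFromInt(ip.allocated_decls.len - 1);
+///     }
+///
+///     pub fn destroyDecl(ip: *InternPool, gpa: Allocator, index: Module.Decl.Index) void {
+///         ip.allocated_decls.at(@intFromEnum(index)).* = undefined;
+///         ip.decls_free_list.append(gpa, index) catch {}; // on OOM, leak the slot until deinit
+///     }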
+allocated_namespaces: std.SegmentedList(Module.Namespace, 0) = .{}, +/// Same pattern as with `decls_free_list`. +namespaces_free_list: std.ArrayListUnmanaged(Module.Namespace.Index) = .{}, + /// Struct objects are stored in this data structure because: /// * They contain pointers such as the field maps. /// * They need to be mutated after creation. @@ -34,25 +53,11 @@ allocated_unions: std.SegmentedList(Module.Union, 0) = .{}, /// When a Union object is freed from `allocated_unions`, it is pushed into this stack. unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{}, -/// Fn objects are stored in this data structure because: -/// * They need to be mutated after creation. -allocated_funcs: std.SegmentedList(Module.Fn, 0) = .{}, -/// When a Fn object is freed from `allocated_funcs`, it is pushed into this stack. -funcs_free_list: std.ArrayListUnmanaged(Module.Fn.Index) = .{}, - -/// InferredErrorSet objects are stored in this data structure because: -/// * They contain pointers such as the errors map and the set of other inferred error sets. -/// * They need to be mutated after creation. -allocated_inferred_error_sets: std.SegmentedList(Module.Fn.InferredErrorSet, 0) = .{}, -/// When a Struct object is freed from `allocated_inferred_error_sets`, it is -/// pushed into this stack. -inferred_error_sets_free_list: std.ArrayListUnmanaged(Module.Fn.InferredErrorSet.Index) = .{}, - /// Some types such as enums, structs, and unions need to store mappings from field names /// to field index, or value to field index. In such cases, they will store the underlying /// field names and values directly, relying on one of these maps, stored separately, /// to provide lookup. -maps: std.ArrayListUnmanaged(std.AutoArrayHashMapUnmanaged(void, void)) = .{}, +maps: std.ArrayListUnmanaged(FieldMap) = .{}, /// Used for finding the index inside `string_bytes`. string_table: std.HashMapUnmanaged( @@ -62,6 +67,10 @@ string_table: std.HashMapUnmanaged( std.hash_map.default_max_load_percentage, ) = .{}, +/// TODO: after https://github.com/ziglang/zig/issues/10618 is solved, +/// change store_hash to false. +const FieldMap = std.ArrayHashMapUnmanaged(void, void, std.array_hash_map.AutoContext(void), true); + const builtin = @import("builtin"); const std = @import("std"); const Allocator = std.mem.Allocator; @@ -73,6 +82,7 @@ const Hash = std.hash.Wyhash; const InternPool = @This(); const Module = @import("Module.zig"); +const Zir = @import("Zir.zig"); const Sema = @import("Sema.zig"); const KeyAdapter = struct { @@ -129,12 +139,24 @@ pub const NullTerminatedString = enum(u32) { empty = 0, _, + /// An array of `NullTerminatedString` existing within the `extra` array. + /// This type exists to provide a struct with lifetime that is + /// not invalidated when items are added to the `InternPool`. 
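+    /// (For example, `ErrorSetType.nameIndex` below re-derives the slice via
+    /// `self.names.get(ip)` on every lookup rather than caching the result
+    /// across possible intern pool mutations.)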
+ pub const Slice = struct { + start: u32, + len: u32, + + pub fn get(slice: Slice, ip: *const InternPool) []NullTerminatedString { + return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); + } + }; + pub fn toString(self: NullTerminatedString) String { - return @as(String, @enumFromInt(@intFromEnum(self))); + return @enumFromInt(@intFromEnum(self)); } pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString { - return @as(OptionalNullTerminatedString, @enumFromInt(@intFromEnum(self))); + return @enumFromInt(@intFromEnum(self)); } const Adapter = struct { @@ -224,7 +246,8 @@ pub const Key = union(enum) { enum_type: EnumType, func_type: FuncType, error_set_type: ErrorSetType, - inferred_error_set_type: Module.Fn.InferredErrorSet.Index, + /// The payload is the function body, either a `func_decl` or `func_instance`. + inferred_error_set_type: Index, /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented /// via `simple_value` and has a named `Index` tag for it. @@ -273,16 +296,16 @@ pub const Key = union(enum) { pub const ErrorSetType = struct { /// Set of error names, sorted by null terminated string index. - names: []const NullTerminatedString, + names: NullTerminatedString.Slice, /// This is ignored by `get` but will always be provided by `indexToKey`. names_map: OptionalMapIndex = .none, /// Look up field index based on field name. pub fn nameIndex(self: ErrorSetType, ip: *const InternPool, name: NullTerminatedString) ?u32 { const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)]; - const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; + const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) }; const field_index = map.getIndexAdapted(name, adapter) orelse return null; - return @as(u32, @intCast(field_index)); + return @intCast(field_index); } }; @@ -487,7 +510,7 @@ pub const Key = union(enum) { }; pub const FuncType = struct { - param_types: []Index, + param_types: Index.Slice, return_type: Index, /// Tells whether a parameter is comptime. See `paramIsComptime` helper /// method for accessing this. @@ -518,6 +541,32 @@ pub const Key = union(enum) { assert(i < self.param_types.len); return @as(u1, @truncate(self.noalias_bits >> i)) != 0; } + + pub fn eql(a: FuncType, b: FuncType, ip: *const InternPool) bool { + return std.mem.eql(Index, a.param_types.get(ip), b.param_types.get(ip)) and + a.return_type == b.return_type and + a.comptime_bits == b.comptime_bits and + a.noalias_bits == b.noalias_bits and + a.alignment == b.alignment and + a.cc == b.cc and + a.is_var_args == b.is_var_args and + a.is_generic == b.is_generic and + a.is_noinline == b.is_noinline; + } + + pub fn hash(self: FuncType, hasher: *Hash, ip: *const InternPool) void { + for (self.param_types.get(ip)) |param_type| { + std.hash.autoHash(hasher, param_type); + } + std.hash.autoHash(hasher, self.return_type); + std.hash.autoHash(hasher, self.comptime_bits); + std.hash.autoHash(hasher, self.noalias_bits); + std.hash.autoHash(hasher, self.alignment); + std.hash.autoHash(hasher, self.cc); + std.hash.autoHash(hasher, self.is_var_args); + std.hash.autoHash(hasher, self.is_generic); + std.hash.autoHash(hasher, self.is_noinline); + } }; pub const Variable = struct { @@ -541,10 +590,73 @@ pub const Key = union(enum) { lib_name: OptionalNullTerminatedString, }; - /// Extern so it can be hashed by reinterpreting memory. 
- pub const Func = extern struct { + pub const Func = struct { + /// In the case of a generic function, this type will potentially have fewer parameters + /// than the generic owner's type, because the comptime parameters will be deleted. ty: Index, - index: Module.Fn.Index, + /// Index into extra array of the `FuncAnalysis` corresponding to this function. + /// Used for mutating that data. + analysis_extra_index: u32, + /// Index into extra array of the `zir_body_inst` corresponding to this function. + /// Used for mutating that data. + zir_body_inst_extra_index: u32, + /// Index into extra array of the resolved inferred error set for this function. + /// Used for mutating that data. + /// 0 when the function does not have an inferred error set. + resolved_error_set_extra_index: u32, + /// When a generic function is instantiated, branch_quota is inherited from the + /// active Sema context. Importantly, this value is also updated when an existing + /// generic function instantiation is found and called. + /// This field contains the index into the extra array of this value, + /// so that it can be mutated. + /// This will be 0 when the function is not a generic function instantiation. + branch_quota_extra_index: u32, + /// The Decl that corresponds to the function itself. + owner_decl: Module.Decl.Index, + /// The ZIR instruction that is a function instruction. Use this to find + /// the body. We store this rather than the body directly so that when ZIR + /// is regenerated on update(), we can map this to the new corresponding + /// ZIR instruction. + zir_body_inst: Zir.Inst.Index, + /// Relative to owner Decl. + lbrace_line: u32, + /// Relative to owner Decl. + rbrace_line: u32, + lbrace_column: u32, + rbrace_column: u32, + + /// The `func_decl` which is the generic function from whence this instance was spawned. + /// If this is `none` it means the function is not a generic instantiation. + generic_owner: Index, + /// If this is a generic function instantiation, this will be non-empty. + /// Corresponds to the parameters of the `generic_owner` type, which + /// may have more parameters than `ty`. + /// Each element is the comptime-known value the generic function was instantiated with, + /// or `none` if the element is runtime-known. + /// TODO: as a follow-up optimization, don't store `none` values here since that data + /// is redundant with `comptime_bits` stored elsewhere. + comptime_args: Index.Slice, + + /// Returns a pointer that becomes invalid after any additions to the `InternPool`. + pub fn analysis(func: *const Func, ip: *const InternPool) *FuncAnalysis { + return @ptrCast(&ip.extra.items[func.analysis_extra_index]); + } + + /// Returns a pointer that becomes invalid after any additions to the `InternPool`. + pub fn zirBodyInst(func: *const Func, ip: *const InternPool) *Zir.Inst.Index { + return @ptrCast(&ip.extra.items[func.zir_body_inst_extra_index]); + } + + /// Returns a pointer that becomes invalid after any additions to the `InternPool`. + pub fn branchQuota(func: *const Func, ip: *const InternPool) *u32 { + return &ip.extra.items[func.branch_quota_extra_index]; + } + + /// Returns a pointer that becomes invalid after any additions to the `InternPool`. 
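+        /// (Illustrative usage: write through the pointer immediately, e.g.
+        /// `func.resolvedErrorSet(ip).* = resolved;` where `resolved` is a
+        /// placeholder, and re-fetch the pointer after any call that can
+        /// grow `ip.extra`.)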
+ pub fn resolvedErrorSet(func: *const Func, ip: *const InternPool) *Index { + assert(func.analysis(ip).inferred_error_set); + return @ptrCast(&ip.extra.items[func.resolved_error_set_extra_index]); + } }; pub const Int = struct { @@ -679,13 +791,13 @@ pub const Key = union(enum) { }; pub const MemoizedCall = struct { - func: Module.Fn.Index, + func: Index, arg_values: []const Index, result: Index, }; pub fn hash32(key: Key, ip: *const InternPool) u32 { - return @as(u32, @truncate(key.hash64(ip))); + return @truncate(key.hash64(ip)); } pub fn hash64(key: Key, ip: *const InternPool) u64 { @@ -695,7 +807,6 @@ pub const Key = union(enum) { return switch (key) { // TODO: assert no padding in these types inline .ptr_type, - .func, .array_type, .vector_type, .opt_type, @@ -723,20 +834,11 @@ pub const Key = union(enum) { }, .runtime_value => |x| Hash.hash(seed, asBytes(&x.val)), - .opaque_type => |x| Hash.hash(seed, asBytes(&x.decl)), - .enum_type => |enum_type| { - var hasher = Hash.init(seed); - std.hash.autoHash(&hasher, enum_type.decl); - return hasher.final(); - }, - - .variable => |variable| { - var hasher = Hash.init(seed); - std.hash.autoHash(&hasher, variable.decl); - return hasher.final(); - }, - .extern_func => |x| Hash.hash(seed, asBytes(&x.ty) ++ asBytes(&x.decl)), + inline .opaque_type, + .enum_type, + .variable, + => |x| Hash.hash(seed, asBytes(&x.decl)), .int => |int| { var hasher = Hash.init(seed); @@ -859,11 +961,7 @@ pub const Key = union(enum) { return hasher.final(); }, - .error_set_type => |error_set_type| { - var hasher = Hash.init(seed); - for (error_set_type.names) |elem| std.hash.autoHash(&hasher, elem); - return hasher.final(); - }, + .error_set_type => |x| Hash.hash(seed, std.mem.sliceAsBytes(x.names.get(ip))), .anon_struct_type => |anon_struct_type| { var hasher = Hash.init(seed); @@ -875,15 +973,7 @@ pub const Key = union(enum) { .func_type => |func_type| { var hasher = Hash.init(seed); - for (func_type.param_types) |param_type| std.hash.autoHash(&hasher, param_type); - std.hash.autoHash(&hasher, func_type.return_type); - std.hash.autoHash(&hasher, func_type.comptime_bits); - std.hash.autoHash(&hasher, func_type.noalias_bits); - std.hash.autoHash(&hasher, func_type.alignment); - std.hash.autoHash(&hasher, func_type.cc); - std.hash.autoHash(&hasher, func_type.is_var_args); - std.hash.autoHash(&hasher, func_type.is_generic); - std.hash.autoHash(&hasher, func_type.is_noinline); + func_type.hash(&hasher, ip); return hasher.final(); }, @@ -893,6 +983,30 @@ pub const Key = union(enum) { for (memoized_call.arg_values) |arg| std.hash.autoHash(&hasher, arg); return hasher.final(); }, + + .func => |func| { + // In the case of a function with an inferred error set, we + // must not include the inferred error set type in the hash, + // otherwise we would get false negatives for interning generic + // function instances which have inferred error sets. 
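+                // Concretely (an illustrative scenario): a lookup for an
+                // already-interned generic instantiation carries a `ty` with
+                // a freshly created inferred error set type, so hashing `ty`
+                // verbatim could never match the existing item. Such types
+                // are instead hashed with the error union's payload
+                // substituted for the return type, mirroring the `eql` logic
+                // below.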
+
+                if (func.generic_owner == .none and func.resolved_error_set_extra_index == 0)
+                    return Hash.hash(seed, asBytes(&func.owner_decl) ++ asBytes(&func.ty));
+
+                var hasher = Hash.init(seed);
+                std.hash.autoHash(&hasher, func.generic_owner);
+                for (func.comptime_args.get(ip)) |arg| std.hash.autoHash(&hasher, arg);
+                if (func.resolved_error_set_extra_index == 0) {
+                    std.hash.autoHash(&hasher, func.ty);
+                } else {
+                    var ty_info = ip.indexToFuncType(func.ty).?;
+                    ty_info.return_type = ip.errorUnionPayload(ty_info.return_type);
+                    ty_info.hash(&hasher, ip);
+                }
+                return hasher.final();
+            },
+
+            .extern_func => |x| Hash.hash(seed, asBytes(&x.ty) ++ asBytes(&x.decl)),
         };
     }
@@ -993,7 +1107,41 @@ pub const Key = union(enum) {
             },
             .func => |a_info| {
                 const b_info = b.func;
-                return a_info.ty == b_info.ty and a_info.index == b_info.index;
+
+                if (a_info.generic_owner != b_info.generic_owner)
+                    return false;
+
+                if (a_info.generic_owner == .none) {
+                    if (a_info.owner_decl != b_info.owner_decl)
+                        return false;
+                } else {
+                    if (!std.mem.eql(
+                        Index,
+                        a_info.comptime_args.get(ip),
+                        b_info.comptime_args.get(ip),
+                    )) return false;
+                }
+
+                if (a_info.ty == b_info.ty)
+                    return true;
+
+                // There is one case where the types may be unequal but we
+                // still want to find the same function body instance. In the
+                // case of functions that have an inferred error set, the key
+                // used to find an existing function body will necessarily have
+                // a unique inferred error set type, because it refers to the
+                // function body InternPool Index. To make this case work we
+                // omit the inferred error set from the equality check.
+                if (a_info.resolved_error_set_extra_index == 0 or
+                    b_info.resolved_error_set_extra_index == 0)
+                {
+                    return false;
+                }
+                var a_ty_info = ip.indexToFuncType(a_info.ty).?;
+                a_ty_info.return_type = ip.errorUnionPayload(a_ty_info.return_type);
+                var b_ty_info = ip.indexToFuncType(b_info.ty).?;
+                b_ty_info.return_type = ip.errorUnionPayload(b_ty_info.return_type);
+                return a_ty_info.eql(b_ty_info, ip);
             },
             .ptr => |a_info| {
@@ -1145,7 +1293,7 @@ pub const Key = union(enum) {
             },
             .error_set_type => |a_info| {
                 const b_info = b.error_set_type;
-                return std.mem.eql(NullTerminatedString, a_info.names, b_info.names);
+                return std.mem.eql(NullTerminatedString, a_info.names.get(ip), b_info.names.get(ip));
             },
             .inferred_error_set_type => |a_info| {
                 const b_info = b.inferred_error_set_type;
@@ -1154,16 +1302,7 @@
             .func_type => |a_info| {
                 const b_info = b.func_type;
-
-                return std.mem.eql(Index, a_info.param_types, b_info.param_types) and
-                    a_info.return_type == b_info.return_type and
-                    a_info.comptime_bits == b_info.comptime_bits and
-                    a_info.noalias_bits == b_info.noalias_bits and
-                    a_info.alignment == b_info.alignment and
-                    a_info.cc == b_info.cc and
-                    a_info.is_var_args == b_info.is_var_args and
-                    a_info.is_generic == b_info.is_generic and
-                    a_info.is_noinline == b_info.is_noinline;
+                return Key.FuncType.eql(a_info, b_info, ip);
             },
 
             .memoized_call => |a_info| {
@@ -1311,6 +1450,8 @@ pub const Index = enum(u32) {
     slice_const_u8_sentinel_0_type,
     optional_noreturn_type,
     anyerror_void_error_union_type,
+    /// Used for the inferred error set of inline/comptime function calls.
+    adhoc_inferred_error_set_type,
     generic_poison_type,
     /// `@TypeOf(.{})`
     empty_struct_type,
@@ -1360,6 +1501,18 @@
     _,
 
+    /// An array of `Index` existing within the `extra` array.
+ /// This type exists to provide a struct with lifetime that is + /// not invalidated when items are added to the `InternPool`. + pub const Slice = struct { + start: u32, + len: u32, + + pub fn get(slice: Slice, ip: *const InternPool) []Index { + return @ptrCast(ip.extra.items[slice.start..][0..slice.len]); + } + }; + pub fn toType(i: Index) @import("type.zig").Type { assert(i != .none); return .{ .ip_index = i }; @@ -1390,6 +1543,7 @@ pub const Index = enum(u32) { /// This function is used in the debugger pretty formatters in tools/ to fetch the /// Tag to encoding mapping to facilitate fancy debug printing for this type. + /// TODO merge this with `Tag.Payload`. fn dbHelper(self: *Index, tag_to_encoding_map: *struct { const DataIsIndex = struct { data: Index }; const DataIsExtraIndexOfEnumExplicit = struct { @@ -1425,13 +1579,14 @@ pub const Index = enum(u32) { type_optional: DataIsIndex, type_anyframe: DataIsIndex, type_error_union: struct { data: *Key.ErrorUnionType }, + type_anyerror_union: DataIsIndex, type_error_set: struct { const @"data.names_len" = opaque {}; - data: *ErrorSet, + data: *Tag.ErrorSet, @"trailing.names.len": *@"data.names_len", trailing: struct { names: []NullTerminatedString }, }, - type_inferred_error_set: struct { data: Module.Fn.InferredErrorSet.Index }, + type_inferred_error_set: DataIsIndex, type_enum_auto: struct { const @"data.fields_len" = opaque {}; data: *EnumAuto, @@ -1450,10 +1605,14 @@ pub const Index = enum(u32) { type_union_untagged: struct { data: Module.Union.Index }, type_union_safety: struct { data: Module.Union.Index }, type_function: struct { + const @"data.flags.has_comptime_bits" = opaque {}; + const @"data.flags.has_noalias_bits" = opaque {}; const @"data.params_len" = opaque {}; - data: *TypeFunction, + data: *Tag.TypeFunction, + @"trailing.comptime_bits.len": *@"data.flags.has_comptime_bits", + @"trailing.noalias_bits.len": *@"data.flags.has_noalias_bits", @"trailing.param_types.len": *@"data.params_len", - trailing: struct { param_types: []Index }, + trailing: struct { comptime_bits: []u32, noalias_bits: []u32, param_types: []Index }, }, undef: DataIsIndex, @@ -1497,7 +1656,23 @@ pub const Index = enum(u32) { float_comptime_float: struct { data: *Float128 }, variable: struct { data: *Tag.Variable }, extern_func: struct { data: *Key.ExternFunc }, - func: struct { data: *Tag.Func }, + func_decl: struct { + const @"data.analysis.inferred_error_set" = opaque {}; + data: *Tag.FuncDecl, + @"trailing.resolved_error_set.len": *@"data.analysis.inferred_error_set", + trailing: struct { resolved_error_set: []Index }, + }, + func_instance: struct { + const @"data.analysis.inferred_error_set" = opaque {}; + const @"data.generic_owner.data.ty.data.params_len" = opaque {}; + data: *Tag.FuncInstance, + @"trailing.resolved_error_set.len": *@"data.analysis.inferred_error_set", + @"trailing.comptime_args.len": *@"data.generic_owner.data.ty.data.params_len", + trailing: struct { resolved_error_set: []Index, comptime_args: []Index }, + }, + func_coerced: struct { + data: *Tag.FuncCoerced, + }, only_possible_value: DataIsIndex, union_value: struct { data: *Key.Union }, bytes: struct { data: *Bytes }, @@ -1716,6 +1891,8 @@ pub const static_keys = [_]Key{ .payload_type = .void_type, } }, + // adhoc_inferred_error_set_type + .{ .simple_type = .adhoc_inferred_error_set }, // generic_poison_type .{ .simple_type = .generic_poison }, @@ -1822,11 +1999,14 @@ pub const Tag = enum(u8) { /// An error union type. /// data is payload to `Key.ErrorUnionType`. 
type_error_union, + /// An error union type of the form `anyerror!T`. + /// data is `Index` of payload type. + type_anyerror_union, /// An error set type. /// data is payload to `ErrorSet`. type_error_set, /// The inferred error set type of a function. - /// data is `Module.Fn.InferredErrorSet.Index`. + /// data is `Index` of a `func_decl` or `func_instance`. type_inferred_error_set, /// An enum type with auto-numbered tag values. /// The enum is exhaustive. @@ -2005,11 +2185,19 @@ pub const Tag = enum(u8) { /// data is extra index to Variable. variable, /// An extern function. - /// data is extra index to Key.ExternFunc. + /// data is extra index to ExternFunc. extern_func, - /// A regular function. - /// data is extra index to Func. - func, + /// A non-extern function corresponding directly to the AST node from whence it originated. + /// data is extra index to `FuncDecl`. + /// Only the owner Decl is used for hashing and equality because the other + /// fields can get patched up during incremental compilation. + func_decl, + /// A generic function instantiation. + /// data is extra index to `FuncInstance`. + func_instance, + /// A `func_decl` or a `func_instance` that has been coerced to a different type. + /// data is extra index to `FuncCoerced`. + func_coerced, /// This represents the only possible value for *some* types which have /// only one possible value. Not all only-possible-values are encoded this way; /// for example structs which have all comptime fields are not encoded this way. @@ -2041,7 +2229,6 @@ pub const Tag = enum(u8) { const Error = Key.Error; const EnumTag = Key.EnumTag; const ExternFunc = Key.ExternFunc; - const Func = Key.Func; const Union = Key.Union; const TypePointer = Key.PtrType; @@ -2057,6 +2244,7 @@ pub const Tag = enum(u8) { .type_optional => unreachable, .type_anyframe => unreachable, .type_error_union => ErrorUnionType, + .type_anyerror_union => unreachable, .type_error_set => ErrorSet, .type_inferred_error_set => unreachable, .type_enum_auto => EnumAuto, @@ -2114,7 +2302,9 @@ pub const Tag = enum(u8) { .float_comptime_float => unreachable, .variable => Variable, .extern_func => ExternFunc, - .func => Func, + .func_decl => FuncDecl, + .func_instance => FuncInstance, + .func_coerced => FuncCoerced, .only_possible_value => unreachable, .union_value => Union, .bytes => Bytes, @@ -2150,36 +2340,107 @@ pub const Tag = enum(u8) { /// The type of the aggregate. ty: Index, }; + + /// Trailing: + /// 0. If `analysis.inferred_error_set` is `true`, `Index` of an `error_set` which + /// is a regular error set corresponding to the finished inferred error set. + /// A `none` value marks that the inferred error set is not resolved yet. + pub const FuncDecl = struct { + analysis: FuncAnalysis, + owner_decl: Module.Decl.Index, + ty: Index, + zir_body_inst: Zir.Inst.Index, + lbrace_line: u32, + rbrace_line: u32, + lbrace_column: u32, + rbrace_column: u32, + }; + + /// Trailing: + /// 0. If `analysis.inferred_error_set` is `true`, `Index` of an `error_set` which + /// is a regular error set corresponding to the finished inferred error set. + /// A `none` value marks that the inferred error set is not resolved yet. + /// 1. For each parameter of generic_owner: `Index` if comptime, otherwise `none` + pub const FuncInstance = struct { + analysis: FuncAnalysis, + // Needed by the linker for codegen. Not part of hashing or equality. + owner_decl: Module.Decl.Index, + ty: Index, + branch_quota: u32, + /// Points to a `FuncDecl`. 
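+        /// (Resolved via `funcDeclInfo` in `extraFuncInstance`; callers such
+        /// as `getFuncInstance` strip `func_coerced` wrappers first via
+        /// `unwrapCoercedFunc`.)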
+ generic_owner: Index, + }; + + pub const FuncCoerced = struct { + ty: Index, + func: Index, + }; + + /// Trailing: + /// 0. name: NullTerminatedString for each names_len + pub const ErrorSet = struct { + names_len: u32, + /// Maps error names to declaration index. + names_map: MapIndex, + }; + + /// Trailing: + /// 0. comptime_bits: u32, // if has_comptime_bits + /// 1. noalias_bits: u32, // if has_noalias_bits + /// 2. param_type: Index for each params_len + pub const TypeFunction = struct { + params_len: u32, + return_type: Index, + flags: Flags, + + pub const Flags = packed struct(u32) { + alignment: Alignment, + cc: std.builtin.CallingConvention, + is_var_args: bool, + is_generic: bool, + has_comptime_bits: bool, + has_noalias_bits: bool, + is_noinline: bool, + align_is_generic: bool, + cc_is_generic: bool, + section_is_generic: bool, + addrspace_is_generic: bool, + _: u9 = 0, + }; + }; }; -/// Trailing: -/// 0. name: NullTerminatedString for each names_len -pub const ErrorSet = struct { - names_len: u32, - /// Maps error names to declaration index. - names_map: MapIndex, -}; +/// State that is mutable during semantic analysis. This data is not used for +/// equality or hashing, except for `inferred_error_set` which is considered +/// to be part of the type of the function. +pub const FuncAnalysis = packed struct(u32) { + state: State, + is_cold: bool, + is_noinline: bool, + calls_or_awaits_errorable_fn: bool, + stack_alignment: Alignment, -/// Trailing: -/// 0. param_type: Index for each params_len -pub const TypeFunction = struct { - params_len: u32, - return_type: Index, - comptime_bits: u32, - noalias_bits: u32, - flags: Flags, + /// True if this function has an inferred error set. + inferred_error_set: bool, - pub const Flags = packed struct(u32) { - alignment: Alignment, - cc: std.builtin.CallingConvention, - is_var_args: bool, - is_generic: bool, - is_noinline: bool, - align_is_generic: bool, - cc_is_generic: bool, - section_is_generic: bool, - addrspace_is_generic: bool, - _: u11 = 0, + _: u14 = 0, + + pub const State = enum(u8) { + /// This function has not yet undergone analysis, because we have not + /// seen a potential runtime call. It may be analyzed in future. + none, + /// Analysis for this function has been queued, but not yet completed. + queued, + /// This function intentionally only has ZIR generated because it is marked + /// inline, which means no runtime version of the function will be generated. + inline_only, + in_progress, + /// There will be a corresponding ErrorMsg in Module.failed_decls + sema_failure, + /// This function might be OK but it depends on another Decl which did not + /// successfully complete semantic analysis. + dependency_failure, + success, }; }; @@ -2251,6 +2512,7 @@ pub const SimpleType = enum(u32) { extern_options, type_info, + adhoc_inferred_error_set, generic_poison, }; @@ -2499,7 +2761,7 @@ pub const Float128 = struct { /// Trailing: /// 0. 
arg value: Index for each args_len pub const MemoizedCall = struct { - func: Module.Fn.Index, + func: Index, args_len: u32, result: Index, }; @@ -2553,11 +2815,11 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.unions_free_list.deinit(gpa); ip.allocated_unions.deinit(gpa); - ip.funcs_free_list.deinit(gpa); - ip.allocated_funcs.deinit(gpa); + ip.decls_free_list.deinit(gpa); + ip.allocated_decls.deinit(gpa); - ip.inferred_error_sets_free_list.deinit(gpa); - ip.allocated_inferred_error_sets.deinit(gpa); + ip.namespaces_free_list.deinit(gpa); + ip.allocated_namespaces.deinit(gpa); for (ip.maps.items) |*map| map.deinit(gpa); ip.maps.deinit(gpa); @@ -2620,26 +2882,22 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { return .{ .ptr_type = ptr_info }; }, - .type_optional => .{ .opt_type = @as(Index, @enumFromInt(data)) }, - .type_anyframe => .{ .anyframe_type = @as(Index, @enumFromInt(data)) }, + .type_optional => .{ .opt_type = @enumFromInt(data) }, + .type_anyframe => .{ .anyframe_type = @enumFromInt(data) }, .type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) }, - .type_error_set => { - const error_set = ip.extraDataTrail(ErrorSet, data); - const names_len = error_set.data.names_len; - const names = ip.extra.items[error_set.end..][0..names_len]; - return .{ .error_set_type = .{ - .names = @as([]const NullTerminatedString, @ptrCast(names)), - .names_map = error_set.data.names_map.toOptional(), - } }; - }, + .type_anyerror_union => .{ .error_union_type = .{ + .error_set_type = .anyerror_type, + .payload_type = @enumFromInt(data), + } }, + .type_error_set => .{ .error_set_type = ip.extraErrorSet(data) }, .type_inferred_error_set => .{ - .inferred_error_set_type = @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(data)), + .inferred_error_set_type = @enumFromInt(data), }, .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, .type_struct => { - const struct_index = @as(Module.Struct.OptionalIndex, @enumFromInt(data)); + const struct_index: Module.Struct.OptionalIndex = @enumFromInt(data); const namespace = if (struct_index.unwrap()) |i| ip.structPtrConst(i).namespace.toOptional() else @@ -2661,9 +2919,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; const names = ip.extra.items[type_struct_anon.end + 2 * fields_len ..][0..fields_len]; return .{ .anon_struct_type = .{ - .types = @as([]const Index, @ptrCast(types)), - .values = @as([]const Index, @ptrCast(values)), - .names = @as([]const NullTerminatedString, @ptrCast(names)), + .types = @ptrCast(types), + .values = @ptrCast(values), + .names = @ptrCast(names), } }; }, .type_tuple_anon => { @@ -2672,8 +2930,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const types = ip.extra.items[type_struct_anon.end..][0..fields_len]; const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; return .{ .anon_struct_type = .{ - .types = @as([]const Index, @ptrCast(types)), - .values = @as([]const Index, @ptrCast(values)), + .types = @ptrCast(types), + .values = @ptrCast(values), .names = &.{}, } }; }, @@ -2710,7 +2968,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .type_enum_explicit => ip.indexToKeyEnum(data, .explicit), .type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive), - .type_function => .{ .func_type = ip.indexToKeyFuncType(data) }, + .type_function => .{ .func_type = ip.extraFuncType(data) }, .undef => 
.{ .undef = @as(Index, @enumFromInt(data)) }, .runtime_value => .{ .runtime_value = ip.extraData(Tag.TypeValue, data) }, @@ -2957,7 +3215,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }; }, .extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) }, - .func => .{ .func = ip.extraData(Tag.Func, data) }, + .func_instance => .{ .func = ip.extraFuncInstance(data) }, + .func_decl => .{ .func = ip.extraFuncDecl(data) }, + .func_coerced => .{ .func = ip.extraFuncCoerced(data) }, .only_possible_value => { const ty = @as(Index, @enumFromInt(data)); const ty_item = ip.items.get(@intFromEnum(ty)); @@ -3062,29 +3322,106 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }; } -fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType { - const type_function = ip.extraDataTrail(TypeFunction, data); - const param_types = @as( - []Index, - @ptrCast(ip.extra.items[type_function.end..][0..type_function.data.params_len]), - ); +fn extraErrorSet(ip: *const InternPool, extra_index: u32) Key.ErrorSetType { + const error_set = ip.extraDataTrail(Tag.ErrorSet, extra_index); return .{ - .param_types = param_types, + .names = .{ + .start = @intCast(error_set.end), + .len = error_set.data.names_len, + }, + .names_map = error_set.data.names_map.toOptional(), + }; +} + +fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType { + const type_function = ip.extraDataTrail(Tag.TypeFunction, extra_index); + var index: usize = type_function.end; + const comptime_bits: u32 = if (!type_function.data.flags.has_comptime_bits) 0 else b: { + const x = ip.extra.items[index]; + index += 1; + break :b x; + }; + const noalias_bits: u32 = if (!type_function.data.flags.has_noalias_bits) 0 else b: { + const x = ip.extra.items[index]; + index += 1; + break :b x; + }; + return .{ + .param_types = .{ + .start = @intCast(index), + .len = type_function.data.params_len, + }, .return_type = type_function.data.return_type, - .comptime_bits = type_function.data.comptime_bits, - .noalias_bits = type_function.data.noalias_bits, + .comptime_bits = comptime_bits, + .noalias_bits = noalias_bits, .alignment = type_function.data.flags.alignment, .cc = type_function.data.flags.cc, .is_var_args = type_function.data.flags.is_var_args, - .is_generic = type_function.data.flags.is_generic, .is_noinline = type_function.data.flags.is_noinline, .align_is_generic = type_function.data.flags.align_is_generic, .cc_is_generic = type_function.data.flags.cc_is_generic, .section_is_generic = type_function.data.flags.section_is_generic, .addrspace_is_generic = type_function.data.flags.addrspace_is_generic, + .is_generic = type_function.data.flags.is_generic, }; } +fn extraFuncDecl(ip: *const InternPool, extra_index: u32) Key.Func { + const P = Tag.FuncDecl; + const func_decl = ip.extraDataTrail(P, extra_index); + return .{ + .ty = func_decl.data.ty, + .analysis_extra_index = extra_index + std.meta.fieldIndex(P, "analysis").?, + .zir_body_inst_extra_index = extra_index + std.meta.fieldIndex(P, "zir_body_inst").?, + .resolved_error_set_extra_index = if (func_decl.data.analysis.inferred_error_set) func_decl.end else 0, + .branch_quota_extra_index = 0, + .owner_decl = func_decl.data.owner_decl, + .zir_body_inst = func_decl.data.zir_body_inst, + .lbrace_line = func_decl.data.lbrace_line, + .rbrace_line = func_decl.data.rbrace_line, + .lbrace_column = func_decl.data.lbrace_column, + .rbrace_column = func_decl.data.rbrace_column, + .generic_owner = .none, + .comptime_args = .{ .start = 0, .len = 0 }, + }; 
+} + +fn extraFuncInstance(ip: *const InternPool, extra_index: u32) Key.Func { + const P = Tag.FuncInstance; + const fi = ip.extraDataTrail(P, extra_index); + const func_decl = ip.funcDeclInfo(fi.data.generic_owner); + return .{ + .ty = fi.data.ty, + .analysis_extra_index = extra_index + std.meta.fieldIndex(P, "analysis").?, + .zir_body_inst_extra_index = func_decl.zir_body_inst_extra_index, + .resolved_error_set_extra_index = if (fi.data.analysis.inferred_error_set) fi.end else 0, + .branch_quota_extra_index = extra_index + std.meta.fieldIndex(P, "branch_quota").?, + .owner_decl = fi.data.owner_decl, + .zir_body_inst = func_decl.zir_body_inst, + .lbrace_line = func_decl.lbrace_line, + .rbrace_line = func_decl.rbrace_line, + .lbrace_column = func_decl.lbrace_column, + .rbrace_column = func_decl.rbrace_column, + .generic_owner = fi.data.generic_owner, + .comptime_args = .{ + .start = fi.end + @intFromBool(fi.data.analysis.inferred_error_set), + .len = ip.funcTypeParamsLen(func_decl.ty), + }, + }; +} + +fn extraFuncCoerced(ip: *const InternPool, extra_index: u32) Key.Func { + const func_coerced = ip.extraData(Tag.FuncCoerced, extra_index); + const sub_item = ip.items.get(@intFromEnum(func_coerced.func)); + var func: Key.Func = switch (sub_item.tag) { + .func_instance => ip.extraFuncInstance(sub_item.data), + .func_decl => ip.extraFuncDecl(sub_item.data), + else => unreachable, + }; + func.ty = func_coerced.ty; + return func; +} + fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { const enum_explicit = ip.extraDataTrail(EnumExplicit, data); const names = @as( @@ -3122,7 +3459,7 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) return @as(Index, @enumFromInt(gop.index)); + if (gop.found_existing) return @enumFromInt(gop.index); try ip.items.ensureUnusedCapacity(gpa, 1); switch (key) { .int_type => |int_type| { @@ -3213,26 +3550,29 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, .error_union_type => |error_union_type| { - ip.items.appendAssumeCapacity(.{ + ip.items.appendAssumeCapacity(if (error_union_type.error_set_type == .anyerror_type) .{ + .tag = .type_anyerror_union, + .data = @intFromEnum(error_union_type.payload_type), + } else .{ .tag = .type_error_union, .data = try ip.addExtra(gpa, error_union_type), }); }, .error_set_type => |error_set_type| { assert(error_set_type.names_map == .none); - assert(std.sort.isSorted(NullTerminatedString, error_set_type.names, {}, NullTerminatedString.indexLessThan)); + assert(std.sort.isSorted(NullTerminatedString, error_set_type.names.get(ip), {}, NullTerminatedString.indexLessThan)); const names_map = try ip.addMap(gpa); - try addStringsToMap(ip, gpa, names_map, error_set_type.names); - const names_len = @as(u32, @intCast(error_set_type.names.len)); - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(ErrorSet).Struct.fields.len + names_len); + try addStringsToMap(ip, gpa, names_map, error_set_type.names.get(ip)); + const names_len = error_set_type.names.len; + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names_len); ip.items.appendAssumeCapacity(.{ .tag = .type_error_set, - .data = ip.addExtraAssumeCapacity(ErrorSet{ + .data = ip.addExtraAssumeCapacity(Tag.ErrorSet{ .names_len = names_len, 
.names_map = names_map, }), }); - ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(error_set_type.names))); + ip.extra.appendSliceAssumeCapacity(@ptrCast(error_set_type.names.get(ip))); }, .inferred_error_set_type => |ies_index| { ip.items.appendAssumeCapacity(.{ @@ -3369,36 +3709,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { } }, - .func_type => |func_type| { - assert(func_type.return_type != .none); - for (func_type.param_types) |param_type| assert(param_type != .none); - - const params_len = @as(u32, @intCast(func_type.param_types.len)); - - try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(TypeFunction).Struct.fields.len + - params_len); - ip.items.appendAssumeCapacity(.{ - .tag = .type_function, - .data = ip.addExtraAssumeCapacity(TypeFunction{ - .params_len = params_len, - .return_type = func_type.return_type, - .comptime_bits = func_type.comptime_bits, - .noalias_bits = func_type.noalias_bits, - .flags = .{ - .alignment = func_type.alignment, - .cc = func_type.cc, - .is_var_args = func_type.is_var_args, - .is_generic = func_type.is_generic, - .is_noinline = func_type.is_noinline, - .align_is_generic = func_type.align_is_generic, - .cc_is_generic = func_type.cc_is_generic, - .section_is_generic = func_type.section_is_generic, - .addrspace_is_generic = func_type.addrspace_is_generic, - }, - }), - }); - ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(func_type.param_types))); - }, + .func_type => unreachable, // use getFuncType() instead + .extern_func => unreachable, // use getExternFunc() instead + .func => unreachable, // use getFuncInstance() or getFuncDecl() instead .variable => |variable| { const has_init = variable.init != .none; @@ -3420,16 +3733,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { }); }, - .extern_func => |extern_func| ip.items.appendAssumeCapacity(.{ - .tag = .extern_func, - .data = try ip.addExtra(gpa, @as(Tag.ExternFunc, extern_func)), - }), - - .func => |func| ip.items.appendAssumeCapacity(.{ - .tag = .func, - .data = try ip.addExtra(gpa, @as(Tag.Func, func)), - }), - .ptr => |ptr| { const ptr_type = ip.indexToKey(ptr.ty).ptr_type; switch (ptr.len) { @@ -4065,7 +4368,604 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(memoized_call.arg_values))); }, } - return @as(Index, @enumFromInt(ip.items.len - 1)); + return @enumFromInt(ip.items.len - 1); +} + +/// This is equivalent to `Key.FuncType` but adjusted to have a slice for `param_types`. +pub const GetFuncTypeKey = struct { + param_types: []Index, + return_type: Index, + comptime_bits: u32, + noalias_bits: u32, + /// `null` means generic. + alignment: ?Alignment, + /// `null` means generic. + cc: ?std.builtin.CallingConvention, + is_var_args: bool, + is_generic: bool, + is_noinline: bool, + section_is_generic: bool, + addrspace_is_generic: bool, +}; + +pub fn getFuncType(ip: *InternPool, gpa: Allocator, key: GetFuncTypeKey) Allocator.Error!Index { + // Validate input parameters. + assert(key.return_type != .none); + for (key.param_types) |param_type| assert(param_type != .none); + + // The strategy here is to add the function type unconditionally, then to + // ask if it already exists, and if so, revert the lengths of the mutated + // arrays. This is similar to what `getOrPutTrailingString` does. 
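+    // In outline, the pattern is (an illustrative sketch, not additional
+    // code; `keyOf` stands for re-decoding the speculative data, as
+    // `extraFuncType` does below):
+    //
+    //     const prev_extra_len = ip.extra.items.len;
+    //     const extra_index = ip.addExtraAssumeCapacity(encoded_item);
+    //     const gop = try ip.map.getOrPutAdapted(gpa, keyOf(extra_index), adapter);
+    //     if (gop.found_existing) {
+    //         ip.extra.items.len = prev_extra_len; // roll back the speculation
+    //         return @enumFromInt(gop.index); // reuse the existing item
+    //     }
+    //     ip.items.appendAssumeCapacity(.{ .tag = ..., .data = extra_index });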
+ const prev_extra_len = ip.extra.items.len; + const params_len: u32 = @intCast(key.param_types.len); + + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeFunction).Struct.fields.len + + @intFromBool(key.comptime_bits != 0) + + @intFromBool(key.noalias_bits != 0) + + params_len); + try ip.items.ensureUnusedCapacity(gpa, 1); + + const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ + .params_len = params_len, + .return_type = key.return_type, + .flags = .{ + .alignment = key.alignment orelse .none, + .cc = key.cc orelse .Unspecified, + .is_var_args = key.is_var_args, + .has_comptime_bits = key.comptime_bits != 0, + .has_noalias_bits = key.noalias_bits != 0, + .is_generic = key.is_generic, + .is_noinline = key.is_noinline, + .align_is_generic = key.alignment == null, + .cc_is_generic = key.cc == null, + .section_is_generic = key.section_is_generic, + .addrspace_is_generic = key.addrspace_is_generic, + }, + }); + + if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); + if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); + ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); + + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = try ip.map.getOrPutAdapted(gpa, Key{ + .func_type = extraFuncType(ip, func_type_extra_index), + }, adapter); + if (gop.found_existing) { + ip.extra.items.len = prev_extra_len; + return @enumFromInt(gop.index); + } + + ip.items.appendAssumeCapacity(.{ + .tag = .type_function, + .data = func_type_extra_index, + }); + return @enumFromInt(ip.items.len - 1); +} + +pub fn getExternFunc(ip: *InternPool, gpa: Allocator, key: Key.ExternFunc) Allocator.Error!Index { + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = try ip.map.getOrPutAdapted(gpa, Key{ .extern_func = key }, adapter); + if (gop.found_existing) return @enumFromInt(gop.index); + errdefer _ = ip.map.pop(); + const prev_extra_len = ip.extra.items.len; + const extra_index = try ip.addExtra(gpa, @as(Tag.ExternFunc, key)); + errdefer ip.extra.items.len = prev_extra_len; + try ip.items.append(gpa, .{ + .tag = .extern_func, + .data = extra_index, + }); + errdefer ip.items.len -= 1; + return @enumFromInt(ip.items.len - 1); +} + +pub const GetFuncDeclKey = struct { + owner_decl: Module.Decl.Index, + ty: Index, + zir_body_inst: Zir.Inst.Index, + lbrace_line: u32, + rbrace_line: u32, + lbrace_column: u32, + rbrace_column: u32, + cc: ?std.builtin.CallingConvention, + is_noinline: bool, +}; + +pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocator.Error!Index { + // The strategy here is to add the function type unconditionally, then to + // ask if it already exists, and if so, revert the lengths of the mutated + // arrays. This is similar to what `getOrPutTrailingString` does. 
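+    // One difference from `getFuncType` above: map capacity is reserved up
+    // front (`ip.map.ensureUnusedCapacity(gpa, 1)` below), so the lookup can
+    // use the infallible `getOrPutAssumeCapacityAdapted` after the
+    // speculative writes.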
+ const prev_extra_len = ip.extra.items.len; + + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len); + try ip.items.ensureUnusedCapacity(gpa, 1); + try ip.map.ensureUnusedCapacity(gpa, 1); + + const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{ + .analysis = .{ + .state = if (key.cc == .Inline) .inline_only else .none, + .is_cold = false, + .is_noinline = key.is_noinline, + .calls_or_awaits_errorable_fn = false, + .stack_alignment = .none, + .inferred_error_set = false, + }, + .owner_decl = key.owner_decl, + .ty = key.ty, + .zir_body_inst = key.zir_body_inst, + .lbrace_line = key.lbrace_line, + .rbrace_line = key.rbrace_line, + .lbrace_column = key.lbrace_column, + .rbrace_column = key.rbrace_column, + }); + + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + .func = extraFuncDecl(ip, func_decl_extra_index), + }, adapter); + + if (gop.found_existing) { + ip.extra.items.len = prev_extra_len; + return @enumFromInt(gop.index); + } + + ip.items.appendAssumeCapacity(.{ + .tag = .func_decl, + .data = func_decl_extra_index, + }); + return @enumFromInt(ip.items.len - 1); +} + +pub const GetFuncDeclIesKey = struct { + owner_decl: Module.Decl.Index, + param_types: []Index, + noalias_bits: u32, + comptime_bits: u32, + bare_return_type: Index, + /// null means generic. + cc: ?std.builtin.CallingConvention, + /// null means generic. + alignment: ?Alignment, + section_is_generic: bool, + addrspace_is_generic: bool, + is_var_args: bool, + is_generic: bool, + is_noinline: bool, + zir_body_inst: Zir.Inst.Index, + lbrace_line: u32, + rbrace_line: u32, + lbrace_column: u32, + rbrace_column: u32, +}; + +pub fn getFuncDeclIes(ip: *InternPool, gpa: Allocator, key: GetFuncDeclIesKey) Allocator.Error!Index { + // Validate input parameters. + assert(key.bare_return_type != .none); + for (key.param_types) |param_type| assert(param_type != .none); + + // The strategy here is to add the function decl unconditionally, then to + // ask if it already exists, and if so, revert the lengths of the mutated + // arrays. This is similar to what `getOrPutTrailingString` does. 
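+    // The four items appended below form a reference cycle (illustrative
+    // index arithmetic, with N being `ip.items.len` before the first append):
+    //
+    //     items[N + 0] func_decl                .ty = N + 3
+    //     items[N + 1] type_error_union         payload = bare_return_type, error set = N + 2
+    //     items[N + 2] type_inferred_error_set  .data = N + 0
+    //     items[N + 3] type_function            .return_type = N + 1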
+ const prev_extra_len = ip.extra.items.len; + const params_len: u32 = @intCast(key.param_types.len); + + try ip.map.ensureUnusedCapacity(gpa, 4); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len + + 1 + // inferred_error_set + @typeInfo(Tag.ErrorUnionType).Struct.fields.len + + @typeInfo(Tag.TypeFunction).Struct.fields.len + + @intFromBool(key.comptime_bits != 0) + + @intFromBool(key.noalias_bits != 0) + + params_len); + try ip.items.ensureUnusedCapacity(gpa, 4); + + const func_decl_extra_index = ip.addExtraAssumeCapacity(Tag.FuncDecl{ + .analysis = .{ + .state = if (key.cc == .Inline) .inline_only else .none, + .is_cold = false, + .is_noinline = key.is_noinline, + .calls_or_awaits_errorable_fn = false, + .stack_alignment = .none, + .inferred_error_set = true, + }, + .owner_decl = key.owner_decl, + .ty = @enumFromInt(ip.items.len + 3), + .zir_body_inst = key.zir_body_inst, + .lbrace_line = key.lbrace_line, + .rbrace_line = key.rbrace_line, + .lbrace_column = key.lbrace_column, + .rbrace_column = key.rbrace_column, + }); + + ip.items.appendAssumeCapacity(.{ + .tag = .func_decl, + .data = func_decl_extra_index, + }); + ip.extra.appendAssumeCapacity(@intFromEnum(Index.none)); + + ip.items.appendAssumeCapacity(.{ + .tag = .type_error_union, + .data = ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ + .error_set_type = @enumFromInt(ip.items.len + 1), + .payload_type = key.bare_return_type, + }), + }); + + ip.items.appendAssumeCapacity(.{ + .tag = .type_inferred_error_set, + .data = @intCast(ip.items.len - 2), + }); + + const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ + .params_len = params_len, + .return_type = @enumFromInt(ip.items.len - 2), + .flags = .{ + .alignment = key.alignment orelse .none, + .cc = key.cc orelse .Unspecified, + .is_var_args = key.is_var_args, + .has_comptime_bits = key.comptime_bits != 0, + .has_noalias_bits = key.noalias_bits != 0, + .is_generic = key.is_generic, + .is_noinline = key.is_noinline, + .align_is_generic = key.alignment == null, + .cc_is_generic = key.cc == null, + .section_is_generic = key.section_is_generic, + .addrspace_is_generic = key.addrspace_is_generic, + }, + }); + if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits); + if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits); + ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types)); + + ip.items.appendAssumeCapacity(.{ + .tag = .type_function, + .data = func_type_extra_index, + }); + + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + .func = extraFuncDecl(ip, func_decl_extra_index), + }, adapter); + if (!gop.found_existing) { + assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ .error_union_type = .{ + .error_set_type = @enumFromInt(ip.items.len - 2), + .payload_type = key.bare_return_type, + } }, adapter).found_existing); + assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ + .inferred_error_set_type = @enumFromInt(ip.items.len - 4), + }, adapter).found_existing); + assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ + .func_type = extraFuncType(ip, func_type_extra_index), + }, adapter).found_existing); + return @enumFromInt(ip.items.len - 4); + } + + // An existing function type was found; undo the additions to our two arrays. 
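+    // (The rollback below is complete: the four speculative items are the
+    // newest entries in `ip.items`, and `prev_extra_len` was captured before
+    // any of the speculative `addExtraAssumeCapacity` calls.)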
+ ip.items.len -= 4; + ip.extra.items.len = prev_extra_len; + return @enumFromInt(gop.index); +} + +pub fn getErrorSetType( + ip: *InternPool, + gpa: Allocator, + names: []const NullTerminatedString, +) Allocator.Error!Index { + assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan)); + + // The strategy here is to add the type unconditionally, then to ask if it + // already exists, and if so, revert the lengths of the mutated arrays. + // This is similar to what `getOrPutTrailingString` does. + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names.len); + + const prev_extra_len = ip.extra.items.len; + errdefer ip.extra.items.len = prev_extra_len; + + const predicted_names_map: MapIndex = @enumFromInt(ip.maps.items.len); + + const error_set_extra_index = ip.addExtraAssumeCapacity(Tag.ErrorSet{ + .names_len = @intCast(names.len), + .names_map = predicted_names_map, + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast(names)); + + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = try ip.map.getOrPutAdapted(gpa, Key{ + .error_set_type = extraErrorSet(ip, error_set_extra_index), + }, adapter); + errdefer _ = ip.map.pop(); + + if (gop.found_existing) { + ip.extra.items.len = prev_extra_len; + return @enumFromInt(gop.index); + } + + try ip.items.append(gpa, .{ + .tag = .type_error_set, + .data = error_set_extra_index, + }); + errdefer ip.items.len -= 1; + + const names_map = try ip.addMap(gpa); + errdefer _ = ip.maps.pop(); + + try addStringsToMap(ip, gpa, names_map, names); + + return @enumFromInt(ip.items.len - 1); +} + +pub const GetFuncInstanceKey = struct { + /// Has the length of the instance function (may be lesser than + /// comptime_args). + param_types: []Index, + /// Has the length of generic_owner's parameters (may be greater than + /// param_types). + comptime_args: []const Index, + noalias_bits: u32, + bare_return_type: Index, + cc: std.builtin.CallingConvention, + alignment: Alignment, + section: OptionalNullTerminatedString, + is_noinline: bool, + generic_owner: Index, + inferred_error_set: bool, + generation: u32, +}; + +pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey) Allocator.Error!Index { + if (arg.inferred_error_set) + return getFuncInstanceIes(ip, gpa, arg); + + const func_ty = try ip.getFuncType(gpa, .{ + .param_types = arg.param_types, + .return_type = arg.bare_return_type, + .comptime_bits = 0, + .noalias_bits = arg.noalias_bits, + .alignment = arg.alignment, + .cc = arg.cc, + .is_var_args = false, + .is_generic = false, + .is_noinline = arg.is_noinline, + .section_is_generic = false, + .addrspace_is_generic = false, + }); + + const generic_owner = unwrapCoercedFunc(ip, arg.generic_owner); + + assert(arg.comptime_args.len == ip.funcTypeParamsLen(ip.typeOf(generic_owner))); + + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncInstance).Struct.fields.len + + arg.comptime_args.len); + const prev_extra_len = ip.extra.items.len; + errdefer ip.extra.items.len = prev_extra_len; + + const func_extra_index = ip.addExtraAssumeCapacity(Tag.FuncInstance{ + .analysis = .{ + .state = if (arg.cc == .Inline) .inline_only else .none, + .is_cold = false, + .is_noinline = arg.is_noinline, + .calls_or_awaits_errorable_fn = false, + .stack_alignment = .none, + .inferred_error_set = false, + }, + // This is populated after we create the Decl below. It is not read + // by equality or hashing functions. 
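+        // (`Key.hash64` and `Key.eql` identify a `func_instance` by its
+        // `generic_owner`, `comptime_args`, and error-set-adjusted `ty`, so
+        // the `undefined` value below cannot affect the map lookup.)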
+        .owner_decl = undefined,
+        .ty = func_ty,
+        .branch_quota = 0,
+        .generic_owner = generic_owner,
+    });
+    ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.comptime_args));
+
+    const gop = try ip.map.getOrPutAdapted(gpa, Key{
+        .func = extraFuncInstance(ip, func_extra_index),
+    }, KeyAdapter{ .intern_pool = ip });
+    errdefer _ = ip.map.pop();
+
+    if (gop.found_existing) {
+        ip.extra.items.len = prev_extra_len;
+        return @enumFromInt(gop.index);
+    }
+
+    const func_index: Index = @enumFromInt(ip.items.len);
+
+    try ip.items.append(gpa, .{
+        .tag = .func_instance,
+        .data = func_extra_index,
+    });
+    errdefer ip.items.len -= 1;
+
+    return finishFuncInstance(
+        ip,
+        gpa,
+        generic_owner,
+        func_index,
+        func_extra_index,
+        arg.generation,
+        func_ty,
+        arg.section,
+    );
+}
+
+/// This function exists separately from `getFuncInstance` because it needs to
+/// create 4 new items in the InternPool atomically before it can look for an
+/// existing item in the map.
+pub fn getFuncInstanceIes(
+    ip: *InternPool,
+    gpa: Allocator,
+    arg: GetFuncInstanceKey,
+) Allocator.Error!Index {
+    // Validate input parameters.
+    assert(arg.inferred_error_set);
+    assert(arg.bare_return_type != .none);
+    for (arg.param_types) |param_type| assert(param_type != .none);
+
+    // The strategy here is to add the function decl unconditionally, then to
+    // ask if it already exists, and if so, revert the lengths of the mutated
+    // arrays. This is similar to what `getOrPutTrailingString` does.
+    const prev_extra_len = ip.extra.items.len;
+    const params_len: u32 = @intCast(arg.param_types.len);
+
+    try ip.map.ensureUnusedCapacity(gpa, 4);
+    try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncInstance).Struct.fields.len +
+        1 + // inferred_error_set
+        arg.comptime_args.len +
+        @typeInfo(Tag.ErrorUnionType).Struct.fields.len +
+        @typeInfo(Tag.TypeFunction).Struct.fields.len +
+        @intFromBool(arg.noalias_bits != 0) +
+        params_len);
+    try ip.items.ensureUnusedCapacity(gpa, 4);
+
+    const func_index: Index = @enumFromInt(ip.items.len);
+    const error_union_type: Index = @enumFromInt(ip.items.len + 1);
+    const error_set_type: Index = @enumFromInt(ip.items.len + 2);
+    const func_ty: Index = @enumFromInt(ip.items.len + 3);
+
+    const func_extra_index = ip.addExtraAssumeCapacity(Tag.FuncInstance{
+        .analysis = .{
+            .state = if (arg.cc == .Inline) .inline_only else .none,
+            .is_cold = false,
+            .is_noinline = arg.is_noinline,
+            .calls_or_awaits_errorable_fn = false,
+            .stack_alignment = .none,
+            .inferred_error_set = true,
+        },
+        // This is populated after we create the Decl below. It is not read
+        // by equality or hashing functions.
+ .owner_decl = undefined, + .ty = func_ty, + .branch_quota = 0, + .generic_owner = arg.generic_owner, + }); + ip.extra.appendAssumeCapacity(@intFromEnum(Index.none)); // resolved error set + ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.comptime_args)); + + const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{ + .params_len = params_len, + .return_type = error_union_type, + .flags = .{ + .alignment = arg.alignment, + .cc = arg.cc, + .is_var_args = false, + .has_comptime_bits = false, + .has_noalias_bits = arg.noalias_bits != 0, + .is_generic = false, + .is_noinline = arg.is_noinline, + .align_is_generic = false, + .cc_is_generic = false, + .section_is_generic = false, + .addrspace_is_generic = false, + }, + }); + // no comptime_bits because has_comptime_bits is false + if (arg.noalias_bits != 0) ip.extra.appendAssumeCapacity(arg.noalias_bits); + ip.extra.appendSliceAssumeCapacity(@ptrCast(arg.param_types)); + + // TODO: add appendSliceAssumeCapacity to MultiArrayList. + ip.items.appendAssumeCapacity(.{ + .tag = .func_instance, + .data = func_extra_index, + }); + ip.items.appendAssumeCapacity(.{ + .tag = .type_error_union, + .data = ip.addExtraAssumeCapacity(Tag.ErrorUnionType{ + .error_set_type = error_set_type, + .payload_type = arg.bare_return_type, + }), + }); + ip.items.appendAssumeCapacity(.{ + .tag = .type_inferred_error_set, + .data = @intFromEnum(func_index), + }); + ip.items.appendAssumeCapacity(.{ + .tag = .type_function, + .data = func_type_extra_index, + }); + + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + .func = extraFuncInstance(ip, func_extra_index), + }, adapter); + if (gop.found_existing) { + // Hot path: undo the additions to our two arrays. + ip.items.len -= 4; + ip.extra.items.len = prev_extra_len; + return @enumFromInt(gop.index); + } + + // Synchronize the map with items. 
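+    // Each of the three trailing items (the error union, the inferred error
+    // set, and the function type) must also get a map entry at the matching
+    // index so that `map` and `items` stay in one-to-one correspondence; the
+    // asserts additionally check that none of them already existed.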
+ assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ .error_union_type = .{ + .error_set_type = error_set_type, + .payload_type = arg.bare_return_type, + } }, adapter).found_existing); + assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ + .inferred_error_set_type = func_index, + }, adapter).found_existing); + assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ + .func_type = extraFuncType(ip, func_type_extra_index), + }, adapter).found_existing); + + return finishFuncInstance( + ip, + gpa, + arg.generic_owner, + func_index, + func_extra_index, + arg.generation, + func_ty, + arg.section, + ); +} + +fn finishFuncInstance( + ip: *InternPool, + gpa: Allocator, + generic_owner: Index, + func_index: Index, + func_extra_index: u32, + generation: u32, + func_ty: Index, + section: OptionalNullTerminatedString, +) Allocator.Error!Index { + const fn_owner_decl = ip.declPtr(ip.funcDeclOwner(generic_owner)); + const decl_index = try ip.createDecl(gpa, .{ + .name = undefined, + .src_namespace = fn_owner_decl.src_namespace, + .src_node = fn_owner_decl.src_node, + .src_line = fn_owner_decl.src_line, + .has_tv = true, + .owns_tv = true, + .ty = func_ty.toType(), + .val = func_index.toValue(), + .alignment = .none, + .@"linksection" = section, + .@"addrspace" = fn_owner_decl.@"addrspace", + .analysis = .complete, + .deletion_flag = false, + .zir_decl_index = fn_owner_decl.zir_decl_index, + .src_scope = fn_owner_decl.src_scope, + .generation = generation, + .is_pub = fn_owner_decl.is_pub, + .is_exported = fn_owner_decl.is_exported, + .has_linksection_or_addrspace = fn_owner_decl.has_linksection_or_addrspace, + .has_align = fn_owner_decl.has_align, + .alive = true, + .kind = .anon, + }); + errdefer ip.destroyDecl(gpa, decl_index); + + // Populate the owner_decl field which was left undefined until now. + ip.extra.items[ + func_extra_index + std.meta.fieldIndex(Tag.FuncInstance, "owner_decl").? + ] = @intFromEnum(decl_index); + + // TODO: improve this name + const decl = ip.declPtr(decl_index); + decl.name = try ip.getOrPutStringFmt(gpa, "{}__anon_{d}", .{ + fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index), + }); + + return func_index; } /// Provides API for completing an enum type after calling `getIncompleteEnum`. @@ -4265,15 +5165,15 @@ pub fn finishGetEnum( .values_map = values_map, }), }); - ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.names))); - ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.values))); - return @as(Index, @enumFromInt(ip.items.len - 1)); + ip.extra.appendSliceAssumeCapacity(@ptrCast(enum_type.names)); + ip.extra.appendSliceAssumeCapacity(@ptrCast(enum_type.values)); + return @enumFromInt(ip.items.len - 1); } pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const index = ip.map.getIndexAdapted(key, adapter) orelse return null; - return @as(Index, @enumFromInt(index)); + return @enumFromInt(index); } pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { @@ -4311,7 +5211,7 @@ fn addIndexesToMap( fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex { const ptr = try ip.maps.addOne(gpa); ptr.* = .{}; - return @as(MapIndex, @enumFromInt(ip.maps.items.len - 1)); + return @enumFromInt(ip.maps.items.len - 1); } /// This operation only happens under compile error conditions. 
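// An illustrative sketch (not normative) of the speculative-append / rollback
// idiom that getErrorSetType, getFuncInstance, and getFuncInstanceIes above
// all share. The hypothetical `DemoPool` is simplified: a u64 hash stands in
// for the adapted full-key comparison the real code performs, and only one
// array is rolled back.
const std = @import("std");

const DemoPool = struct {
    extra: std.ArrayListUnmanaged(u32) = .{},
    map: std.AutoArrayHashMapUnmanaged(u64, void) = .{},

    /// Interns `words`, returning a stable index. On a hit, the speculative
    /// append is undone by truncating `extra` back to its saved length;
    /// nothing needs to be freed because the array only ever grows.
    fn intern(p: *DemoPool, gpa: std.mem.Allocator, words: []const u32) !u32 {
        const prev_extra_len = p.extra.items.len;
        try p.extra.appendSlice(gpa, words); // add unconditionally
        const hash = std.hash.Wyhash.hash(0, std.mem.sliceAsBytes(words));
        const gop = try p.map.getOrPut(gpa, hash);
        if (gop.found_existing) {
            p.extra.items.len = prev_extra_len; // revert the mutated array
        }
        return @intCast(gop.index);
    }
};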
@@ -4342,24 +5242,28 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { const result = @as(u32, @intCast(ip.extra.items.len)); inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { ip.extra.appendAssumeCapacity(switch (field.type) { - u32 => @field(extra, field.name), - Index => @intFromEnum(@field(extra, field.name)), - Module.Decl.Index => @intFromEnum(@field(extra, field.name)), - Module.Namespace.Index => @intFromEnum(@field(extra, field.name)), - Module.Namespace.OptionalIndex => @intFromEnum(@field(extra, field.name)), - Module.Fn.Index => @intFromEnum(@field(extra, field.name)), - MapIndex => @intFromEnum(@field(extra, field.name)), - OptionalMapIndex => @intFromEnum(@field(extra, field.name)), - RuntimeIndex => @intFromEnum(@field(extra, field.name)), - String => @intFromEnum(@field(extra, field.name)), - NullTerminatedString => @intFromEnum(@field(extra, field.name)), - OptionalNullTerminatedString => @intFromEnum(@field(extra, field.name)), - i32 => @as(u32, @bitCast(@field(extra, field.name))), - Tag.TypePointer.Flags => @as(u32, @bitCast(@field(extra, field.name))), - TypeFunction.Flags => @as(u32, @bitCast(@field(extra, field.name))), - Tag.TypePointer.PackedOffset => @as(u32, @bitCast(@field(extra, field.name))), - Tag.TypePointer.VectorIndex => @intFromEnum(@field(extra, field.name)), - Tag.Variable.Flags => @as(u32, @bitCast(@field(extra, field.name))), + Index, + Module.Decl.Index, + Module.Namespace.Index, + Module.Namespace.OptionalIndex, + MapIndex, + OptionalMapIndex, + RuntimeIndex, + String, + NullTerminatedString, + OptionalNullTerminatedString, + Tag.TypePointer.VectorIndex, + => @intFromEnum(@field(extra, field.name)), + + u32, + i32, + FuncAnalysis, + Tag.TypePointer.Flags, + Tag.TypeFunction.Flags, + Tag.TypePointer.PackedOffset, + Tag.Variable.Flags, + => @bitCast(@field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), }); } @@ -4404,36 +5308,40 @@ fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void { } } -fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: usize } { +fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: u32 } { var result: T = undefined; const fields = @typeInfo(T).Struct.fields; inline for (fields, 0..) 
|field, i| { const int32 = ip.extra.items[i + index]; @field(result, field.name) = switch (field.type) { - u32 => int32, - Index => @as(Index, @enumFromInt(int32)), - Module.Decl.Index => @as(Module.Decl.Index, @enumFromInt(int32)), - Module.Namespace.Index => @as(Module.Namespace.Index, @enumFromInt(int32)), - Module.Namespace.OptionalIndex => @as(Module.Namespace.OptionalIndex, @enumFromInt(int32)), - Module.Fn.Index => @as(Module.Fn.Index, @enumFromInt(int32)), - MapIndex => @as(MapIndex, @enumFromInt(int32)), - OptionalMapIndex => @as(OptionalMapIndex, @enumFromInt(int32)), - RuntimeIndex => @as(RuntimeIndex, @enumFromInt(int32)), - String => @as(String, @enumFromInt(int32)), - NullTerminatedString => @as(NullTerminatedString, @enumFromInt(int32)), - OptionalNullTerminatedString => @as(OptionalNullTerminatedString, @enumFromInt(int32)), - i32 => @as(i32, @bitCast(int32)), - Tag.TypePointer.Flags => @as(Tag.TypePointer.Flags, @bitCast(int32)), - TypeFunction.Flags => @as(TypeFunction.Flags, @bitCast(int32)), - Tag.TypePointer.PackedOffset => @as(Tag.TypePointer.PackedOffset, @bitCast(int32)), - Tag.TypePointer.VectorIndex => @as(Tag.TypePointer.VectorIndex, @enumFromInt(int32)), - Tag.Variable.Flags => @as(Tag.Variable.Flags, @bitCast(int32)), + Index, + Module.Decl.Index, + Module.Namespace.Index, + Module.Namespace.OptionalIndex, + MapIndex, + OptionalMapIndex, + RuntimeIndex, + String, + NullTerminatedString, + OptionalNullTerminatedString, + Tag.TypePointer.VectorIndex, + => @enumFromInt(int32), + + u32, + i32, + Tag.TypePointer.Flags, + Tag.TypeFunction.Flags, + Tag.TypePointer.PackedOffset, + Tag.Variable.Flags, + FuncAnalysis, + => @bitCast(int32), + else => @compileError("bad field type: " ++ @typeName(field.type)), }; } return .{ .data = result, - .end = index + fields.len, + .end = @intCast(index + fields.len), }; } @@ -4603,206 +5511,226 @@ pub fn sliceLen(ip: *const InternPool, i: Index) Index { pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { const old_ty = ip.typeOf(val); if (old_ty == new_ty) return val; + + const tags = ip.items.items(.tag); + switch (val) { .undef => return ip.get(gpa, .{ .undef = new_ty }), - .null_value => if (ip.isOptionalType(new_ty)) - return ip.get(gpa, .{ .opt = .{ + .null_value => { + if (ip.isOptionalType(new_ty)) return ip.get(gpa, .{ .opt = .{ .ty = new_ty, .val = .none, - } }) - else if (ip.isPointerType(new_ty)) - return ip.get(gpa, .{ .ptr = .{ + } }); + + if (ip.isPointerType(new_ty)) return ip.get(gpa, .{ .ptr = .{ .ty = new_ty, .addr = .{ .int = .zero_usize }, .len = switch (ip.indexToKey(new_ty).ptr_type.flags.size) { .One, .Many, .C => .none, .Slice => try ip.get(gpa, .{ .undef = .usize_type }), }, - } }), - else => switch (ip.indexToKey(val)) { - .undef => return ip.get(gpa, .{ .undef = new_ty }), - .extern_func => |extern_func| if (ip.isFunctionType(new_ty)) - return ip.get(gpa, .{ .extern_func = .{ - .ty = new_ty, - .decl = extern_func.decl, - .lib_name = extern_func.lib_name, - } }), - .func => |func| if (ip.isFunctionType(new_ty)) - return ip.get(gpa, .{ .func = .{ - .ty = new_ty, - .index = func.index, - } }), - .int => |int| switch (ip.indexToKey(new_ty)) { - .enum_type => |enum_type| return ip.get(gpa, .{ .enum_tag = .{ - .ty = new_ty, - .int = try ip.getCoerced(gpa, val, enum_type.tag_ty), - } }), - .ptr_type => return ip.get(gpa, .{ .ptr = .{ - .ty = new_ty, - .addr = .{ .int = try ip.getCoerced(gpa, val, .usize_type) }, - } }), - else => if (ip.isIntegerType(new_ty)) - 
return getCoercedInts(ip, gpa, int, new_ty), - }, - .float => |float| switch (ip.indexToKey(new_ty)) { - .simple_type => |simple| switch (simple) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - .comptime_float, - => return ip.get(gpa, .{ .float = .{ - .ty = new_ty, - .storage = float.storage, - } }), - else => {}, - }, - else => {}, - }, - .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty)) - return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty), - .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) { - .enum_type => |enum_type| { - const index = enum_type.nameIndex(ip, enum_literal).?; - return ip.get(gpa, .{ .enum_tag = .{ - .ty = new_ty, - .int = if (enum_type.values.len != 0) - enum_type.values[index] - else - try ip.get(gpa, .{ .int = .{ - .ty = enum_type.tag_ty, - .storage = .{ .u64 = index }, - } }), - } }); - }, - else => {}, - }, - .ptr => |ptr| if (ip.isPointerType(new_ty)) - return ip.get(gpa, .{ .ptr = .{ - .ty = new_ty, - .addr = ptr.addr, - .len = ptr.len, - } }) - else if (ip.isIntegerType(new_ty)) - switch (ptr.addr) { - .int => |int| return ip.getCoerced(gpa, int, new_ty), - else => {}, - }, - .opt => |opt| switch (ip.indexToKey(new_ty)) { - .ptr_type => |ptr_type| return switch (opt.val) { - .none => try ip.get(gpa, .{ .ptr = .{ - .ty = new_ty, - .addr = .{ .int = .zero_usize }, - .len = switch (ptr_type.flags.size) { - .One, .Many, .C => .none, - .Slice => try ip.get(gpa, .{ .undef = .usize_type }), - }, - } }), - else => |payload| try ip.getCoerced(gpa, payload, new_ty), - }, - .opt_type => |child_type| return try ip.get(gpa, .{ .opt = .{ - .ty = new_ty, - .val = switch (opt.val) { - .none => .none, - else => try ip.getCoerced(gpa, opt.val, child_type), - }, - } }), - else => {}, - }, - .err => |err| if (ip.isErrorSetType(new_ty)) - return ip.get(gpa, .{ .err = .{ - .ty = new_ty, - .name = err.name, - } }) - else if (ip.isErrorUnionType(new_ty)) - return ip.get(gpa, .{ .error_union = .{ - .ty = new_ty, - .val = .{ .err_name = err.name }, - } }), - .error_union => |error_union| if (ip.isErrorUnionType(new_ty)) - return ip.get(gpa, .{ .error_union = .{ - .ty = new_ty, - .val = error_union.val, - } }), - .aggregate => |aggregate| { - const new_len = @as(usize, @intCast(ip.aggregateTypeLen(new_ty))); - direct: { - const old_ty_child = switch (ip.indexToKey(old_ty)) { - inline .array_type, .vector_type => |seq_type| seq_type.child, - .anon_struct_type, .struct_type => break :direct, - else => unreachable, - }; - const new_ty_child = switch (ip.indexToKey(new_ty)) { - inline .array_type, .vector_type => |seq_type| seq_type.child, - .anon_struct_type, .struct_type => break :direct, - else => unreachable, - }; - if (old_ty_child != new_ty_child) break :direct; - // TODO: write something like getCoercedInts to avoid needing to dupe here - switch (aggregate.storage) { - .bytes => |bytes| { - const bytes_copy = try gpa.dupe(u8, bytes[0..new_len]); - defer gpa.free(bytes_copy); - return ip.get(gpa, .{ .aggregate = .{ - .ty = new_ty, - .storage = .{ .bytes = bytes_copy }, - } }); - }, - .elems => |elems| { - const elems_copy = try gpa.dupe(InternPool.Index, elems[0..new_len]); - defer gpa.free(elems_copy); - return ip.get(gpa, .{ .aggregate = .{ - .ty = new_ty, - .storage = .{ .elems = elems_copy }, - } }); - }, - .repeated_elem => |elem| { - return ip.get(gpa, .{ .aggregate = .{ - .ty = new_ty, - .storage = .{ .repeated_elem = elem }, - } }); - }, - } + } }); + }, + else => switch (tags[@intFromEnum(val)]) { + .func_decl => return 
getCoercedFuncDecl(ip, gpa, val, new_ty), + .func_instance => return getCoercedFuncInstance(ip, gpa, val, new_ty), + .func_coerced => { + const extra_index = ip.items.items(.data)[@intFromEnum(val)]; + const func: Index = @enumFromInt( + ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncCoerced, "func").?], + ); + switch (tags[@intFromEnum(func)]) { + .func_decl => return getCoercedFuncDecl(ip, gpa, val, new_ty), + .func_instance => return getCoercedFuncInstance(ip, gpa, val, new_ty), + else => unreachable, } - // Direct approach failed - we must recursively coerce elems - const agg_elems = try gpa.alloc(InternPool.Index, new_len); - defer gpa.free(agg_elems); - // First, fill the vector with the uncoerced elements. We do this to avoid key - // lifetime issues, since it'll allow us to avoid referencing `aggregate` after we - // begin interning elems. - switch (aggregate.storage) { - .bytes => { - // We have to intern each value here, so unfortunately we can't easily avoid - // the repeated indexToKey calls. - for (agg_elems, 0..) |*elem, i| { - const x = ip.indexToKey(val).aggregate.storage.bytes[i]; - elem.* = try ip.get(gpa, .{ .int = .{ - .ty = .u8_type, - .storage = .{ .u64 = x }, - } }); - } - }, - .elems => |elems| @memcpy(agg_elems, elems[0..new_len]), - .repeated_elem => |elem| @memset(agg_elems, elem), - } - // Now, coerce each element to its new type. - for (agg_elems, 0..) |*elem, i| { - const new_elem_ty = switch (ip.indexToKey(new_ty)) { - inline .array_type, .vector_type => |seq_type| seq_type.child, - .anon_struct_type => |anon_struct_type| anon_struct_type.types[i], - .struct_type => |struct_type| ip.structPtr(struct_type.index.unwrap().?) - .fields.values()[i].ty.toIntern(), - else => unreachable, - }; - elem.* = try ip.getCoerced(gpa, elem.*, new_elem_ty); - } - return ip.get(gpa, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } }); }, else => {}, }, } + + switch (ip.indexToKey(val)) { + .undef => return ip.get(gpa, .{ .undef = new_ty }), + .extern_func => |extern_func| if (ip.isFunctionType(new_ty)) + return ip.get(gpa, .{ .extern_func = .{ + .ty = new_ty, + .decl = extern_func.decl, + .lib_name = extern_func.lib_name, + } }), + + .func => unreachable, + + .int => |int| switch (ip.indexToKey(new_ty)) { + .enum_type => |enum_type| return ip.get(gpa, .{ .enum_tag = .{ + .ty = new_ty, + .int = try ip.getCoerced(gpa, val, enum_type.tag_ty), + } }), + .ptr_type => return ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = .{ .int = try ip.getCoerced(gpa, val, .usize_type) }, + } }), + else => if (ip.isIntegerType(new_ty)) + return getCoercedInts(ip, gpa, int, new_ty), + }, + .float => |float| switch (ip.indexToKey(new_ty)) { + .simple_type => |simple| switch (simple) { + .f16, + .f32, + .f64, + .f80, + .f128, + .c_longdouble, + .comptime_float, + => return ip.get(gpa, .{ .float = .{ + .ty = new_ty, + .storage = float.storage, + } }), + else => {}, + }, + else => {}, + }, + .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty)) + return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty), + .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) { + .enum_type => |enum_type| { + const index = enum_type.nameIndex(ip, enum_literal).?; + return ip.get(gpa, .{ .enum_tag = .{ + .ty = new_ty, + .int = if (enum_type.values.len != 0) + enum_type.values[index] + else + try ip.get(gpa, .{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = index }, + } }), + } }); + }, + else => {}, + }, + .ptr => |ptr| if (ip.isPointerType(new_ty)) + 
return ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = ptr.addr, + .len = ptr.len, + } }) + else if (ip.isIntegerType(new_ty)) + switch (ptr.addr) { + .int => |int| return ip.getCoerced(gpa, int, new_ty), + else => {}, + }, + .opt => |opt| switch (ip.indexToKey(new_ty)) { + .ptr_type => |ptr_type| return switch (opt.val) { + .none => try ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = .{ .int = .zero_usize }, + .len = switch (ptr_type.flags.size) { + .One, .Many, .C => .none, + .Slice => try ip.get(gpa, .{ .undef = .usize_type }), + }, + } }), + else => |payload| try ip.getCoerced(gpa, payload, new_ty), + }, + .opt_type => |child_type| return try ip.get(gpa, .{ .opt = .{ + .ty = new_ty, + .val = switch (opt.val) { + .none => .none, + else => try ip.getCoerced(gpa, opt.val, child_type), + }, + } }), + else => {}, + }, + .err => |err| if (ip.isErrorSetType(new_ty)) + return ip.get(gpa, .{ .err = .{ + .ty = new_ty, + .name = err.name, + } }) + else if (ip.isErrorUnionType(new_ty)) + return ip.get(gpa, .{ .error_union = .{ + .ty = new_ty, + .val = .{ .err_name = err.name }, + } }), + .error_union => |error_union| if (ip.isErrorUnionType(new_ty)) + return ip.get(gpa, .{ .error_union = .{ + .ty = new_ty, + .val = error_union.val, + } }), + .aggregate => |aggregate| { + const new_len = @as(usize, @intCast(ip.aggregateTypeLen(new_ty))); + direct: { + const old_ty_child = switch (ip.indexToKey(old_ty)) { + inline .array_type, .vector_type => |seq_type| seq_type.child, + .anon_struct_type, .struct_type => break :direct, + else => unreachable, + }; + const new_ty_child = switch (ip.indexToKey(new_ty)) { + inline .array_type, .vector_type => |seq_type| seq_type.child, + .anon_struct_type, .struct_type => break :direct, + else => unreachable, + }; + if (old_ty_child != new_ty_child) break :direct; + // TODO: write something like getCoercedInts to avoid needing to dupe here + switch (aggregate.storage) { + .bytes => |bytes| { + const bytes_copy = try gpa.dupe(u8, bytes[0..new_len]); + defer gpa.free(bytes_copy); + return ip.get(gpa, .{ .aggregate = .{ + .ty = new_ty, + .storage = .{ .bytes = bytes_copy }, + } }); + }, + .elems => |elems| { + const elems_copy = try gpa.dupe(Index, elems[0..new_len]); + defer gpa.free(elems_copy); + return ip.get(gpa, .{ .aggregate = .{ + .ty = new_ty, + .storage = .{ .elems = elems_copy }, + } }); + }, + .repeated_elem => |elem| { + return ip.get(gpa, .{ .aggregate = .{ + .ty = new_ty, + .storage = .{ .repeated_elem = elem }, + } }); + }, + } + } + // Direct approach failed - we must recursively coerce elems + const agg_elems = try gpa.alloc(Index, new_len); + defer gpa.free(agg_elems); + // First, fill the vector with the uncoerced elements. We do this to avoid key + // lifetime issues, since it'll allow us to avoid referencing `aggregate` after we + // begin interning elems. + switch (aggregate.storage) { + .bytes => { + // We have to intern each value here, so unfortunately we can't easily avoid + // the repeated indexToKey calls. + for (agg_elems, 0..) |*elem, i| { + const x = ip.indexToKey(val).aggregate.storage.bytes[i]; + elem.* = try ip.get(gpa, .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = x }, + } }); + } + }, + .elems => |elems| @memcpy(agg_elems, elems[0..new_len]), + .repeated_elem => |elem| @memset(agg_elems, elem), + } + // Now, coerce each element to its new type. + for (agg_elems, 0..) 
|*elem, i| { + const new_elem_ty = switch (ip.indexToKey(new_ty)) { + inline .array_type, .vector_type => |seq_type| seq_type.child, + .anon_struct_type => |anon_struct_type| anon_struct_type.types[i], + .struct_type => |struct_type| ip.structPtr(struct_type.index.unwrap().?) + .fields.values()[i].ty.toIntern(), + else => unreachable, + }; + elem.* = try ip.getCoerced(gpa, elem.*, new_elem_ty); + } + return ip.get(gpa, .{ .aggregate = .{ .ty = new_ty, .storage = .{ .elems = agg_elems } } }); + }, + else => {}, + } + switch (ip.indexToKey(new_ty)) { .opt_type => |child_type| switch (val) { .null_value => return ip.get(gpa, .{ .opt = .{ @@ -4830,6 +5758,54 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al unreachable; } +fn getCoercedFuncDecl(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { + const datas = ip.items.items(.data); + const extra_index = datas[@intFromEnum(val)]; + const prev_ty: Index = @enumFromInt( + ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncDecl, "ty").?], + ); + if (new_ty == prev_ty) return val; + return getCoercedFunc(ip, gpa, val, new_ty); +} + +fn getCoercedFuncInstance(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { + const datas = ip.items.items(.data); + const extra_index = datas[@intFromEnum(val)]; + const prev_ty: Index = @enumFromInt( + ip.extra.items[extra_index + std.meta.fieldIndex(Tag.FuncInstance, "ty").?], + ); + if (new_ty == prev_ty) return val; + return getCoercedFunc(ip, gpa, val, new_ty); +} + +fn getCoercedFunc(ip: *InternPool, gpa: Allocator, func: Index, ty: Index) Allocator.Error!Index { + const prev_extra_len = ip.extra.items.len; + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncCoerced).Struct.fields.len); + try ip.items.ensureUnusedCapacity(gpa, 1); + try ip.map.ensureUnusedCapacity(gpa, 1); + + const extra_index = ip.addExtraAssumeCapacity(Tag.FuncCoerced{ + .ty = ty, + .func = func, + }); + + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{ + .func = extraFuncCoerced(ip, extra_index), + }, adapter); + + if (gop.found_existing) { + ip.extra.items.len = prev_extra_len; + return @enumFromInt(gop.index); + } + + ip.items.appendAssumeCapacity(.{ + .tag = .func_coerced, + .data = extra_index, + }); + return @enumFromInt(ip.items.len - 1); +} + /// Asserts `val` has an integer type. /// Assumes `new_ty` is an integer type. 
pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Index) Allocator.Error!Index {
@@ -4881,27 +5857,11 @@ pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType {
     const tags = ip.items.items(.tag);
     const datas = ip.items.items(.data);
     switch (tags[@intFromEnum(val)]) {
-        .type_function => return indexToKeyFuncType(ip, datas[@intFromEnum(val)]),
+        .type_function => return extraFuncType(ip, datas[@intFromEnum(val)]),
         else => return null,
     }
 }
 
-pub fn indexToFunc(ip: *const InternPool, val: Index) Module.Fn.OptionalIndex {
-    assert(val != .none);
-    const tags = ip.items.items(.tag);
-    if (tags[@intFromEnum(val)] != .func) return .none;
-    const datas = ip.items.items(.data);
-    return ip.extraData(Tag.Func, datas[@intFromEnum(val)]).index.toOptional();
-}
-
-pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex {
-    assert(val != .none);
-    const tags = ip.items.items(.tag);
-    if (tags[@intFromEnum(val)] != .type_inferred_error_set) return .none;
-    const datas = ip.items.items(.data);
-    return @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(datas[@intFromEnum(val)])).toOptional();
-}
-
 /// includes .comptime_int_type
 pub fn isIntegerType(ip: *const InternPool, ty: Index) bool {
     return switch (ty) {
@@ -4952,14 +5912,17 @@ pub fn isOptionalType(ip: *const InternPool, ty: Index) bool {
 
 /// includes .inferred_error_set_type
 pub fn isErrorSetType(ip: *const InternPool, ty: Index) bool {
-    return ty == .anyerror_type or switch (ip.indexToKey(ty)) {
-        .error_set_type, .inferred_error_set_type => true,
-        else => false,
+    return switch (ty) {
+        .anyerror_type, .adhoc_inferred_error_set_type => true,
+        else => switch (ip.indexToKey(ty)) {
+            .error_set_type, .inferred_error_set_type => true,
+            else => false,
+        },
     };
 }
 
 pub fn isInferredErrorSetType(ip: *const InternPool, ty: Index) bool {
-    return ip.indexToKey(ty) == .inferred_error_set_type;
+    return ty == .adhoc_inferred_error_set_type or ip.indexToKey(ty) == .inferred_error_set_type;
 }
 
 pub fn isErrorUnionType(ip: *const InternPool, ty: Index) bool {
@@ -4973,6 +5936,14 @@ pub fn isAggregateType(ip: *const InternPool, ty: Index) bool {
     };
 }
 
+pub fn errorUnionSet(ip: *const InternPool, ty: Index) Index {
+    return ip.indexToKey(ty).error_union_type.error_set_type;
+}
+
+pub fn errorUnionPayload(ip: *const InternPool, ty: Index) Index {
+    return ip.indexToKey(ty).error_union_type.payload_type;
+}
+
 /// This is only legal because the initializer is not part of the hash.
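 /// Changing the initializer therefore cannot change the item's identity or
 /// its position in `map`.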
pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { const item = ip.items.get(@intFromEnum(index)); @@ -4994,12 +5965,10 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { (@sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); const unions_size = ip.allocated_unions.len * (@sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); - const funcs_size = ip.allocated_funcs.len * - (@sizeOf(Module.Fn) + @sizeOf(Module.Decl)); // TODO: map overhead size is not taken into account const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + - structs_size + unions_size + funcs_size; + structs_size + unions_size; std.debug.print( \\InternPool size: {d} bytes @@ -5008,7 +5977,6 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { \\ {d} limbs: {d} bytes \\ {d} structs: {d} bytes \\ {d} unions: {d} bytes - \\ {d} funcs: {d} bytes \\ , .{ total_size, @@ -5022,8 +5990,6 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { structs_size, ip.allocated_unions.len, unions_size, - ip.allocated_funcs.len, - funcs_size, }); const tags = ip.items.items(.tag); @@ -5048,11 +6014,12 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .type_optional => 0, .type_anyframe => 0, .type_error_union => @sizeOf(Key.ErrorUnionType), + .type_anyerror_union => 0, .type_error_set => b: { - const info = ip.extraData(ErrorSet, data); - break :b @sizeOf(ErrorSet) + (@sizeOf(u32) * info.names_len); + const info = ip.extraData(Tag.ErrorSet, data); + break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len); }, - .type_inferred_error_set => @sizeOf(Module.Fn.InferredErrorSet), + .type_inferred_error_set => 0, .type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit), .type_enum_auto => @sizeOf(EnumAuto), .type_opaque => @sizeOf(Key.OpaqueType), @@ -5080,8 +6047,11 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { => @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), .type_function => b: { - const info = ip.extraData(TypeFunction, data); - break :b @sizeOf(TypeFunction) + (@sizeOf(Index) * info.params_len); + const info = ip.extraData(Tag.TypeFunction, data); + break :b @sizeOf(Tag.TypeFunction) + + (@sizeOf(Index) * info.params_len) + + (@as(u32, 4) * @intFromBool(info.flags.has_comptime_bits)) + + (@as(u32, 4) * @intFromBool(info.flags.has_noalias_bits)); }, .undef => 0, @@ -5130,7 +6100,7 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { }, .aggregate => b: { const info = ip.extraData(Tag.Aggregate, data); - const fields_len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty))); + const fields_len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)); break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len); }, .repeated => @sizeOf(Repeated), @@ -5145,7 +6115,15 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { .float_comptime_float => @sizeOf(Float128), .variable => @sizeOf(Tag.Variable) + @sizeOf(Module.Decl), .extern_func => @sizeOf(Tag.ExternFunc) + @sizeOf(Module.Decl), - .func => @sizeOf(Tag.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl), + .func_decl => @sizeOf(Tag.FuncDecl) + @sizeOf(Module.Decl), + .func_instance => b: { + const info = ip.extraData(Tag.FuncInstance, data); + const ty = ip.typeOf(info.generic_owner); + const params_len = 
ip.indexToKey(ty).func_type.param_types.len; + break :b @sizeOf(Tag.FuncInstance) + @sizeOf(Index) * params_len + + @sizeOf(Module.Decl); + }, + .func_coerced => @sizeOf(Tag.FuncCoerced), .only_possible_value => 0, .union_value => @sizeOf(Key.Union), @@ -5193,6 +6171,7 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void { .type_optional, .type_anyframe, .type_error_union, + .type_anyerror_union, .type_error_set, .type_inferred_error_set, .type_enum_explicit, @@ -5249,7 +6228,9 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void { .float_comptime_float, .variable, .extern_func, - .func, + .func_decl, + .func_instance, + .func_coerced, .union_value, .memoized_call, => try w.print("{d}", .{data}), @@ -5284,20 +6265,12 @@ pub fn unionPtrConst(ip: *const InternPool, index: Module.Union.Index) *const Mo return ip.allocated_unions.at(@intFromEnum(index)); } -pub fn funcPtr(ip: *InternPool, index: Module.Fn.Index) *Module.Fn { - return ip.allocated_funcs.at(@intFromEnum(index)); +pub fn declPtr(ip: *InternPool, index: Module.Decl.Index) *Module.Decl { + return ip.allocated_decls.at(@intFromEnum(index)); } -pub fn funcPtrConst(ip: *const InternPool, index: Module.Fn.Index) *const Module.Fn { - return ip.allocated_funcs.at(@intFromEnum(index)); -} - -pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.Index) *Module.Fn.InferredErrorSet { - return ip.allocated_inferred_error_sets.at(@intFromEnum(index)); -} - -pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet { - return ip.allocated_inferred_error_sets.at(@intFromEnum(index)); +pub fn namespacePtr(ip: *InternPool, index: Module.Namespace.Index) *Module.Namespace { + return ip.allocated_namespaces.at(@intFromEnum(index)); } pub fn createStruct( @@ -5344,47 +6317,47 @@ pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) }; } -pub fn createFunc( +pub fn createDecl( ip: *InternPool, gpa: Allocator, - initialization: Module.Fn, -) Allocator.Error!Module.Fn.Index { - if (ip.funcs_free_list.popOrNull()) |index| { - ip.allocated_funcs.at(@intFromEnum(index)).* = initialization; + initialization: Module.Decl, +) Allocator.Error!Module.Decl.Index { + if (ip.decls_free_list.popOrNull()) |index| { + ip.allocated_decls.at(@intFromEnum(index)).* = initialization; return index; } - const ptr = try ip.allocated_funcs.addOne(gpa); + const ptr = try ip.allocated_decls.addOne(gpa); ptr.* = initialization; - return @as(Module.Fn.Index, @enumFromInt(ip.allocated_funcs.len - 1)); + return @as(Module.Decl.Index, @enumFromInt(ip.allocated_decls.len - 1)); } -pub fn destroyFunc(ip: *InternPool, gpa: Allocator, index: Module.Fn.Index) void { - ip.funcPtr(index).* = undefined; - ip.funcs_free_list.append(gpa, index) catch { - // In order to keep `destroyFunc` a non-fallible function, we ignore memory - // allocation failures here, instead leaking the Fn until garbage collection. +pub fn destroyDecl(ip: *InternPool, gpa: Allocator, index: Module.Decl.Index) void { + ip.declPtr(index).* = undefined; + ip.decls_free_list.append(gpa, index) catch { + // In order to keep `destroyDecl` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Decl until garbage collection. 
}; } -pub fn createInferredErrorSet( +pub fn createNamespace( ip: *InternPool, gpa: Allocator, - initialization: Module.Fn.InferredErrorSet, -) Allocator.Error!Module.Fn.InferredErrorSet.Index { - if (ip.inferred_error_sets_free_list.popOrNull()) |index| { - ip.allocated_inferred_error_sets.at(@intFromEnum(index)).* = initialization; + initialization: Module.Namespace, +) Allocator.Error!Module.Namespace.Index { + if (ip.namespaces_free_list.popOrNull()) |index| { + ip.allocated_namespaces.at(@intFromEnum(index)).* = initialization; return index; } - const ptr = try ip.allocated_inferred_error_sets.addOne(gpa); + const ptr = try ip.allocated_namespaces.addOne(gpa); ptr.* = initialization; - return @as(Module.Fn.InferredErrorSet.Index, @enumFromInt(ip.allocated_inferred_error_sets.len - 1)); + return @as(Module.Namespace.Index, @enumFromInt(ip.allocated_namespaces.len - 1)); } -pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.Fn.InferredErrorSet.Index) void { - ip.inferredErrorSetPtr(index).* = undefined; - ip.inferred_error_sets_free_list.append(gpa, index) catch { - // In order to keep `destroyInferredErrorSet` a non-fallible function, we ignore memory - // allocation failures here, instead leaking the InferredErrorSet until garbage collection. +pub fn destroyNamespace(ip: *InternPool, gpa: Allocator, index: Module.Namespace.Index) void { + ip.namespacePtr(index).* = undefined; + ip.namespaces_free_list.append(gpa, index) catch { + // In order to keep `destroyNamespace` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Namespace until garbage collection. }; } @@ -5547,6 +6520,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .slice_const_u8_sentinel_0_type, .optional_noreturn_type, .anyerror_void_error_union_type, + .adhoc_inferred_error_set_type, .generic_poison_type, .empty_struct_type, => .type_type, @@ -5576,6 +6550,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .type_optional, .type_anyframe, .type_error_union, + .type_anyerror_union, .type_error_set, .type_inferred_error_set, .type_enum_auto, @@ -5596,7 +6571,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .undef, .opt_null, .only_possible_value, - => @as(Index, @enumFromInt(ip.items.items(.data)[@intFromEnum(index)])), + => @enumFromInt(ip.items.items(.data)[@intFromEnum(index)]), .simple_value => unreachable, // handled via Index above @@ -5620,7 +6595,9 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .enum_tag, .variable, .extern_func, - .func, + .func_decl, + .func_instance, + .func_coerced, .union_value, .bytes, .aggregate, @@ -5628,7 +6605,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { => |t| { const extra_index = ip.items.items(.data)[@intFromEnum(index)]; const field_index = std.meta.fieldIndex(t.Payload(), "ty").?; - return @as(Index, @enumFromInt(ip.extra.items[extra_index + field_index])); + return @enumFromInt(ip.extra.items[extra_index + field_index]); }, .int_u8 => .u8_type, @@ -5693,7 +6670,7 @@ pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 { }; } -pub fn funcReturnType(ip: *const InternPool, ty: Index) Index { +pub fn funcTypeReturnType(ip: *const InternPool, ty: Index) Index { const item = ip.items.get(@intFromEnum(ty)); const child_item = switch (item.tag) { .type_pointer => ip.items.get(ip.extra.items[ @@ -5704,7 +6681,7 @@ pub fn funcReturnType(ip: *const InternPool, ty: Index) Index { }; assert(child_item.tag == .type_function); 
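     // The return type is read straight out of the TypeFunction trailing
     // data, without materializing a full Key.FuncType.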
return @as(Index, @enumFromInt(ip.extra.items[ - child_item.data + std.meta.fieldIndex(TypeFunction, "return_type").? + child_item.data + std.meta.fieldIndex(Tag.TypeFunction, "return_type").? ])); } @@ -5712,7 +6689,7 @@ pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { return switch (ty) { .noreturn_type => true, else => switch (ip.items.items(.tag)[@intFromEnum(ty)]) { - .type_error_set => ip.extra.items[ip.items.items(.data)[@intFromEnum(ty)] + std.meta.fieldIndex(ErrorSet, "names_len").?] == 0, + .type_error_set => ip.extra.items[ip.items.items(.data)[@intFromEnum(ty)] + std.meta.fieldIndex(Tag.ErrorSet, "names_len").?] == 0, else => false, }, }; @@ -5821,7 +6798,7 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .bool_type => .Bool, .void_type => .Void, .type_type => .Type, - .anyerror_type => .ErrorSet, + .anyerror_type, .adhoc_inferred_error_set_type => .ErrorSet, .comptime_int_type => .ComptimeInt, .comptime_float_type => .ComptimeFloat, .noreturn_type => .NoReturn, @@ -5899,7 +6876,10 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .type_optional => .Optional, .type_anyframe => .AnyFrame, - .type_error_union => .ErrorUnion, + + .type_error_union, + .type_anyerror_union, + => .ErrorUnion, .type_error_set, .type_inferred_error_set, @@ -5969,7 +6949,9 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .float_comptime_float, .variable, .extern_func, - .func, + .func_decl, + .func_instance, + .func_coerced, .only_possible_value, .union_value, .bytes, @@ -5982,3 +6964,126 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .none => unreachable, // special tag }; } + +pub fn isFuncBody(ip: *const InternPool, i: Index) bool { + assert(i != .none); + return switch (ip.items.items(.tag)[@intFromEnum(i)]) { + .func_decl, .func_instance, .func_coerced => true, + else => false, + }; +} + +pub fn funcAnalysis(ip: *const InternPool, i: Index) *FuncAnalysis { + assert(i != .none); + const item = ip.items.get(@intFromEnum(i)); + const extra_index = switch (item.tag) { + .func_decl => item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, + .func_instance => item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, + .func_coerced => i: { + const extra_index = item.data + std.meta.fieldIndex(Tag.FuncCoerced, "func").?; + const func_index: Index = @enumFromInt(ip.extra.items[extra_index]); + const sub_item = ip.items.get(@intFromEnum(func_index)); + break :i switch (sub_item.tag) { + .func_decl => sub_item.data + std.meta.fieldIndex(Tag.FuncDecl, "analysis").?, + .func_instance => sub_item.data + std.meta.fieldIndex(Tag.FuncInstance, "analysis").?, + else => unreachable, + }; + }, + else => unreachable, + }; + return @ptrCast(&ip.extra.items[extra_index]); +} + +pub fn funcHasInferredErrorSet(ip: *const InternPool, i: Index) bool { + return funcAnalysis(ip, i).inferred_error_set; +} + +pub fn funcZirBodyInst(ip: *const InternPool, i: Index) Zir.Inst.Index { + assert(i != .none); + const item = ip.items.get(@intFromEnum(i)); + const zir_body_inst_field_index = std.meta.fieldIndex(Tag.FuncDecl, "zir_body_inst").?; + const extra_index = switch (item.tag) { + .func_decl => item.data + zir_body_inst_field_index, + .func_instance => b: { + const generic_owner_field_index = std.meta.fieldIndex(Tag.FuncInstance, "generic_owner").?; + const func_decl_index = ip.extra.items[item.data + generic_owner_field_index]; + 
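+            // A function instance carries no ZIR body of its own; chase the
+            // generic_owner (always a func_decl) and use its body instead.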
assert(ip.items.items(.tag)[func_decl_index] == .func_decl); + break :b ip.items.items(.data)[func_decl_index] + zir_body_inst_field_index; + }, + else => unreachable, + }; + return ip.extra.items[extra_index]; +} + +pub fn iesFuncIndex(ip: *const InternPool, ies_index: Index) Index { + assert(ies_index != .none); + const tags = ip.items.items(.tag); + assert(tags[@intFromEnum(ies_index)] == .type_inferred_error_set); + const func_index = ip.items.items(.data)[@intFromEnum(ies_index)]; + switch (tags[func_index]) { + .func_decl, .func_instance => {}, + else => unreachable, // assertion failed + } + return @enumFromInt(func_index); +} + +/// Returns a mutable pointer to the resolved error set type of an inferred +/// error set function. The returned pointer is invalidated when anything is +/// added to `ip`. +pub fn iesResolved(ip: *const InternPool, ies_index: Index) *Index { + assert(ies_index != .none); + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + assert(tags[@intFromEnum(ies_index)] == .type_inferred_error_set); + const func_index = datas[@intFromEnum(ies_index)]; + return funcIesResolved(ip, func_index); +} + +/// Returns a mutable pointer to the resolved error set type of an inferred +/// error set function. The returned pointer is invalidated when anything is +/// added to `ip`. +pub fn funcIesResolved(ip: *const InternPool, func_index: Index) *Index { + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + assert(funcHasInferredErrorSet(ip, func_index)); + const func_start = datas[@intFromEnum(func_index)]; + const extra_index = switch (tags[@intFromEnum(func_index)]) { + .func_decl => func_start + @typeInfo(Tag.FuncDecl).Struct.fields.len, + .func_instance => func_start + @typeInfo(Tag.FuncInstance).Struct.fields.len, + else => unreachable, + }; + return @ptrCast(&ip.extra.items[extra_index]); +} + +pub fn funcDeclInfo(ip: *const InternPool, i: Index) Key.Func { + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + assert(tags[@intFromEnum(i)] == .func_decl); + return extraFuncDecl(ip, datas[@intFromEnum(i)]); +} + +pub fn funcDeclOwner(ip: *const InternPool, i: Index) Module.Decl.Index { + return funcDeclInfo(ip, i).owner_decl; +} + +pub fn funcTypeParamsLen(ip: *const InternPool, i: Index) u32 { + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + assert(tags[@intFromEnum(i)] == .type_function); + const start = datas[@intFromEnum(i)]; + return ip.extra.items[start + std.meta.fieldIndex(Tag.TypeFunction, "params_len").?]; +} + +fn unwrapCoercedFunc(ip: *const InternPool, i: Index) Index { + const tags = ip.items.items(.tag); + return switch (tags[@intFromEnum(i)]) { + .func_coerced => { + const datas = ip.items.items(.data); + return @enumFromInt(ip.extra.items[ + datas[@intFromEnum(i)] + std.meta.fieldIndex(Tag.FuncCoerced, "func").? + ]); + }, + .func_instance, .func_decl => i, + else => unreachable, + }; +} diff --git a/src/Module.zig b/src/Module.zig index 96be13e768..ea444d3cc4 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -87,7 +87,9 @@ import_table: std.StringArrayHashMapUnmanaged(*File) = .{}, /// Keys are fully resolved file paths. This table owns the keys and values. embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{}, -/// Stores all Type and Value objects; periodically garbage collected. +/// Stores all Type and Value objects. +/// The idea is that this will be periodically garbage-collected, but such logic +/// is not yet implemented. 
intern_pool: InternPool = .{}, /// To be eliminated in a future commit by moving more data into InternPool. @@ -101,16 +103,6 @@ tmp_hack_arena: std.heap.ArenaAllocator, /// This is currently only used for string literals. memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{}, -monomorphed_func_keys: std.ArrayListUnmanaged(InternPool.Index) = .{}, -/// The set of all the generic function instantiations. This is used so that when a generic -/// function is called twice with the same comptime parameter arguments, both calls dispatch -/// to the same function. -monomorphed_funcs: MonomorphedFuncsSet = .{}, -/// Contains the values from `@setAlignStack`. A sparse table is used here -/// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while -/// functions are many. -align_stack_fns: std.AutoHashMapUnmanaged(Fn.Index, SetAlignStack) = .{}, - /// We optimize memory usage for a compilation with no compile errors by storing the /// error messages and mapping outside of `Decl`. /// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator. @@ -162,25 +154,6 @@ emit_h: ?*GlobalEmitH, test_functions: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, -/// Rather than allocating Decl objects with an Allocator, we instead allocate -/// them with this SegmentedList. This provides four advantages: -/// * Stable memory so that one thread can access a Decl object while another -/// thread allocates additional Decl objects from this list. -/// * It allows us to use u32 indexes to reference Decl objects rather than -/// pointers, saving memory in Type, Value, and dependency sets. -/// * Using integers to reference Decl objects rather than pointers makes -/// serialization trivial. -/// * It provides a unique integer to be used for anonymous symbol names, avoiding -/// multi-threaded contention on an atomic counter. -allocated_decls: std.SegmentedList(Decl, 0) = .{}, -/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack. -decls_free_list: ArrayListUnmanaged(Decl.Index) = .{}, - -/// Same pattern as with `allocated_decls`. -allocated_namespaces: std.SegmentedList(Namespace, 0) = .{}, -/// Same pattern as with `decls_free_list`. -namespaces_free_list: ArrayListUnmanaged(Namespace.Index) = .{}, - global_assembly: std.AutoHashMapUnmanaged(Decl.Index, []u8) = .{}, reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct { @@ -189,7 +162,8 @@ reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct { }) = .{}, panic_messages: [PanicId.len]Decl.OptionalIndex = .{.none} ** PanicId.len, -panic_func_index: Fn.OptionalIndex = .none, +/// The panic function body. 
+panic_func_index: InternPool.Index = .none, null_stack_trace: InternPool.Index = .none, pub const PanicId = enum { @@ -239,50 +213,6 @@ pub const CImportError = struct { } }; -pub const MonomorphedFuncKey = struct { func: Fn.Index, args_index: u32, args_len: u32 }; - -pub const MonomorphedFuncAdaptedKey = struct { func: Fn.Index, args: []const InternPool.Index }; - -pub const MonomorphedFuncsSet = std.HashMapUnmanaged( - MonomorphedFuncKey, - InternPool.Index, - MonomorphedFuncsContext, - std.hash_map.default_max_load_percentage, -); - -pub const MonomorphedFuncsContext = struct { - mod: *Module, - - pub fn eql(_: @This(), a: MonomorphedFuncKey, b: MonomorphedFuncKey) bool { - return std.meta.eql(a, b); - } - - pub fn hash(ctx: @This(), key: MonomorphedFuncKey) u64 { - const key_args = ctx.mod.monomorphed_func_keys.items[key.args_index..][0..key.args_len]; - return std.hash.Wyhash.hash(@intFromEnum(key.func), std.mem.sliceAsBytes(key_args)); - } -}; - -pub const MonomorphedFuncsAdaptedContext = struct { - mod: *Module, - - pub fn eql(ctx: @This(), adapted_key: MonomorphedFuncAdaptedKey, other_key: MonomorphedFuncKey) bool { - const other_key_args = ctx.mod.monomorphed_func_keys.items[other_key.args_index..][0..other_key.args_len]; - return adapted_key.func == other_key.func and std.mem.eql(InternPool.Index, adapted_key.args, other_key_args); - } - - pub fn hash(_: @This(), adapted_key: MonomorphedFuncAdaptedKey) u64 { - return std.hash.Wyhash.hash(@intFromEnum(adapted_key.func), std.mem.sliceAsBytes(adapted_key.args)); - } -}; - -pub const SetAlignStack = struct { - alignment: Alignment, - /// TODO: This needs to store a non-lazy source location for the case of an inline function - /// which does `@setAlignStack` (applying it to the caller). - src: LazySrcLoc, -}; - /// A `Module` has zero or one of these depending on whether `-femit-h` is enabled. pub const GlobalEmitH = struct { /// Where to put the output. @@ -366,6 +296,9 @@ pub const CaptureScope = struct { } pub fn incRef(self: *CaptureScope) void { + // TODO: wtf is reference counting doing in my beautiful codebase? 😠 + // seriously though, let's change this to rely on InternPool garbage + // collection instead. self.refs += 1; } @@ -625,13 +558,6 @@ pub const Decl = struct { function_body, }; - pub fn clearValues(decl: *Decl, mod: *Module) void { - if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { - _ = mod.align_stack_fns.remove(func); - mod.destroyFunc(func); - } - } - /// This name is relative to the containing namespace of the decl. /// The memory is owned by the containing File ZIR. pub fn getName(decl: Decl, mod: *Module) ?[:0]const u8 { @@ -816,14 +742,18 @@ pub const Decl = struct { return mod.typeToUnion(decl.val.toType()); } - /// If the Decl owns its value and it is a function, return it, - /// otherwise null. - pub fn getOwnedFunction(decl: Decl, mod: *Module) ?*Fn { - return mod.funcPtrUnwrap(decl.getOwnedFunctionIndex(mod)); + pub fn getOwnedFunction(decl: Decl, mod: *Module) ?InternPool.Key.Func { + const i = decl.getOwnedFunctionIndex(); + if (i == .none) return null; + return switch (mod.intern_pool.indexToKey(i)) { + .func => |func| func, + else => null, + }; } - pub fn getOwnedFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex { - return if (decl.owns_tv) decl.val.getFunctionIndex(mod) else .none; + /// This returns an InternPool.Index even when the value is not a function. 
+ pub fn getOwnedFunctionIndex(decl: Decl) InternPool.Index { + return if (decl.owns_tv) decl.val.toIntern() else .none; } /// If the Decl owns its value and it is an extern function, returns it, @@ -1368,252 +1298,6 @@ pub const Union = struct { } }; -/// Some extern function struct memory is owned by the Decl's TypedValue.Managed -/// arena allocator. -pub const ExternFn = struct { - /// The Decl that corresponds to the function itself. - owner_decl: Decl.Index, - /// Library name if specified. - /// For example `extern "c" fn write(...) usize` would have 'c' as library name. - /// Allocated with Module's allocator; outlives the ZIR code. - lib_name: ?[*:0]const u8, - - pub fn deinit(extern_fn: *ExternFn, gpa: Allocator) void { - if (extern_fn.lib_name) |lib_name| { - gpa.free(mem.sliceTo(lib_name, 0)); - } - } -}; - -/// Some Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator. -/// Extern functions do not have this data structure; they are represented by `ExternFn` -/// instead. -pub const Fn = struct { - /// The Decl that corresponds to the function itself. - owner_decl: Decl.Index, - /// The ZIR instruction that is a function instruction. Use this to find - /// the body. We store this rather than the body directly so that when ZIR - /// is regenerated on update(), we can map this to the new corresponding - /// ZIR instruction. - zir_body_inst: Zir.Inst.Index, - /// If this is not null, this function is a generic function instantiation, and - /// there is a `TypedValue` here for each parameter of the function. - /// Non-comptime parameters are marked with a `generic_poison` for the value. - /// Non-anytype parameters are marked with a `generic_poison` for the type. - /// These never have .generic_poison for the Type - /// because the Type is needed to pass to `Type.eql` and for inserting comptime arguments - /// into the inst_map when analyzing the body of a generic function instantiation. - /// Instead, the is_anytype knowledge is communicated via `isAnytypeParam`. - comptime_args: ?[*]TypedValue, - - /// Precomputed hash for monomorphed_funcs. - /// This is important because it may be accessed when resizing monomorphed_funcs - /// while this Fn has already been added to the set, but does not have the - /// owner_decl, comptime_args, or other fields populated yet. - /// This field is undefined if comptime_args == null. - hash: u64, - - /// Relative to owner Decl. - lbrace_line: u32, - /// Relative to owner Decl. - rbrace_line: u32, - lbrace_column: u16, - rbrace_column: u16, - - /// When a generic function is instantiated, this value is inherited from the - /// active Sema context. Importantly, this value is also updated when an existing - /// generic function instantiation is found and called. - branch_quota: u32, - - /// If this is not none, this function is a generic function instantiation, and - /// this is the generic function decl from which the instance was derived. - /// This information is redundant with a combination of checking if comptime_args is - /// not null and looking at the first decl dependency of owner_decl. This redundant - /// information is useful for three reasons: - /// 1. Improved perf of monomorphed_funcs when checking the eql() function because it - /// can do two fewer pointer chases by grabbing the info from this field directly - /// instead of accessing the decl and then the dependencies set. - /// 2. 
While a generic function instantiation is being initialized, we need hash() - /// and eql() to work before the initialization is complete. Completing the - /// insertion into the decl dependency set has more fallible operations than simply - /// setting this field. - /// 3. I forgot what the third thing was while typing up the other two. - generic_owner_decl: Decl.OptionalIndex, - - state: Analysis, - is_cold: bool = false, - is_noinline: bool, - calls_or_awaits_errorable_fn: bool = false, - - pub const Index = enum(u32) { - _, - - pub fn toOptional(i: Index) OptionalIndex { - return @as(OptionalIndex, @enumFromInt(@intFromEnum(i))); - } - }; - - pub const OptionalIndex = enum(u32) { - none = std.math.maxInt(u32), - _, - - pub fn init(oi: ?Index) OptionalIndex { - return @as(OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); - } - - pub fn unwrap(oi: OptionalIndex) ?Index { - if (oi == .none) return null; - return @as(Index, @enumFromInt(@intFromEnum(oi))); - } - }; - - pub const Analysis = enum { - /// This function has not yet undergone analysis, because we have not - /// seen a potential runtime call. It may be analyzed in future. - none, - /// Analysis for this function has been queued, but not yet completed. - queued, - /// This function intentionally only has ZIR generated because it is marked - /// inline, which means no runtime version of the function will be generated. - inline_only, - in_progress, - /// There will be a corresponding ErrorMsg in Module.failed_decls - sema_failure, - /// This Fn might be OK but it depends on another Decl which did not - /// successfully complete semantic analysis. - dependency_failure, - success, - }; - - /// This struct is used to keep track of any dependencies related to functions instances - /// that return inferred error sets. Note that a function may be associated to - /// multiple different error sets, for example an inferred error set which - /// this function returns, but also any inferred error sets of called inline - /// or comptime functions. - pub const InferredErrorSet = struct { - /// The function from which this error set originates. - func: Fn.Index, - - /// All currently known errors that this error set contains. This includes - /// direct additions via `return error.Foo;`, and possibly also errors that - /// are returned from any dependent functions. When the inferred error set is - /// fully resolved, this map contains all the errors that the function might return. - errors: NameMap = .{}, - - /// Other inferred error sets which this inferred error set should include. - inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{}, - - /// Whether the function returned anyerror. This is true if either of - /// the dependent functions returns anyerror. - is_anyerror: bool = false, - - /// Whether this error set is already fully resolved. If true, resolving - /// can skip resolving any dependents of this inferred error set. 
- is_resolved: bool = false, - - pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); - - pub const Index = enum(u32) { - _, - - pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex { - return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(i))); - } - }; - - pub const OptionalIndex = enum(u32) { - none = std.math.maxInt(u32), - _, - - pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex { - return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none))); - } - - pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index { - if (oi == .none) return null; - return @as(InferredErrorSet.Index, @enumFromInt(@intFromEnum(oi))); - } - }; - - pub fn addErrorSet( - self: *InferredErrorSet, - err_set_ty: Type, - ip: *InternPool, - gpa: Allocator, - ) !void { - switch (err_set_ty.toIntern()) { - .anyerror_type => { - self.is_anyerror = true; - }, - else => switch (ip.indexToKey(err_set_ty.toIntern())) { - .error_set_type => |error_set_type| { - for (error_set_type.names) |name| { - try self.errors.put(gpa, name, {}); - } - }, - .inferred_error_set_type => |ies_index| { - try self.inferred_error_sets.put(gpa, ies_index, {}); - }, - else => unreachable, - }, - } - } - }; - - pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool { - const file = mod.declPtr(func.owner_decl).getFileScope(mod); - - const tags = file.zir.instructions.items(.tag); - - const param_body = file.zir.getParamBody(func.zir_body_inst); - const param = param_body[index]; - - return switch (tags[param]) { - .param, .param_comptime => false, - .param_anytype, .param_anytype_comptime => true, - else => unreachable, - }; - } - - pub fn getParamName(func: Fn, mod: *Module, index: u32) [:0]const u8 { - const file = mod.declPtr(func.owner_decl).getFileScope(mod); - - const tags = file.zir.instructions.items(.tag); - const data = file.zir.instructions.items(.data); - - const param_body = file.zir.getParamBody(func.zir_body_inst); - const param = param_body[index]; - - return switch (tags[param]) { - .param, .param_comptime => blk: { - const extra = file.zir.extraData(Zir.Inst.Param, data[param].pl_tok.payload_index); - break :blk file.zir.nullTerminatedString(extra.data.name); - }, - .param_anytype, .param_anytype_comptime => blk: { - const param_data = data[param].str_tok; - break :blk param_data.get(file.zir); - }, - else => unreachable, - }; - } - - pub fn hasInferredErrorSet(func: Fn, mod: *Module) bool { - const owner_decl = mod.declPtr(func.owner_decl); - const zir = owner_decl.getFileScope(mod).zir; - const zir_tags = zir.instructions.items(.tag); - switch (zir_tags[func.zir_body_inst]) { - .func => return false, - .func_inferred => return true, - .func_fancy => { - const inst_data = zir.instructions.items(.data)[func.zir_body_inst].pl_node; - const extra = zir.extraData(Zir.Inst.FuncFancy, inst_data.payload_index); - return extra.data.bits.is_inferred_error; - }, - else => unreachable, - } - } -}; - pub const DeclAdapter = struct { mod: *Module, @@ -1638,12 +1322,10 @@ pub const Namespace = struct { /// Direct children of the namespace. Used during an update to detect /// which decls have been added/removed from source. /// Declaration order is preserved via entry order. - /// Key memory is owned by `decl.name`. - /// Anonymous decls are not stored here; they are kept in `anon_decls` instead. 
+ /// These are only declarations named directly by the AST; anonymous + /// declarations are not stored here. decls: std.ArrayHashMapUnmanaged(Decl.Index, void, DeclContext, true) = .{}, - anon_decls: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, - /// Key is usingnamespace Decl itself. To find the namespace being included, /// the Decl Value has to be resolved as a Type which has a Namespace. /// Value is whether the usingnamespace decl is marked `pub`. @@ -1698,18 +1380,11 @@ pub const Namespace = struct { var decls = ns.decls; ns.decls = .{}; - var anon_decls = ns.anon_decls; - ns.anon_decls = .{}; - for (decls.keys()) |decl_index| { mod.destroyDecl(decl_index); } decls.deinit(gpa); - for (anon_decls.keys()) |key| { - mod.destroyDecl(key); - } - anon_decls.deinit(gpa); ns.usingnamespace_set.deinit(gpa); } @@ -1723,9 +1398,6 @@ pub const Namespace = struct { var decls = ns.decls; ns.decls = .{}; - var anon_decls = ns.anon_decls; - ns.anon_decls = .{}; - // TODO rework this code to not panic on OOM. // (might want to coordinate with the clearDecl function) @@ -1735,12 +1407,6 @@ pub const Namespace = struct { } decls.deinit(gpa); - for (anon_decls.keys()) |child_decl| { - mod.clearDecl(child_decl, outdated_decls) catch @panic("out of memory"); - mod.destroyDecl(child_decl); - } - anon_decls.deinit(gpa); - ns.usingnamespace_set.deinit(gpa); } @@ -2155,8 +1821,8 @@ pub const SrcLoc = struct { return tree.firstToken(src_loc.parent_decl_node); } - pub fn declRelativeToNodeIndex(src_loc: SrcLoc, offset: i32) Ast.TokenIndex { - return @as(Ast.Node.Index, @bitCast(offset + @as(i32, @bitCast(src_loc.parent_decl_node)))); + pub fn declRelativeToNodeIndex(src_loc: SrcLoc, offset: i32) Ast.Node.Index { + return @bitCast(offset + @as(i32, @bitCast(src_loc.parent_decl_node))); } pub const Span = struct { @@ -2468,6 +2134,37 @@ pub const SrcLoc = struct { } } else unreachable; }, + .call_arg => |call_arg| { + const tree = try src_loc.file_scope.getTree(gpa); + const node = src_loc.declRelativeToNodeIndex(call_arg.call_node_offset); + var buf: [1]Ast.Node.Index = undefined; + const call_full = tree.fullCall(&buf, node).?; + const src_node = call_full.ast.params[call_arg.arg_index]; + return nodeToSpan(tree, src_node); + }, + .fn_proto_param => |fn_proto_param| { + const tree = try src_loc.file_scope.getTree(gpa); + const node = src_loc.declRelativeToNodeIndex(fn_proto_param.fn_proto_node_offset); + var buf: [1]Ast.Node.Index = undefined; + const full = tree.fullFnProto(&buf, node).?; + var it = full.iterate(tree); + var i: usize = 0; + while (it.next()) |param| : (i += 1) { + if (i == fn_proto_param.param_index) { + if (param.anytype_ellipsis3) |token| return tokenToSpan(tree, token); + const first_token = param.comptime_noalias orelse + param.name_token orelse + tree.firstToken(param.type_expr); + return tokensToSpan( + tree, + first_token, + tree.lastToken(param.type_expr), + first_token, + ); + } + } + unreachable; + }, .node_offset_bin_lhs => |node_off| { const tree = try src_loc.file_scope.getTree(gpa); const node = src_loc.declRelativeToNodeIndex(node_off); @@ -2820,6 +2517,10 @@ pub const SrcLoc = struct { ); } + fn tokenToSpan(tree: *const Ast, token: Ast.TokenIndex) Span { + return tokensToSpan(tree, token, token, token); + } + fn tokensToSpan(tree: *const Ast, start: Ast.TokenIndex, end: Ast.TokenIndex, main: Ast.TokenIndex) Span { const token_starts = tree.tokens.items(.start); var start_tok = start; @@ -3146,6 +2847,21 @@ pub const LazySrcLoc = union(enum) { /// Next, navigate to 
the corresponding capture. /// The Decl is determined contextually. for_capture_from_input: i32, + /// The source location points to the argument node of a function call. + call_arg: struct { + decl: Decl.Index, + /// Points to the function call AST node. + call_node_offset: i32, + /// The index of the argument the source location points to. + arg_index: u32, + }, + fn_proto_param: struct { + decl: Decl.Index, + /// Points to the function prototype AST node. + fn_proto_node_offset: i32, + /// The index of the parameter the source location points to. + param_index: u32, + }, pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease; @@ -3240,6 +2956,13 @@ pub const LazySrcLoc = union(enum) { .parent_decl_node = decl.src_node, .lazy = lazy, }, + inline .call_arg, + .fn_proto_param, + => |x| .{ + .file_scope = decl.getFileScope(mod), + .parent_decl_node = mod.declPtr(x.decl).src_node, + .lazy = lazy, + }, }; } }; @@ -3373,17 +3096,10 @@ pub fn deinit(mod: *Module) void { mod.global_error_set.deinit(gpa); mod.test_functions.deinit(gpa); - mod.align_stack_fns.deinit(gpa); - mod.monomorphed_funcs.deinit(gpa); - mod.decls_free_list.deinit(gpa); - mod.allocated_decls.deinit(gpa); mod.global_assembly.deinit(gpa); mod.reference_table.deinit(gpa); - mod.namespaces_free_list.deinit(gpa); - mod.allocated_namespaces.deinit(gpa); - mod.memoized_decls.deinit(gpa); mod.intern_pool.deinit(gpa); mod.tmp_hack_arena.deinit(); @@ -3391,6 +3107,8 @@ pub fn deinit(mod: *Module) void { pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { const gpa = mod.gpa; + const ip = &mod.intern_pool; + { const decl = mod.declPtr(decl_index); _ = mod.test_functions.swapRemove(decl_index); @@ -3407,15 +3125,12 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { } } if (decl.src_scope) |scope| scope.decRef(gpa); - decl.clearValues(mod); decl.dependants.deinit(gpa); decl.dependencies.deinit(gpa); - decl.* = undefined; } - mod.decls_free_list.append(gpa, decl_index) catch { - // In order to keep `destroyDecl` a non-fallible function, we ignore memory - // allocation failures here, instead leaking the Decl until garbage collection. 
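// Illustration, not part of the patch: the new `call_arg` and
// `fn_proto_param` locations above let argument diagnostics point at a single
// argument node and at the matching parameter in the prototype. If the second
// argument below were, say, a string literal, the resulting error could be
// located on that argument rather than on the whole call expression.
fn add(a: i32, b: i32) i32 {
    return a + b;
}

test "per-argument source locations" {
    try @import("std").testing.expectEqual(@as(i32, 3), add(1, 2));
}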
- }; + + ip.destroyDecl(gpa, decl_index); + if (mod.emit_h) |mod_emit_h| { const decl_emit_h = mod_emit_h.declPtr(decl_index); decl_emit_h.fwd_decl.deinit(gpa); @@ -3424,11 +3139,11 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { } pub fn declPtr(mod: *Module, index: Decl.Index) *Decl { - return mod.allocated_decls.at(@intFromEnum(index)); + return mod.intern_pool.declPtr(index); } pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace { - return mod.allocated_namespaces.at(@intFromEnum(index)); + return mod.intern_pool.namespacePtr(index); } pub fn unionPtr(mod: *Module, index: Union.Index) *Union { @@ -3439,14 +3154,6 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct { return mod.intern_pool.structPtr(index); } -pub fn funcPtr(mod: *Module, index: Fn.Index) *Fn { - return mod.intern_pool.funcPtr(index); -} - -pub fn inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.InferredErrorSet { - return mod.intern_pool.inferredErrorSetPtr(index); -} - pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace { return mod.namespacePtr(index.unwrap() orelse return null); } @@ -3457,10 +3164,6 @@ pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct { return mod.structPtr(index.unwrap() orelse return null); } -pub fn funcPtrUnwrap(mod: *Module, index: Fn.OptionalIndex) ?*Fn { - return mod.funcPtr(index.unwrap() orelse return null); -} - /// Returns true if and only if the Decl is the top level struct associated with a File. pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { const decl = mod.declPtr(decl_index); @@ -3881,6 +3584,8 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { // to re-generate ZIR for the File. try file.outdated_decls.append(gpa, root_decl); + const ip = &mod.intern_pool; + while (decl_stack.popOrNull()) |decl_index| { const decl = mod.declPtr(decl_index); // Anonymous decls and the root decl have this set to 0. We still need @@ -3918,7 +3623,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { } if (decl.getOwnedFunction(mod)) |func| { - func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse { + func.zirBodyInst(ip).* = inst_map.get(func.zir_body_inst) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; @@ -3928,9 +3633,6 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { for (namespace.decls.keys()) |sub_decl| { try decl_stack.append(gpa, sub_decl); } - for (namespace.anon_decls.keys()) |sub_decl| { - try decl_stack.append(gpa, sub_decl); - } } } } @@ -4101,11 +3803,6 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { // prior to re-analysis. try mod.deleteDeclExports(decl_index); - // Similarly, `@setAlignStack` invocations will be re-discovered. - if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { - _ = mod.align_stack_fns.remove(func); - } - // Dependencies will be re-discovered, so we remove them here prior to re-analysis. 
for (decl.dependencies.keys()) |dep_index| { const dep = mod.declPtr(dep_index); @@ -4189,11 +3886,12 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { } } -pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void { +pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: InternPool.Index) SemaError!void { const tracy = trace(@src()); defer tracy.end(); - const func = mod.funcPtr(func_index); + const ip = &mod.intern_pool; + const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -4211,7 +3909,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void => return error.AnalysisFail, .complete, .codegen_failure_retryable => { - switch (func.state) { + switch (func.analysis(ip).state) { .sema_failure, .dependency_failure => return error.AnalysisFail, .none, .queued => {}, .in_progress => unreachable, @@ -4227,11 +3925,11 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void var air = mod.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) { error.AnalysisFail => { - if (func.state == .in_progress) { + if (func.analysis(ip).state == .in_progress) { // If this decl caused the compile error, the analysis field would // be changed to indicate it was this Decl's fault. Because this // did not happen, we infer here that it was a dependency failure. - func.state = .dependency_failure; + func.analysis(ip).state = .dependency_failure; } return error.AnalysisFail; }, @@ -4251,14 +3949,14 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void if (no_bin_file and !dump_air and !dump_llvm_ir) return; - var liveness = try Liveness.analyze(gpa, air, &mod.intern_pool); + var liveness = try Liveness.analyze(gpa, air, ip); defer liveness.deinit(gpa); if (dump_air) { const fqn = try decl.getFullyQualifiedName(mod); - std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(&mod.intern_pool)}); + std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(ip)}); @import("print_air.zig").dump(mod, air, liveness); - std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(&mod.intern_pool)}); + std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(ip)}); } if (std.debug.runtime_safety) { @@ -4266,7 +3964,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void .gpa = gpa, .air = air, .liveness = liveness, - .intern_pool = &mod.intern_pool, + .intern_pool = ip, }; defer verify.deinit(); @@ -4321,8 +4019,9 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void /// analyzed, and for ensuring it can exist at runtime (see /// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body /// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`. -pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void { - const func = mod.funcPtr(func_index); +pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: InternPool.Index) !void { + const ip = &mod.intern_pool; + const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -4348,7 +4047,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void { assert(decl.has_tv); - switch (func.state) { + switch (func.analysis(ip).state) { .none => {}, .queued => return, // As above, we don't need to forward errors here. 
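// A minimal sketch with assumed names, not the actual InternPool API: the
// access pattern this hunk migrates to. Functions are identified by a pool
// index, and mutable analysis state is reached through an accessor (compare
// `func.analysis(ip).state` above) instead of through a long-lived `*Fn`.
const std = @import("std");

const FuncAnalysis = struct {
    state: enum { none, queued, in_progress, sema_failure, dependency_failure, success },
    calls_or_awaits_errorable_fn: bool,
};

const ExamplePool = struct {
    analyses: std.ArrayListUnmanaged(FuncAnalysis) = .{},

    // Stands in for `ip.funcAnalysis(func_index)` in the real code.
    fn funcAnalysis(pool: *ExamplePool, index: u32) *FuncAnalysis {
        return &pool.analyses.items[index];
    }
};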
@@ -4366,7 +4065,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void { // since the last update try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); } - func.state = .queued; + func.analysis(ip).state = .queued; } pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void { @@ -4490,10 +4189,9 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .code = file.zir, .owner_decl = new_decl, .owner_decl_index = new_decl_index, - .func = null, .func_index = .none, .fn_ret_ty = Type.void, - .owner_func = null, + .fn_ret_ty_ies = null, .owner_func_index = .none, .comptime_mutable_decls = &comptime_mutable_decls, }; @@ -4573,10 +4271,9 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, - .func = null, .func_index = .none, .fn_ret_ty = Type.void, - .owner_func = null, + .fn_ret_ty_ies = null, .owner_func_index = .none, .comptime_mutable_decls = &comptime_mutable_decls, }; @@ -4608,10 +4305,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { .inlining = null, .is_comptime = true, }; - defer { - block_scope.instructions.deinit(gpa); - block_scope.params.deinit(gpa); - } + defer block_scope.instructions.deinit(gpa); const zir_block_index = decl.zirBlockIndex(mod); const inst_data = zir_datas[zir_block_index].pl_node; @@ -4658,48 +4352,49 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return true; } - if (mod.intern_pool.indexToFunc(decl_tv.val.toIntern()).unwrap()) |func_index| { - const func = mod.funcPtr(func_index); - const owns_tv = func.owner_decl == decl_index; - if (owns_tv) { - var prev_type_has_bits = false; - var prev_is_inline = false; - var type_changed = true; + const ip = &mod.intern_pool; + switch (ip.indexToKey(decl_tv.val.toIntern())) { + .func => |func| { + const owns_tv = func.owner_decl == decl_index; + if (owns_tv) { + var prev_type_has_bits = false; + var prev_is_inline = false; + var type_changed = true; - if (decl.has_tv) { - prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); - type_changed = !decl.ty.eql(decl_tv.ty, mod); - if (decl.getOwnedFunction(mod)) |prev_func| { - prev_is_inline = prev_func.state == .inline_only; + if (decl.has_tv) { + prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); + type_changed = !decl.ty.eql(decl_tv.ty, mod); + if (decl.getOwnedFunction(mod)) |prev_func| { + prev_is_inline = prev_func.analysis(ip).state == .inline_only; + } } - } - decl.clearValues(mod); - decl.ty = decl_tv.ty; - decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue(); - // linksection, align, and addrspace were already set by Sema - decl.has_tv = true; - decl.owns_tv = owns_tv; - decl.analysis = .complete; - decl.generation = mod.generation; + decl.ty = decl_tv.ty; + decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue(); + // linksection, align, and addrspace were already set by Sema + decl.has_tv = true; + decl.owns_tv = owns_tv; + decl.analysis = .complete; + decl.generation = mod.generation; - const is_inline = decl.ty.fnCallingConvention(mod) == .Inline; - if (decl.is_exported) { - const export_src: LazySrcLoc = .{ .token_offset = @intFromBool(decl.is_pub) }; - if (is_inline) { - return sema.fail(&block_scope, export_src, "export of inline function", .{}); + const is_inline = decl.ty.fnCallingConvention(mod) == .Inline; + if (decl.is_exported) { + const export_src: LazySrcLoc = .{ .token_offset = @intFromBool(decl.is_pub) }; + if (is_inline) { + return sema.fail(&block_scope, 
export_src, "export of inline function", .{}); + } + // The scope needs to have the decl in it. + try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); } - // The scope needs to have the decl in it. - try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); + return type_changed or is_inline != prev_is_inline; } - return type_changed or is_inline != prev_is_inline; - } + }, + else => {}, } var type_changed = true; if (decl.has_tv) { type_changed = !decl.ty.eql(decl_tv.ty, mod); } - decl.clearValues(mod); decl.owns_tv = false; var queue_linker_work = false; @@ -4707,7 +4402,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { switch (decl_tv.val.toIntern()) { .generic_poison => unreachable, .unreachable_value => unreachable, - else => switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { + else => switch (ip.indexToKey(decl_tv.val.toIntern())) { .variable => |variable| if (variable.decl == decl_index) { decl.owns_tv = true; queue_linker_work = true; @@ -4743,11 +4438,11 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { } else if (bytes.len == 0) { return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{}); } - const section = try mod.intern_pool.getOrPutString(gpa, bytes); + const section = try ip.getOrPutString(gpa, bytes); break :blk section.toOptional(); }; decl.@"addrspace" = blk: { - const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { + const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_tv.val.toIntern())) { .variable => .variable, .extern_func, .func => .function, else => .constant, @@ -5309,7 +5004,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err decl.has_align = has_align; decl.has_linksection_or_addrspace = has_linksection_or_addrspace; decl.zir_decl_index = @as(u32, @intCast(decl_sub_index)); - if (decl.getOwnedFunctionIndex(mod) != .none) { + if (decl.getOwnedFunction(mod) != null) { switch (comp.bin_file.tag) { .coff, .elf, .macho, .plan9 => { // TODO Look into detecting when this would be unnecessary by storing enough state @@ -5386,7 +5081,6 @@ pub fn clearDecl( try namespace.deleteAllDecls(mod, outdated_decls); } } - decl.clearValues(mod); if (decl.deletion_flag) { decl.deletion_flag = false; @@ -5397,21 +5091,19 @@ pub fn clearDecl( } /// This function is exclusively called for anonymous decls. +/// All resources referenced by anonymous decls are owned by InternPool +/// so there is no cleanup to do here. 
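// Illustration, not part of the patch: user code hitting the "export of
// inline function" check in semaDecl above. An inline calling convention has
// no runtime machine code to export.
export fn ok() void {}

// Uncommenting this declaration reproduces the error path above:
// export fn bad() callconv(.Inline) void {}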
pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { - const decl = mod.declPtr(decl_index); + const gpa = mod.gpa; + const ip = &mod.intern_pool; - assert(!mod.declIsRoot(decl_index)); - assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); + ip.destroyDecl(gpa, decl_index); - const dependants = decl.dependants.keys(); - for (dependants) |dep| { - mod.declPtr(dep).removeDependency(decl_index); + if (mod.emit_h) |mod_emit_h| { + const decl_emit_h = mod_emit_h.declPtr(decl_index); + decl_emit_h.fwd_decl.deinit(gpa); + decl_emit_h.* = undefined; } - - for (decl.dependencies.keys()) |dep| { - mod.declPtr(dep).removeDependant(decl_index); - } - mod.destroyDecl(decl_index); } /// We don't perform a deletion here, because this Decl or another one @@ -5428,7 +5120,6 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { const decl = mod.declPtr(decl_index); assert(!mod.declIsRoot(decl_index)); - assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); // An aborted decl must not have dependants -- they must have // been aborted first and removed from this list. @@ -5497,19 +5188,26 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void export_owners.deinit(mod.gpa); } -pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaError!Air { +pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocator) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); const gpa = mod.gpa; - const func = mod.funcPtr(func_index); + const ip = &mod.intern_pool; + const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); defer comptime_mutable_decls.deinit(); + // In the case of a generic function instance, this is the type of the + // instance, which has comptime parameters elided. In other words, it is + // the runtime-known parameters only, not to be confused with the + // generic_owner function type, which potentially has more parameters, + // including comptime parameters. const fn_ty = decl.ty; + const fn_ty_info = mod.typeToFunc(fn_ty).?; var sema: Sema = .{ .mod = mod, @@ -5518,18 +5216,23 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE .code = decl.getFileScope(mod).zir, .owner_decl = decl, .owner_decl_index = decl_index, - .func = func, - .func_index = func_index.toOptional(), - .fn_ret_ty = mod.typeToFunc(fn_ty).?.return_type.toType(), - .owner_func = func, - .owner_func_index = func_index.toOptional(), - .branch_quota = @max(func.branch_quota, Sema.default_branch_quota), + .func_index = func_index, + .fn_ret_ty = fn_ty_info.return_type.toType(), + .fn_ret_ty_ies = null, + .owner_func_index = func_index, + .branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota), .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); + if (func.analysis(ip).inferred_error_set) { + const ies = try arena.create(Sema.InferredErrorSet); + ies.* = .{ .func = func_index }; + sema.fn_ret_ty_ies = ies; + } + // reset in case calls to errorable functions are removed. - func.calls_or_awaits_errorable_fn = false; + func.analysis(ip).calls_or_awaits_errorable_fn = false; // First few indexes of extra are reserved and set at the end. 
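// Illustration, not part of the patch: for the generic-instance handling in
// analyzeFnBody above, the instance `id(u32, 5)` elides the comptime
// parameter `T` from its runtime function type. Only `x` becomes an `arg`
// instruction; `T` is pre-populated from `comptime_args`.
fn id(comptime T: type, x: T) T {
    return x;
}

test "generic instantiation" {
    try @import("std").testing.expectEqual(@as(u32, 5), id(u32, 5));
}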
const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len; @@ -5551,8 +5254,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE }; defer inner_block.instructions.deinit(gpa); - const fn_info = sema.code.getFnInfo(func.zir_body_inst); - const zir_tags = sema.code.instructions.items(.tag); + const fn_info = sema.code.getFnInfo(func.zirBodyInst(ip).*); // Here we are performing "runtime semantic analysis" for a function body, which means // we must map the parameter ZIR instructions to `arg` AIR instructions. @@ -5560,35 +5262,36 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE // This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. - const runtime_params_len = @as(u32, @intCast(mod.typeToFunc(fn_ty).?.param_types.len)); + const runtime_params_len = fn_ty_info.param_types.len; try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); - try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` + try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len); try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); - var runtime_param_index: usize = 0; - var total_param_index: usize = 0; - for (fn_info.param_body) |inst| { - switch (zir_tags[inst]) { - .param, .param_comptime, .param_anytype, .param_anytype_comptime => {}, - else => continue, + // In the case of a generic function instance, pre-populate all the comptime args. + if (func.comptime_args.len != 0) { + for ( + fn_info.param_body[0..func.comptime_args.len], + func.comptime_args.get(ip), + ) |inst, comptime_arg| { + if (comptime_arg == .none) continue; + sema.inst_map.putAssumeCapacityNoClobber(inst, Air.internedToRef(comptime_arg)); } - const param_ty = if (func.comptime_args) |comptime_args| t: { - const arg_tv = comptime_args[total_param_index]; + } - const arg_val = if (!arg_tv.val.isGenericPoison()) - arg_tv.val - else if (try arg_tv.ty.onePossibleValue(mod)) |opv| - opv - else - break :t arg_tv.ty; + const src_params_len = if (func.comptime_args.len != 0) + func.comptime_args.len + else + runtime_params_len; - const arg = try sema.addConstant(arg_val); - sema.inst_map.putAssumeCapacityNoClobber(inst, arg); - total_param_index += 1; - continue; - } else mod.typeToFunc(fn_ty).?.param_types[runtime_param_index].toType(); + var runtime_param_index: usize = 0; + for (fn_info.param_body[0..src_params_len], 0..) 
|inst, src_param_index| { + const gop = sema.inst_map.getOrPutAssumeCapacity(inst); + if (gop.found_existing) continue; // provided above by comptime arg - const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) { + const param_ty = fn_ty_info.param_types.get(ip)[runtime_param_index]; + runtime_param_index += 1; + + const opt_opv = sema.typeHasOnePossibleValue(param_ty.toType()) catch |err| switch (err) { error.NeededSourceLocation => unreachable, error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, @@ -5596,28 +5299,22 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE else => |e| return e, }; if (opt_opv) |opv| { - const arg = try sema.addConstant(opv); - sema.inst_map.putAssumeCapacityNoClobber(inst, arg); - total_param_index += 1; - runtime_param_index += 1; + gop.value_ptr.* = Air.internedToRef(opv.toIntern()); continue; } - const air_ty = try sema.addType(param_ty); - const arg_index = @as(u32, @intCast(sema.air_instructions.len)); + const arg_index: u32 = @intCast(sema.air_instructions.len); + gop.value_ptr.* = Air.indexToRef(arg_index); inner_block.instructions.appendAssumeCapacity(arg_index); sema.air_instructions.appendAssumeCapacity(.{ .tag = .arg, .data = .{ .arg = .{ - .ty = air_ty, - .src_index = @as(u32, @intCast(total_param_index)), + .ty = Air.internedToRef(param_ty), + .src_index = @intCast(src_param_index), } }, }); - sema.inst_map.putAssumeCapacityNoClobber(inst, Air.indexToRef(arg_index)); - total_param_index += 1; - runtime_param_index += 1; } - func.state = .in_progress; + func.analysis(ip).state = .in_progress; const last_arg_index = inner_block.instructions.items.len; @@ -5648,7 +5345,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE } // If we don't get an error return trace from a caller, create our own. - if (func.calls_or_awaits_errorable_fn and + if (func.analysis(ip).calls_or_awaits_errorable_fn and mod.comp.bin_file.options.error_return_tracing and !sema.fn_ret_ty.isError(mod)) { @@ -5672,12 +5369,33 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + inner_block.instructions.items.len); const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ - .body_len = @as(u32, @intCast(inner_block.instructions.items.len)), + .body_len = @intCast(inner_block.instructions.items.len), }); sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items); sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index; - func.state = .success; + // Resolving inferred error sets is done *before* setting the function + // state to success, so that "unable to resolve inferred error set" errors + // can be emitted here. + if (sema.fn_ret_ty_ies) |ies| { + sema.resolveInferredErrorSetPtr(&inner_block, LazySrcLoc.nodeOffset(0), ies) catch |err| switch (err) { + error.NeededSourceLocation => unreachable, + error.GenericPoison => unreachable, + error.ComptimeReturn => unreachable, + error.ComptimeBreak => unreachable, + error.AnalysisFail => { + // In this case our function depends on a type that had a compile error. + // We should not try to lower this function. 
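// Illustration, not part of the patch: user code for the
// `calls_or_awaits_errorable_fn` condition above. `entry` calls an errorable
// function but does not itself return an error, so with error return tracing
// enabled its body gets its own trace, as set up above.
fn mayFail() !void {
    return error.Oops;
}

fn entry() void {
    mayFail() catch {}; // marks calls_or_awaits_errorable_fn
}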
+ decl.analysis = .dependency_failure; + return error.AnalysisFail; + }, + else => |e| return e, + }; + assert(ies.resolved != .none); + ip.funcIesResolved(func_index).* = ies.resolved; + } + + func.analysis(ip).state = .success; // Finally we must resolve the return type and parameter types so that backends // have full access to type information. @@ -5716,7 +5434,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaE }; } - return Air{ + return .{ .instructions = sema.air_instructions.toOwnedSlice(), .extra = try sema.air_extra.toOwnedSlice(gpa), }; @@ -5731,9 +5449,6 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { if (mod.cimport_errors.fetchSwapRemove(decl_index)) |kv| { for (kv.value) |err| err.deinit(mod.gpa); } - if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { - _ = mod.align_stack_fns.remove(func); - } if (mod.emit_h) |emit_h| { if (emit_h.failed_decls.fetchSwapRemove(decl_index)) |kv| { kv.value.destroy(mod.gpa); @@ -5744,21 +5459,11 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { } pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index { - if (mod.namespaces_free_list.popOrNull()) |index| { - mod.allocated_namespaces.at(@intFromEnum(index)).* = initialization; - return index; - } - const ptr = try mod.allocated_namespaces.addOne(mod.gpa); - ptr.* = initialization; - return @as(Namespace.Index, @enumFromInt(mod.allocated_namespaces.len - 1)); + return mod.intern_pool.createNamespace(mod.gpa, initialization); } pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void { - mod.namespacePtr(index).* = undefined; - mod.namespaces_free_list.append(mod.gpa, index) catch { - // In order to keep `destroyNamespace` a non-fallible function, we ignore memory - // allocation failures here, instead leaking the Namespace until garbage collection. 
- }; + return mod.intern_pool.destroyNamespace(mod.gpa, index); } pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index { @@ -5777,43 +5482,15 @@ pub fn destroyUnion(mod: *Module, index: Union.Index) void { return mod.intern_pool.destroyUnion(mod.gpa, index); } -pub fn createFunc(mod: *Module, initialization: Fn) Allocator.Error!Fn.Index { - return mod.intern_pool.createFunc(mod.gpa, initialization); -} - -pub fn destroyFunc(mod: *Module, index: Fn.Index) void { - return mod.intern_pool.destroyFunc(mod.gpa, index); -} - pub fn allocateNewDecl( mod: *Module, namespace: Namespace.Index, src_node: Ast.Node.Index, src_scope: ?*CaptureScope, ) !Decl.Index { - const decl_and_index: struct { - new_decl: *Decl, - decl_index: Decl.Index, - } = if (mod.decls_free_list.popOrNull()) |decl_index| d: { - break :d .{ - .new_decl = mod.declPtr(decl_index), - .decl_index = decl_index, - }; - } else d: { - const decl = try mod.allocated_decls.addOne(mod.gpa); - errdefer mod.allocated_decls.shrinkRetainingCapacity(mod.allocated_decls.len - 1); - if (mod.emit_h) |mod_emit_h| { - const decl_emit_h = try mod_emit_h.allocated_emit_h.addOne(mod.gpa); - decl_emit_h.* = .{}; - } - break :d .{ - .new_decl = decl, - .decl_index = @as(Decl.Index, @enumFromInt(mod.allocated_decls.len - 1)), - }; - }; - - if (src_scope) |scope| scope.incRef(); - decl_and_index.new_decl.* = .{ + const ip = &mod.intern_pool; + const gpa = mod.gpa; + const decl_index = try ip.createDecl(gpa, .{ .name = undefined, .src_namespace = namespace, .src_node = src_node, @@ -5836,9 +5513,18 @@ pub fn allocateNewDecl( .has_align = false, .alive = false, .kind = .anon, - }; + }); - return decl_and_index.decl_index; + if (mod.emit_h) |mod_emit_h| { + if (@intFromEnum(decl_index) >= mod_emit_h.allocated_emit_h.len) { + try mod_emit_h.allocated_emit_h.append(gpa, .{}); + assert(@intFromEnum(decl_index) == mod_emit_h.allocated_emit_h.len); + } + } + + if (src_scope) |scope| scope.incRef(); + + return decl_index; } pub fn getErrorValue( @@ -5874,7 +5560,7 @@ pub fn createAnonymousDeclFromDecl( const name = try mod.intern_pool.getOrPutStringFmt(mod.gpa, "{}__anon_{d}", .{ src_decl.name.fmt(&mod.intern_pool), @intFromEnum(new_decl_index), }); - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, tv, name); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, tv, name); return new_decl_index; } @@ -5882,7 +5568,6 @@ pub fn initNewAnonDecl( mod: *Module, new_decl_index: Decl.Index, src_line: u32, - namespace: Namespace.Index, typed_value: TypedValue, name: InternPool.NullTerminatedString, ) Allocator.Error!void { @@ -5899,8 +5584,6 @@ pub fn initNewAnonDecl( new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; - - try mod.namespacePtr(namespace).anon_decls.putNoClobber(mod.gpa, new_decl_index, {}); } pub fn errNoteNonLazy( @@ -6578,7 +6261,6 @@ pub fn populateTestFunctions( // Since we are replacing the Decl's value we must perform cleanup on the // previous value. 
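// The "{}__anon_{d}" format used by createAnonymousDeclFromDecl above,
// demonstrated with plain std.fmt ("{s}" substituted here because the real
// code formats an interned name): an anonymous decl created from `foo` with
// new index 42 is named "foo__anon_42".
const std = @import("std");

test "anonymous decl name format" {
    var buf: [64]u8 = undefined;
    const name = try std.fmt.bufPrint(&buf, "{s}__anon_{d}", .{ "foo", 42 });
    try std.testing.expectEqualStrings("foo__anon_42", name);
}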
-    decl.clearValues(mod);
     decl.ty = new_ty;
     decl.val = new_val;
     decl.has_tv = true;
@@ -6657,7 +6339,7 @@ pub fn markReferencedDeclsAlive(mod: *Module, val: Value) Allocator.Error!void {
     switch (mod.intern_pool.indexToKey(val.toIntern())) {
         .variable => |variable| try mod.markDeclIndexAlive(variable.decl),
         .extern_func => |extern_func| try mod.markDeclIndexAlive(extern_func.decl),
-        .func => |func| try mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl),
+        .func => |func| try mod.markDeclIndexAlive(func.owner_decl),
         .error_union => |error_union| switch (error_union.val) {
             .err_name => {},
             .payload => |payload| try mod.markReferencedDeclsAlive(payload.toValue()),
@@ -6851,8 +6533,8 @@ pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator
     return mod.ptrType(info);
 }

-pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type {
-    return (try intern(mod, .{ .func_type = info })).toType();
+pub fn funcType(mod: *Module, key: InternPool.GetFuncTypeKey) Allocator.Error!Type {
+    return (try mod.intern_pool.getFuncType(mod.gpa, key)).toType();
 }

 /// Use this for `anyframe->T` only.
@@ -6870,7 +6552,8 @@ pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Alloca
 pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type {
     const names: *const [1]InternPool.NullTerminatedString = &name;
-    return (try mod.intern_pool.get(mod.gpa, .{ .error_set_type = .{ .names = names } })).toType();
+    const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names);
+    return new_ty.toType();
 }

 /// Sorts `names` in place.
@@ -6884,7 +6567,7 @@ pub fn errorSetFromUnsortedNames(
         {},
         InternPool.NullTerminatedString.indexLessThan,
     );
-    const new_ty = try mod.intern(.{ .error_set_type = .{ .names = names } });
+    const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names);
     return new_ty.toType();
 }

@@ -7231,14 +6914,20 @@ pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
     return mod.intern_pool.indexToFuncType(ty.toIntern());
 }

-pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet {
-    const index = typeToInferredErrorSetIndex(mod, ty).unwrap() orelse return null;
-    return mod.inferredErrorSetPtr(index);
+pub fn funcOwnerDeclPtr(mod: *Module, func_index: InternPool.Index) *Decl {
+    return mod.declPtr(mod.funcOwnerDeclIndex(func_index));
 }

-pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) Fn.InferredErrorSet.OptionalIndex {
-    if (ty.ip_index == .none) return .none;
-    return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern());
+pub fn funcOwnerDeclIndex(mod: *Module, func_index: InternPool.Index) Decl.Index {
+    return mod.funcInfo(func_index).owner_decl;
+}
+
+pub fn iesFuncIndex(mod: *const Module, ies_index: InternPool.Index) InternPool.Index {
+    return mod.intern_pool.iesFuncIndex(ies_index);
+}
+
+pub fn funcInfo(mod: *Module, func_index: InternPool.Index) InternPool.Key.Func {
+    return mod.intern_pool.indexToKey(func_index).func;
 }

 pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc {
@@ -7265,3 +6954,42 @@ pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQu
 pub fn toEnum(mod: *Module, comptime E: type, val: Value) E {
     return mod.intern_pool.toEnum(E, val.toIntern());
 }
+
+pub fn isAnytypeParam(mod: *Module, func_index: InternPool.Index, index: u32) bool {
+    const func = mod.funcInfo(func_index);
+    const file = mod.declPtr(func.owner_decl).getFileScope(mod);
+
+    const tags = file.zir.instructions.items(.tag);
+
+    const param_body = file.zir.getParamBody(func.zir_body_inst);
+    const param = param_body[index];
+
+    return switch (tags[param]) {
+        .param, .param_comptime => false,
+        .param_anytype, .param_anytype_comptime => true,
+        else => unreachable,
+    };
+}
+
+pub fn getParamName(mod: *Module, func_index: InternPool.Index, index: u32) [:0]const u8 {
+    const func = mod.funcInfo(func_index);
+    const file = mod.declPtr(func.owner_decl).getFileScope(mod);
+
+    const tags = file.zir.instructions.items(.tag);
+    const data = file.zir.instructions.items(.data);
+
+    const param_body = file.zir.getParamBody(func.zir_body_inst);
+    const param = param_body[index];
+
+    return switch (tags[param]) {
+        .param, .param_comptime => blk: {
+            const extra = file.zir.extraData(Zir.Inst.Param, data[param].pl_tok.payload_index);
+            break :blk file.zir.nullTerminatedString(extra.data.name);
+        },
+        .param_anytype, .param_anytype_comptime => blk: {
+            const param_data = data[param].str_tok;
+            break :blk param_data.get(file.zir);
+        },
+        else => unreachable,
+    };
+}
diff --git a/src/Sema.zig b/src/Sema.zig
index 0e1774032a..3f8b936e0b 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -23,13 +23,13 @@ owner_decl: *Decl,
 owner_decl_index: Decl.Index,
 /// For an inline or comptime function call, this will be the root parent function
 /// which contains the callsite. Corresponds to `owner_decl`.
-owner_func: ?*Module.Fn,
-owner_func_index: Module.Fn.OptionalIndex,
+/// This could be `none`, a `func_decl`, or a `func_instance`.
+owner_func_index: InternPool.Index,
 /// The function this ZIR code is the body of, according to the source code.
-/// This starts out the same as `owner_func` and then diverges in the case of
+/// This starts out the same as `owner_func_index` and then diverges in the case of
 /// an inline or comptime function call.
-func: ?*Module.Fn,
-func_index: Module.Fn.OptionalIndex,
+/// This could be `none`, a `func_decl`, or a `func_instance`.
+func_index: InternPool.Index,
 /// Used to restore the error return trace when returning a non-error from a function.
 error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
 /// When semantic analysis needs to know the return type of the function whose body
@@ -38,6 +38,10 @@ error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
 /// generic function which uses a type expression for the return type.
 /// The type will be `void` in the case that `func` is `null`.
 fn_ret_ty: Type,
+/// In case of the return type being an error union with an inferred error
+/// set, this is the inferred error set. `null` otherwise. Allocated with
+/// `Sema.arena`.
+fn_ret_ty_ies: ?*InferredErrorSet,
 branch_quota: u32 = default_branch_quota,
 branch_count: u32 = 0,
 /// Populated when returning `error.ComptimeBreak`. Used to communicate the
@@ -49,21 +53,23 @@ comptime_break_inst: Zir.Inst.Index = undefined,
 /// contain a mapped source location.
 src: LazySrcLoc = .{ .token_offset = 0 },
 decl_val_table: std.AutoHashMapUnmanaged(Decl.Index, Air.Inst.Ref) = .{},
-/// When doing a generic function instantiation, this array collects a
-/// `Value` object for each parameter that is comptime-known and thus elided
-/// from the generated function. This memory is allocated by a parent `Sema` and
-/// owned by the values arena of the Sema owner_decl.
-comptime_args: []TypedValue = &.{},
-/// Marks the function instruction that `comptime_args` applies to so that we
-/// don't accidentally apply it to a function prototype which is used in the
-/// type expression of a generic function parameter.
-comptime_args_fn_inst: Zir.Inst.Index = 0, -/// When `comptime_args` is provided, this field is also provided. It was used as -/// the key in the `monomorphed_funcs` set. The `func` instruction is supposed -/// to use this instead of allocating a fresh one. This avoids an unnecessary -/// extra hash table lookup in the `monomorphed_funcs` set. -/// Sema will set this to null when it takes ownership. -preallocated_new_func: Module.Fn.OptionalIndex = .none, +/// When doing a generic function instantiation, this array collects a value +/// for each parameter of the generic owner. `none` for non-comptime parameters. +/// This is a separate array from `block.params` so that it can be passed +/// directly to `comptime_args` when calling `InternPool.getFuncInstance`. +/// This memory is allocated by a parent `Sema` in the temporary arena, and is +/// used only to add a `func_instance` into the `InternPool`. +comptime_args: []InternPool.Index = &.{}, +/// Used to communicate from a generic function instantiation to the logic that +/// creates a generic function instantiation value in `funcCommon`. +generic_owner: InternPool.Index = .none, +/// When `generic_owner` is not none, this contains the generic function +/// instantiation callsite so that compile errors on the parameter types of the +/// instantiation can point back to the instantiation site in addition to the +/// declaration site. +generic_call_src: LazySrcLoc = .unneeded, +/// Corresponds to `generic_call_src`. +generic_call_decl: Decl.OptionalIndex = .none, /// The key is types that must be fully resolved prior to machine code /// generation pass. Types are added to this set when resolving them /// immediately could cause a dependency loop, but they do need to be resolved @@ -79,8 +85,6 @@ types_to_resolve: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{}, /// Populated with the last compile error created. err: ?*Module.ErrorMsg = null, -/// True when analyzing a generic instantiation. Used to suppress some errors. -is_generic_instantiation: bool = false, /// Set to true when analyzing a func type instruction so that nested generic /// function types will emit generic poison instead of a partial type. no_partial_func_ty: bool = false, @@ -97,6 +101,10 @@ unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, InferredAll /// involve transitioning comptime-mutable memory away from using Decls at all. comptime_mutable_decls: *std.ArrayList(Decl.Index), +/// This is populated when `@setAlignStack` occurs so that if there is a duplicate +/// one encountered, the conflicting source location can be shown. +prev_stack_alignment_src: ?LazySrcLoc = null, + const std = @import("std"); const math = std.math; const mem = std.mem; @@ -131,6 +139,49 @@ const Alignment = InternPool.Alignment; pub const default_branch_quota = 1000; pub const default_reference_trace_len = 2; +pub const InferredErrorSet = struct { + /// The function body from which this error set originates. + /// This is `none` in the case of a comptime/inline function call, corresponding to + /// `InternPool.Index.adhoc_inferred_error_set_type`. + /// The function's resolved error set is not set until analysis of the + /// function body completes. + func: InternPool.Index, + /// All currently known errors that this error set contains. This includes + /// direct additions via `return error.Foo;`, and possibly also errors that + /// are returned from any dependent functions. 
+ errors: NameMap = .{}, + /// Other inferred error sets which this inferred error set should include. + inferred_error_sets: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, + /// The regular error set created by resolving this inferred error set. + resolved: InternPool.Index = .none, + + pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); + + pub fn addErrorSet( + self: *InferredErrorSet, + err_set_ty: Type, + ip: *InternPool, + arena: Allocator, + ) !void { + switch (err_set_ty.toIntern()) { + .anyerror_type => self.resolved = .anyerror_type, + .adhoc_inferred_error_set_type => {}, // Adding an inferred error set to itself. + + else => switch (ip.indexToKey(err_set_ty.toIntern())) { + .error_set_type => |error_set_type| { + for (error_set_type.names.get(ip)) |name| { + try self.errors.put(arena, name, {}); + } + }, + .inferred_error_set_type => { + try self.inferred_error_sets.put(arena, err_set_ty.toIntern(), {}); + }, + else => unreachable, + }, + } + } +}; + /// Stores the mapping from `Zir.Inst.Index -> Air.Inst.Ref`, which is used by sema to resolve /// instructions during analysis. /// Instead of a hash table approach, InstMap is simply a slice that is indexed into using the @@ -243,7 +294,13 @@ pub const Block = struct { /// The AIR instructions generated for this block. instructions: std.ArrayListUnmanaged(Air.Inst.Index), // `param` instructions are collected here to be used by the `func` instruction. - params: std.ArrayListUnmanaged(Param) = .{}, + /// When doing a generic function instantiation, this array collects a type + /// for each *runtime-known* parameter. This array corresponds to the instance + /// function type, while `Sema.comptime_args` corresponds to the generic owner + /// function type. + /// This memory is allocated by a parent `Sema` in the temporary arena, and is + /// used to add a `func_instance` into the `InternPool`. + params: std.MultiArrayList(Param) = .{}, wip_capture_scope: *CaptureScope, @@ -323,10 +380,10 @@ pub const Block = struct { }; const Param = struct { - /// `noreturn` means `anytype`. - ty: Type, + /// `none` means `anytype`. + ty: InternPool.Index, is_comptime: bool, - name: []const u8, + name: Zir.NullTerminatedString, }; /// This `Block` maps a block ZIR instruction to the corresponding @@ -342,7 +399,8 @@ pub const Block = struct { /// It is shared among all the blocks in an inline or comptime called /// function. pub const Inlining = struct { - func: ?*Module.Fn, + /// Might be `none`. + func: InternPool.Index, comptime_result: Air.Inst.Ref, merges: Merges, }; @@ -906,7 +964,7 @@ fn analyzeBodyInner( // We use a while (true) loop here to avoid a redundant way of breaking out of // the loop. The only way to break out of the loop is with a `noreturn` // instruction. - var i: usize = 0; + var i: u32 = 0; const result = while (true) { crash_info.setBodyIndex(i); const inst = body[i]; @@ -1116,7 +1174,7 @@ fn analyzeBodyInner( .shl_sat => try sema.zirShl(block, inst, .shl_sat), .ret_ptr => try sema.zirRetPtr(block), - .ret_type => try sema.addType(sema.fn_ret_ty), + .ret_type => Air.internedToRef(sema.fn_ret_ty.toIntern()), // Instructions that we know to *always* be noreturn based solely on their tag. 
// These functions match the return type of analyzeBody so that we can @@ -1338,22 +1396,22 @@ fn analyzeBodyInner( continue; }, .param => { - try sema.zirParam(block, inst, false); + try sema.zirParam(block, inst, i, false); i += 1; continue; }, .param_comptime => { - try sema.zirParam(block, inst, true); + try sema.zirParam(block, inst, i, true); i += 1; continue; }, .param_anytype => { - try sema.zirParamAnytype(block, inst, false); + try sema.zirParamAnytype(block, inst, i, false); i += 1; continue; }, .param_anytype_comptime => { - try sema.zirParamAnytype(block, inst, true); + try sema.zirParamAnytype(block, inst, i, true); i += 1; continue; }, @@ -1493,10 +1551,7 @@ fn analyzeBodyInner( // Note: this probably needs to be resolved in a more general manner. const prev_params = block.params; block.params = .{}; - defer { - block.params.deinit(sema.gpa); - block.params = prev_params; - } + defer block.params = prev_params; const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse break always_noreturn; if (inst == break_data.block_inst) { @@ -1532,7 +1587,6 @@ fn analyzeBodyInner( .merges = undefined, }; child_block.label = &label; - defer child_block.params.deinit(gpa); // Write these instructions directly into the parent block child_block.instructions = block.instructions; @@ -2008,10 +2062,7 @@ fn resolveDefinedValue( /// Value Tag `variable` causes this function to return `null`. /// Value Tag `undef` causes this function to return the Value. /// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. -fn resolveMaybeUndefVal( - sema: *Sema, - inst: Air.Inst.Ref, -) CompileError!?Value { +fn resolveMaybeUndefVal(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value { const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null; if (val.isGenericPoison()) return error.GenericPoison; if (val.ip_index != .none and sema.mod.intern_pool.isVariable(val.toIntern())) return null; @@ -2022,10 +2073,7 @@ fn resolveMaybeUndefVal( /// Value Tag `undef` causes this function to return the Value. /// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. /// Lazy values are recursively resolved. -fn resolveMaybeUndefLazyVal( - sema: *Sema, - inst: Air.Inst.Ref, -) CompileError!?Value { +fn resolveMaybeUndefLazyVal(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value { return try sema.resolveLazyValue((try sema.resolveMaybeUndefVal(inst)) orelse return null); } @@ -2034,10 +2082,7 @@ fn resolveMaybeUndefLazyVal( /// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. /// Value Tag `decl_ref` and `decl_ref_mut` or any nested such value results in `null`. /// Lazy values are recursively resolved. 
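// Illustration, not part of the patch: for the `param_anytype` handling
// above, `b` has no declared type, so its Block.Param.ty is stored as `none`
// and a concrete type exists only per instantiation.
fn second(a: u32, b: anytype) @TypeOf(b) {
    _ = a;
    return b;
}

test "anytype parameter" {
    try @import("std").testing.expectEqual(true, second(1, true));
}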
-fn resolveMaybeUndefValIntable( - sema: *Sema, - inst: Air.Inst.Ref, -) CompileError!?Value { +fn resolveMaybeUndefValIntable(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value { const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null; if (val.isGenericPoison()) return error.GenericPoison; if (val.ip_index == .none) return val; @@ -2363,7 +2408,10 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { break :blk default_reference_trace_len; }; - var referenced_by = if (sema.func) |some| some.owner_decl else sema.owner_decl_index; + var referenced_by = if (sema.func_index != .none) + mod.funcOwnerDeclIndex(sema.func_index) + else + sema.owner_decl_index; var reference_stack = std.ArrayList(Module.ErrorMsg.Trace).init(gpa); defer reference_stack.deinit(); @@ -2399,14 +2447,15 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { } err_msg.reference_trace = try reference_stack.toOwnedSlice(); } - if (sema.owner_func) |func| { - func.state = .sema_failure; + const ip = &mod.intern_pool; + if (sema.owner_func_index != .none) { + ip.funcAnalysis(sema.owner_func_index).state = .sema_failure; } else { sema.owner_decl.analysis = .sema_failure; sema.owner_decl.generation = mod.generation; } - if (sema.func) |func| { - func.state = .sema_failure; + if (sema.func_index != .none) { + ip.funcAnalysis(sema.func_index).state = .sema_failure; } const gop = mod.failed_decls.getOrPutAssumeCapacity(sema.owner_decl_index); if (gop.found_existing) { @@ -2866,6 +2915,7 @@ fn createAnonymousDeclTypeNamed( inst: ?Zir.Inst.Index, ) !Decl.Index { const mod = sema.mod; + const ip = &mod.intern_pool; const gpa = sema.gpa; const namespace = block.namespace; const src_scope = block.wip_capture_scope; @@ -2886,16 +2936,16 @@ fn createAnonymousDeclTypeNamed( const name = mod.intern_pool.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{ src_decl.name.fmt(&mod.intern_pool), anon_prefix, @intFromEnum(new_decl_index), }) catch unreachable; - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name); return new_decl_index; }, .parent => { const name = mod.declPtr(block.src_decl).name; - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name); return new_decl_index; }, .func => { - const fn_info = sema.code.getFnInfo(sema.func.?.zir_body_inst); + const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index)); const zir_tags = sema.code.instructions.items(.tag); var buf = std.ArrayList(u8).init(gpa); @@ -2927,7 +2977,7 @@ fn createAnonymousDeclTypeNamed( try writer.writeByte(')'); const name = try mod.intern_pool.getOrPutString(gpa, buf.items); - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name); return new_decl_index; }, .dbg_var => { @@ -2943,7 +2993,7 @@ fn createAnonymousDeclTypeNamed( src_decl.name.fmt(&mod.intern_pool), zir_data[i].str_op.getStr(sema.code), }); - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, typed_value, name); return new_decl_index; }, else => {}, @@ -3070,18 +3120,12 @@ fn zirEnumDecl( sema.owner_decl_index = prev_owner_decl_index; } - const prev_owner_func = sema.owner_func; 
     const prev_owner_func_index = sema.owner_func_index;
-    sema.owner_func = null;
     sema.owner_func_index = .none;
-    defer sema.owner_func = prev_owner_func;
     defer sema.owner_func_index = prev_owner_func_index;
-    const prev_func = sema.func;
     const prev_func_index = sema.func_index;
-    sema.func = null;
     sema.func_index = .none;
-    defer sema.func = prev_func;
     defer sema.func_index = prev_func_index;

     var wip_captures = try WipCaptureScope.init(gpa, new_decl.src_scope);
@@ -3393,7 +3437,7 @@ fn zirErrorSetDecl(
     const src = inst_data.src();
     const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);

-    var names: Module.Fn.InferredErrorSet.NameMap = .{};
+    var names: InferredErrorSet.NameMap = .{};
     try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len);

     var extra_index = @as(u32, @intCast(extra.end));
@@ -5236,12 +5280,10 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v
     // %b = store(%a, %c)
     // Where %c is an error union or error set. In such case we need to add
     // to the current function's inferred error set, if any.
-    if (is_ret and (sema.typeOf(operand).zigTypeTag(mod) == .ErrorUnion or
-        sema.typeOf(operand).zigTypeTag(mod) == .ErrorSet) and
-        sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion)
-    {
-        try sema.addToInferredErrorSet(operand);
-    }
+    if (is_ret and sema.fn_ret_ty_ies != null) switch (sema.typeOf(operand).zigTypeTag(mod)) {
+        .ErrorUnion, .ErrorSet => try sema.addToInferredErrorSet(operand),
+        else => {},
+    };

     const ptr_src: LazySrcLoc = .{ .node_offset_store_ptr = inst_data.src_node };
     const operand_src: LazySrcLoc = .{ .node_offset_store_operand = inst_data.src_node };
@@ -5379,7 +5421,10 @@ fn zirCompileLog(
     }
     try writer.print("\n", .{});

-    const decl_index = if (sema.func) |some| some.owner_decl else sema.owner_decl_index;
+    const decl_index = if (sema.func_index != .none)
+        mod.funcOwnerDeclIndex(sema.func_index)
+    else
+        sema.owner_decl_index;
     const gop = try mod.compile_log_decls.getOrPut(sema.gpa, decl_index);
     if (!gop.found_existing) {
         gop.value_ptr.* = src_node;
@@ -5967,11 +6012,11 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
             alignment.toByteUnitsOptional().?,
         });
     }
-    const func_index = sema.func_index.unwrap() orelse
+    if (sema.func_index == .none) {
         return sema.fail(block, src, "@setAlignStack outside function body", .{});
-    const func = mod.funcPtr(func_index);
+    }

-    const fn_owner_decl = mod.declPtr(func.owner_decl);
+    const fn_owner_decl = mod.funcOwnerDeclPtr(sema.func_index);
     switch (fn_owner_decl.ty.fnCallingConvention(mod)) {
         .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}),
         .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}),
@@ -5980,25 +6025,34 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
         },
     }

-    const gop = try mod.align_stack_fns.getOrPut(sema.gpa, func_index);
-    if (gop.found_existing) {
+    if (sema.prev_stack_alignment_src) |prev_src| {
         const msg = msg: {
             const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{});
             errdefer msg.destroy(sema.gpa);
-            try sema.errNote(block, gop.value_ptr.src, msg, "other instance here", .{});
+            try sema.errNote(block, prev_src, msg, "other instance here", .{});
             break :msg msg;
         };
         return sema.failWithOwnedErrorMsg(msg);
     }
-    gop.value_ptr.* = .{ .alignment = alignment, .src = src };
+
+    const ip = &mod.intern_pool;
+    const a = ip.funcAnalysis(sema.func_index);
+    if (a.stack_alignment != .none) {
+        a.stack_alignment = @enumFromInt(@max(
+            @intFromEnum(alignment),
+            @intFromEnum(a.stack_alignment),
+        ));
+    }
 }

 fn zirSetCold(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
+    const mod = sema.mod;
+    const ip = &mod.intern_pool;
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
     const is_cold = try sema.resolveConstBool(block, operand_src, extra.operand, "operand to @setCold must be comptime-known");
-    const func = sema.func orelse return; // does nothing outside a function
-    func.is_cold = is_cold;
+    if (sema.func_index == .none) return; // does nothing outside a function
+    ip.funcAnalysis(sema.func_index).is_cold = is_cold;
 }

 fn zirSetFloatMode(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
@@ -6308,7 +6362,7 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl {
     if (func_val.isUndef(mod)) return null;
     const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
         .extern_func => |extern_func| extern_func.decl,
-        .func => |func| mod.funcPtr(func.index).owner_decl,
+        .func => |func| func.owner_decl,
         .ptr => |ptr| switch (ptr.addr) {
             .decl => |decl| mod.declPtr(decl).val.getFunction(mod).?.owner_decl,
             else => return null,
@@ -6445,6 +6499,7 @@ fn zirCall(
     defer tracy.end();

     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const callee_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node };
     const call_src = inst_data.src();
@@ -6493,9 +6548,10 @@ fn zirCall(
     const args_body = sema.code.extra[extra.end..];

     var input_is_error = false;
-    const block_index = @as(Air.Inst.Index, @intCast(block.instructions.items.len));
+    const block_index: Air.Inst.Index = @intCast(block.instructions.items.len);

-    const fn_params_len = mod.typeToFunc(func_ty).?.param_types.len;
+    const func_ty_info = mod.typeToFunc(func_ty).?;
+    const fn_params_len = func_ty_info.param_types.len;
     const parent_comptime = block.is_comptime;
     // `extra_index` and `arg_index` are separate since the bound function is passed as the first argument.
     var extra_index: usize = 0;
@@ -6504,13 +6560,12 @@ fn zirCall(
         extra_index += 1;
         arg_index += 1;
     }) {
-        const func_ty_info = mod.typeToFunc(func_ty).?;
        const arg_end = sema.code.extra[extra.end + extra_index];
         defer arg_start = arg_end;

         // Generate args to comptime params in comptime block.
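(Editor's aside, not part of the patch: a minimal sketch of the two user-level builtins whose analysis the zirSetAlignStack/zirSetCold hunks above migrate into InternPool's funcAnalysis state. Both builtins must appear inside a function body, which is what the `.none` checks enforce; function names here are illustrative.)

fn hotPath() void {
    // Recorded in funcAnalysis(...).stack_alignment; rejected in naked
    // and inline functions by the calling-convention switch above.
    @setAlignStack(16);
}

fn rarePath() void {
    // Sets funcAnalysis(...).is_cold; silently does nothing when it is
    // analyzed outside a function body.
    @setCold(true);
}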
         defer block.is_comptime = parent_comptime;
-        if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@as(u5, @intCast(arg_index)))) {
+        if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@intCast(arg_index))) {
             block.is_comptime = true;
             // TODO set comptime_reason
         }
@@ -6519,10 +6574,10 @@ fn zirCall(
             if (arg_index >= fn_params_len) break :inst Air.Inst.Ref.var_args_param_type;

-            if (func_ty_info.param_types[arg_index] == .generic_poison_type)
+            if (func_ty_info.param_types.get(ip)[arg_index] == .generic_poison_type)
                 break :inst Air.Inst.Ref.generic_poison_type;

-            break :inst try sema.addType(func_ty_info.param_types[arg_index].toType());
+            break :inst try sema.addType(func_ty_info.param_types.get(ip)[arg_index].toType());
         });
         const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst);
@@ -6535,7 +6590,9 @@ fn zirCall(
         }
         resolved_args[arg_index] = resolved;
     }
-    if (sema.owner_func == null or !sema.owner_func.?.calls_or_awaits_errorable_fn) {
+    if (sema.owner_func_index == .none or
+        !ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn)
+    {
         input_is_error = false; // input was an error type, but no errorable fn's were actually called
     }
@@ -6702,6 +6759,7 @@ fn analyzeCall(
     call_dbg_node: ?Zir.Inst.Index,
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
+    const ip = &mod.intern_pool;

     const callee_ty = sema.typeOf(func);
     const func_ty_info = mod.typeToFunc(func_ty).?;
@@ -6749,20 +6807,17 @@ fn analyzeCall(
     var is_generic_call = func_ty_info.is_generic;
     var is_comptime_call = block.is_comptime or modifier == .compile_time;
-    var comptime_reason_buf: Block.ComptimeReason = undefined;
     var comptime_reason: ?*const Block.ComptimeReason = null;
     if (!is_comptime_call) {
         if (sema.typeRequiresComptime(func_ty_info.return_type.toType())) |ct| {
             is_comptime_call = ct;
             if (ct) {
-                // stage1 can't handle doing this directly
-                comptime_reason_buf = .{ .comptime_ret_ty = .{
+                comptime_reason = &.{ .comptime_ret_ty = .{
                     .block = block,
                     .func = func,
                     .func_src = func_src,
                     .return_ty = func_ty_info.return_type.toType(),
                 } };
-                comptime_reason = &comptime_reason_buf;
             }
         } else |err| switch (err) {
             error.GenericPoison => is_generic_call = true,
@@ -6778,7 +6833,6 @@ fn analyzeCall(
             func,
             func_src,
             call_src,
-            func_ty,
             ensure_result_used,
             uncasted_args,
             call_tag,
@@ -6793,14 +6847,12 @@ fn analyzeCall(
             error.ComptimeReturn => {
                 is_inline_call = true;
                 is_comptime_call = true;
-                // stage1 can't handle doing this directly
-                comptime_reason_buf = .{ .comptime_ret_ty = .{
+                comptime_reason = &.{ .comptime_ret_ty = .{
                     .block = block,
                     .func = func,
                     .func_src = func_src,
                     .return_ty = func_ty_info.return_type.toType(),
                 } };
-                comptime_reason = &comptime_reason_buf;
             },
             else => |e| return e,
         }
@@ -6819,9 +6871,9 @@ fn analyzeCall(
             .extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{
                 @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
             }),
-            .func => |function| function.index,
+            .func => func_val.toIntern(),
             .ptr => |ptr| switch (ptr.addr) {
-                .decl => |decl| mod.declPtr(decl).val.getFunctionIndex(mod).unwrap().?,
+                .decl => |decl| mod.declPtr(decl).val.toIntern(),
                 else => {
                     assert(callee_ty.isPtrAtRuntime(mod));
                     return sema.fail(block, call_src, "{s} call of function pointer", .{
@@ -6850,7 +6902,7 @@ fn analyzeCall(
         // This one is shared among sub-blocks within the same callee, but not
         // shared among the entire inline/comptime call stack.
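(Editor's aside: `paramIsComptime` above consults a 32-bit bitset, which is why the loop clamps the index with `@min(fn_params_len, 32)`. A minimal standalone sketch of that bitset scheme; the helper name is illustrative, not the InternPool API.)

const std = @import("std");

fn paramIsComptime(comptime_bits: u32, index: u5) bool {
    // One flag per parameter, so only the first 32 parameters can be
    // individually marked comptime.
    return ((comptime_bits >> index) & 1) != 0;
}

test "one comptime flag per parameter, capped at 32" {
    var bits: u32 = 0;
    bits |= @as(u32, 1) << 2; // mark parameter 2 as comptime
    try std.testing.expect(paramIsComptime(bits, 2));
    try std.testing.expect(!paramIsComptime(bits, 0));
}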
     var inlining: Block.Inlining = .{
-        .func = null,
+        .func = .none,
         .comptime_result = undefined,
         .merges = .{
             .src_locs = .{},
@@ -6862,7 +6914,7 @@ fn analyzeCall(
     // In order to save a bit of stack space, directly modify Sema rather
     // than create a child one.
     const parent_zir = sema.code;
-    const module_fn = mod.funcPtr(module_fn_index);
+    const module_fn = mod.funcInfo(module_fn_index);
     const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
     sema.code = fn_owner_decl.getFileScope(mod).zir;
     defer sema.code = parent_zir;
@@ -6877,11 +6929,8 @@ fn analyzeCall(
         sema.inst_map = parent_inst_map;
     }

-    const parent_func = sema.func;
     const parent_func_index = sema.func_index;
-    sema.func = module_fn;
-    sema.func_index = module_fn_index.toOptional();
-    defer sema.func = parent_func;
+    sema.func_index = module_fn_index;
     defer sema.func_index = parent_func_index;

     const parent_err_ret_index = sema.error_return_trace_index_on_fn_entry;
@@ -6913,16 +6962,28 @@ fn analyzeCall(
     try sema.emitBackwardBranch(block, call_src);

-    // Whether this call should be memoized, set to false if the call can mutate comptime state.
+    // Whether this call should be memoized, set to false if the call can
+    // mutate comptime state.
     var should_memoize = true;

     // If it's a comptime function call, we need to memoize it as long as no external
     // comptime memory is mutated.
     const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);

-    var new_fn_info = mod.typeToFunc(fn_owner_decl.ty).?;
-    new_fn_info.param_types = try sema.arena.alloc(InternPool.Index, new_fn_info.param_types.len);
-    new_fn_info.comptime_bits = 0;
+    const owner_info = mod.typeToFunc(fn_owner_decl.ty).?;
+    var new_fn_info: InternPool.GetFuncTypeKey = .{
+        .param_types = try sema.arena.alloc(InternPool.Index, owner_info.param_types.len),
+        .return_type = owner_info.return_type,
+        .comptime_bits = 0,
+        .noalias_bits = owner_info.noalias_bits,
+        .alignment = if (owner_info.align_is_generic) null else owner_info.alignment,
+        .cc = if (owner_info.cc_is_generic) null else owner_info.cc,
+        .is_var_args = owner_info.is_var_args,
+        .is_noinline = owner_info.is_noinline,
+        .section_is_generic = owner_info.section_is_generic,
+        .addrspace_is_generic = owner_info.addrspace_is_generic,
+        .is_generic = owner_info.is_generic,
+    };

     // This will have return instructions analyzed as break instructions to
     // the block_inst above. Here we are performing "comptime/inline semantic analysis"
@@ -6934,59 +6995,46 @@ fn analyzeCall(
     try sema.inst_map.ensureSpaceForInstructions(sema.gpa, fn_info.param_body);

     var has_comptime_args = false;
-    var arg_i: usize = 0;
+    var arg_i: u32 = 0;
     for (fn_info.param_body) |inst| {
-        sema.analyzeInlineCallArg(
+        const arg_src: LazySrcLoc = if (arg_i == 0 and bound_arg_src != null)
+            bound_arg_src.?
+        else
+            .{ .call_arg = .{
+                .decl = block.src_decl,
+                .call_node_offset = call_src.node_offset.x,
+                .arg_index = arg_i - @intFromBool(bound_arg_src != null),
+            } };
+        try sema.analyzeInlineCallArg(
             block,
             &child_block,
-            .unneeded,
+            arg_src,
             inst,
-            &new_fn_info,
+            new_fn_info.param_types,
             &arg_i,
             uncasted_args,
             is_comptime_call,
             &should_memoize,
             memoized_arg_values,
-            mod.typeToFunc(func_ty).?.param_types,
+            func_ty_info.param_types,
             func,
             &has_comptime_args,
-        ) catch |err| switch (err) {
-            error.NeededSourceLocation => {
-                _ = sema.inst_map.remove(inst);
-                const decl = mod.declPtr(block.src_decl);
-                try sema.analyzeInlineCallArg(
-                    block,
-                    &child_block,
-                    mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src),
-                    inst,
-                    &new_fn_info,
-                    &arg_i,
-                    uncasted_args,
-                    is_comptime_call,
-                    &should_memoize,
-                    memoized_arg_values,
-                    mod.typeToFunc(func_ty).?.param_types,
-                    func,
-                    &has_comptime_args,
-                );
-                unreachable;
-            },
-            else => |e| return e,
-        };
+        );
     }

-    if (!has_comptime_args and module_fn.state == .sema_failure) return error.AnalysisFail;
+    if (!has_comptime_args and module_fn.analysis(ip).state == .sema_failure)
+        return error.AnalysisFail;

     const recursive_msg = "inline call is recursive";
     var head = if (!has_comptime_args) block else null;
     while (head) |some| {
         const parent_inlining = some.inlining orelse break;
-        if (parent_inlining.func == module_fn) {
+        if (parent_inlining.func == module_fn_index) {
             return sema.fail(block, call_src, recursive_msg, .{});
         }
         head = some.parent;
     }
-    if (!has_comptime_args) inlining.func = module_fn;
+    if (!has_comptime_args) inlining.func = module_fn_index;

     // In case it is a generic function with an expression for the return type that depends
     // on parameters, we must now do the same for the return type as we just did with
@@ -6998,21 +7046,32 @@ fn analyzeCall(
         try sema.resolveInst(fn_info.ret_ty_ref);
     const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
     const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
-    // Create a fresh inferred error set type for inline/comptime calls.
-    const fn_ret_ty = blk: {
-        if (module_fn.hasInferredErrorSet(mod)) {
-            const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{
-                .func = module_fn_index,
-            });
-            const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index });
-            break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type);
-        }
-        break :blk bare_return_type;
-    };
-    new_fn_info.return_type = fn_ret_ty.toIntern();
     const parent_fn_ret_ty = sema.fn_ret_ty;
-    sema.fn_ret_ty = fn_ret_ty;
+    const parent_fn_ret_ty_ies = sema.fn_ret_ty_ies;
+    const parent_generic_owner = sema.generic_owner;
+    const parent_generic_call_src = sema.generic_call_src;
+    const parent_generic_call_decl = sema.generic_call_decl;
+    sema.fn_ret_ty = bare_return_type;
+    sema.fn_ret_ty_ies = null;
+    sema.generic_owner = .none;
+    sema.generic_call_src = .unneeded;
+    sema.generic_call_decl = .none;
     defer sema.fn_ret_ty = parent_fn_ret_ty;
+    defer sema.fn_ret_ty_ies = parent_fn_ret_ty_ies;
+    defer sema.generic_owner = parent_generic_owner;
+    defer sema.generic_call_src = parent_generic_call_src;
+    defer sema.generic_call_decl = parent_generic_call_decl;
+
+    if (module_fn.analysis(ip).inferred_error_set) {
+        // Create a fresh inferred error set type for inline/comptime calls.
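(Editor's aside before the allocation below: an inferred error set is what `!T` denotes when the error set is elided, which is the `inferred_error_set` state this branch replaces with a fresh ad-hoc set. A self-contained illustration at the language level:)

const std = @import("std");

// The `!u4` return type carries an inferred error set: every `return error.X`
// in the body adds a name to it, and the set is only resolved once the whole
// function body has been analyzed.
fn parseDigit(c: u8) !u4 {
    if (c < '0' or c > '9') return error.NotADigit;
    return @intCast(c - '0');
}

test "inferred error set" {
    try std.testing.expectError(error.NotADigit, parseDigit('x'));
    try std.testing.expectEqual(@as(u4, 7), try parseDigit('7'));
}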
+        const ies = try sema.arena.create(InferredErrorSet);
+        ies.* = .{ .func = .none };
+        sema.fn_ret_ty_ies = ies;
+        sema.fn_ret_ty = (try ip.get(gpa, .{ .error_union_type = .{
+            .error_set_type = .adhoc_inferred_error_set_type,
+            .payload_type = bare_return_type.toIntern(),
+        } })).toType();
+    }

     // This `res2` is here instead of directly breaking from `res` due to a stage1
     // bug generating invalid LLVM IR.
@@ -7030,9 +7089,10 @@ fn analyzeCall(
         }
     }

+    new_fn_info.return_type = sema.fn_ret_ty.toIntern();
     const new_func_resolved_ty = try mod.funcType(new_fn_info);
     if (!is_comptime_call and !block.is_typeof) {
-        try sema.emitDbgInline(block, parent_func_index.unwrap().?, module_fn_index, new_func_resolved_ty, .dbg_inline_begin);
+        try sema.emitDbgInline(block, parent_func_index, module_fn_index, new_func_resolved_ty, .dbg_inline_begin);

         const zir_tags = sema.code.instructions.items(.tag);
         for (fn_info.param_body) |param| switch (zir_tags[param]) {
@@ -7056,7 +7116,7 @@ fn analyzeCall(
     }

     if (is_comptime_call and ensure_result_used) {
-        try sema.ensureResultUsed(block, fn_ret_ty, call_src);
+        try sema.ensureResultUsed(block, sema.fn_ret_ty, call_src);
     }

     const result = result: {
@@ -7074,26 +7134,47 @@ fn analyzeCall(
         break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges);
     };

-    if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag(mod) != .NoReturn) {
+    if (!is_comptime_call and !block.is_typeof and
+        sema.typeOf(result).zigTypeTag(mod) != .NoReturn)
+    {
         try sema.emitDbgInline(
             block,
             module_fn_index,
-            parent_func_index.unwrap().?,
-            mod.declPtr(parent_func.?.owner_decl).ty,
+            parent_func_index,
+            mod.funcOwnerDeclPtr(parent_func_index).ty,
             .dbg_inline_end,
         );
     }

     if (should_memoize and is_comptime_call) {
         const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, "");
+        const result_interned = try result_val.intern2(sema.fn_ret_ty, mod);
+
+        // Transform ad-hoc inferred error set types into concrete error sets.
+        const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned);

         // TODO: check whether any external comptime memory was mutated by the
         // comptime function call. If so, then do not memoize the call here.
         _ = try mod.intern(.{ .memoized_call = .{
             .func = module_fn_index,
             .arg_values = memoized_arg_values,
-            .result = try result_val.intern(fn_ret_ty, mod),
+            .result = result_transformed,
         } });
+
+        break :res2 Air.internedToRef(result_transformed);
+    }
+
+    if (try sema.resolveMaybeUndefVal(result)) |result_val| {
+        const result_interned = try result_val.intern2(sema.fn_ret_ty, mod);
+        const result_transformed = try sema.resolveAdHocInferredErrorSet(block, call_src, result_interned);
+        break :res2 Air.internedToRef(result_transformed);
+    }
+
+    const new_ty = try sema.resolveAdHocInferredErrorSetTy(block, call_src, sema.typeOf(result).toIntern());
+    if (new_ty != .none) {
+        // TODO: mutate in place the previous instruction if possible
+        // rather than adding a bitcast instruction.
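(Editor's aside: the `memoized_call` entry interned above keys a cached result on the (function, argument values) pair, provided the call mutated no external comptime state. A hedged sketch of the user-visible effect:)

const std = @import("std");

fn fib(n: u64) u64 {
    return if (n < 2) n else fib(n - 1) + fib(n - 2);
}

test "repeated comptime calls can reuse a memoized result" {
    // Both evaluations present identical (func, args); the second is
    // eligible to be served from the memoized_call table instead of
    // re-interpreting the body.
    comptime std.debug.assert(fib(10) == 55);
    comptime std.debug.assert(fib(10) == 55);
}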
+        break :res2 try block.addBitCast(new_ty.toType(), result);
     }

     break :res2 result;
@@ -7110,9 +7191,9 @@ fn analyzeCall(
             if (i < fn_params_len) {
                 const opts: CoerceOpts = .{ .param_src = .{
                     .func_inst = func,
-                    .param_i = @as(u32, @intCast(i)),
+                    .param_i = @intCast(i),
                 } };
-                const param_ty = mod.typeToFunc(func_ty).?.param_types[i].toType();
+                const param_ty = func_ty_info.param_types.get(ip)[i].toType();
                 args[i] = sema.analyzeCallArg(
                     block,
                     .unneeded,
@@ -7152,13 +7233,13 @@ fn analyzeCall(
         if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);

         try sema.queueFullTypeResolution(func_ty_info.return_type.toType());
-        if (sema.owner_func != null and func_ty_info.return_type.toType().isError(mod)) {
-            sema.owner_func.?.calls_or_awaits_errorable_fn = true;
+        if (sema.owner_func_index != .none and func_ty_info.return_type.toType().isError(mod)) {
+            ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
         }

         if (try sema.resolveMaybeUndefVal(func)) |func_val| {
-            if (mod.intern_pool.indexToFunc(func_val.toIntern()).unwrap()) |func_index| {
-                try mod.ensureFuncBodyAnalysisQueued(func_index);
+            if (mod.intern_pool.isFuncBody(func_val.toIntern())) {
+                try mod.ensureFuncBodyAnalysisQueued(func_val.toIntern());
             }
         }
@@ -7219,7 +7300,7 @@ fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Typ
             @tagName(backend), @tagName(target.cpu.arch),
         });
     }
-    const func_decl = mod.declPtr(sema.owner_func.?.owner_decl);
+    const func_decl = mod.funcOwnerDeclPtr(sema.owner_func_index);
     if (!func_ty.eql(func_decl.ty, mod)) {
         return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{
             func_ty.fmt(mod), func_decl.ty.fmt(mod),
@@ -7235,17 +7316,18 @@ fn analyzeInlineCallArg(
     param_block: *Block,
     arg_src: LazySrcLoc,
     inst: Zir.Inst.Index,
-    new_fn_info: *InternPool.Key.FuncType,
-    arg_i: *usize,
+    new_param_types: []InternPool.Index,
+    arg_i: *u32,
     uncasted_args: []const Air.Inst.Ref,
     is_comptime_call: bool,
     should_memoize: *bool,
     memoized_arg_values: []InternPool.Index,
-    raw_param_types: []const InternPool.Index,
+    raw_param_types: InternPool.Index.Slice,
     func_inst: Air.Inst.Ref,
     has_comptime_args: *bool,
 ) !void {
     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     const zir_tags = sema.code.instructions.items(.tag);
     switch (zir_tags[inst]) {
         .param_comptime, .param_anytype_comptime => has_comptime_args.* = true,
@@ -7260,13 +7342,13 @@ fn analyzeInlineCallArg(
             const extra = sema.code.extraData(Zir.Inst.Param, pl_tok.payload_index);
             const param_body = sema.code.extra[extra.end..][0..extra.data.body_len];
             const param_ty = param_ty: {
-                const raw_param_ty = raw_param_types[arg_i.*];
+                const raw_param_ty = raw_param_types.get(ip)[arg_i.*];
                 if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty;
                 const param_ty_inst = try sema.resolveBody(param_block, param_body, inst);
                 const param_ty = try sema.analyzeAsType(param_block, param_src, param_ty_inst);
                 break :param_ty param_ty.toIntern();
             };
-            new_fn_info.param_types[arg_i.*] = param_ty;
+            new_param_types[arg_i.*] = param_ty;
             const uncasted_arg = uncasted_args[arg_i.*];
             if (try sema.typeRequiresComptime(param_ty.toType())) {
                 _ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "argument to parameter with comptime-only type must be comptime-known") catch |err| {
@@ -7278,7 +7360,7 @@ fn analyzeInlineCallArg(
             }
             const casted_arg = sema.coerceExtra(arg_block, param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{
                 .func_inst = func_inst,
-                .param_i = @as(u32, @intCast(arg_i.*)),
+                .param_i = @intCast(arg_i.*),
             } }) catch |err| switch (err) {
                 error.NotCoercible => unreachable,
                 else => |e| return e,
             };
@@ -7317,7 +7399,7 @@ fn analyzeInlineCallArg(
         .param_anytype, .param_anytype_comptime => {
             // No coercion needed.
             const uncasted_arg = uncasted_args[arg_i.*];
-            new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg).toIntern();
+            new_param_types[arg_i.*] = sema.typeOf(uncasted_arg).toIntern();

             if (is_comptime_call) {
                 sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
@@ -7371,50 +7453,12 @@ fn analyzeCallArg(
     };
 }

-fn analyzeGenericCallArg(
-    sema: *Sema,
-    block: *Block,
-    arg_src: LazySrcLoc,
-    uncasted_arg: Air.Inst.Ref,
-    comptime_arg: TypedValue,
-    runtime_args: []Air.Inst.Ref,
-    new_fn_info: InternPool.Key.FuncType,
-    runtime_i: *u32,
-) !void {
-    const mod = sema.mod;
-    const is_runtime = comptime_arg.val.isGenericPoison() and
-        comptime_arg.ty.hasRuntimeBits(mod) and
-        !(try sema.typeRequiresComptime(comptime_arg.ty));
-    if (is_runtime) {
-        const param_ty = new_fn_info.param_types[runtime_i.*].toType();
-        const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
-        try sema.queueFullTypeResolution(param_ty);
-        runtime_args[runtime_i.*] = casted_arg;
-        runtime_i.* += 1;
-    } else if (try sema.typeHasOnePossibleValue(comptime_arg.ty)) |_| {
-        _ = try sema.coerce(block, comptime_arg.ty, uncasted_arg, arg_src);
-    }
-}
-
-fn analyzeGenericCallArgVal(
-    sema: *Sema,
-    block: *Block,
-    arg_src: LazySrcLoc,
-    arg_ty: Type,
-    uncasted_arg: Air.Inst.Ref,
-    reason: []const u8,
-) !Value {
-    const casted_arg = try sema.coerce(block, arg_ty, uncasted_arg, arg_src);
-    return sema.resolveLazyValue(try sema.resolveValue(block, arg_src, casted_arg, reason));
-}
-
 fn instantiateGenericCall(
     sema: *Sema,
     block: *Block,
     func: Air.Inst.Ref,
     func_src: LazySrcLoc,
     call_src: LazySrcLoc,
-    generic_func_ty: Type,
     ensure_result_used: bool,
     uncasted_args: []const Air.Inst.Ref,
     call_tag: Air.Inst.Tag,
@@ -7423,248 +7467,153 @@ fn instantiateGenericCall(
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
     const gpa = sema.gpa;
+    const ip = &mod.intern_pool;

     const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known");
-    const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
-        .func => |function| function.index,
-        .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.getFunctionIndex(mod).unwrap().?,
+    const generic_owner = switch (mod.intern_pool.indexToKey(func_val.toIntern())) {
+        .func => func_val.toIntern(),
+        .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.toIntern(),
         else => unreachable,
     };
-    const module_fn = mod.funcPtr(module_fn_index);
-    // Check the Module's generic function map with an adapted context, so that we
-    // can match against `uncasted_args` rather than doing the work below to create a
-    // generic Scope only to junk it if it matches an existing instantiation.
-    const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
+    const generic_owner_func = mod.intern_pool.indexToKey(generic_owner).func;
+
+    // Even though there may already be a generic instantiation corresponding
+    // to this callsite, we must evaluate the expressions of the generic
+    // function signature with the values of the callsite plugged in.
+    // Importantly, this may include type coercions that determine whether the
+    // instantiation is a match of a previous instantiation.
+    // The actual monomorphization happens via adding `func_instance` to
+    // `InternPool`.
+
+    const fn_owner_decl = mod.declPtr(generic_owner_func.owner_decl);
     const namespace_index = fn_owner_decl.src_namespace;
     const namespace = mod.namespacePtr(namespace_index);
     const fn_zir = namespace.file_scope.zir;
-    const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst);
-    const zir_tags = fn_zir.instructions.items(.tag);
+    const fn_info = fn_zir.getFnInfo(generic_owner_func.zir_body_inst);

-    const monomorphed_args = try sema.arena.alloc(InternPool.Index, mod.typeToFunc(generic_func_ty).?.param_types.len);
-    const callee_index = callee: {
-        var arg_i: usize = 0;
-        var monomorphed_arg_i: u32 = 0;
-        var known_unique = false;
-        for (fn_info.param_body) |inst| {
-            const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?;
-            var is_comptime = false;
-            var is_anytype = false;
-            switch (zir_tags[inst]) {
-                .param => {
-                    is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
-                },
-                .param_comptime => {
-                    is_comptime = true;
-                },
-                .param_anytype => {
-                    is_anytype = true;
-                    is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
-                },
-                .param_anytype_comptime => {
-                    is_anytype = true;
-                    is_comptime = true;
-                },
                else => continue,
            }
+    const comptime_args = try sema.arena.alloc(InternPool.Index, uncasted_args.len);
+    @memset(comptime_args, .none);

-            defer arg_i += 1;
-            const param_ty = generic_func_ty_info.param_types[arg_i];
-            const is_generic = !is_anytype and param_ty == .generic_poison_type;
-
-            if (known_unique) {
-                if (is_comptime or is_anytype or is_generic) {
-                    monomorphed_arg_i += 1;
-                }
-                continue;
-            }
-
-            const uncasted_arg = uncasted_args[arg_i];
-            const arg_ty = if (is_generic) mod.monomorphed_funcs.getAdapted(
-                Module.MonomorphedFuncAdaptedKey{
-                    .func = module_fn_index,
-                    .args = monomorphed_args[0..monomorphed_arg_i],
-                },
-                Module.MonomorphedFuncsAdaptedContext{ .mod = mod },
-            ) orelse {
-                known_unique = true;
-                monomorphed_arg_i += 1;
-                continue;
-            } else if (is_anytype) sema.typeOf(uncasted_arg).toIntern() else param_ty;
-            const was_comptime = is_comptime;
-            if (!is_comptime and try sema.typeRequiresComptime(arg_ty.toType())) is_comptime = true;
-            if (is_comptime or is_anytype) {
-                // Tuple default values are a part of the type and need to be
-                // resolved to hash the type.
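(Editor's aside on the comment above: at the language level, monomorphization means each distinct set of comptime arguments yields its own function instance. A self-contained sketch, with illustrative names:)

const std = @import("std");

// `T` is a comptime parameter, so `maxOf(u32, ...)` and `maxOf(f64, ...)` are
// two separate instances. Under the scheme described above they are found or
// created by interning `func_instance` keys rather than by allocating
// anonymous Decls per instantiation.
fn maxOf(comptime T: type, a: T, b: T) T {
    return if (a > b) a else b;
}

test "one instance per distinct comptime argument" {
    try std.testing.expectEqual(@as(u32, 9), maxOf(u32, 4, 9));
    try std.testing.expectEqual(@as(f64, 2.5), maxOf(f64, 2.5, -1.0));
}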
-                try sema.resolveTupleLazyValues(block, call_src, arg_ty.toType());
-            }
-
-            if (is_comptime) {
-                const casted_arg = sema.analyzeGenericCallArgVal(block, .unneeded, arg_ty.toType(), uncasted_arg, "") catch |err| switch (err) {
-                    error.NeededSourceLocation => {
-                        const decl = mod.declPtr(block.src_decl);
-                        const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src);
-                        _ = try sema.analyzeGenericCallArgVal(
-                            block,
-                            arg_src,
-                            arg_ty.toType(),
-                            uncasted_arg,
-                            if (was_comptime)
-                                "parameter is comptime"
-                            else
-                                "argument to parameter with comptime-only type must be comptime-known",
-                        );
-                        unreachable;
-                    },
-                    else => |e| return e,
-                };
-                monomorphed_args[monomorphed_arg_i] = casted_arg.toIntern();
-                monomorphed_arg_i += 1;
-            } else if (is_anytype or is_generic) {
-                monomorphed_args[monomorphed_arg_i] = try mod.intern(.{ .undef = arg_ty });
-                monomorphed_arg_i += 1;
-            }
-        }
-
-        if (!known_unique) {
-            if (mod.monomorphed_funcs.getAdapted(
-                Module.MonomorphedFuncAdaptedKey{
-                    .func = module_fn_index,
-                    .args = monomorphed_args[0..monomorphed_arg_i],
-                },
-                Module.MonomorphedFuncsAdaptedContext{ .mod = mod },
-            )) |callee_func| break :callee mod.intern_pool.indexToKey(callee_func).func.index;
-        }
-
-        const new_module_func_index = try mod.createFunc(undefined);
-        const new_module_func = mod.funcPtr(new_module_func_index);
-
-        new_module_func.generic_owner_decl = module_fn.owner_decl.toOptional();
-        new_module_func.comptime_args = null;
-
-        try namespace.anon_decls.ensureUnusedCapacity(gpa, 1);
-
-        // Create a Decl for the new function.
-        const src_decl_index = namespace.getDeclIndex(mod);
-        const src_decl = mod.declPtr(src_decl_index);
-        const new_decl_index = try mod.allocateNewDecl(namespace_index, fn_owner_decl.src_node, src_decl.src_scope);
-        const new_decl = mod.declPtr(new_decl_index);
-        // TODO better names for generic function instantiations
-        const decl_name = try mod.intern_pool.getOrPutStringFmt(gpa, "{}__anon_{d}", .{
-            fn_owner_decl.name.fmt(&mod.intern_pool), @intFromEnum(new_decl_index),
-        });
-        new_decl.name = decl_name;
-        new_decl.src_line = fn_owner_decl.src_line;
-        new_decl.is_pub = fn_owner_decl.is_pub;
-        new_decl.is_exported = fn_owner_decl.is_exported;
-        new_decl.has_align = fn_owner_decl.has_align;
-        new_decl.has_linksection_or_addrspace = fn_owner_decl.has_linksection_or_addrspace;
-        new_decl.@"linksection" = fn_owner_decl.@"linksection";
-        new_decl.@"addrspace" = fn_owner_decl.@"addrspace";
-        new_decl.zir_decl_index = fn_owner_decl.zir_decl_index;
-        new_decl.alive = true; // This Decl is called at runtime.
-        new_decl.analysis = .in_progress;
-        new_decl.generation = mod.generation;
-
-        namespace.anon_decls.putAssumeCapacityNoClobber(new_decl_index, {});
-
-        // The generic function Decl is guaranteed to be the first dependency
-        // of each of its instantiations.
-        assert(new_decl.dependencies.keys().len == 0);
-        try mod.declareDeclDependencyType(new_decl_index, module_fn.owner_decl, .function_body);
-
-        const new_func = sema.resolveGenericInstantiationType(
-            block,
-            fn_zir,
-            new_decl,
-            new_decl_index,
-            uncasted_args,
-            monomorphed_arg_i,
-            module_fn_index,
-            new_module_func_index,
-            namespace_index,
-            generic_func_ty,
-            call_src,
-            bound_arg_src,
-        ) catch |err| switch (err) {
-            error.GenericPoison, error.ComptimeReturn => {
-                // Resolving the new function type below will possibly declare more decl dependencies
-                // and so we remove them all here in case of error.
-                for (new_decl.dependencies.keys()) |dep_index| {
-                    const dep = mod.declPtr(dep_index);
-                    dep.removeDependant(new_decl_index);
-                }
-                assert(namespace.anon_decls.orderedRemove(new_decl_index));
-                mod.destroyDecl(new_decl_index);
-                mod.destroyFunc(new_module_func_index);
-                return err;
-            },
-            else => {
-                // TODO look up the compile error that happened here and attach a note to it
-                // pointing here, at the generic instantiation callsite.
-                if (sema.owner_func) |owner_func| {
-                    owner_func.state = .dependency_failure;
-                } else {
-                    sema.owner_decl.analysis = .dependency_failure;
-                }
-                return err;
-            },
-        };
-
-        break :callee new_func;
+    // Re-run the block that creates the function, with the comptime parameters
+    // pre-populated inside `inst_map`. This causes `param_comptime` and
+    // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a
+    // new, monomorphized function, with the comptime parameters elided.
+    var child_sema: Sema = .{
+        .mod = mod,
+        .gpa = gpa,
+        .arena = sema.arena,
+        .code = fn_zir,
+        // We pass the generic callsite's owner decl here because whatever `Decl`
+        // dependencies are chased at this point should be attached to the
+        // callsite, not the `Decl` associated with the `func_instance`.
+        .owner_decl = sema.owner_decl,
+        .owner_decl_index = sema.owner_decl_index,
+        .func_index = sema.owner_func_index,
+        .fn_ret_ty = Type.void,
+        .fn_ret_ty_ies = null,
+        .owner_func_index = .none,
+        .comptime_args = comptime_args,
+        .generic_owner = generic_owner,
+        .generic_call_src = call_src,
+        .generic_call_decl = block.src_decl.toOptional(),
+        .branch_quota = sema.branch_quota,
+        .branch_count = sema.branch_count,
+        .comptime_mutable_decls = sema.comptime_mutable_decls,
    };
-    const callee = mod.funcPtr(callee_index);
-    callee.branch_quota = @max(callee.branch_quota, sema.branch_quota);
+    defer child_sema.deinit();

-    const callee_inst = try sema.analyzeDeclVal(block, func_src, callee.owner_decl);
+    var wip_captures = try WipCaptureScope.init(gpa, sema.owner_decl.src_scope);
+    defer wip_captures.deinit();
+
+    var child_block: Block = .{
+        .parent = null,
+        .sema = &child_sema,
+        .src_decl = generic_owner_func.owner_decl,
+        .namespace = namespace_index,
+        .wip_capture_scope = wip_captures.scope,
+        .instructions = .{},
+        .inlining = null,
+        .is_comptime = true,
+    };
+    defer child_block.instructions.deinit(gpa);
+
+    try child_sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
+
+    for (fn_info.param_body[0..uncasted_args.len], uncasted_args, 0..) |inst, arg, i| {
+        // `child_sema` will use a different `inst_map` which means we have to
+        // convert from parent-relative `Air.Inst.Ref` to child-relative here.
+        // Constants are simple; runtime-known values need a new instruction.
+        child_sema.inst_map.putAssumeCapacityNoClobber(inst, if (try sema.resolveMaybeUndefVal(arg)) |val|
+            Air.internedToRef(val.toIntern())
+        else
+            // We insert into the map an instruction which is runtime-known
+            // but has the type of the argument.
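(Editor's aside on the two-way split described just above: comptime-known arguments seed the child Sema as interned constants, while runtime-known arguments are represented by typed `arg` instructions while the signature block is re-evaluated. A language-level sketch, with illustrative names:)

const std = @import("std");

fn scale(comptime factor: u32, x: u32) u32 {
    return factor * x;
}

fn caller(x: u32) u32 {
    // `factor` (3) is comptime-known: it maps to an interned constant in the
    // child inst_map. `x` is runtime-known: it maps to a fresh `arg`
    // instruction carrying x's type.
    return scale(3, x);
}

test "mixed comptime/runtime arguments" {
    try std.testing.expectEqual(@as(u32, 12), caller(4));
}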
+            try child_block.addInst(.{
+                .tag = .arg,
+                .data = .{ .arg = .{
+                    .ty = Air.internedToRef(sema.typeOf(arg).toIntern()),
+                    .src_index = @intCast(i),
+                } },
+            }));
+    }
+
+    const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst);
+    const callee_index = (child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable).toIntern();
+
+    const callee = mod.funcInfo(callee_index);
+    callee.branchQuota(ip).* = @max(callee.branchQuota(ip).*, sema.branch_quota);

     // Make a runtime call to the new function, making sure to omit the comptime args.
-    const comptime_args = callee.comptime_args.?;
-    const func_ty = mod.declPtr(callee.owner_decl).ty;
-    const runtime_args_len = @as(u32, @intCast(mod.typeToFunc(func_ty).?.param_types.len));
+    const func_ty = callee.ty.toType();
+    const func_ty_info = mod.typeToFunc(func_ty).?;
+
+    try wip_captures.finalize();
+
+    // If the call evaluated to a return type that requires comptime, never mind
+    // our generic instantiation. Instead we need to perform a comptime call.
+    if (try sema.typeRequiresComptime(func_ty_info.return_type.toType())) {
+        return error.ComptimeReturn;
+    }
+    // Similarly, if the call evaluated to a generic type we need to instead
+    // call it inline.
+    if (func_ty_info.is_generic or func_ty_info.cc == .Inline) {
+        return error.GenericPoison;
+    }
+
+    const runtime_args_len: u32 = func_ty_info.param_types.len;
     const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len);
     {
         var runtime_i: u32 = 0;
-        var total_i: u32 = 0;
-        for (fn_info.param_body) |inst| {
-            switch (zir_tags[inst]) {
-                .param_comptime, .param_anytype_comptime, .param, .param_anytype => {},
-                else => continue,
+        for (uncasted_args, 0..) |uncasted_arg, total_i| {
+            // In the case of a function call generated by the language, the LazySrcLoc
+            // provided for `call_src` may not point to anything interesting.
+            const arg_src: LazySrcLoc = if (total_i == 0 and bound_arg_src != null)
+                bound_arg_src.?
+            else if (call_src == .node_offset) .{ .call_arg = .{
+                .decl = block.src_decl,
+                .call_node_offset = call_src.node_offset.x,
+                .arg_index = @intCast(total_i),
+            } } else .unneeded;
+
+            const comptime_arg = callee.comptime_args.get(ip)[total_i];
+            if (comptime_arg == .none) {
+                const param_ty = func_ty_info.param_types.get(ip)[runtime_i].toType();
+                const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
+                try sema.queueFullTypeResolution(param_ty);
+                runtime_args[runtime_i] = casted_arg;
+                runtime_i += 1;
             }
-            sema.analyzeGenericCallArg(
-                block,
-                .unneeded,
-                uncasted_args[total_i],
-                comptime_args[total_i],
-                runtime_args,
-                mod.typeToFunc(func_ty).?,
-                &runtime_i,
-            ) catch |err| switch (err) {
-                error.NeededSourceLocation => {
-                    const decl = mod.declPtr(block.src_decl);
-                    _ = try sema.analyzeGenericCallArg(
-                        block,
-                        mod.argSrc(call_src.node_offset.x, decl, total_i, bound_arg_src),
-                        uncasted_args[total_i],
-                        comptime_args[total_i],
-                        runtime_args,
-                        mod.typeToFunc(func_ty).?,
-                        &runtime_i,
-                    );
-                    unreachable;
-                },
-                else => |e| return e,
-            };
-            total_i += 1;
         }

-        try sema.queueFullTypeResolution(mod.typeToFunc(func_ty).?.return_type.toType());
+        try sema.queueFullTypeResolution(func_ty_info.return_type.toType());
     }

     if (call_dbg_node) |some| try sema.zirDbgStmt(block, some);

-    if (sema.owner_func != null and mod.typeToFunc(func_ty).?.return_type.toType().isError(mod)) {
-        sema.owner_func.?.calls_or_awaits_errorable_fn = true;
+    if (sema.owner_func_index != .none and
+        func_ty_info.return_type.toType().isError(mod))
+    {
+        ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn = true;
     }

     try mod.ensureFuncBodyAnalysisQueued(callee_index);
@@ -7674,7 +7623,7 @@ fn instantiateGenericCall(
     const result = try block.addInst(.{
         .tag = call_tag,
         .data = .{ .pl_op = .{
-            .operand = callee_inst,
+            .operand = Air.internedToRef(callee_index),
             .payload = sema.addExtraAssumeCapacity(Air.Call{
                 .args_len = runtime_args_len,
             }),
@@ -7695,238 +7644,6 @@ fn instantiateGenericCall(
     return result;
 }

-fn resolveGenericInstantiationType(
-    sema: *Sema,
-    block: *Block,
-    fn_zir: Zir,
-    new_decl: *Decl,
-    new_decl_index: Decl.Index,
-    uncasted_args: []const Air.Inst.Ref,
-    monomorphed_args_len: u32,
-    module_fn_index: Module.Fn.Index,
-    new_module_func: Module.Fn.Index,
-    namespace: Namespace.Index,
-    generic_func_ty: Type,
-    call_src: LazySrcLoc,
-    bound_arg_src: ?LazySrcLoc,
-) !Module.Fn.Index {
-    const mod = sema.mod;
-    const gpa = sema.gpa;
-
-    const zir_tags = fn_zir.instructions.items(.tag);
-    const module_fn = mod.funcPtr(module_fn_index);
-    const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst);
-
-    // Re-run the block that creates the function, with the comptime parameters
-    // pre-populated inside `inst_map`. This causes `param_comptime` and
-    // `param_anytype_comptime` ZIR instructions to be ignored, resulting in a
-    // new, monomorphized function, with the comptime parameters elided.
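(Editor's aside on the two escape hatches above: a resolved return type that requires comptime abandons the instantiation for a comptime call via `error.ComptimeReturn`, and a still-generic type falls back to an inline call via `error.GenericPoison`. A type-returning function is the canonical comptime-only-return case; sketch with illustrative names:)

const std = @import("std");

// `Pair`'s return type is `type`, which is comptime-only, so a call to it can
// never be lowered as a runtime call to an instantiation; it has to be
// evaluated at comptime instead.
fn Pair(comptime T: type) type {
    return struct { first: T, second: T };
}

test "comptime-only return types force comptime evaluation" {
    const P = Pair(u8);
    const p: P = .{ .first = 1, .second = 2 };
    try std.testing.expect(p.first + p.second == 3);
}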
-    var child_sema: Sema = .{
-        .mod = mod,
-        .gpa = gpa,
-        .arena = sema.arena,
-        .code = fn_zir,
-        .owner_decl = new_decl,
-        .owner_decl_index = new_decl_index,
-        .func = null,
-        .func_index = .none,
-        .fn_ret_ty = Type.void,
-        .owner_func = null,
-        .owner_func_index = .none,
-        // TODO: fully migrate functions into InternPool
-        .comptime_args = try mod.tmp_hack_arena.allocator().alloc(TypedValue, uncasted_args.len),
-        .comptime_args_fn_inst = module_fn.zir_body_inst,
-        .preallocated_new_func = new_module_func.toOptional(),
-        .is_generic_instantiation = true,
-        .branch_quota = sema.branch_quota,
-        .branch_count = sema.branch_count,
-        .comptime_mutable_decls = sema.comptime_mutable_decls,
-    };
-    defer child_sema.deinit();
-
-    var wip_captures = try WipCaptureScope.init(gpa, new_decl.src_scope);
-    defer wip_captures.deinit();
-
-    var child_block: Block = .{
-        .parent = null,
-        .sema = &child_sema,
-        .src_decl = new_decl_index,
-        .namespace = namespace,
-        .wip_capture_scope = wip_captures.scope,
-        .instructions = .{},
-        .inlining = null,
-        .is_comptime = true,
-    };
-    defer {
-        child_block.instructions.deinit(gpa);
-        child_block.params.deinit(gpa);
-    }
-
-    try child_sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body);
-
-    var arg_i: usize = 0;
-    for (fn_info.param_body) |inst| {
-        const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?;
-        var is_comptime = false;
-        var is_anytype = false;
-        switch (zir_tags[inst]) {
-            .param => {
-                is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
-            },
-            .param_comptime => {
-                is_comptime = true;
-            },
-            .param_anytype => {
-                is_anytype = true;
-                is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
-            },
-            .param_anytype_comptime => {
-                is_anytype = true;
-                is_comptime = true;
-            },
-            else => continue,
-        }
-        const arg = uncasted_args[arg_i];
-        if (is_comptime) {
-            const arg_val = (try sema.resolveMaybeUndefVal(arg)).?;
-            const child_arg = try child_sema.addConstant(arg_val);
-            child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
-        } else if (is_anytype) {
-            const arg_ty = sema.typeOf(arg);
-            if (try sema.typeRequiresComptime(arg_ty)) {
-                const arg_val = sema.resolveConstValue(block, .unneeded, arg, "") catch |err| switch (err) {
-                    error.NeededSourceLocation => {
-                        const decl = mod.declPtr(block.src_decl);
-                        const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src);
-                        _ = try sema.resolveConstValue(block, arg_src, arg, "argument to parameter with comptime-only type must be comptime-known");
-                        unreachable;
-                    },
-                    else => |e| return e,
-                };
-                const child_arg = try child_sema.addConstant(arg_val);
-                child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
-            } else {
-                // We insert into the map an instruction which is runtime-known
-                // but has the type of the argument.
-                const child_arg = try child_block.addInst(.{
-                    .tag = .arg,
-                    .data = .{ .arg = .{
-                        .ty = try child_sema.addType(arg_ty),
-                        .src_index = @as(u32, @intCast(arg_i)),
-                    } },
-                });
-                child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
-            }
-        }
-        arg_i += 1;
-    }
-
-    // Save the error trace as our first action in the function.
-    // If this is unnecessary after all, Liveness will clean it up for us.
-    const error_return_trace_index = try sema.analyzeSaveErrRetIndex(&child_block);
-    child_sema.error_return_trace_index_on_fn_entry = error_return_trace_index;
-    child_block.error_return_trace_index = error_return_trace_index;
-
-    const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst);
-    const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable;
-    const new_func = new_func_val.getFunctionIndex(mod).unwrap().?;
-    assert(new_func == new_module_func);
-
-    const monomorphed_args_index = @as(u32, @intCast(mod.monomorphed_func_keys.items.len));
-    const monomorphed_args = try mod.monomorphed_func_keys.addManyAsSlice(gpa, monomorphed_args_len);
-    var monomorphed_arg_i: u32 = 0;
-    try mod.monomorphed_funcs.ensureUnusedCapacityContext(gpa, monomorphed_args_len + 1, .{ .mod = mod });
-
-    arg_i = 0;
-    for (fn_info.param_body) |inst| {
-        const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?;
-        var is_comptime = false;
-        var is_anytype = false;
-        switch (zir_tags[inst]) {
-            .param => {
-                is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
-            },
-            .param_comptime => {
-                is_comptime = true;
-            },
-            .param_anytype => {
-                is_anytype = true;
-                is_comptime = generic_func_ty_info.paramIsComptime(@as(u5, @intCast(arg_i)));
-            },
-            .param_anytype_comptime => {
-                is_anytype = true;
-                is_comptime = true;
-            },
-            else => continue,
-        }
-
-        const param_ty = generic_func_ty_info.param_types[arg_i];
-        const is_generic = !is_anytype and param_ty == .generic_poison_type;
-
-        const arg = child_sema.inst_map.get(inst).?;
-        const arg_ty = child_sema.typeOf(arg);
-
-        if (is_generic) if (mod.monomorphed_funcs.fetchPutAssumeCapacityContext(.{
-            .func = module_fn_index,
-            .args_index = monomorphed_args_index,
-            .args_len = monomorphed_arg_i,
-        }, arg_ty.toIntern(), .{ .mod = mod })) |kv| assert(kv.value == arg_ty.toIntern());
-        if (!is_comptime and try sema.typeRequiresComptime(arg_ty)) is_comptime = true;
-
-        if (is_comptime) {
-            const arg_val = (child_sema.resolveMaybeUndefValAllowVariables(arg) catch unreachable).?;
-            monomorphed_args[monomorphed_arg_i] = arg_val.toIntern();
-            monomorphed_arg_i += 1;
-            child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, .val = arg_val };
-        } else {
-            if (is_anytype or is_generic) {
-                monomorphed_args[monomorphed_arg_i] = try mod.intern(.{ .undef = arg_ty.toIntern() });
-                monomorphed_arg_i += 1;
-            }
-            child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, .val = Value.generic_poison };
-        }
-
-        arg_i += 1;
-    }
-
-    try wip_captures.finalize();
-
-    // Populate the Decl ty/val with the function and its type.
-    new_decl.ty = child_sema.typeOf(new_func_inst);
-    // If the call evaluated to a return type that requires comptime, never mind
-    // our generic instantiation. Instead we need to perform a comptime call.
-    const new_fn_info = mod.typeToFunc(new_decl.ty).?;
-    if (try sema.typeRequiresComptime(new_fn_info.return_type.toType())) {
-        return error.ComptimeReturn;
-    }
-    // Similarly, if the call evaluated to a generic type we need to instead
-    // call it inline.
-    if (new_fn_info.is_generic or new_fn_info.cc == .Inline) {
-        return error.GenericPoison;
-    }
-
-    new_decl.val = (try mod.intern(.{ .func = .{
-        .ty = new_decl.ty.toIntern(),
-        .index = new_func,
-    } })).toValue();
-    new_decl.alignment = .none;
-    new_decl.has_tv = true;
-    new_decl.owns_tv = true;
-    new_decl.analysis = .complete;
-
-    mod.monomorphed_funcs.putAssumeCapacityNoClobberContext(.{
-        .func = module_fn_index,
-        .args_index = monomorphed_args_index,
-        .args_len = monomorphed_arg_i,
-    }, new_decl.val.toIntern(), .{ .mod = mod });
-
-    // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field
-    // will be populated, ensuring it will have `analyzeBody` called with the ZIR
-    // parameters mapped appropriately.
-    try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func });
-    return new_func;
-}
-
 fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
     const mod = sema.mod;
     const tuple = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
@@ -7944,8 +7661,8 @@ fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type)
 fn emitDbgInline(
     sema: *Sema,
     block: *Block,
-    old_func: Module.Fn.Index,
-    new_func: Module.Fn.Index,
+    old_func: InternPool.Index,
+    new_func: InternPool.Index,
     new_func_ty: Type,
     tag: Air.Inst.Tag,
 ) CompileError!void {
@@ -8149,6 +7866,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
     defer tracy.end();

     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -8159,7 +7877,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
         if (val.isUndef(mod)) {
             return sema.addConstUndef(Type.err_int);
         }
-        const err_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
+        const err_name = ip.indexToKey(val.toIntern()).err.name;
         return sema.addConstant(try mod.intValue(
             Type.err_int,
             try mod.getErrorValue(err_name),
@@ -8167,17 +7885,19 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
     }

     const op_ty = sema.typeOf(uncasted_operand);
-    try sema.resolveInferredErrorSetTy(block, src, op_ty);
-    if (!op_ty.isAnyError(mod)) {
-        const names = op_ty.errorSetNames(mod);
-        switch (names.len) {
-            0 => return sema.addConstant(try mod.intValue(Type.err_int, 0)),
-            1 => {
-                const int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(names[0]).?));
-                return sema.addIntUnsigned(Type.err_int, int);
-            },
-            else => {},
-        }
+    switch (try sema.resolveInferredErrorSetTy(block, src, op_ty.toIntern())) {
+        .anyerror_type => {},
+        else => |err_set_ty_index| {
+            const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
+            switch (names.len) {
+                0 => return sema.addConstant(try mod.intValue(Type.err_int, 0)),
+                1 => {
+                    const int: Module.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[0]).?);
+                    return sema.addIntUnsigned(Type.err_int, int);
+                },
+                else => {},
+            }
+        },
     }

     try sema.requireRuntimeBlock(block, src, operand_src);
@@ -8226,6 +7946,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
     defer tracy.end();

     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
@@ -8254,23 +7975,25 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
         return Air.Inst.Ref.anyerror_type;
     }

-    if (mod.typeToInferredErrorSetIndex(lhs_ty).unwrap()) |ies_index| {
-        try sema.resolveInferredErrorSet(block, src, ies_index);
-        // isAnyError might have changed from a false negative to a true positive after resolution.
-        if (lhs_ty.isAnyError(mod)) {
-            return Air.Inst.Ref.anyerror_type;
+    if (ip.isInferredErrorSetType(lhs_ty.toIntern())) {
+        switch (try sema.resolveInferredErrorSet(block, src, lhs_ty.toIntern())) {
+            // isAnyError might have changed from a false negative to a true
+            // positive after resolution.
+            .anyerror_type => return .anyerror_type,
+            else => {},
         }
     }
-    if (mod.typeToInferredErrorSetIndex(rhs_ty).unwrap()) |ies_index| {
-        try sema.resolveInferredErrorSet(block, src, ies_index);
-        // isAnyError might have changed from a false negative to a true positive after resolution.
-        if (rhs_ty.isAnyError(mod)) {
-            return Air.Inst.Ref.anyerror_type;
+    if (ip.isInferredErrorSetType(rhs_ty.toIntern())) {
+        switch (try sema.resolveInferredErrorSet(block, src, rhs_ty.toIntern())) {
+            // isAnyError might have changed from a false negative to a true
+            // positive after resolution.
+            .anyerror_type => return .anyerror_type,
+            else => {},
         }
     }

     const err_set_ty = try sema.errorSetMerge(lhs_ty, rhs_ty);
-    return sema.addType(err_set_ty);
+    return Air.internedToRef(err_set_ty.toIntern());
 }

 fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -8747,9 +8470,7 @@ fn zirFunc(
     inst: Zir.Inst.Index,
     inferred_error_set: bool,
 ) CompileError!Air.Inst.Ref {
-    const tracy = trace(@src());
-    defer tracy.end();
-
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Func, inst_data.payload_index);
     const target = sema.mod.getTarget();
@@ -8790,8 +8511,7 @@ fn zirFunc(
     // If this instruction has a body it means it's the type of the `owner_decl`
     // otherwise it's a function type without a `callconv` attribute and should
     // never be `.C`.
-    // NOTE: revisit when doing #1717
-    const cc: std.builtin.CallingConvention = if (sema.owner_decl.is_exported and has_body)
+    const cc: std.builtin.CallingConvention = if (has_body and mod.declPtr(block.src_decl).is_exported)
         .C
     else
         .Unspecified;
@@ -8802,7 +8522,7 @@ fn zirFunc(
         inst,
         .none,
         target_util.defaultAddressSpace(target, .function),
-        FuncLinkSection.default,
+        .default,
         cc,
         ret_ty,
         false,
@@ -8830,10 +8550,21 @@ fn resolveGenericBody(
     const err = err: {
         // Make sure any nested param instructions don't clobber our work.
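(Editor's aside: `zirMergeErrorSets` above implements the `||` operator, and the rewrite resolves inferred sets on both sides first because either may widen to `anyerror` only after resolution. A language-level illustration:)

const std = @import("std");

const FileError = error{ NotFound, AccessDenied };
const NetError = error{Timeout};

// `||` merges the two name lists; if either operand is (or resolves to)
// anyerror, the result is anyerror, matching the early returns above.
const IoError = FileError || NetError;

test "error set merge" {
    const e: IoError = error.Timeout;
    try std.testing.expect(e == error.Timeout);
}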
const prev_params = block.params; + const prev_no_partial_func_type = sema.no_partial_func_ty; + const prev_generic_owner = sema.generic_owner; + const prev_generic_call_src = sema.generic_call_src; + const prev_generic_call_decl = sema.generic_call_decl; block.params = .{}; + sema.no_partial_func_ty = true; + sema.generic_owner = .none; + sema.generic_call_src = .unneeded; + sema.generic_call_decl = .none; defer { - block.params.deinit(sema.gpa); block.params = prev_params; + sema.no_partial_func_ty = prev_no_partial_func_type; + sema.generic_owner = prev_generic_owner; + sema.generic_call_src = prev_generic_call_src; + sema.generic_call_decl = prev_generic_call_decl; } const uncasted = sema.resolveBody(block, body, func_inst) catch |err| break :err err; @@ -8952,7 +8683,7 @@ fn checkCallConvSupportsVarArgs(sema: *Sema, block: *Block, src: LazySrcLoc, cc: } } -const FuncLinkSection = union(enum) { +const Section = union(enum) { generic, default, explicit: InternPool.NullTerminatedString, @@ -8967,8 +8698,7 @@ fn funcCommon( alignment: ?Alignment, /// null means generic poison address_space: ?std.builtin.AddressSpace, - /// outer null means generic poison; inner null means default link section - section: FuncLinkSection, + section: Section, /// null means generic poison cc: ?std.builtin.CallingConvention, /// this might be Type.generic_poison @@ -8984,6 +8714,8 @@ fn funcCommon( ) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; + const target = mod.getTarget(); + const ip = &mod.intern_pool; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset }; const func_src = LazySrcLoc.nodeOffset(src_node_offset); @@ -9001,226 +8733,150 @@ fn funcCommon( try sema.checkCallConvSupportsVarArgs(block, cc_src, cc.?); } - var destroy_fn_on_error = false; - const new_func_index = new_func: { - if (!has_body) break :new_func undefined; - if (sema.comptime_args_fn_inst == func_inst) { - const new_func_index = sema.preallocated_new_func.unwrap().?; - sema.preallocated_new_func = .none; // take ownership - break :new_func new_func_index; - } - destroy_fn_on_error = true; - var new_func: Module.Fn = undefined; - // Set this here so that the inferred return type can be printed correctly if it appears in an error. - new_func.owner_decl = sema.owner_decl_index; - const new_func_index = try mod.createFunc(new_func); - break :new_func new_func_index; - }; - errdefer if (destroy_fn_on_error) mod.destroyFunc(new_func_index); + const is_source_decl = sema.generic_owner == .none; - const target = mod.getTarget(); - const fn_ty: Type = fn_ty: { - // In the case of generic calling convention, or generic alignment, we use - // default values which are only meaningful for the generic function, *not* - // the instantiation, which can depend on comptime parameters. - // Related proposal: https://github.com/ziglang/zig/issues/11834 - const cc_resolved = cc orelse .Unspecified; - const param_types = try sema.arena.alloc(InternPool.Index, block.params.items.len); - var comptime_bits: u32 = 0; - for (param_types, block.params.items, 0..) 
 ) |*dest_param_ty, param, i| {
-        const is_noalias = blk: {
-            const index = std.math.cast(u5, i) orelse break :blk false;
-            break :blk @as(u1, @truncate(noalias_bits >> index)) != 0;
-        };
-        dest_param_ty.* = param.ty.toIntern();
-        sema.analyzeParameter(
-            block,
-            .unneeded,
-            param,
-            &comptime_bits,
-            i,
-            &is_generic,
-            cc_resolved,
-            has_body,
-            is_noalias,
-        ) catch |err| switch (err) {
-            error.NeededSourceLocation => {
-                const decl = mod.declPtr(block.src_decl);
-                try sema.analyzeParameter(
-                    block,
-                    Module.paramSrc(src_node_offset, mod, decl, i),
-                    param,
-                    &comptime_bits,
-                    i,
-                    &is_generic,
-                    cc_resolved,
-                    has_body,
-                    is_noalias,
-                );
-                unreachable;
-            },
-            else => |e| return e,
-        };
-    }
-
-    var ret_ty_requires_comptime = false;
-    const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: {
-        ret_ty_requires_comptime = ret_comptime;
-        break :rp bare_return_type.isGenericPoison();
-    } else |err| switch (err) {
-        error.GenericPoison => rp: {
-            is_generic = true;
-            break :rp true;
-        },
-        else => |e| return e,
+    // In the case of generic calling convention, or generic alignment, we use
+    // default values which are only meaningful for the generic function, *not*
+    // the instantiation, which can depend on comptime parameters.
+    // Related proposal: https://github.com/ziglang/zig/issues/11834
+    const cc_resolved = cc orelse .Unspecified;
+    var comptime_bits: u32 = 0;
+    for (block.params.items(.ty), block.params.items(.is_comptime), 0..) |param_ty_ip, param_is_comptime, i| {
+        const param_ty = param_ty_ip.toType();
+        const is_noalias = blk: {
+            const index = std.math.cast(u5, i) orelse break :blk false;
+            break :blk @as(u1, @truncate(noalias_bits >> index)) != 0;
         };
-
-    const return_type: Type = if (!inferred_error_set or ret_poison)
-        bare_return_type
-    else blk: {
-        try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
-        const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{
-            .func = new_func_index,
-        });
-        const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index });
-        break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type);
-    };
-
-    if (!return_type.isValidReturnType(mod)) {
-        const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else "";
+        const param_src: LazySrcLoc = .{ .fn_proto_param = .{
+            .decl = block.src_decl,
+            .fn_proto_node_offset = src_node_offset,
+            .param_index = @intCast(i),
+        } };
+        const requires_comptime = try sema.typeRequiresComptime(param_ty);
+        if (param_is_comptime or requires_comptime) {
+            comptime_bits |= @as(u32, 1) << @intCast(i); // TODO: handle cast error
+        }
+        const this_generic = param_ty.isGenericPoison();
+        is_generic = is_generic or this_generic;
+        if (param_is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) {
+            return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
+        }
+        if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved)) {
+            return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc_resolved)});
+        }
+        if (!param_ty.isValidParamType(mod)) {
+            const opaque_str = if (param_ty.zigTypeTag(mod) == .Opaque) "opaque " else "";
             const msg = msg: {
-        const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{
-            opaque_str, return_type.fmt(mod),
+                const msg = try sema.errMsg(block, param_src, "parameter of {s}type '{}' not allowed", .{
+                    opaque_str, param_ty.fmt(mod),
                 });
-        errdefer msg.destroy(gpa);
+                errdefer msg.destroy(sema.gpa);
-        try sema.addDeclaredHereNote(msg, return_type);
+                try sema.addDeclaredHereNote(msg, param_ty);
                 break :msg msg;
             };
             return sema.failWithOwnedErrorMsg(msg);
         }
-    if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and
-        !try sema.validateExternType(return_type, .ret_ty))
-    {
+        if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(param_ty, .param_ty)) {
             const msg = msg: {
-        const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{
-            return_type.fmt(mod), @tagName(cc_resolved),
+                const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
+                    param_ty.fmt(mod), @tagName(cc_resolved),
                 });
-        errdefer msg.destroy(gpa);
+                errdefer msg.destroy(sema.gpa);
 
                 const src_decl = mod.declPtr(block.src_decl);
-        try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty);
+                try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl, mod), param_ty, .param_ty);
 
-        try sema.addDeclaredHereNote(msg, return_type);
+                try sema.addDeclaredHereNote(msg, param_ty);
                 break :msg msg;
             };
             return sema.failWithOwnedErrorMsg(msg);
         }
+        if (is_source_decl and requires_comptime and !param_is_comptime and has_body) {
+            const msg = msg: {
+                const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{
+                    param_ty.fmt(mod),
+                });
+                errdefer msg.destroy(sema.gpa);
-
-    // If the return type is comptime-only but not dependent on parameters then all parameter types also need to be comptime
-    if (!sema.is_generic_instantiation and has_body and ret_ty_requires_comptime) comptime_check: {
-        for (block.params.items) |param| {
-            if (!param.is_comptime) break;
-        } else break :comptime_check;
+                const src_decl = mod.declPtr(block.src_decl);
+                try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl, mod), param_ty);
-        const msg = try sema.errMsg(
-            block,
-            ret_ty_src,
-            "function with comptime-only return type '{}' requires all parameters to be comptime",
-            .{return_type.fmt(mod)},
-        );
-        try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl, mod), return_type);
-
-        const tags = sema.code.instructions.items(.tag);
-        const data = sema.code.instructions.items(.data);
-        const param_body = sema.code.getParamBody(func_inst);
-        for (block.params.items, 0..) |param, i| {
-            if (!param.is_comptime) {
-                const param_index = param_body[i];
-                const param_src = switch (tags[param_index]) {
-                    .param => data[param_index].pl_tok.src(),
-                    .param_anytype => data[param_index].str_tok.src(),
-                    else => unreachable,
-                };
-                if (param.name.len != 0) {
-                    try sema.errNote(block, param_src, msg, "param '{s}' is required to be comptime", .{param.name});
-                } else {
-                    try sema.errNote(block, param_src, msg, "param is required to be comptime", .{});
-                }
-            }
-        }
+                try sema.addDeclaredHereNote(msg, param_ty);
+                break :msg msg;
+            };
             return sema.failWithOwnedErrorMsg(msg);
         }
-
-    const arch = mod.getTarget().cpu.arch;
-    if (switch (cc_resolved) {
-        .Unspecified, .C, .Naked, .Async, .Inline => null,
-        .Interrupt => switch (arch) {
-            .x86, .x86_64, .avr, .msp430 => null,
-            else => @as([]const u8, "x86, x86_64, AVR, and MSP430"),
-        },
-        .Signal => switch (arch) {
-            .avr => null,
-            else => @as([]const u8, "AVR"),
-        },
-        .Stdcall, .Fastcall, .Thiscall => switch (arch) {
-            .x86 => null,
-            else => @as([]const u8, "x86"),
-        },
-        .Vectorcall => switch (arch) {
-            .x86, .aarch64, .aarch64_be, .aarch64_32 => null,
-            else => @as([]const u8, "x86 and AArch64"),
-        },
-        .APCS, .AAPCS, .AAPCSVFP => switch (arch) {
-            .arm, .armeb, .aarch64, .aarch64_be, .aarch64_32, .thumb, .thumbeb => null,
-            else => @as([]const u8, "ARM"),
-        },
-        .SysV, .Win64 => switch (arch) {
-            .x86_64 => null,
-            else => @as([]const u8, "x86_64"),
-        },
-        .Kernel => switch (arch) {
-            .nvptx, .nvptx64, .amdgcn, .spirv32, .spirv64 => null,
-            else => @as([]const u8, "nvptx, amdgcn and SPIR-V"),
-        },
-    }) |allowed_platform| {
-        return sema.fail(block, cc_src, "callconv '{s}' is only available on {s}, not {s}", .{
-            @tagName(cc_resolved),
-            allowed_platform,
-            @tagName(arch),
-        });
+        if (is_source_decl and !this_generic and is_noalias and
+            !(param_ty.zigTypeTag(mod) == .Pointer or param_ty.isPtrLikeOptional(mod)))
+        {
+            return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
         }
+    }
 
-    if (cc_resolved == .Inline and is_noinline) {
-        return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
+    var ret_ty_requires_comptime = false;
+    const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: {
+        ret_ty_requires_comptime = ret_comptime;
+        break :rp bare_return_type.isGenericPoison();
+    } else |err| switch (err) {
+        error.GenericPoison => rp: {
+            is_generic = true;
+            break :rp true;
+        },
+        else => |e| return e,
+    };
+    const final_is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;
+
+    const param_types = block.params.items(.ty);
+
+    if (!is_source_decl) {
+        assert(has_body);
+        assert(!is_generic);
+        assert(comptime_bits == 0);
+        assert(cc != null);
+        assert(section != .generic);
+        assert(address_space != null);
+        assert(!var_args);
+        if (inferred_error_set) {
+            try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
         }
-    if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;
-    is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime;
-
-    if (!is_generic and sema.wantErrorReturnTracing(return_type)) {
-        // Make sure that StackTrace's fields are resolved so that the backend can
-        // lower this fn type.
-        const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
-        _ = try sema.resolveTypeFields(unresolved_stack_trace_ty);
-    }
-
-    break :fn_ty try mod.funcType(.{
+        const func_index = try ip.getFuncInstance(gpa, .{
             .param_types = param_types,
             .noalias_bits = noalias_bits,
-        .comptime_bits = comptime_bits,
-        .return_type = return_type.toIntern(),
+            .bare_return_type = bare_return_type.toIntern(),
             .cc = cc_resolved,
-        .cc_is_generic = cc == null,
-        .alignment = alignment orelse .none,
-        .align_is_generic = alignment == null,
-        .section_is_generic = section == .generic,
-        .addrspace_is_generic = address_space == null,
-        .is_var_args = var_args,
-        .is_generic = is_generic,
+            .alignment = alignment.?,
+            .section = switch (section) {
+                .generic => unreachable,
+                .default => .none,
+                .explicit => |x| x.toOptional(),
+            },
             .is_noinline = is_noinline,
+            .inferred_error_set = inferred_error_set,
+            .generic_owner = sema.generic_owner,
+            .comptime_args = sema.comptime_args,
+            .generation = mod.generation,
         });
-    };
+        return finishFunc(
+            sema,
+            block,
+            func_index,
+            .none,
+            ret_poison,
+            bare_return_type,
+            ret_ty_src,
+            cc_resolved,
+            is_source_decl,
+            ret_ty_requires_comptime,
+            func_inst,
+            cc_src,
+            is_noinline,
+            is_generic,
+            final_is_generic,
+        );
+    }
+
+    // extern_func and func_decl functions take ownership of `sema.owner_decl`.
     sema.owner_decl.@"linksection" = switch (section) {
         .generic => .none,
         .default => .none,
@@ -9229,9 +8885,73 @@ fn funcCommon(
     sema.owner_decl.alignment = alignment orelse .none;
     sema.owner_decl.@"addrspace" = address_space orelse .generic;
 
+    if (inferred_error_set) {
+        assert(!is_extern);
+        assert(has_body);
+        if (!ret_poison)
+            try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
+        const func_index = try ip.getFuncDeclIes(gpa, .{
+            .owner_decl = sema.owner_decl_index,
+
+            .param_types = param_types,
+            .noalias_bits = noalias_bits,
+            .comptime_bits = comptime_bits,
+            .bare_return_type = bare_return_type.toIntern(),
+            .cc = cc,
+            .alignment = alignment,
+            .section_is_generic = section == .generic,
+            .addrspace_is_generic = address_space == null,
+            .is_var_args = var_args,
+            .is_generic = final_is_generic,
+            .is_noinline = is_noinline,
+
+            .zir_body_inst = func_inst,
+            .lbrace_line = src_locs.lbrace_line,
+            .rbrace_line = src_locs.rbrace_line,
+            .lbrace_column = @as(u16, @truncate(src_locs.columns)),
+            .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
+        });
+        return finishFunc(
+            sema,
+            block,
+            func_index,
+            .none,
+            ret_poison,
+            bare_return_type,
+            ret_ty_src,
+            cc_resolved,
+            is_source_decl,
+            ret_ty_requires_comptime,
+            func_inst,
+            cc_src,
+            is_noinline,
+            is_generic,
+            final_is_generic,
+        );
+    }
+
+    const func_ty = try ip.getFuncType(gpa, .{
+        .param_types = param_types,
+        .noalias_bits = noalias_bits,
+        .comptime_bits = comptime_bits,
+        .return_type = bare_return_type.toIntern(),
+        .cc = cc,
+        .alignment = alignment,
+        .section_is_generic = section == .generic,
+        .addrspace_is_generic = address_space == null,
+        .is_var_args = var_args,
+        .is_generic = final_is_generic,
+        .is_noinline = is_noinline,
+    });
+
     if (is_extern) {
-        return sema.addConstant((try mod.intern(.{ .extern_func = .{
-            .ty = fn_ty.toIntern(),
+        assert(comptime_bits == 0);
+        assert(cc != null);
+        assert(section != .generic);
+        assert(address_space != null);
+        assert(!is_generic);
+        const func_index = try ip.getExternFunc(gpa, .{
+            .ty = func_ty,
             .decl = sema.owner_decl_index,
             .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString(
                 gpa,
@@ -9239,129 +8959,241 @@ fn funcCommon(
                 .node_offset_lib_name = src_node_offset,
             }, lib_name),
             )).toOptional() else .none,
-        } })).toValue());
+        });
+        return finishFunc(
+            sema,
+            block,
+            func_index,
+            func_ty,
+            ret_poison,
+            bare_return_type,
+            ret_ty_src,
+            cc_resolved,
+            is_source_decl,
+            ret_ty_requires_comptime,
+            func_inst,
+            cc_src,
+            is_noinline,
+            is_generic,
+            final_is_generic,
+        );
     }
 
-    if (!has_body) {
-        return sema.addType(fn_ty);
+    if (has_body) {
+        const func_index = try ip.getFuncDecl(gpa, .{
+            .owner_decl = sema.owner_decl_index,
+            .ty = func_ty,
+            .cc = cc,
+            .is_noinline = is_noinline,
+            .zir_body_inst = func_inst,
+            .lbrace_line = src_locs.lbrace_line,
+            .rbrace_line = src_locs.rbrace_line,
+            .lbrace_column = @as(u16, @truncate(src_locs.columns)),
+            .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
+        });
+        return finishFunc(
+            sema,
+            block,
+            func_index,
+            func_ty,
+            ret_poison,
+            bare_return_type,
+            ret_ty_src,
+            cc_resolved,
+            is_source_decl,
+            ret_ty_requires_comptime,
+            func_inst,
+            cc_src,
+            is_noinline,
+            is_generic,
+            final_is_generic,
+        );
     }
 
-    const is_inline = fn_ty.fnCallingConvention(mod) == .Inline;
-    const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .none;
-
-    const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == func_inst) blk: {
-        break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr;
-    } else null;
-
-    const new_func = mod.funcPtr(new_func_index);
-    const hash = new_func.hash;
-    const generic_owner_decl = if (comptime_args == null) .none else new_func.generic_owner_decl;
-    new_func.* = .{
-        .state = anal_state,
-        .zir_body_inst = func_inst,
-        .owner_decl = sema.owner_decl_index,
-        .generic_owner_decl = generic_owner_decl,
-        .comptime_args = comptime_args,
-        .hash = hash,
-        .lbrace_line = src_locs.lbrace_line,
-        .rbrace_line = src_locs.rbrace_line,
-        .lbrace_column = @as(u16, @truncate(src_locs.columns)),
-        .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
-        .branch_quota = default_branch_quota,
-        .is_noinline = is_noinline,
-    };
-    return sema.addConstant((try mod.intern(.{ .func = .{
-        .ty = fn_ty.toIntern(),
-        .index = new_func_index,
-    } })).toValue());
+    return finishFunc(
+        sema,
+        block,
+        .none,
+        func_ty,
+        ret_poison,
+        bare_return_type,
+        ret_ty_src,
+        cc_resolved,
+        is_source_decl,
+        ret_ty_requires_comptime,
+        func_inst,
+        cc_src,
+        is_noinline,
+        is_generic,
+        final_is_generic,
+    );
 }
 
-fn analyzeParameter(
+fn finishFunc(
     sema: *Sema,
     block: *Block,
-    param_src: LazySrcLoc,
-    param: Block.Param,
-    comptime_bits: *u32,
-    i: usize,
-    is_generic: *bool,
-    cc: std.builtin.CallingConvention,
-    has_body: bool,
-    is_noalias: bool,
-) !void {
+    opt_func_index: InternPool.Index,
+    func_ty: InternPool.Index,
+    ret_poison: bool,
+    bare_return_type: Type,
+    ret_ty_src: LazySrcLoc,
+    cc_resolved: std.builtin.CallingConvention,
+    is_source_decl: bool,
+    ret_ty_requires_comptime: bool,
+    func_inst: Zir.Inst.Index,
+    cc_src: LazySrcLoc,
+    is_noinline: bool,
+    is_generic: bool,
+    final_is_generic: bool,
+) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
-    const requires_comptime = try sema.typeRequiresComptime(param.ty);
-    if (param.is_comptime or requires_comptime) {
-        comptime_bits.* |= @as(u32, 1) << @as(u5, @intCast(i)); // TODO: handle cast error
-    }
-    const this_generic = param.ty.isGenericPoison();
-    is_generic.* = is_generic.* or this_generic;
+    const ip = &mod.intern_pool;
+    const gpa = sema.gpa;
     const target = mod.getTarget();
-    if (param.is_comptime and !target_util.fnCallConvAllowsZigTypes(target, cc)) {
-        return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
-    }
-    if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc)) {
-        return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)});
-    }
-    if (!param.ty.isValidParamType(mod)) {
-        const opaque_str = if (param.ty.zigTypeTag(mod) == .Opaque) "opaque " else "";
-        const msg = msg: {
-            const msg = try sema.errMsg(block, param_src, "parameter of {s}type '{}' not allowed", .{
-                opaque_str, param.ty.fmt(mod),
-            });
-            errdefer msg.destroy(sema.gpa);
-            try sema.addDeclaredHereNote(msg, param.ty);
+
+    const return_type: Type = if (opt_func_index == .none or ret_poison)
+        bare_return_type
+    else
+        ip.funcTypeReturnType(ip.typeOf(opt_func_index)).toType();
+
+    if (!return_type.isValidReturnType(mod)) {
+        const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else "";
+        const msg = msg: {
+            const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{
+                opaque_str, return_type.fmt(mod),
+            });
+            errdefer msg.destroy(gpa);
+
+            try sema.addDeclaredHereNote(msg, return_type);
             break :msg msg;
         };
         return sema.failWithOwnedErrorMsg(msg);
     }
-    if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) {
-        const msg = msg: {
-            const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{
-                param.ty.fmt(mod), @tagName(cc),
-            });
-            errdefer msg.destroy(sema.gpa);
-
-            const src_decl = mod.declPtr(block.src_decl);
-            try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl, mod), param.ty, .param_ty);
-
-            try sema.addDeclaredHereNote(msg, param.ty);
-            break :msg msg;
-        };
-        return sema.failWithOwnedErrorMsg(msg);
-    }
-    if (!sema.is_generic_instantiation and requires_comptime and !param.is_comptime and has_body) {
-        const msg = msg: {
-            const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{
-                param.ty.fmt(mod),
-            });
-            errdefer msg.destroy(sema.gpa);
-
-            const src_decl = mod.declPtr(block.src_decl);
-            try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl, mod), param.ty);
-
-            try sema.addDeclaredHereNote(msg, param.ty);
-            break :msg msg;
-        };
-        return sema.failWithOwnedErrorMsg(msg);
-    }
-    if (!sema.is_generic_instantiation and !this_generic and is_noalias and
-        !(param.ty.zigTypeTag(mod) == .Pointer or param.ty.isPtrLikeOptional(mod)))
+    if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and
+        !try sema.validateExternType(return_type, .ret_ty))
     {
-        return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{});
+        const msg = msg: {
+            const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{
+                return_type.fmt(mod), @tagName(cc_resolved),
+            });
+            errdefer msg.destroy(gpa);
+
+            const src_decl = mod.declPtr(block.src_decl);
+            try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty);
+
+            try sema.addDeclaredHereNote(msg, return_type);
+            break :msg msg;
+        };
+        return sema.failWithOwnedErrorMsg(msg);
     }
+
+    // If the return type is comptime-only but not dependent on parameters then
+    // all parameter types also need to be comptime.
+    if (is_source_decl and opt_func_index != .none and ret_ty_requires_comptime) comptime_check: {
+        for (block.params.items(.is_comptime)) |is_comptime| {
+            if (!is_comptime) break;
+        } else break :comptime_check;
+
+        const msg = try sema.errMsg(
+            block,
+            ret_ty_src,
+            "function with comptime-only return type '{}' requires all parameters to be comptime",
+            .{return_type.fmt(mod)},
+        );
+        try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl, mod), return_type);
+
+        const tags = sema.code.instructions.items(.tag);
+        const data = sema.code.instructions.items(.data);
+        const param_body = sema.code.getParamBody(func_inst);
+        for (
+            block.params.items(.is_comptime),
+            block.params.items(.name),
+            param_body[0..block.params.len],
+        ) |is_comptime, name_nts, param_index| {
+            if (!is_comptime) {
+                const param_src = switch (tags[param_index]) {
+                    .param => data[param_index].pl_tok.src(),
+                    .param_anytype => data[param_index].str_tok.src(),
+                    else => unreachable,
+                };
+                const name = sema.code.nullTerminatedString2(name_nts);
+                if (name.len != 0) {
+                    try sema.errNote(block, param_src, msg, "param '{s}' is required to be comptime", .{name});
+                } else {
+                    try sema.errNote(block, param_src, msg, "param is required to be comptime", .{});
+                }
+            }
+        }
+        return sema.failWithOwnedErrorMsg(msg);
+    }
+
+    const arch = target.cpu.arch;
+    if (switch (cc_resolved) {
+        .Unspecified, .C, .Naked, .Async, .Inline => null,
+        .Interrupt => switch (arch) {
+            .x86, .x86_64, .avr, .msp430 => null,
+            else => @as([]const u8, "x86, x86_64, AVR, and MSP430"),
+        },
+        .Signal => switch (arch) {
+            .avr => null,
+            else => @as([]const u8, "AVR"),
+        },
+        .Stdcall, .Fastcall, .Thiscall => switch (arch) {
+            .x86 => null,
+            else => @as([]const u8, "x86"),
+        },
+        .Vectorcall => switch (arch) {
+            .x86, .aarch64, .aarch64_be, .aarch64_32 => null,
+            else => @as([]const u8, "x86 and AArch64"),
+        },
+        .APCS, .AAPCS, .AAPCSVFP => switch (arch) {
+            .arm, .armeb, .aarch64, .aarch64_be, .aarch64_32, .thumb, .thumbeb => null,
+            else => @as([]const u8, "ARM"),
+        },
+        .SysV, .Win64 => switch (arch) {
+            .x86_64 => null,
+            else => @as([]const u8, "x86_64"),
+        },
+        .Kernel => switch (arch) {
+            .nvptx, .nvptx64, .amdgcn, .spirv32, .spirv64 => null,
+            else => @as([]const u8, "nvptx, amdgcn and SPIR-V"),
+        },
+    }) |allowed_platform| {
+        return sema.fail(block, cc_src, "callconv '{s}' is only available on {s}, not {s}", .{
+            @tagName(cc_resolved),
+            allowed_platform,
+            @tagName(arch),
+        });
+    }
+
+    if (cc_resolved == .Inline and is_noinline) {
+        return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{});
+    }
+    if (is_generic and sema.no_partial_func_ty) return error.GenericPoison;
+
+    if (!final_is_generic and sema.wantErrorReturnTracing(return_type)) {
+        // Make sure that StackTrace's fields are resolved so that the backend can
+        // lower this fn type.
+        const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
+        _ = try sema.resolveTypeFields(unresolved_stack_trace_ty);
+    }
+
+    return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty);
 }
 
 fn zirParam(
     sema: *Sema,
     block: *Block,
     inst: Zir.Inst.Index,
+    param_index: u32,
     comptime_syntax: bool,
 ) CompileError!void {
+    const mod = sema.mod;
+    const gpa = sema.gpa;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_tok;
     const src = inst_data.src();
     const extra = sema.code.extraData(Zir.Inst.Param, inst_data.payload_index);
-    const param_name = sema.code.nullTerminatedString(extra.data.name);
+    const param_name: Zir.NullTerminatedString = @enumFromInt(extra.data.name);
     const body = sema.code.extra[extra.end..][0..extra.data.body_len];
 
     // We could be in a generic function instantiation, or we could be evaluating a generic
@@ -9370,16 +9202,21 @@ fn zirParam(
     const err = err: {
         // Make sure any nested param instructions don't clobber our work.
         const prev_params = block.params;
-        const prev_preallocated_new_func = sema.preallocated_new_func;
         const prev_no_partial_func_type = sema.no_partial_func_ty;
+        const prev_generic_owner = sema.generic_owner;
+        const prev_generic_call_src = sema.generic_call_src;
+        const prev_generic_call_decl = sema.generic_call_decl;
         block.params = .{};
-        sema.preallocated_new_func = .none;
         sema.no_partial_func_ty = true;
+        sema.generic_owner = .none;
+        sema.generic_call_src = .unneeded;
+        sema.generic_call_decl = .none;
         defer {
-            block.params.deinit(sema.gpa);
             block.params = prev_params;
-            sema.preallocated_new_func = prev_preallocated_new_func;
             sema.no_partial_func_ty = prev_no_partial_func_type;
+            sema.generic_owner = prev_generic_owner;
+            sema.generic_call_src = prev_generic_call_src;
+            sema.generic_call_decl = prev_generic_call_decl;
         }
 
         if (sema.resolveBody(block, body, inst)) |param_ty_inst| {
@@ -9390,7 +9227,7 @@ fn zirParam(
     };
     switch (err) {
         error.GenericPoison => {
-            if (sema.inst_map.get(inst)) |_| {
+            if (sema.inst_map.contains(inst)) {
                 // A generic function is about to evaluate to another generic function.
                 // Return an error instead.
                 return error.GenericPoison;
             }
             // The type is not available until the generic instantiation.
             // We resolve the param instruction with a poison value and
             // insert an anytype parameter.
-            try block.params.append(sema.gpa, .{
-                .ty = Type.generic_poison,
+            try block.params.append(sema.arena, .{
+                .ty = .generic_poison_type,
                 .is_comptime = comptime_syntax,
                 .name = param_name,
             });
@@ -9409,9 +9246,10 @@ fn zirParam(
         else => |e| return e,
    }
    };
+
    const is_comptime = sema.typeRequiresComptime(param_ty) catch |err| switch (err) {
        error.GenericPoison => {
-            if (sema.inst_map.get(inst)) |_| {
+            if (sema.inst_map.contains(inst)) {
                // A generic function is about to evaluate to another generic function.
                // Return an error instead.
                return error.GenericPoison;
            }
            // The type is not available until the generic instantiation.
            // We resolve the param instruction with a poison value and
            // insert an anytype parameter.
-            try block.params.append(sema.gpa, .{
-                .ty = Type.generic_poison,
+            try block.params.append(sema.arena, .{
+                .ty = .generic_poison_type,
                 .is_comptime = comptime_syntax,
                 .name = param_name,
             });
@@ -9429,8 +9267,9 @@ fn zirParam(
         },
         else => |e| return e,
     } or comptime_syntax;
+
     if (sema.inst_map.get(inst)) |arg| {
-        if (is_comptime and sema.preallocated_new_func != .none) {
+        if (is_comptime and sema.generic_owner != .none) {
             // We have a comptime value for this parameter so it should be elided from the
             // function type of the function instruction in this block.
             const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) {
                 // have the callee source location return `GenericPoison`
                 // so that the instantiation is failed and the coercion
                 // is handled by comptime call logic instead.
-                    assert(sema.is_generic_instantiation);
+                    assert(sema.generic_owner != .none);
                     return error.GenericPoison;
                 },
-                else => return err,
+                else => |e| return e,
             };
             sema.inst_map.putAssumeCapacity(inst, coerced_arg);
-            return;
+            if (try sema.resolveMaybeUndefVal(coerced_arg)) |val| {
+                sema.comptime_args[param_index] = val.toIntern();
+                return;
+            }
+            const arg_src: LazySrcLoc = if (sema.generic_call_src == .node_offset) .{ .call_arg = .{
+                .decl = sema.generic_call_decl.unwrap().?,
+                .call_node_offset = sema.generic_call_src.node_offset.x,
+                .arg_index = param_index,
+            } } else src;
+            const msg = msg: {
+                const src_loc = arg_src.toSrcLoc(mod.declPtr(block.src_decl), mod);
+                const msg = try Module.ErrorMsg.create(gpa, src_loc, "{s}", .{
+                    @as([]const u8, "runtime-known argument passed to comptime parameter"),
+                });
+                errdefer msg.destroy(gpa);
+
+                if (sema.generic_call_decl != .none) {
+                    try sema.errNote(block, src, msg, "{s}", .{@as([]const u8, "declared comptime here")});
+                }
+                break :msg msg;
+            };
+            return sema.failWithOwnedErrorMsg(msg);
         }
         // Even though a comptime argument is provided, the generic function wants to treat
         // this as a runtime parameter.
         assert(sema.inst_map.remove(inst));
     }
 
-    if (sema.preallocated_new_func != .none) {
+    if (sema.generic_owner != .none) {
         if (try sema.typeHasOnePossibleValue(param_ty)) |opv| {
             // In this case we are instantiating a generic function call with a non-comptime
             // non-anytype parameter that ended up being a one-possible-type.
             // We don't want the parameter to be part of the instantiated function type.
-            const result = try sema.addConstant(opv);
-            sema.inst_map.putAssumeCapacity(inst, result);
+            sema.inst_map.putAssumeCapacity(inst, Air.internedToRef(opv.toIntern()));
+            sema.comptime_args[param_index] = opv.toIntern();
             return;
         }
     }
 
-    try block.params.append(sema.gpa, .{
-        .ty = param_ty,
+    try block.params.append(sema.arena, .{
+        .ty = param_ty.toIntern(),
         .is_comptime = comptime_syntax,
         .name = param_name,
     });
@@ -9473,17 +9333,15 @@ fn zirParam(
     if (is_comptime) {
         // If this is a comptime parameter we can add a constant generic_poison
         // since this is also a generic parameter.
-        const result = try sema.addConstant(Value.generic_poison);
-        sema.inst_map.putAssumeCapacityNoClobber(inst, result);
+        sema.inst_map.putAssumeCapacityNoClobber(inst, .generic_poison);
     } else {
         // Otherwise we need a dummy runtime instruction.
-        const result_index = @as(Air.Inst.Index, @intCast(sema.air_instructions.len));
+        const result_index: Air.Inst.Index = @intCast(sema.air_instructions.len);
         try sema.air_instructions.append(sema.gpa, .{
             .tag = .alloc,
             .data = .{ .ty = param_ty },
         });
-        const result = Air.indexToRef(result_index);
-        sema.inst_map.putAssumeCapacityNoClobber(inst, result);
+        sema.inst_map.putAssumeCapacityNoClobber(inst, Air.indexToRef(result_index));
     }
 }
 
@@ -9491,24 +9349,76 @@ fn zirParamAnytype(
     sema: *Sema,
     block: *Block,
     inst: Zir.Inst.Index,
+    param_index: u32,
     comptime_syntax: bool,
 ) CompileError!void {
+    const mod = sema.mod;
+    const gpa = sema.gpa;
     const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
-    const param_name = inst_data.get(sema.code);
+    const param_name: Zir.NullTerminatedString = @enumFromInt(inst_data.start);
+    const src = inst_data.src();
 
     if (sema.inst_map.get(inst)) |air_ref| {
         const param_ty = sema.typeOf(air_ref);
-        if (comptime_syntax or try sema.typeRequiresComptime(param_ty)) {
-            // We have a comptime value for this parameter so it should be elided from the
-            // function type of the function instruction in this block.
+        // If we have a comptime value for this parameter, it should be elided
+        // from the function type of the function instruction in this block.
+        if (try sema.typeHasOnePossibleValue(param_ty)) |opv| {
+            sema.comptime_args[param_index] = opv.toIntern();
             return;
         }
-        if (null != try sema.typeHasOnePossibleValue(param_ty)) {
-            return;
+        const arg_src: LazySrcLoc = if (sema.generic_call_src == .node_offset) .{ .call_arg = .{
+            .decl = sema.generic_call_decl.unwrap().?,
+            .call_node_offset = sema.generic_call_src.node_offset.x,
+            .arg_index = param_index,
+        } } else src;
+
+        if (comptime_syntax) {
+            if (try sema.resolveMaybeUndefVal(air_ref)) |val| {
+                sema.comptime_args[param_index] = val.toIntern();
+                return;
+            }
+            const msg = msg: {
+                const src_loc = arg_src.toSrcLoc(mod.declPtr(block.src_decl), mod);
+                const msg = try Module.ErrorMsg.create(gpa, src_loc, "{s}", .{
+                    @as([]const u8, "runtime-known argument passed to comptime parameter"),
+                });
+                errdefer msg.destroy(gpa);
+
+                if (sema.generic_call_decl != .none) {
+                    try sema.errNote(block, src, msg, "{s}", .{@as([]const u8, "declared comptime here")});
+                }
+                break :msg msg;
+            };
+            return sema.failWithOwnedErrorMsg(msg);
         }
+
+        if (try sema.typeRequiresComptime(param_ty)) {
+            if (try sema.resolveMaybeUndefVal(air_ref)) |val| {
+                sema.comptime_args[param_index] = val.toIntern();
+                return;
+            }
+            const msg = msg: {
+                const src_loc = arg_src.toSrcLoc(mod.declPtr(block.src_decl), mod);
+                const msg = try Module.ErrorMsg.create(gpa, src_loc, "{s}", .{
+                    @as([]const u8, "runtime-known argument passed to comptime-only type parameter"),
+                });
+                errdefer msg.destroy(gpa);
+
+                if (sema.generic_call_decl != .none) {
+                    try sema.errNote(block, src, msg, "{s}", .{@as([]const u8, "declared here")});
+                }
+
+                try sema.explainWhyTypeIsComptime(msg, src_loc, param_ty);
+
+                break :msg msg;
+            };
+            return sema.failWithOwnedErrorMsg(msg);
+        }
+
+        // The parameter is runtime-known.
         // The map is already populated but we do need to add a runtime parameter.
-        try block.params.append(sema.gpa, .{
-            .ty = param_ty,
+        try block.params.append(sema.arena, .{
+            .ty = param_ty.toIntern(),
             .is_comptime = false,
             .name = param_name,
         });
@@ -9517,8 +9427,8 @@ fn zirParamAnytype(
 
     // We are evaluating a generic function without any comptime args provided.
-    try block.params.append(sema.gpa, .{
-        .ty = Type.generic_poison,
+    try block.params.append(sema.arena, .{
+        .ty = .generic_poison_type,
         .is_comptime = comptime_syntax,
         .name = param_name,
     });
@@ -10673,7 +10583,7 @@ const SwitchProngAnalysis = struct {
                 return sema.bitCast(block, item_ty, spa.operand, operand_src, null);
             }
 
-            var names: Module.Fn.InferredErrorSet.NameMap = .{};
+            var names: InferredErrorSet.NameMap = .{};
             try names.ensureUnusedCapacity(sema.arena, case_vals.len);
             for (case_vals) |err| {
                 const err_val = sema.resolveConstValue(block, .unneeded, err, "") catch unreachable;
@@ -11041,97 +10951,100 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
                 }
             }
 
-            try sema.resolveInferredErrorSetTy(block, src, operand_ty);
-
-            if (operand_ty.isAnyError(mod)) {
-                if (special_prong != .@"else") {
-                    return sema.fail(
-                        block,
-                        src,
-                        "else prong required when switching on type 'anyerror'",
-                        .{},
-                    );
-                }
-                else_error_ty = Type.anyerror;
-            } else else_validation: {
-                var maybe_msg: ?*Module.ErrorMsg = null;
-                errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
-
-                for (operand_ty.errorSetNames(mod)) |error_name| {
-                    if (!seen_errors.contains(error_name) and special_prong != .@"else") {
-                        const msg = maybe_msg orelse blk: {
-                            maybe_msg = try sema.errMsg(
-                                block,
-                                src,
-                                "switch must handle all possibilities",
-                                .{},
-                            );
-                            break :blk maybe_msg.?;
-                        };
-
-                        try sema.errNote(
+            switch (try sema.resolveInferredErrorSetTy(block, src, operand_ty.toIntern())) {
+                .anyerror_type => {
+                    if (special_prong != .@"else") {
+                        return sema.fail(
                             block,
                             src,
-                            msg,
-                            "unhandled error value: 'error.{}'",
-                            .{error_name.fmt(ip)},
+                            "else prong required when switching on type 'anyerror'",
+                            .{},
                         );
                     }
-                }
+                    else_error_ty = Type.anyerror;
+                },
+                else => |err_set_ty_index| else_validation: {
+                    const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names;
+                    var maybe_msg: ?*Module.ErrorMsg = null;
+                    errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
 
-                if (maybe_msg) |msg| {
-                    maybe_msg = null;
-                    try sema.addDeclaredHereNote(msg, operand_ty);
-                    return sema.failWithOwnedErrorMsg(msg);
-                }
+                    for (error_names.get(ip)) |error_name| {
+                        if (!seen_errors.contains(error_name) and special_prong != .@"else") {
+                            const msg = maybe_msg orelse blk: {
+                                maybe_msg = try sema.errMsg(
+                                    block,
+                                    src,
+                                    "switch must handle all possibilities",
+                                    .{},
+                                );
+                                break :blk maybe_msg.?;
                            };
 
-                if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames(mod).len) {
-                    // In order to enable common patterns for generic code allow simple else bodies
-                    // else => unreachable,
-                    // else => return,
-                    // else => |e| return e,
-                    // even if all the possible errors were already handled.
-                    const tags = sema.code.instructions.items(.tag);
-                    for (special.body) |else_inst| switch (tags[else_inst]) {
-                        .dbg_block_begin,
-                        .dbg_block_end,
-                        .dbg_stmt,
-                        .dbg_var_val,
-                        .ret_type,
-                        .as_node,
-                        .ret_node,
-                        .@"unreachable",
-                        .@"defer",
-                        .defer_err_code,
-                        .err_union_code,
-                        .ret_err_value_code,
-                        .restore_err_ret_index,
-                        .is_non_err,
-                        .ret_is_non_err,
-                        .condbr,
-                        => {},
-                        else => break,
-                    } else break :else_validation;
+                            try sema.errNote(
+                                block,
+                                src,
+                                msg,
+                                "unhandled error value: 'error.{}'",
+                                .{error_name.fmt(ip)},
+                            );
+                        }
+                    }
 
-                    return sema.fail(
-                        block,
-                        special_prong_src,
-                        "unreachable else prong; all cases already handled",
-                        .{},
-                    );
-                }
+                    if (maybe_msg) |msg| {
+                        maybe_msg = null;
+                        try sema.addDeclaredHereNote(msg, operand_ty);
+                        return sema.failWithOwnedErrorMsg(msg);
+                    }
 
-                const error_names = operand_ty.errorSetNames(mod);
-                var names: Module.Fn.InferredErrorSet.NameMap = .{};
-                try names.ensureUnusedCapacity(sema.arena, error_names.len);
-                for (error_names) |error_name| {
-                    if (seen_errors.contains(error_name)) continue;
+                    if (special_prong == .@"else" and
+                        seen_errors.count() == error_names.len)
+                    {
+                        // In order to enable common patterns for generic code, allow simple else bodies
+                        // else => unreachable,
+                        // else => return,
+                        // else => |e| return e,
+                        // even if all the possible errors were already handled.
+                        const tags = sema.code.instructions.items(.tag);
+                        for (special.body) |else_inst| switch (tags[else_inst]) {
+                            .dbg_block_begin,
+                            .dbg_block_end,
+                            .dbg_stmt,
+                            .dbg_var_val,
+                            .ret_type,
+                            .as_node,
+                            .ret_node,
+                            .@"unreachable",
+                            .@"defer",
+                            .defer_err_code,
+                            .err_union_code,
+                            .ret_err_value_code,
+                            .restore_err_ret_index,
+                            .is_non_err,
+                            .ret_is_non_err,
+                            .condbr,
+                            => {},
+                            else => break,
+                        } else break :else_validation;
 
-                    names.putAssumeCapacityNoClobber(error_name, {});
-                }
-                // No need to keep the hash map metadata correct; here we
-                // extract the (sorted) keys only.
-                else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
+                        return sema.fail(
+                            block,
+                            special_prong_src,
+                            "unreachable else prong; all cases already handled",
+                            .{},
+                        );
+                    }
+
+                    var names: InferredErrorSet.NameMap = .{};
+                    try names.ensureUnusedCapacity(sema.arena, error_names.len);
+                    for (error_names.get(ip)) |error_name| {
+                        if (seen_errors.contains(error_name)) continue;
+
+                        names.putAssumeCapacityNoClobber(error_name, {});
+                    }
+                    // No need to keep the hash map metadata correct; here we
+                    // extract the (sorted) keys only.
+                    else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
+                },
             }
         },
         .Int, .ComptimeInt => {
@@ -16295,6 +16208,7 @@ fn zirClosureCapture(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
 
 fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     const inst_data = sema.code.instructions.items(.data)[inst].inst_node;
     var scope: *CaptureScope = mod.declPtr(block.src_decl).src_scope.?;
     // Note: The target closure must be in this scope list.
@@ -16305,8 +16219,8 @@ fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
 
     // Fail this decl if a scope it depended on failed.
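+    // A failed capture scope poisons whatever is currently being analyzed:
+    // either the owner function's analysis state, which now lives in the
+    // InternPool and is reached via funcAnalysis, or else the owner Decl.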
     if (scope.failed()) {
-        if (sema.owner_func) |owner_func| {
-            owner_func.state = .dependency_failure;
+        if (sema.owner_func_index != .none) {
+            ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
         } else {
             sema.owner_decl.analysis = .dependency_failure;
         }
@@ -16423,8 +16337,8 @@ fn zirBuiltinSrc(
     const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
-    const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{});
-    const fn_owner_decl = mod.declPtr(func.owner_decl);
+    if (sema.func_index == .none) return sema.fail(block, src, "@src outside function", .{});
+    const fn_owner_decl = mod.funcOwnerDeclPtr(sema.func_index);
 
     const func_name_val = blk: {
         var anon_decl = try block.startAnonDecl();
@@ -16548,10 +16462,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const param_info_decl = mod.declPtr(param_info_decl_index);
             const param_info_ty = param_info_decl.val.toType();
 
-            const param_vals = try sema.arena.alloc(InternPool.Index, mod.typeToFunc(ty).?.param_types.len);
+            const func_ty_info = mod.typeToFunc(ty).?;
+            const param_vals = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);
             for (param_vals, 0..) |*param_val, i| {
-                const info = mod.typeToFunc(ty).?;
-                const param_ty = info.param_types[i];
+                const param_ty = func_ty_info.param_types.get(ip)[i];
                 const is_generic = param_ty == .generic_poison_type;
                 const param_ty_val = try ip.get(gpa, .{ .opt = .{
                     .ty = try ip.get(gpa, .{ .opt_type = .type_type }),
@@ -16560,7 +16474,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
                 const is_noalias = blk: {
                     const index = std.math.cast(u5, i) orelse break :blk false;
-                    break :blk @as(u1, @truncate(info.noalias_bits >> index)) != 0;
+                    break :blk @as(u1, @truncate(func_ty_info.noalias_bits >> index)) != 0;
                 };
 
                 const param_fields = .{
@@ -16603,23 +16517,25 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 } });
             };
 
-            const info = mod.typeToFunc(ty).?;
             const ret_ty_opt = try mod.intern(.{ .opt = .{
                 .ty = try ip.get(gpa, .{ .opt_type = .type_type }),
-                .val = if (info.return_type == .generic_poison_type) .none else info.return_type,
+                .val = if (func_ty_info.return_type == .generic_poison_type)
+                    .none
+                else
+                    func_ty_info.return_type,
             } });
 
             const callconv_ty = try sema.getBuiltinType("CallingConvention");
 
             const field_values = .{
                 // calling_convention: CallingConvention,
-                (try mod.enumValueFieldIndex(callconv_ty, @intFromEnum(info.cc))).toIntern(),
+                (try mod.enumValueFieldIndex(callconv_ty, @intFromEnum(func_ty_info.cc))).toIntern(),
                 // alignment: comptime_int,
                 (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).toIntern(),
                 // is_generic: bool,
-                Value.makeBool(info.is_generic).toIntern(),
+                Value.makeBool(func_ty_info.is_generic).toIntern(),
                 // is_var_args: bool,
-                Value.makeBool(info.is_var_args).toIntern(),
+                Value.makeBool(func_ty_info.is_var_args).toIntern(),
                 // return_type: ?type,
                 ret_ty_opt,
                 // args: []const Fn.Param,
@@ -16860,50 +16776,51 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
             try sema.queueFullTypeResolution(error_field_ty);
 
-            // If the error set is inferred it must be resolved at this point
-            try sema.resolveInferredErrorSetTy(block, src, ty);
-
             // Build our list of Error values
             // Optional value is only null if anyerror
             // Value can be a zero-length slice otherwise
-            const error_field_vals = if (ty.isAnyError(mod)) null else blk: {
-                const vals = try sema.arena.alloc(InternPool.Index, ty.errorSetNames(mod).len);
-                for (vals, 0..) |*field_val, i| {
-                    // TODO: write something like getCoercedInts to avoid needing to dupe
-                    const name = try sema.arena.dupe(u8, ip.stringToSlice(ty.errorSetNames(mod)[i]));
-                    const name_val = v: {
-                        var anon_decl = try block.startAnonDecl();
-                        defer anon_decl.deinit();
-                        const new_decl_ty = try mod.arrayType(.{
-                            .len = name.len,
-                            .child = .u8_type,
-                        });
-                        const new_decl = try anon_decl.finish(
-                            new_decl_ty,
-                            (try mod.intern(.{ .aggregate = .{
-                                .ty = new_decl_ty.toIntern(),
-                                .storage = .{ .bytes = name },
-                            } })).toValue(),
-                            .none, // default alignment
-                        );
-                        break :v try mod.intern(.{ .ptr = .{
-                            .ty = .slice_const_u8_type,
-                            .addr = .{ .decl = new_decl },
-                            .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
+            const error_field_vals = switch (try sema.resolveInferredErrorSetTy(block, src, ty.toIntern())) {
+                .anyerror_type => null,
+                else => |err_set_ty_index| blk: {
+                    const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
+                    const vals = try sema.arena.alloc(InternPool.Index, names.len);
+                    for (vals, 0..) |*field_val, i| {
+                        // TODO: write something like getCoercedInts to avoid needing to dupe
+                        const name = try sema.arena.dupe(u8, ip.stringToSlice(names.get(ip)[i]));
+                        const name_val = v: {
+                            var anon_decl = try block.startAnonDecl();
+                            defer anon_decl.deinit();
+                            const new_decl_ty = try mod.arrayType(.{
+                                .len = name.len,
+                                .child = .u8_type,
+                            });
+                            const new_decl = try anon_decl.finish(
+                                new_decl_ty,
+                                (try mod.intern(.{ .aggregate = .{
+                                    .ty = new_decl_ty.toIntern(),
+                                    .storage = .{ .bytes = name },
+                                } })).toValue(),
+                                .none, // default alignment
+                            );
+                            break :v try mod.intern(.{ .ptr = .{
+                                .ty = .slice_const_u8_type,
+                                .addr = .{ .decl = new_decl },
+                                .len = (try mod.intValue(Type.usize, name.len)).toIntern(),
+                            } });
+                        };
+
+                        const error_field_fields = .{
+                            // name: []const u8,
+                            name_val,
+                        };
+                        field_val.* = try mod.intern(.{ .aggregate = .{
+                            .ty = error_field_ty.toIntern(),
+                            .storage = .{ .elems = &error_field_fields },
                         } });
-                    };
+                    }
 
-                    const error_field_fields = .{
-                        // name: []const u8,
-                        name_val,
-                    };
-                    field_val.* = try mod.intern(.{ .aggregate = .{
-                        .ty = error_field_ty.toIntern(),
-                        .storage = .{ .elems = &error_field_fields },
-                    } });
-                }
-
-                break :blk vals;
+                    break :blk vals;
+                },
             };
 
             // Build our ?[]const Error value
@@ -18425,9 +18342,12 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index)
     // This is only relevant at runtime.
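+    // Nothing to restore unless the backend supports error return traces, the
+    // binary has them enabled, and the owner function (looked up through the
+    // InternPool) actually calls or awaits errorable functions.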
     if (start_block.is_comptime or start_block.is_typeof) return;
 
-    if (!sema.mod.backendSupportsFeature(.error_return_trace)) return;
-    if (!sema.owner_func.?.calls_or_awaits_errorable_fn) return;
-    if (!sema.mod.comp.bin_file.options.error_return_tracing) return;
+    const mod = sema.mod;
+    const ip = &mod.intern_pool;
+
+    if (!mod.backendSupportsFeature(.error_return_trace)) return;
+    if (!ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn) return;
+    if (!mod.comp.bin_file.options.error_return_tracing) return;
 
     const tracy = trace(@src());
     defer tracy.end();
@@ -18464,17 +18384,30 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index)
 
 fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
     const mod = sema.mod;
-    const gpa = sema.gpa;
     const ip = &mod.intern_pool;
 
     assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion);
 
+    const err_set_ty = sema.fn_ret_ty.errorUnionSet(mod).toIntern();
+    switch (err_set_ty) {
+        .adhoc_inferred_error_set_type => {
+            const ies = sema.fn_ret_ty_ies.?;
+            assert(ies.func == .none);
+            try addToInferredErrorSetPtr(mod, ies, sema.typeOf(uncasted_operand));
+        },
+        else => if (ip.isInferredErrorSetType(err_set_ty)) {
+            const ies = sema.fn_ret_ty_ies.?;
+            assert(ies.func == sema.func_index);
+            try addToInferredErrorSetPtr(mod, ies, sema.typeOf(uncasted_operand));
+        },
+    }
+}
 
-    if (mod.typeToInferredErrorSet(sema.fn_ret_ty.errorUnionSet(mod))) |ies| {
-        const op_ty = sema.typeOf(uncasted_operand);
-        switch (op_ty.zigTypeTag(mod)) {
-            .ErrorSet => try ies.addErrorSet(op_ty, ip, gpa),
-            .ErrorUnion => try ies.addErrorSet(op_ty.errorUnionSet(mod), ip, gpa),
-            else => {},
-        }
+fn addToInferredErrorSetPtr(mod: *Module, ies: *InferredErrorSet, op_ty: Type) !void {
+    const gpa = mod.gpa;
+    const ip = &mod.intern_pool;
+    switch (op_ty.zigTypeTag(mod)) {
+        .ErrorSet => try ies.addErrorSet(op_ty, ip, gpa),
+        .ErrorUnion => try ies.addErrorSet(op_ty.errorUnionSet(mod), ip, gpa),
        else => {},
    }
 }
 
@@ -18488,7 +18421,7 @@ fn analyzeRet(
     // add the error tag to the inferred error set of the in-scope function, so
     // that the coercion below works correctly.
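+    // This only applies when the function actually has an inferred error set,
+    // hence the fn_ret_ty_ies null check below.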
     const mod = sema.mod;
-    if (sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) {
+    if (sema.fn_ret_ty_ies != null and sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) {
         try sema.addToInferredErrorSet(uncasted_operand);
     }
     const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) {
@@ -19461,13 +19394,14 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
 
 fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace");
     const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty);
     const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty);
     const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern());
 
-    if (sema.owner_func != null and
-        sema.owner_func.?.calls_or_awaits_errorable_fn and
+    if (sema.owner_func_index != .none and
+        ip.funcAnalysis(sema.owner_func_index).calls_or_awaits_errorable_fn and
         mod.comp.bin_file.options.error_return_tracing and
         mod.backendSupportsFeature(.error_return_trace))
     {
@@ -19920,7 +19854,7 @@ fn zirReify(
                 return sema.addType(Type.anyerror);
 
             const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod));
-            var names: Module.Fn.InferredErrorSet.NameMap = .{};
+            var names: InferredErrorSet.NameMap = .{};
             try names.ensureUnusedCapacity(sema.arena, len);
             for (0..len) |i| {
                 const elem_val = try payload_val.elemValue(mod, i);
@@ -20431,8 +20365,6 @@ fn zirReify(
                 .is_var_args = is_var_args,
                 .is_generic = false,
                 .is_noinline = false,
-                .align_is_generic = false,
-                .cc_is_generic = false,
                 .section_is_generic = false,
                 .addrspace_is_generic = false,
             });
@@ -20936,8 +20868,8 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
             break :disjoint true;
         }
 
-        try sema.resolveInferredErrorSetTy(block, src, dest_ty);
-        try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty);
+        _ = try sema.resolveInferredErrorSetTy(block, src, dest_ty.toIntern());
+        _ = try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty.toIntern());
         for (dest_ty.errorSetNames(mod)) |dest_err_name|
             if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
                 break :disjoint false;
@@ -23917,7 +23849,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val);
     } else target_util.defaultAddressSpace(target, .function);
 
-    const @"linksection": FuncLinkSection = if (extra.data.bits.has_section_body) blk: {
+    const section: Section = if (extra.data.bits.has_section_body) blk: {
         const body_len = sema.code.extra[extra_index];
         extra_index += 1;
         const body = sema.code.extra[extra_index..][0..body_len];
@@ -23926,20 +23858,20 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         const ty = Type.slice_const_u8;
         const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known");
         if (val.isGenericPoison()) {
-            break :blk FuncLinkSection{ .generic = {} };
+            break :blk .generic;
         }
-        break :blk FuncLinkSection{ .explicit = try val.toIpString(ty, mod) };
+        break :blk .{ .explicit = try val.toIpString(ty, mod) };
     } else if (extra.data.bits.has_section_ref) blk: {
         const section_ref = @as(Zir.Inst.Ref, @enumFromInt(sema.code.extra[extra_index]));
         extra_index += 1;
         const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) {
             error.GenericPoison => {
-                break :blk FuncLinkSection{ .generic = {} };
+                break :blk .generic;
             },
             else => |e| return e,
         };
-        break :blk FuncLinkSection{ .explicit = section_name };
-    } else FuncLinkSection{ .default = {} };
+        break :blk .{ .explicit = section_name };
+    } else .default;
 
     const cc: ?std.builtin.CallingConvention = if (extra.data.bits.has_cc_body) blk: {
         const body_len = sema.code.extra[extra_index];
@@ -24013,7 +23945,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         inst,
         @"align",
         @"addrspace",
-        @"linksection",
+        section,
         cc,
         ret_ty,
         is_var_args,
@@ -24846,9 +24778,9 @@ fn prepareSimplePanic(sema: *Sema, block: *Block) !void {
         const tv = try mod.declPtr(decl_index).typedValue();
         assert(tv.ty.zigTypeTag(mod) == .Fn);
         assert(try sema.fnHasRuntimeBits(tv.ty));
-        const func_index = mod.intern_pool.indexToFunc(tv.val.toIntern()).unwrap().?;
+        const func_index = tv.val.toIntern();
         try mod.ensureFuncBodyAnalysisQueued(func_index);
-        mod.panic_func_index = func_index.toOptional();
+        mod.panic_func_index = func_index;
     }
 
     if (mod.null_stack_trace == .none) {
@@ -24982,7 +24914,7 @@ fn panicWithMsg(sema: *Sema, block: *Block, msg_inst: Air.Inst.Ref) !void {
 
     try sema.prepareSimplePanic(block);
 
-    const panic_func = mod.funcPtrUnwrap(mod.panic_func_index).?;
+    const panic_func = mod.funcInfo(mod.panic_func_index);
     const panic_fn = try sema.analyzeDeclVal(block, .unneeded, panic_func.owner_decl);
     const null_stack_trace = try sema.addConstant(mod.null_stack_trace.toValue());
@@ -25688,7 +25620,7 @@ fn fieldCallBind(
                     if (mod.typeToFunc(decl_type)) |func_type| f: {
                         if (func_type.param_types.len == 0) break :f;
 
-                        const first_param_type = func_type.param_types[0].toType();
+                        const first_param_type = func_type.param_types.get(ip)[0].toType();
                         // zig fmt: off
                         if (first_param_type.isGenericPoison() or (
                                 first_param_type.zigTypeTag(mod) == .Pointer and
@@ -27526,7 +27458,7 @@ fn coerceExtra(
                     errdefer msg.destroy(sema.gpa);
 
                     const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
-                    const src_decl = mod.declPtr(sema.func.?.owner_decl);
+                    const src_decl = mod.funcOwnerDeclPtr(sema.func_index);
                     try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "'noreturn' declared here", .{});
                     break :msg msg;
                 };
@@ -27556,9 +27488,11 @@ fn coerceExtra(
         try in_memory_result.report(sema, block, inst_src, msg);
 
         // Add notes about function return type
-        if (opts.is_ret and mod.test_functions.get(sema.func.?.owner_decl) == null) {
+        if (opts.is_ret and
+            mod.test_functions.get(mod.funcOwnerDeclIndex(sema.func_index)) == null)
+        {
             const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
-            const src_decl = mod.declPtr(sema.func.?.owner_decl);
+            const src_decl = mod.funcOwnerDeclPtr(sema.func_index);
             if (inst_ty.isError(mod) and !dest_ty.isError(mod)) {
                 try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function cannot return an error", .{});
             } else {
@@ -28160,42 +28094,29 @@ fn coerceInMemoryAllowedErrorSets(
         return .ok;
     }
 
-    if (mod.typeToInferredErrorSetIndex(dest_ty).unwrap()) |dst_ies_index| {
-        const dst_ies = mod.inferredErrorSetPtr(dst_ies_index);
-        // We will make an effort to return `ok` without resolving either error set, to
-        // avoid unnecessary "unable to resolve error set" dependency loop errors.
-        switch (src_ty.toIntern()) {
-            .anyerror_type => {},
-            else => switch (ip.indexToKey(src_ty.toIntern())) {
-                .inferred_error_set_type => |src_index| {
-                    // If both are inferred error sets of functions, and
-                    // the dest includes the source function, the coercion is OK.
-                    // This check is important because it works without forcing a full resolution
-                    // of inferred error sets.
-                    if (dst_ies.inferred_error_sets.contains(src_index)) {
-                        return .ok;
-                    }
-                },
-                .error_set_type => |error_set_type| {
-                    for (error_set_type.names) |name| {
-                        if (!dst_ies.errors.contains(name)) break;
-                    } else return .ok;
-                },
-                else => unreachable,
-            },
-        }
+    if (dest_ty.toIntern() == .adhoc_inferred_error_set_type) {
+        // We are trying to coerce an error set to the current function's
+        // inferred error set.
+        const dst_ies = sema.fn_ret_ty_ies.?;
+        try dst_ies.addErrorSet(src_ty, ip, gpa);
+        return .ok;
+    }
 
-        if (dst_ies.func == sema.owner_func_index.unwrap()) {
-            // We are trying to coerce an error set to the current function's
-            // inferred error set.
-            try dst_ies.addErrorSet(src_ty, ip, gpa);
-            return .ok;
+    if (ip.isInferredErrorSetType(dest_ty.toIntern())) {
+        const dst_ies_func_index = ip.iesFuncIndex(dest_ty.toIntern());
+        if (sema.fn_ret_ty_ies) |dst_ies| {
+            if (dst_ies.func == dst_ies_func_index) {
+                // We are trying to coerce an error set to the current function's
+                // inferred error set.
+                try dst_ies.addErrorSet(src_ty, ip, gpa);
+                return .ok;
+            }
         }
-
-        try sema.resolveInferredErrorSet(block, dest_src, dst_ies_index);
-        // isAnyError might have changed from a false negative to a true positive after resolution.
-        if (dest_ty.isAnyError(mod)) {
-            return .ok;
+        switch (try sema.resolveInferredErrorSet(block, dest_src, dest_ty.toIntern())) {
+            // isAnyError might have changed from a false negative to a true
+            // positive after resolution.
+            .anyerror_type => return .ok,
+            else => {},
         }
     }
 
@@ -28210,17 +28131,15 @@ fn coerceInMemoryAllowedErrorSets(
         },
         else => switch (ip.indexToKey(src_ty.toIntern())) {
-            .inferred_error_set_type => |src_index| {
-                const src_data = mod.inferredErrorSetPtr(src_index);
-
-                try sema.resolveInferredErrorSet(block, src_src, src_index);
+            .inferred_error_set_type => {
+                const resolved_src_ty = try sema.resolveInferredErrorSet(block, src_src, src_ty.toIntern());
                 // src anyerror status might have changed after the resolution.
-                if (src_ty.isAnyError(mod)) {
+                if (resolved_src_ty == .anyerror_type) {
                     // dest_ty.isAnyError(mod) == true is already checked for at this point.
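+                    // A resolved source set of anyerror can never fit into a
+                    // concrete destination set, so report the mismatch.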
                     return .from_anyerror;
                 }
 
-                for (src_data.errors.keys()) |key| {
+                for (ip.indexToKey(resolved_src_ty).error_set_type.names.get(ip)) |key| {
                     if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), key)) {
                         try missing_error_buf.append(key);
                     }
@@ -28235,7 +28154,7 @@ fn coerceInMemoryAllowedErrorSets(
                 return .ok;
             },
             .error_set_type => |error_set_type| {
-                for (error_set_type.names) |name| {
+                for (error_set_type.names.get(ip)) |name| {
                     if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), name)) {
                         try missing_error_buf.append(name);
                     }
@@ -28264,11 +28183,12 @@ fn coerceInMemoryAllowedFns(
     src_src: LazySrcLoc,
 ) !InMemoryCoercionResult {
     const mod = sema.mod;
+    const ip = &mod.intern_pool;
+
+    const dest_info = mod.typeToFunc(dest_ty).?;
+    const src_info = mod.typeToFunc(src_ty).?;
 
     {
-        const dest_info = mod.typeToFunc(dest_ty).?;
-        const src_info = mod.typeToFunc(src_ty).?;
-
         if (dest_info.is_var_args != src_info.is_var_args) {
             return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args };
         }
@@ -28302,9 +28222,6 @@ fn coerceInMemoryAllowedFns(
     }
 
     const params_len = params_len: {
-        const dest_info = mod.typeToFunc(dest_ty).?;
-        const src_info = mod.typeToFunc(src_ty).?;
-
         if (dest_info.param_types.len != src_info.param_types.len) {
             return InMemoryCoercionResult{ .fn_param_count = .{
                 .actual = src_info.param_types.len,
@@ -28323,13 +28240,10 @@ fn coerceInMemoryAllowedFns(
     };
 
     for (0..params_len) |param_i| {
-        const dest_info = mod.typeToFunc(dest_ty).?;
-        const src_info = mod.typeToFunc(src_ty).?;
+        const dest_param_ty = dest_info.param_types.get(ip)[param_i].toType();
+        const src_param_ty = src_info.param_types.get(ip)[param_i].toType();
 
-        const dest_param_ty = dest_info.param_types[param_i].toType();
-        const src_param_ty = src_info.param_types[param_i].toType();
-
-        const param_i_small = @as(u5, @intCast(param_i));
+        const param_i_small: u5 = @intCast(param_i);
         if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) {
             return InMemoryCoercionResult{ .fn_param_comptime = .{
                 .index = param_i,
@@ -30471,6 +30385,7 @@ fn addReferencedBy(
 fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void {
     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     const decl = mod.declPtr(decl_index);
     if (decl.analysis == .in_progress) {
         const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(mod), "dependency loop detected", .{});
     }
 
     mod.ensureDeclAnalyzed(decl_index) catch |err| {
@@ -30478,8 +30393,8 @@ fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void {
-        if (sema.owner_func) |owner_func| {
-            owner_func.state = .dependency_failure;
+        if (sema.owner_func_index != .none) {
+            ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
         } else {
             sema.owner_decl.analysis = .dependency_failure;
         }
@@ -30487,10 +30402,12 @@ fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void {
     };
 }
 
-fn ensureFuncBodyAnalyzed(sema: *Sema, func: Module.Fn.Index) CompileError!void {
-    sema.mod.ensureFuncBodyAnalyzed(func) catch |err| {
-        if (sema.owner_func) |owner_func| {
-            owner_func.state = .dependency_failure;
+fn ensureFuncBodyAnalyzed(sema: *Sema, func: InternPool.Index) CompileError!void {
+    const mod = sema.mod;
+    const ip = &mod.intern_pool;
+    mod.ensureFuncBodyAnalyzed(func) catch |err| {
+        if (sema.owner_func_index != .none) {
+            ip.funcAnalysis(sema.owner_func_index).state = .dependency_failure;
         } else {
             sema.owner_decl.analysis = .dependency_failure;
         }
@@ -30566,7 +30483,8 @@ fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void {
     const tv = try decl.typedValue();
     if (tv.ty.zigTypeTag(mod) != .Fn) return;
     if (!try sema.fnHasRuntimeBits(tv.ty)) return;
-    const func_index = mod.intern_pool.indexToFunc(tv.val.toIntern()).unwrap() orelse return; // undef or extern_fn
+    const func_index = tv.val.toIntern();
+    if (!mod.intern_pool.isFuncBody(func_index)) return; // undef or extern function
     try mod.ensureFuncBodyAnalysisQueued(func_index);
 }
 
@@ -30582,7 +30500,7 @@ fn analyzeRef(
     if (try sema.resolveMaybeUndefVal(operand)) |val| {
         switch (mod.intern_pool.indexToKey(val.toIntern())) {
             .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl),
-            .func => |func| return sema.analyzeDeclRef(mod.funcPtr(func.index).owner_decl),
+            .func => |func| return sema.analyzeDeclRef(func.owner_decl),
             else => {},
         }
         var anon_decl = try block.startAnonDecl();
@@ -30752,73 +30670,85 @@ fn analyzeIsNonErrComptimeOnly(
     operand: Air.Inst.Ref,
 ) CompileError!Air.Inst.Ref {
     const mod = sema.mod;
+    const ip = &mod.intern_pool;
     const operand_ty = sema.typeOf(operand);
     const ot = operand_ty.zigTypeTag(mod);
-    if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true;
-    if (ot == .ErrorSet) return Air.Inst.Ref.bool_false;
+    if (ot != .ErrorSet and ot != .ErrorUnion) return .bool_true;
+    if (ot == .ErrorSet) return .bool_false;
     assert(ot == .ErrorUnion);
 
     const payload_ty = operand_ty.errorUnionPayload(mod);
     if (payload_ty.zigTypeTag(mod) == .NoReturn) {
-        return Air.Inst.Ref.bool_false;
+        return .bool_false;
     }
 
     if (Air.refToIndex(operand)) |operand_inst| {
         switch (sema.air_instructions.items(.tag)[operand_inst]) {
-            .wrap_errunion_payload => return Air.Inst.Ref.bool_true,
-            .wrap_errunion_err => return Air.Inst.Ref.bool_false,
+            .wrap_errunion_payload => return .bool_true,
+            .wrap_errunion_err => return .bool_false,
             else => {},
         }
     } else if (operand == .undef) {
         return sema.addConstUndef(Type.bool);
    } else if (@intFromEnum(operand) < InternPool.static_len) {
        // None of the ref tags can be errors.
-        return Air.Inst.Ref.bool_true;
+        return .bool_true;
    }
 
     const maybe_operand_val = try sema.resolveMaybeUndefVal(operand);
 
     // exception if the error union error set is known to be empty,
     // we allow the comparison but always make it comptime-known.
-    const set_ty = operand_ty.errorUnionSet(mod);
-    switch (set_ty.toIntern()) {
+    const set_ty = ip.errorUnionSet(operand_ty.toIntern());
+    switch (set_ty) {
         .anyerror_type => {},
-        else => switch (mod.intern_pool.indexToKey(set_ty.toIntern())) {
+        else => switch (ip.indexToKey(set_ty)) {
             .error_set_type => |error_set_type| {
-                if (error_set_type.names.len == 0) return Air.Inst.Ref.bool_true;
+                if (error_set_type.names.len == 0) return .bool_true;
             },
-            .inferred_error_set_type => |ies_index| blk: {
+            .inferred_error_set_type => |func_index| blk: {
                 // If the error set is empty, we must return a comptime true or false.
                 // However we want to avoid unnecessarily resolving an inferred error set
                 // in case it is already non-empty.
-                const ies = mod.inferredErrorSetPtr(ies_index);
-                if (ies.is_anyerror) break :blk;
-                if (ies.errors.count() != 0) break :blk;
+                switch (ip.funcIesResolved(func_index).*) {
+                    .anyerror_type => break :blk,
+                    .none => {},
+                    else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk,
+                }
                 if (maybe_operand_val == null) {
                     // Try to avoid resolving inferred error set if possible.
- if (ies.errors.count() != 0) break :blk; - if (ies.is_anyerror) break :blk; - for (ies.inferred_error_sets.keys()) |other_ies_index| { - if (ies_index == other_ies_index) continue; - try sema.resolveInferredErrorSet(block, src, other_ies_index); - const other_ies = mod.inferredErrorSetPtr(other_ies_index); - if (other_ies.is_anyerror) { - ies.is_anyerror = true; - ies.is_resolved = true; - break :blk; + if (sema.fn_ret_ty_ies) |ies| { + if (set_ty == .adhoc_inferred_error_set_type or + ies.func == func_index) + { + // Try to avoid resolving inferred error set if possible. + if (ies.errors.count() != 0) return .none; + switch (ies.resolved) { + .anyerror_type => return .none, + .none => {}, + else => switch (ip.indexToKey(ies.resolved).error_set_type.names.len) { + 0 => return .bool_true, + else => return .none, + }, + } + for (ies.inferred_error_sets.keys()) |other_ies_index| { + if (set_ty == other_ies_index) continue; + const other_resolved = + try sema.resolveInferredErrorSet(block, src, other_ies_index); + if (other_resolved == .anyerror_type) { + ies.resolved = .anyerror_type; + return .none; + } + if (ip.indexToKey(other_resolved).error_set_type.names.len != 0) + return .none; + } + return .bool_true; } - - if (other_ies.errors.count() != 0) break :blk; } - if (ies.func == sema.owner_func_index.unwrap()) { - // We're checking the inferred errorset of the current function and none of - // its child inferred error sets contained any errors meaning that any value - // so far with this type can't contain errors either. - return Air.Inst.Ref.bool_true; - } - try sema.resolveInferredErrorSet(block, src, ies_index); - if (ies.is_anyerror) break :blk; - if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true; + const resolved_ty = try sema.resolveInferredErrorSet(block, src, set_ty); + if (resolved_ty == .anyerror_type) + break :blk; + if (ip.indexToKey(resolved_ty).error_set_type.names.len == 0) + return .bool_true; } }, else => unreachable, @@ -30830,12 +30760,12 @@ fn analyzeIsNonErrComptimeOnly( return sema.addConstUndef(Type.bool); } if (err_union.getErrorName(mod) == .none) { - return Air.Inst.Ref.bool_true; + return .bool_true; } else { - return Air.Inst.Ref.bool_false; + return .bool_false; } } - return Air.Inst.Ref.none; + return .none; } fn analyzeIsNonErr( @@ -31768,24 +31698,39 @@ fn wrapErrorUnionSet( const inst_ty = sema.typeOf(inst); const dest_err_set_ty = dest_ty.errorUnionSet(mod); if (try sema.resolveMaybeUndefVal(inst)) |val| { + const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; switch (dest_err_set_ty.toIntern()) { .anyerror_type => {}, + .adhoc_inferred_error_set_type => ok: { + const ies = sema.fn_ret_ty_ies.?; + switch (ies.resolved) { + .anyerror_type => break :ok, + .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { + break :ok; + }, + else => |i| if (ip.indexToKey(i).error_set_type.nameIndex(ip, expected_name) != null) { + break :ok; + }, + } + return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); + }, else => switch (ip.indexToKey(dest_err_set_ty.toIntern())) { .error_set_type => |error_set_type| ok: { - const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; if (error_set_type.nameIndex(ip, expected_name) != null) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, - .inferred_error_set_type => |ies_index| ok: { - const ies = mod.inferredErrorSetPtr(ies_index); - const 
expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; - + .inferred_error_set_type => |func_index| ok: { // We carefully do this in an order that avoids unnecessarily // resolving the destination error set type. - if (ies.is_anyerror) break :ok; - - if (ies.errors.contains(expected_name)) break :ok; - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) break :ok; + switch (ip.funcIesResolved(func_index).*) { + .anyerror_type => break :ok, + .none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { + break :ok; + }, + else => |i| if (ip.indexToKey(i).error_set_type.nameIndex(ip, expected_name) != null) { + break :ok; + }, + } return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); }, @@ -31794,9 +31739,7 @@ fn wrapErrorUnionSet( } return sema.addConstant((try mod.intern(.{ .error_union = .{ .ty = dest_ty.toIntern(), - .val = .{ - .err_name = mod.intern_pool.indexToKey(try val.intern(dest_err_set_ty, mod)).err.name, - }, + .val = .{ .err_name = expected_name }, } })).toValue()); } @@ -33273,17 +33216,31 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike { }; } +pub fn resolveIes(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError!void { + const mod = sema.mod; + const ip = &mod.intern_pool; + + if (sema.fn_ret_ty_ies) |ies| { + try sema.resolveInferredErrorSetPtr(block, src, ies); + assert(ies.resolved != .none); + ip.funcIesResolved(sema.func_index).* = ies.resolved; + } +} + pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { const mod = sema.mod; - try sema.resolveTypeFully(mod.typeToFunc(fn_ty).?.return_type.toType()); + const ip = &mod.intern_pool; + const fn_ty_info = mod.typeToFunc(fn_ty).?; - if (mod.comp.bin_file.options.error_return_tracing and mod.typeToFunc(fn_ty).?.return_type.toType().isError(mod)) { + try sema.resolveTypeFully(fn_ty_info.return_type.toType()); + + if (mod.comp.bin_file.options.error_return_tracing and fn_ty_info.return_type.toType().isError(mod)) { // Ensure the type exists so that backends can assume that. _ = try sema.getBuiltinType("StackTrace"); } - for (0..mod.typeToFunc(fn_ty).?.param_types.len) |i| { - try sema.resolveTypeFully(mod.typeToFunc(fn_ty).?.param_types[i].toType()); + for (0..fn_ty_info.param_types.len) |i| { + try sema.resolveTypeFully(fn_ty_info.param_types.get(ip)[i].toType()); } } @@ -33448,7 +33405,9 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { // the function is instantiated. 
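// Aside: the recurring mechanical change in this commit. `param_types` is no
// longer a plain slice; it is a range of indexes owned by the InternPool, so
// every use site materializes it with `.get(ip)`. A reduced sketch of that
// storage scheme, with illustrative names rather than the real InternPool API:
const IndexSlice = struct {
    start: u32,
    len: u32,

    // Materialize the range as a real slice backed by the pool's storage.
    fn get(s: IndexSlice, pool: *const Pool) []const u32 {
        return pool.extra[s.start..][0..s.len];
    }
};

const Pool = struct {
    // One shared buffer; many IndexSlice values point into it.
    extra: []const u32,
};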
return; } - for (info.param_types) |param_ty| { + const ip = &mod.intern_pool; + for (0..info.param_types.len) |i| { + const param_ty = info.param_types.get(ip)[i]; try sema.resolveTypeLayout(param_ty.toType()); } try sema.resolveTypeLayout(info.return_type.toType()); @@ -33578,10 +33537,9 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, - .func = null, .func_index = .none, .fn_ret_ty = Type.void, - .owner_func = null, + .fn_ret_ty_ies = null, .owner_func_index = .none, .comptime_mutable_decls = &comptime_mutable_decls, }; @@ -33600,10 +33558,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .inlining = null, .is_comptime = true, }; - defer { - assert(block.instructions.items.len == 0); - block.params.deinit(gpa); - } + defer assert(block.instructions.items.len == 0); const backing_int_src: LazySrcLoc = .{ .node_offset_container_tag = 0 }; const backing_int_ty = blk: { @@ -33633,10 +33588,9 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, - .func = null, .func_index = .none, .fn_ret_ty = Type.void, - .owner_func = null, + .fn_ret_ty_ies = null, .owner_func_index = .none, .comptime_mutable_decls = undefined, }; @@ -33808,6 +33762,7 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .bool, .void, .anyerror, + .adhoc_inferred_error_set, .noreturn, .generic_poison, .atomic_order, @@ -33943,7 +33898,9 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { // the function is instantiated. return; } - for (info.param_types) |param_ty| { + const ip = &mod.intern_pool; + for (0..info.param_types.len) |i| { + const param_ty = info.param_types.get(ip)[i]; try sema.resolveTypeFully(param_ty.toType()); } try sema.resolveTypeFully(info.return_type.toType()); @@ -34056,6 +34013,7 @@ pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { .void_type, .type_type, .anyerror_type, + .adhoc_inferred_error_set_type, .comptime_int_type, .comptime_float_type, .noreturn_type, @@ -34209,29 +34167,28 @@ fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_obj: *Module.Union) Compi union_obj.status = .have_field_types; } +/// Returns a normal error set corresponding to the fully populated inferred +/// error set. fn resolveInferredErrorSet( sema: *Sema, block: *Block, src: LazySrcLoc, - ies_index: Module.Fn.InferredErrorSet.Index, -) CompileError!void { + ies_index: InternPool.Index, +) CompileError!InternPool.Index { const mod = sema.mod; - const ies = mod.inferredErrorSetPtr(ies_index); - - if (ies.is_resolved) return; - - const func = mod.funcPtr(ies.func); - if (func.state == .in_progress) { + const ip = &mod.intern_pool; + const func_index = ip.iesFuncIndex(ies_index); + const func = mod.funcInfo(func_index); + const resolved_ty = func.resolvedErrorSet(ip).*; + if (resolved_ty != .none) return resolved_ty; + if (func.analysis(ip).state == .in_progress) return sema.fail(block, src, "unable to resolve inferred error set", .{}); - } - // In order to ensure that all dependencies are properly added to the set, we - // need to ensure the function body is analyzed of the inferred error set. - // However, in the case of comptime/inline function calls with inferred error sets, - // each call gets a new InferredErrorSet object, which contains the same - // `Module.Fn.Index`. 
Not only is the function not relevant to the inferred error set
- // in this case, it may be a generic function which would cause an assertion failure
- // if we called `ensureFuncBodyAnalyzed` on it here.
+ // In order to ensure that all dependencies are properly added to the set,
+ // we need to ensure the function body of the inferred error set is
+ // analyzed. However, in the case of comptime/inline function calls with
+ // inferred error sets, each call gets an ad hoc InferredErrorSet object,
+ // which has no corresponding function body.
 const ies_func_owner_decl = mod.declPtr(func.owner_decl);
 const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?;
 // if ies declared by an inline function with generic return type, the return_type should be generic_poison,
@@ -34239,7 +34196,7 @@ fn resolveInferredErrorSet(
 // so here we can simply skip this case.
 if (ies_func_info.return_type == .generic_poison_type) {
 assert(ies_func_info.cc == .Inline);
- } else if (mod.typeToInferredErrorSet(ies_func_info.return_type.toType().errorUnionSet(mod)).? == ies) {
+ } else if (ip.errorUnionSet(ies_func_info.return_type) == ies_index) {
 if (ies_func_info.is_generic) {
 const msg = msg: {
 const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{});
@@ -34252,33 +34209,101 @@ fn resolveInferredErrorSet(
 }
 // In this case we are dealing with the actual InferredErrorSet object that
 // corresponds to the function, not one created to track an inline/comptime call.
- try sema.ensureFuncBodyAnalyzed(ies.func);
+ try sema.ensureFuncBodyAnalyzed(func_index);
 }
- ies.is_resolved = true;
+ // This will now have been resolved by the logic at the end of `Module.analyzeFnBody`,
+ // which calls `resolveInferredErrorSetPtr`.
+ const final_resolved_ty = func.resolvedErrorSet(ip).*;
+ assert(final_resolved_ty != .none);
+ return final_resolved_ty;
+}
+
+pub fn resolveInferredErrorSetPtr(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ ies: *InferredErrorSet,
+) CompileError!void {
+ const mod = sema.mod;
+ const ip = &mod.intern_pool;
+
+ if (ies.resolved != .none) return;
+
+ const ies_index = ip.errorUnionSet(sema.fn_ret_ty.toIntern());
 for (ies.inferred_error_sets.keys()) |other_ies_index| {
 if (ies_index == other_ies_index) continue;
- try sema.resolveInferredErrorSet(block, src, other_ies_index);
-
- const other_ies = mod.inferredErrorSetPtr(other_ies_index);
- for (other_ies.errors.keys()) |key| {
- try ies.errors.put(sema.gpa, key, {});
+ switch (try sema.resolveInferredErrorSet(block, src, other_ies_index)) {
+ .anyerror_type => {
+ ies.resolved = .anyerror_type;
+ return;
+ },
+ else => |error_set_ty_index| {
+ const names = ip.indexToKey(error_set_ty_index).error_set_type.names;
+ for (names.get(ip)) |name| {
+ try ies.errors.put(sema.arena, name, {});
+ }
+ },
 }
- if (other_ies.is_anyerror)
- ies.is_anyerror = true;
 }
+
+ const resolved_error_set_ty = try mod.errorSetFromUnsortedNames(ies.errors.keys());
+ ies.resolved = resolved_error_set_ty.toIntern();
+}
+
+fn resolveAdHocInferredErrorSet(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ value: InternPool.Index,
+) CompileError!InternPool.Index {
+ const mod = sema.mod;
+ const gpa = sema.gpa;
+ const ip = &mod.intern_pool;
+ const new_ty = try resolveAdHocInferredErrorSetTy(sema, block, src, ip.typeOf(value));
+ if (new_ty == .none) return value;
+ return ip.getCoerced(gpa, value, new_ty);
+}
+
+fn resolveAdHocInferredErrorSetTy(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ ty: 
InternPool.Index, +) CompileError!InternPool.Index { + const ies = sema.fn_ret_ty_ies orelse return .none; + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; + const error_union_info = switch (ip.indexToKey(ty)) { + .error_union_type => |x| x, + else => return .none, + }; + if (error_union_info.error_set_type != .adhoc_inferred_error_set_type) + return .none; + + try sema.resolveInferredErrorSetPtr(block, src, ies); + const new_ty = try ip.get(gpa, .{ .error_union_type = .{ + .error_set_type = ies.resolved, + .payload_type = error_union_info.payload_type, + } }); + return new_ty; } fn resolveInferredErrorSetTy( sema: *Sema, block: *Block, src: LazySrcLoc, - ty: Type, -) CompileError!void { + ty: InternPool.Index, +) CompileError!InternPool.Index { const mod = sema.mod; - if (mod.typeToInferredErrorSetIndex(ty).unwrap()) |ies_index| { - try sema.resolveInferredErrorSet(block, src, ies_index); + const ip = &mod.intern_pool; + if (ty == .anyerror_type) return ty; + switch (ip.indexToKey(ty)) { + .error_set_type => return ty, + .inferred_error_set_type => return sema.resolveInferredErrorSet(block, src, ty), + else => unreachable, } } @@ -34346,10 +34371,9 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, - .func = null, .func_index = .none, .fn_ret_ty = Type.void, - .owner_func = null, + .fn_ret_ty_ies = null, .owner_func_index = .none, .comptime_mutable_decls = &comptime_mutable_decls, }; @@ -34368,10 +34392,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void .inlining = null, .is_comptime = true, }; - defer { - assert(block_scope.instructions.items.len == 0); - block_scope.params.deinit(gpa); - } + defer assert(block_scope.instructions.items.len == 0); struct_obj.fields = .{}; try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); @@ -34693,10 +34714,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, - .func = null, .func_index = .none, .fn_ret_ty = Type.void, - .owner_func = null, + .fn_ret_ty_ies = null, .owner_func_index = .none, .comptime_mutable_decls = &comptime_mutable_decls, }; @@ -34715,10 +34735,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { .inlining = null, .is_comptime = true, }; - defer { - assert(block_scope.instructions.items.len == 0); - block_scope.params.deinit(gpa); - } + defer assert(block_scope.instructions.items.len == 0); if (body.len != 0) { try sema.analyzeBody(&block_scope, body); @@ -35050,7 +35067,7 @@ fn generateUnionTagTypeNumbered( errdefer mod.destroyDecl(new_decl_index); const fqn = try union_obj.getFullyQualifiedName(mod); const name = try mod.intern_pool.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(&mod.intern_pool)}); - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ + try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, .{ .ty = Type.noreturn, .val = Value.@"unreachable", }, name); @@ -35101,7 +35118,7 @@ fn generateUnionTagTypeSimple( errdefer mod.destroyDecl(new_decl_index); const fqn = try union_obj.getFullyQualifiedName(mod); const name = try mod.intern_pool.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(&mod.intern_pool)}); - try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ + try mod.initNewAnonDecl(new_decl_index, 
src_decl.src_line, .{ .ty = Type.noreturn, .val = Value.@"unreachable", }, name); @@ -35148,10 +35165,7 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { .inlining = null, .is_comptime = true, }; - defer { - block.instructions.deinit(gpa); - block.params.deinit(gpa); - } + defer block.instructions.deinit(gpa); const decl_index = try getBuiltinDecl(sema, &block, name); return sema.analyzeDeclVal(&block, src, decl_index); @@ -35202,10 +35216,7 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { .inlining = null, .is_comptime = true, }; - defer { - block.instructions.deinit(sema.gpa); - block.params.deinit(sema.gpa); - } + defer block.instructions.deinit(sema.gpa); const src = LazySrcLoc.nodeOffset(0); const result_ty = sema.analyzeAsType(&block, src, ty_inst) catch |err| switch (err) { @@ -35261,6 +35272,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .bool_type, .type_type, .anyerror_type, + .adhoc_inferred_error_set_type, .comptime_int_type, .comptime_float_type, .enum_literal_type, @@ -35314,6 +35326,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .var_args_param_type, .none, => unreachable, + _ => switch (mod.intern_pool.items.items(.tag)[@intFromEnum(ty.toIntern())]) { .type_int_signed, // i0 handled above .type_int_unsigned, // u0 handled above @@ -35322,11 +35335,13 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .type_optional, // ?noreturn handled above .type_anyframe, .type_error_union, + .type_anyerror_union, .type_error_set, .type_inferred_error_set, .type_opaque, .type_function, => null, + .simple_type, // handled above // values, not types .undef, @@ -35370,7 +35385,9 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .float_comptime_float, .variable, .extern_func, - .func, + .func_decl, + .func_instance, + .func_coerced, .only_possible_value, .union_value, .bytes, @@ -35379,6 +35396,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { // memoized value, not types .memoized_call, => unreachable, + .type_array_big, .type_array_small, .type_vector, @@ -35911,6 +35929,7 @@ pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { .prefetch_options, .export_options, .extern_options, + .adhoc_inferred_error_set, => false, .type, @@ -36772,7 +36791,7 @@ fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { const arena = sema.arena; const lhs_names = lhs.errorSetNames(mod); const rhs_names = rhs.errorSetNames(mod); - var names: Module.Fn.InferredErrorSet.NameMap = .{}; + var names: InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(arena, lhs_names.len); for (lhs_names) |name| { diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 5abcd7b280..2b68a99f6a 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -205,7 +205,7 @@ pub fn print( mod.declPtr(extern_func.decl).name.fmt(ip), }), .func => |func| return writer.print("(function '{}')", .{ - mod.declPtr(mod.funcPtr(func.index).owner_decl).name.fmt(ip), + mod.declPtr(func.owner_decl).name.fmt(ip), }), .int => |int| switch (int.storage) { inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), diff --git a/src/Zir.zig b/src/Zir.zig index 230937e1ec..572471c863 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -65,9 +65,13 @@ pub const ExtraIndex = enum(u32) { _, }; +fn ExtraData(comptime T: type) type { + return struct { data: T, end: usize }; +} + /// Returns the requested data, as well as 
the new index which is at the start of the /// trailers for the object. -pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, end: usize } { +pub fn extraData(code: Zir, comptime T: type, index: usize) ExtraData(T) { const fields = @typeInfo(T).Struct.fields; var i: usize = index; var result: T = undefined; @@ -90,13 +94,24 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en }; } -/// Given an index into `string_bytes` returns the null-terminated string found there. +/// TODO migrate to use this for type safety +pub const NullTerminatedString = enum(u32) { + _, +}; + +/// TODO: migrate to nullTerminatedString2 for type safety pub fn nullTerminatedString(code: Zir, index: usize) [:0]const u8 { - var end: usize = index; + return nullTerminatedString2(code, @enumFromInt(index)); +} + +/// Given an index into `string_bytes` returns the null-terminated string found there. +pub fn nullTerminatedString2(code: Zir, index: NullTerminatedString) [:0]const u8 { + const start = @intFromEnum(index); + var end: u32 = start; while (code.string_bytes[end] != 0) { end += 1; } - return code.string_bytes[index..end :0]; + return code.string_bytes[start..end :0]; } pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref { @@ -2076,6 +2091,7 @@ pub const Inst = struct { slice_const_u8_sentinel_0_type = @intFromEnum(InternPool.Index.slice_const_u8_sentinel_0_type), optional_noreturn_type = @intFromEnum(InternPool.Index.optional_noreturn_type), anyerror_void_error_union_type = @intFromEnum(InternPool.Index.anyerror_void_error_union_type), + adhoc_inferred_error_set_type = @intFromEnum(InternPool.Index.adhoc_inferred_error_set_type), generic_poison_type = @intFromEnum(InternPool.Index.generic_poison_type), empty_struct_type = @intFromEnum(InternPool.Index.empty_struct_type), undef = @intFromEnum(InternPool.Index.undef), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index a87845df2e..0f088fa867 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -13,6 +13,7 @@ const Value = @import("../../value.zig").Value; const TypedValue = @import("../../TypedValue.zig"); const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const Compilation = @import("../../Compilation.zig"); const ErrorMsg = Module.ErrorMsg; const Target = std.Target; @@ -49,7 +50,8 @@ liveness: Liveness, bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, -mod_fn: *const Module.Fn, +func_index: InternPool.Index, +owner_decl: Module.Decl.Index, err_msg: ?*ErrorMsg, args: []MCValue, ret_mcv: MCValue, @@ -199,7 +201,7 @@ const DbgInfoReloc = struct { else => unreachable, // not a possible argument }; - try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc); + try dw.genArgDbgInfo(reloc.name, reloc.ty, function.owner_decl, loc); }, .plan9 => {}, .none => {}, @@ -245,7 +247,7 @@ const DbgInfoReloc = struct { break :blk .nop; }, }; - try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc); + try dw.genVarDbgInfo(reloc.name, reloc.ty, function.owner_decl, is_ptr, loc); }, .plan9 => {}, .none => {}, @@ -328,7 +330,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn_index: Module.Fn.Index, + func_index: InternPool.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -339,8 +341,8 @@ pub fn generate( } const mod = 
bin_file.options.module.?; - const module_fn = mod.funcPtr(module_fn_index); - const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + const func = mod.funcInfo(func_index); + const fn_owner_decl = mod.declPtr(func.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -359,7 +361,8 @@ pub fn generate( .debug_output = debug_output, .target = &bin_file.options.target, .bin_file = bin_file, - .mod_fn = module_fn, + .func_index = func_index, + .owner_decl = func.owner_decl, .err_msg = null, .args = undefined, // populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` @@ -368,8 +371,8 @@ pub fn generate( .branch_stack = &branch_stack, .src_loc = src_loc, .stack_align = undefined, - .end_di_line = module_fn.rbrace_line, - .end_di_column = module_fn.rbrace_column, + .end_di_line = func.rbrace_line, + .end_di_column = func.rbrace_column, }; defer function.stack.deinit(bin_file.allocator); defer function.blocks.deinit(bin_file.allocator); @@ -416,8 +419,8 @@ pub fn generate( .src_loc = src_loc, .code = code, .prev_di_pc = 0, - .prev_di_line = module_fn.lbrace_line, - .prev_di_column = module_fn.lbrace_column, + .prev_di_line = func.lbrace_line, + .prev_di_column = func.lbrace_column, .stack_size = function.max_end_stack, .saved_regs_stack_space = function.saved_regs_stack_space, }; @@ -4011,12 +4014,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type const atom_index = switch (self.bin_file.tag) { .macho => blk: { const macho_file = self.bin_file.cast(link.File.MachO).?; - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); + const atom = try macho_file.getOrCreateAtomForDecl(self.owner_decl); break :blk macho_file.getAtom(atom).getSymbolIndex().?; }, .coff => blk: { const coff_file = self.bin_file.cast(link.File.Coff).?; - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); + const atom = try coff_file.getOrCreateAtomForDecl(self.owner_decl); break :blk coff_file.getAtom(atom).getSymbolIndex().?; }, else => unreachable, // unsupported target format @@ -4190,10 +4193,11 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { while (self.args[arg_index] == .none) arg_index += 1; self.arg_index = arg_index + 1; + const mod = self.bin_file.options.module.?; const ty = self.typeOfIndex(inst); const tag = self.air.instructions.items(.tag)[inst]; const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index); + const name = mod.getParamName(self.func_index, src_index); try self.dbg_info_relocs.append(self.gpa, .{ .tag = tag, @@ -4348,7 +4352,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name); if (self.bin_file.cast(link.File.MachO)) |macho_file| { const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name); - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); + const atom = try macho_file.getOrCreateAtomForDecl(self.owner_decl); const atom_index = macho_file.getAtom(atom).getSymbolIndex().?; _ = try self.addInst(.{ .tag = .call_extern, @@ -4617,9 +4621,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = 
mod.funcPtr(ty_fn.func); + const func = mod.funcInfo(ty_fn.func); // TODO emit debug info for function change - _ = function; + _ = func; return self.finishAir(inst, .dead, .{ .none, .none, .none }); } @@ -5529,12 +5533,12 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const atom_index = switch (self.bin_file.tag) { .macho => blk: { const macho_file = self.bin_file.cast(link.File.MachO).?; - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); + const atom = try macho_file.getOrCreateAtomForDecl(self.owner_decl); break :blk macho_file.getAtom(atom).getSymbolIndex().?; }, .coff => blk: { const coff_file = self.bin_file.cast(link.File.Coff).?; - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); + const atom = try coff_file.getOrCreateAtomForDecl(self.owner_decl); break :blk coff_file.getAtom(atom).getSymbolIndex().?; }, else => unreachable, // unsupported target format @@ -5650,12 +5654,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void const atom_index = switch (self.bin_file.tag) { .macho => blk: { const macho_file = self.bin_file.cast(link.File.MachO).?; - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); + const atom = try macho_file.getOrCreateAtomForDecl(self.owner_decl); break :blk macho_file.getAtom(atom).getSymbolIndex().?; }, .coff => blk: { const coff_file = self.bin_file.cast(link.File.Coff).?; - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); + const atom = try coff_file.getOrCreateAtomForDecl(self.owner_decl); break :blk coff_file.getAtom(atom).getSymbolIndex().?; }, else => unreachable, // unsupported target format @@ -5847,12 +5851,12 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I const atom_index = switch (self.bin_file.tag) { .macho => blk: { const macho_file = self.bin_file.cast(link.File.MachO).?; - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); + const atom = try macho_file.getOrCreateAtomForDecl(self.owner_decl); break :blk macho_file.getAtom(atom).getSymbolIndex().?; }, .coff => blk: { const coff_file = self.bin_file.cast(link.File.Coff).?; - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); + const atom = try coff_file.getOrCreateAtomForDecl(self.owner_decl); break :blk coff_file.getAtom(atom).getSymbolIndex().?; }, else => unreachable, // unsupported target format @@ -6164,7 +6168,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { self.bin_file, self.src_loc, arg_tv, - self.mod_fn.owner_decl, + self.owner_decl, )) { .mcv => |mcv| switch (mcv) { .none => .none, @@ -6198,6 +6202,7 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(fn_ty).?; const cc = fn_info.cc; var result: CallMCValues = .{ @@ -6240,10 +6245,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } } - for (fn_info.param_types, 0..) 
|ty, i| { + for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); if (param_size == 0) { - result.args[i] = .{ .none = {} }; + result_arg.* = .{ .none = {} }; continue; } @@ -6256,7 +6261,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) { if (param_size <= 8) { - result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty.toType()) }; + result_arg.* = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty.toType()) }; ncrn += 1; } else { return self.fail("TODO MCValues with multiple registers", .{}); @@ -6273,7 +6278,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } } - result.args[i] = .{ .stack_argument_offset = nsaa }; + result_arg.* = .{ .stack_argument_offset = nsaa }; nsaa += param_size; } } @@ -6305,16 +6310,16 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; - for (fn_info.param_types, 0..) |ty, i| { + for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { if (ty.toType().abiSize(mod) > 0) { const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); - result.args[i] = .{ .stack_argument_offset = stack_offset }; + result_arg.* = .{ .stack_argument_offset = stack_offset }; stack_offset += param_size; } else { - result.args[i] = .{ .none = {} }; + result_arg.* = .{ .none = {} }; } } diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index d8c16fa434..135f118731 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -13,6 +13,7 @@ const Value = @import("../../value.zig").Value; const TypedValue = @import("../../TypedValue.zig"); const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const Compilation = @import("../../Compilation.zig"); const ErrorMsg = Module.ErrorMsg; const Target = std.Target; @@ -50,7 +51,7 @@ liveness: Liveness, bin_file: *link.File, debug_output: DebugInfoOutput, target: *const std.Target, -mod_fn: *const Module.Fn, +func_index: InternPool.Index, err_msg: ?*ErrorMsg, args: []MCValue, ret_mcv: MCValue, @@ -258,6 +259,7 @@ const DbgInfoReloc = struct { } fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void { + const mod = function.bin_file.options.module.?; switch (function.debug_output) { .dwarf => |dw| { const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (reloc.mcv) { @@ -278,7 +280,7 @@ const DbgInfoReloc = struct { else => unreachable, // not a possible argument }; - try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc); + try dw.genArgDbgInfo(reloc.name, reloc.ty, mod.funcOwnerDeclIndex(function.func_index), loc); }, .plan9 => {}, .none => {}, @@ -286,6 +288,7 @@ const DbgInfoReloc = struct { } fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void { + const mod = function.bin_file.options.module.?; const is_ptr = switch (reloc.tag) { .dbg_var_ptr => true, .dbg_var_val => false, @@ -321,7 +324,7 @@ const DbgInfoReloc = struct { break :blk .nop; }, }; - try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc); + try dw.genVarDbgInfo(reloc.name, reloc.ty, mod.funcOwnerDeclIndex(function.func_index), is_ptr, loc); }, .plan9 => 
{}, .none => {}, @@ -334,7 +337,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn_index: Module.Fn.Index, + func_index: InternPool.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -345,8 +348,8 @@ pub fn generate( } const mod = bin_file.options.module.?; - const module_fn = mod.funcPtr(module_fn_index); - const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + const func = mod.funcInfo(func_index); + const fn_owner_decl = mod.declPtr(func.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -365,7 +368,7 @@ pub fn generate( .target = &bin_file.options.target, .bin_file = bin_file, .debug_output = debug_output, - .mod_fn = module_fn, + .func_index = func_index, .err_msg = null, .args = undefined, // populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` @@ -374,8 +377,8 @@ pub fn generate( .branch_stack = &branch_stack, .src_loc = src_loc, .stack_align = undefined, - .end_di_line = module_fn.rbrace_line, - .end_di_column = module_fn.rbrace_column, + .end_di_line = func.rbrace_line, + .end_di_column = func.rbrace_column, }; defer function.stack.deinit(bin_file.allocator); defer function.blocks.deinit(bin_file.allocator); @@ -422,8 +425,8 @@ pub fn generate( .src_loc = src_loc, .code = code, .prev_di_pc = 0, - .prev_di_line = module_fn.lbrace_line, - .prev_di_column = module_fn.lbrace_column, + .prev_di_line = func.lbrace_line, + .prev_di_column = func.lbrace_column, .stack_size = function.max_end_stack, .saved_regs_stack_space = function.saved_regs_stack_space, }; @@ -4163,10 +4166,11 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { while (self.args[arg_index] == .none) arg_index += 1; self.arg_index = arg_index + 1; + const mod = self.bin_file.options.module.?; const ty = self.typeOfIndex(inst); const tag = self.air.instructions.items(.tag)[inst]; const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index); + const name = mod.getParamName(self.func_index, src_index); try self.dbg_info_relocs.append(self.gpa, .{ .tag = tag, @@ -4569,9 +4573,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = mod.funcPtr(ty_fn.func); + const func = mod.funcInfo(ty_fn.func); // TODO emit debug info for function change - _ = function; + _ = func; return self.finishAir(inst, .dead, .{ .none, .none, .none }); } @@ -6113,11 +6117,12 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { + const mod = self.bin_file.options.module.?; const mcv: MCValue = switch (try codegen.genTypedValue( self.bin_file, self.src_loc, arg_tv, - self.mod_fn.owner_decl, + mod.funcOwnerDeclIndex(self.func_index), )) { .mcv => |mcv| switch (mcv) { .none => .none, @@ -6149,6 +6154,7 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. 
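// Aside: the loops below are rewritten to Zig's multi-sequence `for`, walking
// the parameter types and the result slots in lockstep instead of indexing
// with `0..`. A standalone sketch of the construct, with illustrative data:
const std = @import("std");

test "multi-sequence for iterates two sequences in lockstep" {
    const sizes = [_]u32{ 4, 0, 8 };
    var args: [3]i32 = undefined;
    // Both operands must have the same length; `*arg` captures a mutable
    // pointer into `args` for in-place assignment.
    for (&sizes, &args) |size, *arg| {
        arg.* = if (size == 0) -1 else @as(i32, @intCast(size));
    }
    try std.testing.expectEqual(@as(i32, -1), args[1]);
}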
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(fn_ty).?; const cc = fn_info.cc; var result: CallMCValues = .{ @@ -6194,14 +6200,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } } - for (fn_info.param_types, 0..) |ty, i| { + for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { if (ty.toType().abiAlignment(mod) == 8) ncrn = std.mem.alignForward(usize, ncrn, 2); const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { if (param_size <= 4) { - result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; + result_arg.* = .{ .register = c_abi_int_param_regs[ncrn] }; ncrn += 1; } else { return self.fail("TODO MCValues with multiple registers", .{}); @@ -6213,7 +6219,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ty.toType().abiAlignment(mod) == 8) nsaa = std.mem.alignForward(u32, nsaa, 8); - result.args[i] = .{ .stack_argument_offset = nsaa }; + result_arg.* = .{ .stack_argument_offset = nsaa }; nsaa += param_size; } } @@ -6244,16 +6250,16 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; - for (fn_info.param_types, 0..) |ty, i| { + for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { if (ty.toType().abiSize(mod) > 0) { const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForward(u32, stack_offset, param_alignment); - result.args[i] = .{ .stack_argument_offset = stack_offset }; + result_arg.* = .{ .stack_argument_offset = stack_offset }; stack_offset += param_size; } else { - result.args[i] = .{ .none = {} }; + result_arg.* = .{ .none = {} }; } } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index c240ff16f1..e43778510b 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -12,6 +12,7 @@ const Value = @import("../../value.zig").Value; const TypedValue = @import("../../TypedValue.zig"); const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const Compilation = @import("../../Compilation.zig"); const ErrorMsg = Module.ErrorMsg; const Target = std.Target; @@ -43,7 +44,7 @@ air: Air, liveness: Liveness, bin_file: *link.File, target: *const std.Target, -mod_fn: *const Module.Fn, +func_index: InternPool.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, err_msg: ?*ErrorMsg, @@ -217,7 +218,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn_index: Module.Fn.Index, + func_index: InternPool.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -228,8 +229,8 @@ pub fn generate( } const mod = bin_file.options.module.?; - const module_fn = mod.funcPtr(module_fn_index); - const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + const func = mod.funcInfo(func_index); + const fn_owner_decl = mod.declPtr(func.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -247,7 +248,7 @@ pub fn generate( .liveness = liveness, .target = &bin_file.options.target, .bin_file = bin_file, - .mod_fn = module_fn, + .func_index = func_index, .code = code, .debug_output = debug_output, .err_msg = null, @@ -258,8 +259,8 @@ pub fn 
generate( .branch_stack = &branch_stack, .src_loc = src_loc, .stack_align = undefined, - .end_di_line = module_fn.rbrace_line, - .end_di_column = module_fn.rbrace_column, + .end_di_line = func.rbrace_line, + .end_di_column = func.rbrace_column, }; defer function.stack.deinit(bin_file.allocator); defer function.blocks.deinit(bin_file.allocator); @@ -301,8 +302,8 @@ pub fn generate( .src_loc = src_loc, .code = code, .prev_di_pc = 0, - .prev_di_line = module_fn.lbrace_line, - .prev_di_column = module_fn.lbrace_column, + .prev_di_line = func.lbrace_line, + .prev_di_column = func.lbrace_column, }; defer emit.deinit(); @@ -1627,13 +1628,15 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { } fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { + const mod = self.bin_file.options.module.?; const arg = self.air.instructions.items(.data)[inst].arg; const ty = self.air.getRefType(arg.ty); - const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg.src_index); + const owner_decl = mod.funcOwnerDeclIndex(self.func_index); + const name = mod.getParamName(self.func_index, arg.src_index); switch (self.debug_output) { .dwarf => |dw| switch (mcv) { - .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{ + .register => |reg| try dw.genArgDbgInfo(name, ty, owner_decl, .{ .register = reg.dwarfLocOp(), }), .stack_offset => {}, @@ -1742,24 +1745,28 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } if (try self.air.value(callee, mod)) |func_value| { - if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| { - const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); - const atom = elf_file.getAtom(atom_index); - _ = try atom.getOrCreateOffsetTableEntry(elf_file); - const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file))); - try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); - _ = try self.addInst(.{ - .tag = .jalr, - .data = .{ .i_type = .{ - .rd = .ra, - .rs1 = .ra, - .imm12 = 0, - } }, - }); - } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) { - return self.fail("TODO implement calling extern functions", .{}); - } else { - return self.fail("TODO implement calling bitcasted functions", .{}); + switch (mod.intern_pool.indexToKey(func_value.ip_index)) { + .func => |func| { + const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); + const atom = elf_file.getAtom(atom_index); + _ = try atom.getOrCreateOffsetTableEntry(elf_file); + const got_addr = @as(u32, @intCast(atom.getOffsetTableAddress(elf_file))); + try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); + _ = try self.addInst(.{ + .tag = .jalr, + .data = .{ .i_type = .{ + .rd = .ra, + .rs1 = .ra, + .imm12 = 0, + } }, + }); + }, + .extern_func => { + return self.fail("TODO implement calling extern functions", .{}); + }, + else => { + return self.fail("TODO implement calling bitcasted functions", .{}); + }, } } else { return self.fail("TODO implement calling runtime-known function pointer", .{}); @@ -1876,9 +1883,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = mod.funcPtr(ty_fn.func); + const func = mod.funcInfo(ty_fn.func); // TODO emit debug info for function change - _ = function; + _ = func; return self.finishAir(inst, .dead, .{ .none, 
.none, .none }); } @@ -2569,11 +2576,12 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { + const mod = self.bin_file.options.module.?; const mcv: MCValue = switch (try codegen.genTypedValue( self.bin_file, self.src_loc, typed_value, - self.mod_fn.owner_decl, + mod.funcOwnerDeclIndex(self.func_index), )) { .mcv => |mcv| switch (mcv) { .none => .none, @@ -2605,6 +2613,7 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(fn_ty).?; const cc = fn_info.cc; var result: CallMCValues = .{ @@ -2636,14 +2645,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var next_stack_offset: u32 = 0; const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 }; - for (fn_info.param_types, 0..) |ty, i| { + for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); if (param_size <= 8) { if (next_register < argument_registers.len) { - result.args[i] = .{ .register = argument_registers[next_register] }; + result_arg.* = .{ .register = argument_registers[next_register] }; next_register += 1; } else { - result.args[i] = .{ .stack_offset = next_stack_offset }; + result_arg.* = .{ .stack_offset = next_stack_offset }; next_register += next_stack_offset; } } else if (param_size <= 16) { @@ -2652,11 +2661,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } else if (next_register < argument_registers.len) { return self.fail("TODO MCValues split register + stack", .{}); } else { - result.args[i] = .{ .stack_offset = next_stack_offset }; + result_arg.* = .{ .stack_offset = next_stack_offset }; next_register += next_stack_offset; } } else { - result.args[i] = .{ .stack_offset = next_stack_offset }; + result_arg.* = .{ .stack_offset = next_stack_offset }; next_register += next_stack_offset; } } diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index f8dd621ca0..6d575a9d45 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -11,6 +11,7 @@ const Allocator = mem.Allocator; const builtin = @import("builtin"); const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const TypedValue = @import("../../TypedValue.zig"); const ErrorMsg = Module.ErrorMsg; const codegen = @import("../../codegen.zig"); @@ -52,7 +53,7 @@ air: Air, liveness: Liveness, bin_file: *link.File, target: *const std.Target, -mod_fn: *const Module.Fn, +func_index: InternPool.Index, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, err_msg: ?*ErrorMsg, @@ -260,7 +261,7 @@ const BigTomb = struct { pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn_index: Module.Fn.Index, + func_index: InternPool.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -271,8 +272,8 @@ pub fn generate( } const mod = bin_file.options.module.?; - const module_fn = mod.funcPtr(module_fn_index); - const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + const func = mod.funcInfo(func_index); + const fn_owner_decl = mod.declPtr(func.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -289,8 +290,8 @@ pub fn generate( .air = air, 
.liveness = liveness, .target = &bin_file.options.target, + .func_index = func_index, .bin_file = bin_file, - .mod_fn = module_fn, .code = code, .debug_output = debug_output, .err_msg = null, @@ -301,8 +302,8 @@ pub fn generate( .branch_stack = &branch_stack, .src_loc = src_loc, .stack_align = undefined, - .end_di_line = module_fn.rbrace_line, - .end_di_column = module_fn.rbrace_column, + .end_di_line = func.rbrace_line, + .end_di_column = func.rbrace_column, }; defer function.stack.deinit(bin_file.allocator); defer function.blocks.deinit(bin_file.allocator); @@ -344,8 +345,8 @@ pub fn generate( .src_loc = src_loc, .code = code, .prev_di_pc = 0, - .prev_di_line = module_fn.lbrace_line, - .prev_di_column = module_fn.lbrace_column, + .prev_di_line = func.lbrace_line, + .prev_di_column = func.lbrace_column, }; defer emit.deinit(); @@ -1345,37 +1346,41 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // on linking. if (try self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { - if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| { - const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { - const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); - const atom = elf_file.getAtom(atom_index); - _ = try atom.getOrCreateOffsetTableEntry(elf_file); - break :blk @as(u32, @intCast(atom.getOffsetTableAddress(elf_file))); - } else unreachable; + switch (mod.intern_pool.indexToKey(func_value.ip_index)) { + .func => |func| { + const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { + const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); + const atom = elf_file.getAtom(atom_index); + _ = try atom.getOrCreateOffsetTableEntry(elf_file); + break :blk @as(u32, @intCast(atom.getOffsetTableAddress(elf_file))); + } else unreachable; - try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr }); - _ = try self.addInst(.{ - .tag = .jmpl, - .data = .{ - .arithmetic_3op = .{ - .is_imm = false, - .rd = .o7, - .rs1 = .o7, - .rs2_or_imm = .{ .rs2 = .g0 }, + _ = try self.addInst(.{ + .tag = .jmpl, + .data = .{ + .arithmetic_3op = .{ + .is_imm = false, + .rd = .o7, + .rs1 = .o7, + .rs2_or_imm = .{ .rs2 = .g0 }, + }, }, - }, - }); + }); - // TODO Find a way to fill this delay slot - _ = try self.addInst(.{ - .tag = .nop, - .data = .{ .nop = {} }, - }); - } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) { - return self.fail("TODO implement calling extern functions", .{}); - } else { - return self.fail("TODO implement calling bitcasted functions", .{}); + // TODO Find a way to fill this delay slot + _ = try self.addInst(.{ + .tag = .nop, + .data = .{ .nop = {} }, + }); + }, + .extern_func => { + return self.fail("TODO implement calling extern functions", .{}); + }, + else => { + return self.fail("TODO implement calling bitcasted functions", .{}); + }, } } else @panic("TODO SPARCv9 currently does not support non-ELF binaries"); } else { @@ -1660,9 +1665,9 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = mod.funcPtr(ty_fn.func); + const func = mod.funcInfo(ty_fn.func); // TODO emit debug info for function change - _ = function; + _ = func; return self.finishAir(inst, 
.dead, .{ .none, .none, .none }); } @@ -3595,13 +3600,15 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live } fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void { + const mod = self.bin_file.options.module.?; const arg = self.air.instructions.items(.data)[inst].arg; const ty = self.air.getRefType(arg.ty); - const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg.src_index); + const owner_decl = mod.funcOwnerDeclIndex(self.func_index); + const name = mod.getParamName(self.func_index, arg.src_index); switch (self.debug_output) { .dwarf => |dw| switch (mcv) { - .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{ + .register => |reg| try dw.genArgDbgInfo(name, ty, owner_decl, .{ .register = reg.dwarfLocOp(), }), else => {}, @@ -4127,11 +4134,12 @@ fn genStoreASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Re } fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { + const mod = self.bin_file.options.module.?; const mcv: MCValue = switch (try codegen.genTypedValue( self.bin_file, self.src_loc, typed_value, - self.mod_fn.owner_decl, + mod.funcOwnerDeclIndex(self.func_index), )) { .mcv => |mcv| switch (mcv) { .none => .none, @@ -4452,6 +4460,7 @@ fn realStackOffset(off: u32) u32 { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues { const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(fn_ty).?; const cc = fn_info.cc; var result: CallMCValues = .{ @@ -4486,14 +4495,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) .callee => abi.c_abi_int_param_regs_callee_view, }; - for (fn_info.param_types, 0..) |ty, i| { + for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| { const param_size = @as(u32, @intCast(ty.toType().abiSize(mod))); if (param_size <= 8) { if (next_register < argument_registers.len) { - result.args[i] = .{ .register = argument_registers[next_register] }; + result_arg.* = .{ .register = argument_registers[next_register] }; next_register += 1; } else { - result.args[i] = .{ .stack_offset = next_stack_offset }; + result_arg.* = .{ .stack_offset = next_stack_offset }; next_register += next_stack_offset; } } else if (param_size <= 16) { @@ -4502,11 +4511,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) } else if (next_register < argument_registers.len) { return self.fail("TODO MCValues split register + stack", .{}); } else { - result.args[i] = .{ .stack_offset = next_stack_offset }; + result_arg.* = .{ .stack_offset = next_stack_offset }; next_register += next_stack_offset; } } else { - result.args[i] = .{ .stack_offset = next_stack_offset }; + result_arg.* = .{ .stack_offset = next_stack_offset }; next_register += next_stack_offset; } } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index ab6ded682e..14d2cf95c4 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -650,7 +650,7 @@ air: Air, liveness: Liveness, gpa: mem.Allocator, debug_output: codegen.DebugInfoOutput, -mod_fn: *const Module.Fn, +func_index: InternPool.Index, /// Contains a list of current branches. 
/// When we return from a branch, the branch will be popped from this list, /// which means branches can only contain references from within its own branch, @@ -1202,7 +1202,7 @@ fn genFunctype( pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - func_index: Module.Fn.Index, + func_index: InternPool.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -1210,7 +1210,7 @@ pub fn generate( ) codegen.CodeGenError!codegen.Result { _ = src_loc; const mod = bin_file.options.module.?; - const func = mod.funcPtr(func_index); + const func = mod.funcInfo(func_index); var code_gen: CodeGen = .{ .gpa = bin_file.allocator, .air = air, @@ -1223,7 +1223,7 @@ pub fn generate( .target = bin_file.options.target, .bin_file = bin_file.cast(link.File.Wasm).?, .debug_output = debug_output, - .mod_fn = func, + .func_index = func_index, }; defer code_gen.deinit(); @@ -1237,8 +1237,9 @@ pub fn generate( fn genFunc(func: *CodeGen) InnerError!void { const mod = func.bin_file.base.options.module.?; + const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(func.decl.ty).?; - var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod); + var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), fn_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); _ = try func.bin_file.storeDeclType(func.decl_index, func_type); @@ -1347,6 +1348,7 @@ const CallWValues = struct { fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues { const mod = func.bin_file.base.options.module.?; + const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(fn_ty).?; const cc = fn_info.cc; var result: CallWValues = .{ @@ -1369,7 +1371,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV switch (cc) { .Unspecified => { - for (fn_info.param_types) |ty| { + for (fn_info.param_types.get(ip)) |ty| { if (!ty.toType().hasRuntimeBitsIgnoreComptime(mod)) { continue; } @@ -1379,7 +1381,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV } }, .C => { - for (fn_info.param_types) |ty| { + for (fn_info.param_types.get(ip)) |ty| { const ty_classes = abi.classifyType(ty.toType(), mod); for (ty_classes) |class| { if (class == .none) continue; @@ -2185,6 +2187,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const ty = func.typeOf(pl_op.operand); const mod = func.bin_file.base.options.module.?; + const ip = &mod.intern_pool; const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, .Pointer => ty.childType(mod), @@ -2203,7 +2206,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } else if (func_val.getExternFunc(mod)) |extern_func| { const ext_decl = mod.declPtr(extern_func.decl); const ext_info = mod.typeToFunc(ext_decl.ty).?; - var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod); + var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types.get(ip), ext_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl); const atom = func.bin_file.getAtomPtr(atom_index); @@ -2253,7 +2256,7 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const operand = try func.resolveInst(pl_op.operand); try func.emitWValue(operand); - var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, 
fn_info.return_type.toType(), mod); + var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types.get(ip), fn_info.return_type.toType(), mod); defer fn_type.deinit(func.gpa); const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type); @@ -2564,8 +2567,8 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { switch (func.debug_output) { .dwarf => |dwarf| { const src_index = func.air.instructions.items(.data)[inst].arg.src_index; - const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, src_index); - try dwarf.genArgDbgInfo(name, arg_ty, func.mod_fn.owner_decl, .{ + const name = mod.getParamName(func.func_index, src_index); + try dwarf.genArgDbgInfo(name, arg_ty, mod.funcOwnerDeclIndex(func.func_index), .{ .wasm_local = arg.local.value, }); }, @@ -6198,6 +6201,7 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void { if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{}); + const mod = func.bin_file.base.options.module.?; const pl_op = func.air.instructions.items(.data)[inst].pl_op; const ty = func.typeOf(pl_op.operand); const operand = try func.resolveInst(pl_op.operand); @@ -6214,7 +6218,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void { break :blk .nop; }, }; - try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.mod_fn.owner_decl, is_ptr, loc); + try func.debug_output.dwarf.genVarDbgInfo(name, ty, mod.funcOwnerDeclIndex(func.func_index), is_ptr, loc); func.finishAir(inst, .none, &.{}); } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 651c2bfb8c..76e63675a1 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -110,20 +110,21 @@ const FrameAddr = struct { index: FrameIndex, off: i32 = 0 }; const RegisterOffset = struct { reg: Register, off: i32 = 0 }; const Owner = union(enum) { - mod_fn: *const Module.Fn, + func_index: InternPool.Index, lazy_sym: link.File.LazySymbol, fn getDecl(owner: Owner, mod: *Module) Module.Decl.Index { return switch (owner) { - .mod_fn => |mod_fn| mod_fn.owner_decl, + .func_index => |func_index| mod.funcOwnerDeclIndex(func_index), .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(mod), }; } fn getSymbolIndex(owner: Owner, ctx: *Self) !u32 { switch (owner) { - .mod_fn => |mod_fn| { - const decl_index = mod_fn.owner_decl; + .func_index => |func_index| { + const mod = ctx.bin_file.options.module.?; + const decl_index = mod.funcOwnerDeclIndex(func_index); if (ctx.bin_file.cast(link.File.MachO)) |macho_file| { const atom = try macho_file.getOrCreateAtomForDecl(decl_index); return macho_file.getAtom(atom).getSymbolIndex().?; @@ -638,7 +639,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn_index: Module.Fn.Index, + func_index: InternPool.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -649,8 +650,8 @@ pub fn generate( } const mod = bin_file.options.module.?; - const module_fn = mod.funcPtr(module_fn_index); - const fn_owner_decl = mod.declPtr(module_fn.owner_decl); + const func = mod.funcInfo(func_index); + const fn_owner_decl = mod.declPtr(func.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -662,15 +663,15 @@ pub fn generate( .target = &bin_file.options.target, .bin_file = bin_file, .debug_output = debug_output, - .owner = .{ .mod_fn = module_fn }, + .owner = .{ .func_index = func_index }, .err_msg = null, .args = undefined, // 
populated after `resolveCallingConventionValues` .ret_mcv = undefined, // populated after `resolveCallingConventionValues` .fn_type = fn_type, .arg_index = 0, .src_loc = src_loc, - .end_di_line = module_fn.rbrace_line, - .end_di_column = module_fn.rbrace_column, + .end_di_line = func.rbrace_line, + .end_di_column = func.rbrace_column, }; defer { function.frame_allocs.deinit(gpa); @@ -687,17 +688,16 @@ pub fn generate( if (builtin.mode == .Debug) function.mir_to_air_map.deinit(gpa); } - wip_mir_log.debug("{}:", .{function.fmtDecl(module_fn.owner_decl)}); + wip_mir_log.debug("{}:", .{function.fmtDecl(func.owner_decl)}); + + const ip = &mod.intern_pool; try function.frame_allocs.resize(gpa, FrameIndex.named_count); function.frame_allocs.set( @intFromEnum(FrameIndex.stack_frame), FrameAlloc.init(.{ .size = 0, - .alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack| - @intCast(set_align_stack.alignment.toByteUnitsOptional().?) - else - 1, + .alignment = @intCast(func.analysis(ip).stack_alignment.toByteUnitsOptional() orelse 1), }), ); function.frame_allocs.set( @@ -761,8 +761,8 @@ pub fn generate( .debug_output = debug_output, .code = code, .prev_di_pc = 0, - .prev_di_line = module_fn.lbrace_line, - .prev_di_column = module_fn.lbrace_column, + .prev_di_line = func.lbrace_line, + .prev_di_column = func.lbrace_column, }; defer emit.deinit(); emit.emitMir() catch |err| switch (err) { @@ -7942,7 +7942,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const ty = self.typeOfIndex(inst); const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const name = self.owner.mod_fn.getParamName(mod, src_index); + const name = mod.getParamName(self.owner.func_index, src_index); try self.genArgDbgInfo(ty, name, dst_mcv); break :result dst_mcv; @@ -8139,7 +8139,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (try self.air.value(callee, mod)) |func_value| { const func_key = mod.intern_pool.indexToKey(func_value.ip_index); if (switch (func_key) { - .func => |func| mod.funcPtr(func.index).owner_decl, + .func => |func| func.owner_decl, .ptr => |ptr| switch (ptr.addr) { .decl => |decl| decl, else => null, @@ -8582,9 +8582,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = self.bin_file.options.module.?; - const function = mod.funcPtr(ty_fn.func); + const func = mod.funcInfo(ty_fn.func); // TODO emit debug info for function change - _ = function; + _ = func; return self.finishAir(inst, .unreach, .{ .none, .none, .none }); } @@ -11719,11 +11719,12 @@ fn resolveCallingConventionValues( stack_frame_base: FrameIndex, ) !CallMCValues { const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const cc = fn_info.cc; const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len); defer self.gpa.free(param_types); - for (param_types[0..fn_info.param_types.len], fn_info.param_types) |*dest, src| { + for (param_types[0..fn_info.param_types.len], fn_info.param_types.get(ip)) |*dest, src| { dest.* = src.toType(); } // TODO: promote var arg types diff --git a/src/codegen.zig b/src/codegen.zig index 69499fb1ad..dcdf3f37fb 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -67,7 +67,7 @@ pub const DebugInfoOutput = union(enum) { pub fn generateFunction( bin_file: *link.File, src_loc: Module.SrcLoc, - func_index: Module.Fn.Index, + func_index: InternPool.Index, 
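Aside: the reworked `Owner` union above expresses "who owns the generated code" as a tagged union and resolves a decl by switching on the tag. A toy model of that dispatch; `FuncIndex`, `LazySym`, and the `+ 100` lookup are invented stand-ins for `InternPool.Index`, `link.File.LazySymbol`, and `mod.funcOwnerDeclIndex`:

    const std = @import("std");

    const FuncIndex = u32; // stand-in for InternPool.Index
    const LazySym = struct { decl: u32 }; // stand-in for link.File.LazySymbol

    const Owner = union(enum) {
        func_index: FuncIndex,
        lazy_sym: LazySym,

        fn getDecl(owner: Owner) u32 {
            return switch (owner) {
                // Each variant recovers the owning decl through its own lookup.
                .func_index => |func_index| func_index + 100,
                .lazy_sym => |lazy_sym| lazy_sym.decl,
            };
        }
    };

    test "per-variant dispatch through a tagged union" {
        try std.testing.expectEqual(@as(u32, 107), Owner.getDecl(.{ .func_index = 7 }));
        try std.testing.expectEqual(@as(u32, 3), Owner.getDecl(.{ .lazy_sym = .{ .decl = 3 } }));
    }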
air: Air, liveness: Liveness, code: *std.ArrayList(u8), diff --git a/src/codegen/c.zig b/src/codegen/c.zig index b685263759..1ab5a976cd 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -257,7 +257,8 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) { return .{ .data = ident }; } -/// This data is available when outputting .c code for a `Module.Fn.Index`. +/// This data is available when outputting .c code for an `InternPool.Index` +/// that corresponds to `func`. /// It is not available when generating .h file. pub const Function = struct { air: Air, @@ -268,7 +269,7 @@ pub const Function = struct { next_block_index: usize = 0, object: Object, lazy_fns: LazyFnMap, - func_index: Module.Fn.Index, + func_index: InternPool.Index, /// All the locals, to be emitted at the top of the function. locals: std.ArrayListUnmanaged(Local) = .{}, /// Which locals are available for reuse, based on Type. @@ -1487,6 +1488,7 @@ pub const DeclGen = struct { ) !void { const store = &dg.ctypes.set; const mod = dg.module; + const ip = &mod.intern_pool; const fn_decl = mod.declPtr(fn_decl_index); const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind); @@ -1499,7 +1501,7 @@ pub const DeclGen = struct { else => unreachable, } } - if (fn_decl.val.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold "); + if (fn_decl.val.getFunction(mod)) |func| if (func.analysis(ip).is_cold) try w.writeAll("zig_cold "); if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( @@ -1744,7 +1746,7 @@ pub const DeclGen = struct { return switch (mod.intern_pool.indexToKey(tv.val.ip_index)) { .variable => |variable| mod.decl_exports.contains(variable.decl), .extern_func => true, - .func => |func| mod.decl_exports.contains(mod.funcPtr(func.index).owner_decl), + .func => |func| mod.decl_exports.contains(func.owner_decl), else => unreachable, }; } @@ -1800,7 +1802,12 @@ pub const DeclGen = struct { } } - fn writeCValueMember(dg: *DeclGen, writer: anytype, c_value: CValue, member: CValue) !void { + fn writeCValueMember( + dg: *DeclGen, + writer: anytype, + c_value: CValue, + member: CValue, + ) error{ OutOfMemory, AnalysisFail }!void { try dg.writeCValue(writer, c_value); try writer.writeByte('.'); try dg.writeCValue(writer, member); @@ -4161,7 +4168,7 @@ fn airCall( const callee_val = (try f.air.value(pl_op.operand, mod)) orelse break :known; break :fn_decl switch (mod.intern_pool.indexToKey(callee_val.ip_index)) { .extern_func => |extern_func| extern_func.decl, - .func => |func| mod.funcPtr(func.index).owner_decl, + .func => |func| func.owner_decl, .ptr => |ptr| switch (ptr.addr) { .decl => |decl| decl, else => break :known, @@ -4238,9 +4245,9 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue { const ty_fn = f.air.instructions.items(.data)[inst].ty_fn; const mod = f.object.dg.module; const writer = f.object.writer(); - const function = mod.funcPtr(ty_fn.func); + const owner_decl = mod.funcOwnerDeclPtr(ty_fn.func); try writer.print("/* dbg func:{s} */\n", .{ - mod.intern_pool.stringToSlice(mod.declPtr(function.owner_decl).name), + mod.intern_pool.stringToSlice(owner_decl.name), }); return .none; } diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig index dbdb65ac29..01540a9f2d 100644 --- a/src/codegen/c/type.zig +++ b/src/codegen/c/type.zig @@ -1722,6 +1722,7 @@ pub const CType = extern union { .Fn => { const info = mod.typeToFunc(ty).?; + const ip = &mod.intern_pool; if (!info.is_generic) { if (lookup.isMutable()) { 
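Aside: the `param_types.get(ip)` rewrites throughout this patch reflect parameter types now living as a range inside the intern pool rather than as a plain slice, so callers materialize the slice against the pool first. A sketch of that storage shape under toy assumptions (a pool of `u32` items; the real `InternPool` differs):

    const std = @import("std");

    const Pool = struct {
        items: []const u32,

        const Slice = struct {
            start: u32,
            len: u32,

            // A pool "slice" is just (start, len); it only becomes a
            // real []const u32 once resolved against the backing pool.
            fn get(s: Slice, pool: *const Pool) []const u32 {
                return pool.items[s.start..][0..s.len];
            }
        };
    };

    test "materialize a pool-backed slice" {
        const pool: Pool = .{ .items = &.{ 10, 20, 30, 40 } };
        const param_types: Pool.Slice = .{ .start = 1, .len = 2 };
        try std.testing.expectEqualSlices(u32, &.{ 20, 30 }, param_types.get(&pool));
    }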
const param_kind: Kind = switch (kind) { @@ -1730,7 +1731,7 @@ pub const CType = extern union { .payload => unreachable, }; _ = try lookup.typeToIndex(info.return_type.toType(), param_kind); - for (info.param_types) |param_type| { + for (info.param_types.get(ip)) |param_type| { if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; _ = try lookup.typeToIndex(param_type.toType(), param_kind); } @@ -2014,6 +2015,7 @@ pub const CType = extern union { .function, .varargs_function, => { + const ip = &mod.intern_pool; const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const param_kind: Kind = switch (kind) { @@ -2023,14 +2025,14 @@ pub const CType = extern union { }; var c_params_len: usize = 0; - for (info.param_types) |param_type| { + for (info.param_types.get(ip)) |param_type| { if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; c_params_len += 1; } const params_pl = try arena.alloc(Index, c_params_len); var c_param_i: usize = 0; - for (info.param_types) |param_type| { + for (info.param_types.get(ip)) |param_type| { if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; params_pl[c_param_i] = store.set.typeToIndex(param_type.toType(), mod, param_kind).?; c_param_i += 1; @@ -2147,6 +2149,7 @@ pub const CType = extern union { => { if (ty.zigTypeTag(mod) != .Fn) return false; + const ip = &mod.intern_pool; const info = mod.typeToFunc(ty).?; assert(!info.is_generic); const data = cty.cast(Payload.Function).?.data; @@ -2160,7 +2163,7 @@ pub const CType = extern union { return false; var c_param_i: usize = 0; - for (info.param_types) |param_type| { + for (info.param_types.get(ip)) |param_type| { if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; if (c_param_i >= data.param_types.len) return false; @@ -2202,6 +2205,7 @@ pub const CType = extern union { autoHash(hasher, t); const mod = self.lookup.getModule(); + const ip = &mod.intern_pool; switch (t) { .fwd_anon_struct, .fwd_anon_union, @@ -2270,7 +2274,7 @@ pub const CType = extern union { }; self.updateHasherRecurse(hasher, info.return_type.toType(), param_kind); - for (info.param_types) |param_type| { + for (info.param_types.get(ip)) |param_type| { if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; self.updateHasherRecurse(hasher, param_type.toType(), param_kind); } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ec456e53a7..4960414499 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -867,14 +867,15 @@ pub const Object = struct { pub fn updateFunc( o: *Object, mod: *Module, - func_index: Module.Fn.Index, + func_index: InternPool.Index, air: Air, liveness: Liveness, ) !void { - const func = mod.funcPtr(func_index); + const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const target = mod.getTarget(); + const ip = &mod.intern_pool; var dg: DeclGen = .{ .object = o, @@ -885,26 +886,25 @@ pub const Object = struct { const llvm_func = try o.resolveLlvmFunction(decl_index); - if (mod.align_stack_fns.get(func_index)) |align_info| { - o.addFnAttrInt(llvm_func, "alignstack", align_info.alignment.toByteUnitsOptional().?); - o.addFnAttr(llvm_func, "noinline"); - } else { - Object.removeFnAttr(llvm_func, "alignstack"); - if (!func.is_noinline) Object.removeFnAttr(llvm_func, "noinline"); - } - - if (func.is_cold) { - o.addFnAttr(llvm_func, "cold"); - } else { - Object.removeFnAttr(llvm_func, "cold"); - } - - if (func.is_noinline) { + if (func.analysis(ip).is_noinline) { 
o.addFnAttr(llvm_func, "noinline"); } else { Object.removeFnAttr(llvm_func, "noinline"); } + if (func.analysis(ip).stack_alignment.toByteUnitsOptional()) |alignment| { + o.addFnAttrInt(llvm_func, "alignstack", alignment); + o.addFnAttr(llvm_func, "noinline"); + } else { + Object.removeFnAttr(llvm_func, "alignstack"); + } + + if (func.analysis(ip).is_cold) { + o.addFnAttr(llvm_func, "cold"); + } else { + Object.removeFnAttr(llvm_func, "cold"); + } + // TODO: disable this if safety is off for the function scope const ssp_buf_size = mod.comp.bin_file.options.stack_protector; if (ssp_buf_size != 0) { @@ -921,7 +921,7 @@ pub const Object = struct { o.addFnAttrString(llvm_func, "no-stack-arg-probe", ""); } - if (mod.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section| + if (ip.stringToSliceUnwrap(decl.@"linksection")) |section| llvm_func.setSection(section); // Remove all the basic blocks of a function in order to start over, generating @@ -968,7 +968,7 @@ pub const Object = struct { .byval => { assert(!it.byval_attr); const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index].toType(); + const param_ty = fn_info.param_types.get(ip)[param_index].toType(); const param = llvm_func.getParam(llvm_arg_i); try args.ensureUnusedCapacity(1); @@ -987,7 +987,7 @@ pub const Object = struct { llvm_arg_i += 1; }, .byref => { - const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const param_llvm_ty = try o.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); const alignment = param_ty.abiAlignment(mod); @@ -1006,7 +1006,7 @@ pub const Object = struct { } }, .byref_mut => { - const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const param_llvm_ty = try o.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); const alignment = param_ty.abiAlignment(mod); @@ -1026,7 +1026,7 @@ pub const Object = struct { }, .abi_sized_int => { assert(!it.byval_attr); - const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1053,7 +1053,7 @@ pub const Object = struct { }, .slice => { assert(!it.byval_attr); - const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const ptr_info = param_ty.ptrInfo(mod); if (math.cast(u5, it.zig_index - 1)) |i| { @@ -1083,7 +1083,7 @@ pub const Object = struct { .multiple_llvm_types => { assert(!it.byval_attr); const field_types = it.llvm_types_buffer[0..it.llvm_types_len]; - const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const param_llvm_ty = try o.lowerType(param_ty); const param_alignment = param_ty.abiAlignment(mod); const arg_ptr = buildAllocaInner(o.context, builder, llvm_func, false, param_llvm_ty, param_alignment, target); @@ -1114,7 +1114,7 @@ pub const Object = struct { args.appendAssumeCapacity(casted); }, .float_array => { - const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const param_llvm_ty = try o.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1132,7 +1132,7 @@ pub const Object = struct { } 
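Aside: attributes such as `is_cold`, `is_noinline`, and `stack_alignment` are now read through `func.analysis(ip)` instead of fields on a `Module.Fn`. This sketch models only the lookup shape, per-function state in a side table keyed by the function's index; the `Analysis` and `Func` types here are invented:

    const std = @import("std");

    const Analysis = struct {
        is_cold: bool = false,
        is_noinline: bool = false,
        stack_alignment: ?u32 = null, // models Alignment.toByteUnitsOptional()
    };

    const Func = struct {
        index: usize,

        // Returns a mutable view into the side table, so callers can
        // both read and update analysis state through one accessor.
        fn analysis(func: Func, table: []Analysis) *Analysis {
            return &table[func.index];
        }
    };

    test "function attributes live in a side table keyed by index" {
        var table: [2]Analysis = .{ .{}, .{} };
        const func: Func = .{ .index = 1 };
        func.analysis(&table).is_cold = true;
        try std.testing.expect(table[1].is_cold);
        try std.testing.expectEqual(@as(?u32, null), func.analysis(&table).stack_alignment);
    }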
}, .i32_array, .i64_array => { - const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const param_llvm_ty = try o.lowerType(param_ty); const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; @@ -1168,7 +1168,7 @@ pub const Object = struct { const decl_di_ty = try o.lowerDebugType(decl.ty, .full); const subprogram = dib.createFunction( di_file.?.toScope(), - mod.intern_pool.stringToSlice(decl.name), + ip.stringToSlice(decl.name), llvm_func.getValueName(), di_file.?, line_number, @@ -1460,6 +1460,7 @@ pub const Object = struct { const target = o.target; const dib = o.di_builder.?; const mod = o.module; + const ip = &mod.intern_pool; switch (ty.zigTypeTag(mod)) { .Void, .NoReturn => { const di_type = dib.createBasicType("void", 0, DW.ATE.signed); @@ -1492,7 +1493,6 @@ pub const Object = struct { return enum_di_ty; } - const ip = &mod.intern_pool; const enum_type = ip.indexToKey(ty.toIntern()).enum_type; const enumerators = try gpa.alloc(*llvm.DIEnumerator, enum_type.names.len); @@ -1518,7 +1518,7 @@ pub const Object = struct { if (@sizeOf(usize) == @sizeOf(u64)) { enumerators[i] = dib.createEnumerator2( field_name_z, - @as(c_uint, @intCast(bigint.limbs.len)), + @intCast(bigint.limbs.len), bigint.limbs.ptr, int_info.bits, int_info.signedness == .unsigned, @@ -2320,8 +2320,8 @@ pub const Object = struct { try param_di_types.append(try o.lowerDebugType(ptr_ty, .full)); } - for (0..mod.typeToFunc(ty).?.param_types.len) |i| { - const param_ty = mod.typeToFunc(ty).?.param_types[i].toType(); + for (0..fn_info.param_types.len) |i| { + const param_ty = fn_info.param_types.get(ip)[i].toType(); if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; if (isByRef(param_ty, mod)) { @@ -2475,9 +2475,10 @@ pub const Object = struct { const fn_type = try o.lowerType(zig_fn_type); const fqn = try decl.getFullyQualifiedName(mod); + const ip = &mod.intern_pool; const llvm_addrspace = toLlvmAddressSpace(decl.@"addrspace", target); - const llvm_fn = o.llvm_module.addFunctionInAddressSpace(mod.intern_pool.stringToSlice(fqn), fn_type, llvm_addrspace); + const llvm_fn = o.llvm_module.addFunctionInAddressSpace(ip.stringToSlice(fqn), fn_type, llvm_addrspace); gop.value_ptr.* = llvm_fn; const is_extern = decl.isExtern(mod); @@ -2486,8 +2487,8 @@ pub const Object = struct { llvm_fn.setUnnamedAddr(.True); } else { if (target.isWasm()) { - o.addFnAttrString(llvm_fn, "wasm-import-name", mod.intern_pool.stringToSlice(decl.name)); - if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { + o.addFnAttrString(llvm_fn, "wasm-import-name", ip.stringToSlice(decl.name)); + if (ip.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| { if (!std.mem.eql(u8, lib_name, "c")) { o.addFnAttrString(llvm_fn, "wasm-import-module", lib_name); } @@ -2546,13 +2547,13 @@ pub const Object = struct { while (it.next()) |lowering| switch (lowering) { .byval => { const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index].toType(); + const param_ty = fn_info.param_types.get(ip)[param_index].toType(); if (!isByRef(param_ty, mod)) { o.addByValParamAttrs(llvm_fn, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { - const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1]; const param_llvm_ty = try o.lowerType(param_ty.toType()); const alignment = param_ty.toType().abiAlignment(mod); 
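Aside: the parameter-lowering walks around this point all take the form `while (it.next()) |lowering| switch (lowering)`: the iterator yields an optional enum and `null` ends the loop. The idiom in miniature, with a made-up `Lowering` enum:

    const std = @import("std");

    const Lowering = enum { byval, byref, no_bits };

    const ParamIt = struct {
        items: []const Lowering,
        i: usize = 0,

        // Optional return doubles as the termination signal.
        fn next(it: *ParamIt) ?Lowering {
            if (it.i >= it.items.len) return null;
            defer it.i += 1;
            return it.items[it.i];
        }
    };

    test "drive a switch from an optional-returning iterator" {
        var it: ParamIt = .{ .items = &.{ .byval, .no_bits, .byref } };
        var lowered: usize = 0;
        while (it.next()) |lowering| switch (lowering) {
            .no_bits => continue, // skipped, exactly like zero-bit params
            .byval, .byref => lowered += 1,
        };
        try std.testing.expectEqual(@as(usize, 2), lowered);
    }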
o.addByRefParamAttrs(llvm_fn, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); @@ -3031,6 +3032,7 @@ pub const Object = struct { fn lowerTypeFn(o: *Object, fn_ty: Type) Allocator.Error!*llvm.Type { const mod = o.module; + const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(fn_ty).?; const llvm_ret_ty = try lowerFnRetTy(o, fn_info); @@ -3052,19 +3054,19 @@ pub const Object = struct { while (it.next()) |lowering| switch (lowering) { .no_bits => continue, .byval => { - const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); try llvm_params.append(try o.lowerType(param_ty)); }, .byref, .byref_mut => { try llvm_params.append(o.context.pointerType(0)); }, .abi_sized_int => { - const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const abi_size = @as(c_uint, @intCast(param_ty.abiSize(mod))); try llvm_params.append(o.context.intType(abi_size * 8)); }, .slice => { - const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const ptr_ty = if (param_ty.zigTypeTag(mod) == .Optional) param_ty.optionalChild(mod).slicePtrFieldType(mod) else @@ -3083,7 +3085,7 @@ pub const Object = struct { try llvm_params.append(o.context.intType(16)); }, .float_array => |count| { - const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const float_ty = try o.lowerType(aarch64_c_abi.getFloatArrayType(param_ty, mod).?); const field_count = @as(c_uint, @intCast(count)); const arr_ty = float_ty.arrayType(field_count); @@ -3137,8 +3139,7 @@ pub const Object = struct { return llvm_type.getUndef(); } - const val_key = mod.intern_pool.indexToKey(tv.val.toIntern()); - switch (val_key) { + switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { .int_type, .ptr_type, .array_type, @@ -3175,12 +3176,14 @@ pub const Object = struct { .enum_literal, .empty_enum_value, => unreachable, // non-runtime values - .extern_func, .func => { - const fn_decl_index = switch (val_key) { - .extern_func => |extern_func| extern_func.decl, - .func => |func| mod.funcPtr(func.index).owner_decl, - else => unreachable, - }; + .extern_func => |extern_func| { + const fn_decl_index = extern_func.decl; + const fn_decl = mod.declPtr(fn_decl_index); + try mod.markDeclAlive(fn_decl); + return o.resolveLlvmFunction(fn_decl_index); + }, + .func => |func| { + const fn_decl_index = func.owner_decl; const fn_decl = mod.declPtr(fn_decl_index); try mod.markDeclAlive(fn_decl); return o.resolveLlvmFunction(fn_decl_index); @@ -4598,6 +4601,7 @@ pub const FuncGen = struct { const args = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[extra.end..][0..extra.data.args_len])); const o = self.dg.object; const mod = o.module; + const ip = &mod.intern_pool; const callee_ty = self.typeOf(pl_op.operand); const zig_fn_ty = switch (callee_ty.zigTypeTag(mod)) { .Fn => callee_ty, @@ -4801,14 +4805,14 @@ pub const FuncGen = struct { while (it.next()) |lowering| switch (lowering) { .byval => { const param_index = it.zig_index - 1; - const param_ty = fn_info.param_types[param_index].toType(); + const param_ty = fn_info.param_types.get(ip)[param_index].toType(); if (!isByRef(param_ty, mod)) { o.addByValParamAttrs(call, param_ty, param_index, fn_info, it.llvm_index - 1); } }, .byref => { const param_index = it.zig_index - 1; - const 
param_ty = fn_info.param_types[param_index].toType(); + const param_ty = fn_info.param_types.get(ip)[param_index].toType(); const param_llvm_ty = try o.lowerType(param_ty); const alignment = param_ty.abiAlignment(mod); o.addByRefParamAttrs(call, it.llvm_index - 1, alignment, it.byval_attr, param_llvm_ty); @@ -4828,7 +4832,7 @@ pub const FuncGen = struct { .slice => { assert(!it.byval_attr); - const param_ty = fn_info.param_types[it.zig_index - 1].toType(); + const param_ty = fn_info.param_types.get(ip)[it.zig_index - 1].toType(); const ptr_info = param_ty.ptrInfo(mod); const llvm_arg_i = it.llvm_index - 2; @@ -4930,7 +4934,7 @@ pub const FuncGen = struct { fg.context.pointerType(0).constNull(), null_opt_addr_global, }; - const panic_func = mod.funcPtrUnwrap(mod.panic_func_index).?; + const panic_func = mod.funcInfo(mod.panic_func_index); const panic_decl = mod.declPtr(panic_func.owner_decl); const fn_info = mod.typeToFunc(panic_decl.ty).?; const panic_global = try o.resolveLlvmFunction(panic_func.owner_decl); @@ -6030,7 +6034,7 @@ pub const FuncGen = struct { const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = o.module; - const func = mod.funcPtr(ty_fn.func); + const func = mod.funcInfo(ty_fn.func); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const di_file = try o.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); @@ -6039,7 +6043,7 @@ pub const FuncGen = struct { const cur_debug_location = self.builder.getCurrentDebugLocation2(); try self.dbg_inlined.append(self.gpa, .{ - .loc = @as(*llvm.DILocation, @ptrCast(cur_debug_location)), + .loc = @ptrCast(cur_debug_location), .scope = self.di_scope.?, .base_line = self.base_line, }); @@ -6057,8 +6061,6 @@ pub const FuncGen = struct { .is_var_args = false, .is_generic = false, .is_noinline = false, - .align_is_generic = false, - .cc_is_generic = false, .section_is_generic = false, .addrspace_is_generic = false, }); @@ -6090,8 +6092,7 @@ pub const FuncGen = struct { const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; const mod = o.module; - const func = mod.funcPtr(ty_fn.func); - const decl = mod.declPtr(func.owner_decl); + const decl = mod.funcOwnerDeclPtr(ty_fn.func); const di_file = try o.getDIFile(self.gpa, mod.namespacePtr(decl.src_namespace).file_scope); self.di_file = di_file; const old = self.dbg_inlined.pop(); @@ -8137,12 +8138,13 @@ pub const FuncGen = struct { } const src_index = self.air.instructions.items(.data)[inst].arg.src_index; - const func = self.dg.decl.getOwnedFunction(mod).?; + const func_index = self.dg.decl.getOwnedFunctionIndex(); + const func = mod.funcInfo(func_index); const lbrace_line = mod.declPtr(func.owner_decl).src_line + func.lbrace_line + 1; const lbrace_col = func.lbrace_column + 1; const di_local_var = dib.createParameterVariable( self.di_scope.?, - func.getParamName(mod, src_index).ptr, // TODO test 0 bit args + mod.getParamName(func_index, src_index).ptr, // TODO test 0 bit args self.di_file.?, lbrace_line, try o.lowerDebugType(inst_ty, .full), @@ -10653,30 +10655,31 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField { } fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool { - if (!fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) return false; + const return_type = fn_info.return_type.toType(); + if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) return false; const target = mod.getTarget(); switch (fn_info.cc) { - .Unspecified, .Inline => return 
isByRef(fn_info.return_type.toType(), mod), + .Unspecified, .Inline => return isByRef(return_type, mod), .C => switch (target.cpu.arch) { .mips, .mipsel => return false, .x86_64 => switch (target.os.tag) { - .windows => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory, - else => return firstParamSRetSystemV(fn_info.return_type.toType(), mod), + .windows => return x86_64_abi.classifyWindows(return_type, mod) == .memory, + else => return firstParamSRetSystemV(return_type, mod), }, - .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type.toType(), mod)[0] == .indirect, - .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory, - .arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type.toType(), mod, .ret)) { + .wasm32 => return wasm_c_abi.classifyType(return_type, mod)[0] == .indirect, + .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(return_type, mod) == .memory, + .arm, .armeb => switch (arm_c_abi.classifyType(return_type, mod, .ret)) { .memory, .i64_array => return true, .i32_array => |size| return size != 1, .byval => return false, }, - .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory, + .riscv32, .riscv64 => return riscv_c_abi.classifyType(return_type, mod) == .memory, else => return false, // TODO investigate C ABI for other architectures }, - .SysV => return firstParamSRetSystemV(fn_info.return_type.toType(), mod), - .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory, - .Stdcall => return !isScalar(mod, fn_info.return_type.toType()), + .SysV => return firstParamSRetSystemV(return_type, mod), + .Win64 => return x86_64_abi.classifyWindows(return_type, mod) == .memory, + .Stdcall => return !isScalar(mod, return_type), else => return false, } } @@ -10888,13 +10891,17 @@ const ParamTypeIterator = struct { pub fn next(it: *ParamTypeIterator) ?Lowering { if (it.zig_index >= it.fn_info.param_types.len) return null; - const ty = it.fn_info.param_types[it.zig_index]; + const mod = it.object.module; + const ip = &mod.intern_pool; + const ty = it.fn_info.param_types.get(ip)[it.zig_index]; it.byval_attr = false; return nextInner(it, ty.toType()); } /// `airCall` uses this instead of `next` so that it can take into account variadic functions. pub fn nextCall(it: *ParamTypeIterator, fg: *FuncGen, args: []const Air.Inst.Ref) ?Lowering { + const mod = it.object.module; + const ip = &mod.intern_pool; if (it.zig_index >= it.fn_info.param_types.len) { if (it.zig_index >= args.len) { return null; @@ -10902,7 +10909,7 @@ const ParamTypeIterator = struct { return nextInner(it, fg.typeOf(args[it.zig_index])); } } else { - return nextInner(it, it.fn_info.param_types[it.zig_index].toType()); + return nextInner(it, it.fn_info.param_types.get(ip)[it.zig_index].toType()); } } diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index eb697ea94e..9f84781966 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -238,7 +238,7 @@ pub const DeclGen = struct { if (ty.zigTypeTag(mod) == .Fn) { const fn_decl_index = switch (mod.intern_pool.indexToKey(val.ip_index)) { .extern_func => |extern_func| extern_func.decl, - .func => |func| mod.funcPtr(func.index).owner_decl, + .func => |func| func.owner_decl, else => unreachable, }; const spv_decl_index = try self.resolveDecl(fn_decl_index); @@ -255,13 +255,14 @@ pub const DeclGen = struct { /// Fetch or allocate a result id for decl index. 
This function also marks the decl as alive. /// Note: Function does not actually generate the decl. fn resolveDecl(self: *DeclGen, decl_index: Module.Decl.Index) !SpvModule.Decl.Index { - const decl = self.module.declPtr(decl_index); - try self.module.markDeclAlive(decl); + const mod = self.module; + const decl = mod.declPtr(decl_index); + try mod.markDeclAlive(decl); const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { // TODO: Extern fn? - const kind: SpvModule.DeclKind = if (decl.val.getFunctionIndex(self.module) != .none) + const kind: SpvModule.DeclKind = if (decl.val.isFuncBody(mod)) .func else .global; @@ -1268,6 +1269,7 @@ pub const DeclGen = struct { }, .Fn => switch (repr) { .direct => { + const ip = &mod.intern_pool; const fn_info = mod.typeToFunc(ty).?; // TODO: Put this somewhere in Sema.zig if (fn_info.is_var_args) @@ -1275,8 +1277,8 @@ pub const DeclGen = struct { const param_ty_refs = try self.gpa.alloc(CacheRef, fn_info.param_types.len); defer self.gpa.free(param_ty_refs); - for (param_ty_refs, 0..) |*param_type, i| { - param_type.* = try self.resolveType(fn_info.param_types[i].toType(), .direct); + for (param_ty_refs, fn_info.param_types.get(ip)) |*param_type, fn_param_type| { + param_type.* = try self.resolveType(fn_param_type.toType(), .direct); } const return_ty_ref = try self.resolveType(fn_info.return_type.toType(), .direct); @@ -1576,6 +1578,7 @@ pub const DeclGen = struct { fn genDecl(self: *DeclGen) !void { const mod = self.module; + const ip = &mod.intern_pool; const decl = mod.declPtr(self.decl_index); const spv_decl_index = try self.resolveDecl(self.decl_index); @@ -1594,7 +1597,8 @@ pub const DeclGen = struct { const fn_info = mod.typeToFunc(decl.ty).?; try self.args.ensureUnusedCapacity(self.gpa, fn_info.param_types.len); - for (fn_info.param_types) |param_type| { + for (0..fn_info.param_types.len) |i| { + const param_type = fn_info.param_types.get(ip)[i]; const param_type_id = try self.resolveTypeId(param_type.toType()); const arg_result_id = self.spv.allocId(); try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{ @@ -1621,7 +1625,7 @@ pub const DeclGen = struct { try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {}); try self.spv.addFunction(spv_decl_index, self.func); - const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(self.module)); + const fqn = ip.stringToSlice(try decl.getFullyQualifiedName(self.module)); try self.spv.sections.debug_names.emit(self.gpa, .OpName, .{ .target = decl_id, diff --git a/src/link.zig b/src/link.zig index 148138a149..eb6c085663 100644 --- a/src/link.zig +++ b/src/link.zig @@ -16,6 +16,7 @@ const Compilation = @import("Compilation.zig"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const Liveness = @import("Liveness.zig"); const Module = @import("Module.zig"); +const InternPool = @import("InternPool.zig"); const Package = @import("Package.zig"); const Type = @import("type.zig").Type; const TypedValue = @import("TypedValue.zig"); @@ -562,7 +563,7 @@ pub const File = struct { } /// May be called before or after updateDeclExports for any given Decl. 
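Aside: `File.updateFunc` just below fans out to a backend by recovering the containing struct from its embedded `base` field, the `@fieldParentPtr` downcast idiom. A runnable miniature with an invented two-backend `File`:

    const std = @import("std");

    const File = struct {
        tag: enum { c, wasm },

        fn codeLen(base: *File) usize {
            return switch (base.tag) {
                // Recover the containing backend struct from its embedded base.
                .c => @fieldParentPtr(C, "base", base).code.len,
                .wasm => 0,
            };
        }
    };

    const C = struct {
        base: File = .{ .tag = .c },
        code: []const u8,
    };

    test "downcast from an embedded base field" {
        var c_backend: C = .{ .code = "int x;" };
        try std.testing.expectEqual(@as(usize, 6), File.codeLen(&c_backend.base));
    }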
- pub fn updateFunc(base: *File, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) UpdateDeclError!void { + pub fn updateFunc(base: *File, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) UpdateDeclError!void { if (build_options.only_c) { assert(base.tag == .c); return @fieldParentPtr(C, "base", base).updateFunc(module, func_index, air, liveness); diff --git a/src/link/C.zig b/src/link/C.zig index e3f8653852..df7bfba354 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -88,13 +88,13 @@ pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void { } } -pub fn updateFunc(self: *C, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *C, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { const tracy = trace(@src()); defer tracy.end(); const gpa = self.base.allocator; - const func = module.funcPtr(func_index); + const func = module.funcInfo(func_index); const decl_index = func.owner_decl; const gop = try self.decl_table.getOrPut(gpa, decl_index); if (!gop.found_existing) { diff --git a/src/link/Coff.zig b/src/link/Coff.zig index a724d4023a..8720fc1037 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1032,7 +1032,7 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void { self.getAtomPtr(atom_index).sym_index = 0; } -pub fn updateFunc(self: *Coff, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Coff, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .coff) { @panic("Attempted to compile for object format that was disabled by build configuration"); } @@ -1044,7 +1044,7 @@ pub fn updateFunc(self: *Coff, mod: *Module, func_index: Module.Fn.Index, air: A const tracy = trace(@src()); defer tracy.end(); - const func = mod.funcPtr(func_index); + const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -1424,7 +1424,7 @@ pub fn updateDeclExports( // detect the default subsystem. 
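Aside: `C.updateFunc` above (and the SpirV and Plan9 paths later) gate work on `getOrPut`: a single hash-map probe that reserves the slot and reports whether it already existed. The idiom in isolation, on a toy `AutoHashMap`:

    const std = @import("std");

    test "insert-or-reuse with getOrPut" {
        const gpa = std.testing.allocator;
        var decl_table = std.AutoHashMap(u32, []const u8).init(gpa);
        defer decl_table.deinit();

        const gop = try decl_table.getOrPut(42);
        if (!gop.found_existing) gop.value_ptr.* = "generated"; // first sighting: do the work
        const again = try decl_table.getOrPut(42);
        try std.testing.expect(again.found_existing); // second sighting: slot reused
        try std.testing.expectEqualStrings("generated", again.value_ptr.*);
    }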
for (exports) |exp| { const exported_decl = mod.declPtr(exp.exported_decl); - if (exported_decl.getOwnedFunctionIndex(mod) == .none) continue; + if (exported_decl.getOwnedFunction(mod) == null) continue; const winapi_cc = switch (self.base.options.target.cpu.arch) { .x86 => std.builtin.CallingConvention.Stdcall, else => std.builtin.CallingConvention.C, diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 499855b330..6b7744644e 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -1043,6 +1043,7 @@ pub fn commitDeclState( var dbg_line_buffer = &decl_state.dbg_line; var dbg_info_buffer = &decl_state.dbg_info; const decl = mod.declPtr(decl_index); + const ip = &mod.intern_pool; const target_endian = self.target.cpu.arch.endian(); @@ -1241,20 +1242,9 @@ pub fn commitDeclState( while (sym_index < decl_state.abbrev_table.items.len) : (sym_index += 1) { const symbol = &decl_state.abbrev_table.items[sym_index]; const ty = symbol.type; - const deferred: bool = blk: { - if (ty.isAnyError(mod)) break :blk true; - switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .inferred_error_set_type => |ies_index| { - const ies = mod.inferredErrorSetPtr(ies_index); - if (!ies.is_resolved) break :blk true; - }, - else => {}, - } - break :blk false; - }; - if (deferred) continue; + if (ip.isErrorSetType(ty.toIntern())) continue; - symbol.offset = @as(u32, @intCast(dbg_info_buffer.items.len)); + symbol.offset = @intCast(dbg_info_buffer.items.len); try decl_state.addDbgInfoType(mod, di_atom_index, ty); } } @@ -1265,18 +1255,7 @@ pub fn commitDeclState( if (reloc.target) |target| { const symbol = decl_state.abbrev_table.items[target]; const ty = symbol.type; - const deferred: bool = blk: { - if (ty.isAnyError(mod)) break :blk true; - switch (mod.intern_pool.indexToKey(ty.ip_index)) { - .inferred_error_set_type => |ies_index| { - const ies = mod.inferredErrorSetPtr(ies_index); - if (!ies.is_resolved) break :blk true; - }, - else => {}, - } - break :blk false; - }; - if (deferred) { + if (ip.isErrorSetType(ty.toIntern())) { log.debug("resolving %{d} deferred until flush", .{target}); try self.global_abbrev_relocs.append(gpa, .{ .target = null, @@ -2505,18 +2484,18 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void { defer arena_alloc.deinit(); const arena = arena_alloc.allocator(); - // TODO: don't create a zig type for this, just make the dwarf info - // without touching the zig type system. 
- const names = try arena.dupe(InternPool.NullTerminatedString, module.global_error_set.keys()); - std.mem.sort(InternPool.NullTerminatedString, names, {}, InternPool.NullTerminatedString.indexLessThan); - - const error_ty = try module.intern(.{ .error_set_type = .{ .names = names } }); var dbg_info_buffer = std.ArrayList(u8).init(arena); - try addDbgInfoErrorSet(module, error_ty.toType(), self.target, &dbg_info_buffer); + try addDbgInfoErrorSetNames( + module, + Type.anyerror, + module.global_error_set.keys(), + self.target, + &dbg_info_buffer, + ); const di_atom_index = try self.createAtom(.di_atom); log.debug("updateDeclDebugInfoAllocation in flushModule", .{}); - try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len))); + try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(dbg_info_buffer.items.len)); log.debug("writeDeclDebugInfo in flushModule", .{}); try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items); @@ -2633,6 +2612,17 @@ fn addDbgInfoErrorSet( ty: Type, target: std.Target, dbg_info_buffer: *std.ArrayList(u8), +) !void { + return addDbgInfoErrorSetNames(mod, ty, ty.errorSetNames(mod), target, dbg_info_buffer); +} + +fn addDbgInfoErrorSetNames( + mod: *Module, + /// Used for printing the type name only. + ty: Type, + error_names: []const InternPool.NullTerminatedString, + target: std.Target, + dbg_info_buffer: *std.ArrayList(u8), ) !void { const target_endian = target.cpu.arch.endian(); @@ -2655,7 +2645,6 @@ fn addDbgInfoErrorSet( // DW.AT.const_value, DW.FORM.data8 mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian); - const error_names = ty.errorSetNames(mod); for (error_names) |error_name_ip| { const int = try mod.getErrorValue(error_name_ip); const error_name = mod.intern_pool.stringToSlice(error_name_ip); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 0258b0a6a7..4bb049e074 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2575,7 +2575,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s return local_sym; } -pub fn updateFunc(self: *Elf, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Elf, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } @@ -2586,7 +2586,7 @@ pub fn updateFunc(self: *Elf, mod: *Module, func_index: Module.Fn.Index, air: Ai const tracy = trace(@src()); defer tracy.end(); - const func = mod.funcPtr(func_index); + const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 80195a454d..6953cda929 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1845,7 +1845,7 @@ fn addStubEntry(self: *MachO, target: SymbolWithLoc) !void { self.markRelocsDirtyByTarget(target); } -pub fn updateFunc(self: *MachO, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *MachO, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .macho) { @panic("Attempted to compile for object format that was disabled by build configuration"); } @@ -1855,7 +1855,7 @@ pub fn updateFunc(self: *MachO, mod: *Module, func_index: Module.Fn.Index, 
air: const tracy = trace(@src()); defer tracy.end(); - const func = mod.funcPtr(func_index); + const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); diff --git a/src/link/NvPtx.zig b/src/link/NvPtx.zig index b74518d930..fc3d659770 100644 --- a/src/link/NvPtx.zig +++ b/src/link/NvPtx.zig @@ -13,6 +13,7 @@ const assert = std.debug.assert; const log = std.log.scoped(.link); const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Compilation = @import("../Compilation.zig"); const link = @import("../link.zig"); const trace = @import("../tracy.zig").trace; @@ -68,7 +69,7 @@ pub fn deinit(self: *NvPtx) void { self.base.allocator.free(self.ptx_file_name); } -pub fn updateFunc(self: *NvPtx, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *NvPtx, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (!build_options.have_llvm) return; try self.llvm_object.updateFunc(module, func_index, air, liveness); } diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index ad5292aa88..66dce7d0dc 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -4,6 +4,7 @@ const Plan9 = @This(); const link = @import("../link.zig"); const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Compilation = @import("../Compilation.zig"); const aout = @import("Plan9/aout.zig"); const codegen = @import("../codegen.zig"); @@ -344,12 +345,12 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi } } -pub fn updateFunc(self: *Plan9, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *Plan9, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - const func = mod.funcPtr(func_index); + const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); self.freeUnnamedConsts(decl_index); @@ -908,7 +909,7 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void { // in the deleteUnusedDecl function. 
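Aside: `decl.val.isFuncBody(mod)` above replaces sentinel comparisons like `getFunctionIndex(mod) != .none` with a direct predicate on the interned value's key. A toy rendering; this `Key` union is invented, not the real `InternPool.Key`:

    const std = @import("std");

    const Key = union(enum) {
        func: u32,
        int: i64,
    };

    // A direct question reads better than comparing an optional index
    // against a `.none` sentinel at every call site.
    fn isFuncBody(key: Key) bool {
        return key == .func;
    }

    test "boolean predicate instead of a sentinel comparison" {
        try std.testing.expect(isFuncBody(.{ .func = 1 }));
        try std.testing.expect(!isFuncBody(.{ .int = 0 }));
    }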
const mod = self.base.options.module.?; const decl = mod.declPtr(decl_index); - const is_fn = decl.val.getFunctionIndex(mod) != .none; + const is_fn = decl.val.isFuncBody(mod); if (is_fn) { var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?; var submap = symidx_and_submap.functions; diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 5bbd5ebdc0..da5fef7c85 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -29,6 +29,7 @@ const assert = std.debug.assert; const log = std.log.scoped(.link); const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Compilation = @import("../Compilation.zig"); const link = @import("../link.zig"); const codegen = @import("../codegen/spirv.zig"); @@ -103,12 +104,12 @@ pub fn deinit(self: *SpirV) void { self.decl_link.deinit(); } -pub fn updateFunc(self: *SpirV, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(self: *SpirV, module: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } - const func = module.funcPtr(func_index); + const func = module.funcInfo(func_index); var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &self.spv, &self.decl_link); defer decl_gen.deinit(); @@ -138,7 +139,7 @@ pub fn updateDeclExports( exports: []const *Module.Export, ) !void { const decl = mod.declPtr(decl_index); - if (decl.val.getFunctionIndex(mod) != .none and decl.ty.fnCallingConvention(mod) == .Kernel) { + if (decl.val.isFuncBody(mod) and decl.ty.fnCallingConvention(mod) == .Kernel) { // TODO: Unify with resolveDecl in spirv.zig. const entry = try self.decl_link.getOrPut(decl_index); if (!entry.found_existing) { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 429fc12f14..86c1f045a3 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -12,6 +12,7 @@ const log = std.log.scoped(.link); pub const Atom = @import("Wasm/Atom.zig"); const Dwarf = @import("Dwarf.zig"); const Module = @import("../Module.zig"); +const InternPool = @import("../InternPool.zig"); const Compilation = @import("../Compilation.zig"); const CodeGen = @import("../arch/wasm/CodeGen.zig"); const codegen = @import("../codegen.zig"); @@ -1338,7 +1339,7 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 { return index; } -pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void { +pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: InternPool.Index, air: Air, liveness: Liveness) !void { if (build_options.skip_non_native and builtin.object_format != .wasm) { @panic("Attempted to compile for object format that was disabled by build configuration"); } @@ -1349,7 +1350,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: Module.Fn.Index, air: A const tracy = trace(@src()); defer tracy.end(); - const func = mod.funcPtr(func_index); + const func = mod.funcInfo(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); const atom_index = try wasm.getOrCreateAtomForDecl(decl_index); diff --git a/src/print_air.zig b/src/print_air.zig index 5e04f1057b..d16aa1e0ae 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -665,7 +665,7 @@ const Writer = struct { fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_fn = w.air.instructions.items(.data)[inst].ty_fn; const func_index = ty_fn.func; - 
const owner_decl = w.module.declPtr(w.module.funcPtr(func_index).owner_decl); + const owner_decl = w.module.funcOwnerDeclPtr(func_index); try s.print("{}", .{owner_decl.name.fmt(&w.module.intern_pool)}); } diff --git a/src/type.zig b/src/type.zig index e2e2171026..c70544d189 100644 --- a/src/type.zig +++ b/src/type.zig @@ -250,21 +250,19 @@ pub const Type = struct { try print(error_union_type.payload_type.toType(), writer, mod); return; }, - .inferred_error_set_type => |index| { - const ies = mod.inferredErrorSetPtr(index); - const func = ies.func; - + .inferred_error_set_type => |func_index| { try writer.writeAll("@typeInfo(@typeInfo(@TypeOf("); - const owner_decl = mod.declPtr(mod.funcPtr(func).owner_decl); + const owner_decl = mod.funcOwnerDeclPtr(func_index); try owner_decl.renderFullyQualifiedName(mod, writer); try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set"); }, .error_set_type => |error_set_type| { + const ip = &mod.intern_pool; const names = error_set_type.names; try writer.writeAll("error{"); - for (names, 0..) |name, i| { + for (names.get(ip), 0..) |name, i| { if (i != 0) try writer.writeByte(','); - try writer.print("{}", .{name.fmt(&mod.intern_pool)}); + try writer.print("{}", .{name.fmt(ip)}); } try writer.writeAll("}"); }, @@ -294,6 +292,7 @@ pub const Type = struct { .comptime_int, .comptime_float, .noreturn, + .adhoc_inferred_error_set, => return writer.writeAll(@tagName(s)), .null, @@ -367,7 +366,8 @@ pub const Type = struct { try writer.writeAll("noinline "); } try writer.writeAll("fn("); - for (fn_info.param_types, 0..) |param_ty, i| { + const param_types = fn_info.param_types.get(&mod.intern_pool); + for (param_types, 0..) |param_ty, i| { if (i != 0) try writer.writeAll(", "); if (std.math.cast(u5, i)) |index| { if (fn_info.paramIsComptime(index)) { @@ -384,7 +384,7 @@ pub const Type = struct { } } if (fn_info.is_var_args) { - if (fn_info.param_types.len != 0) { + if (param_types.len != 0) { try writer.writeAll(", "); } try writer.writeAll("..."); @@ -534,6 +534,7 @@ pub const Type = struct { .c_longdouble, .bool, .anyerror, + .adhoc_inferred_error_set, .anyopaque, .atomic_order, .atomic_rmw_op, @@ -697,6 +698,7 @@ pub const Type = struct { => true, .anyerror, + .adhoc_inferred_error_set, .anyopaque, .atomic_order, .atomic_rmw_op, @@ -955,7 +957,9 @@ pub const Type = struct { }, // TODO revisit this when we have the concept of the error tag type - .anyerror => return AbiAlignmentAdvanced{ .scalar = 2 }, + .anyerror, + .adhoc_inferred_error_set, + => return AbiAlignmentAdvanced{ .scalar = 2 }, .void, .type, @@ -1419,7 +1423,9 @@ pub const Type = struct { => return AbiSizeAdvanced{ .scalar = 0 }, // TODO revisit this when we have the concept of the error tag type - .anyerror => return AbiSizeAdvanced{ .scalar = 2 }, + .anyerror, + .adhoc_inferred_error_set, + => return AbiSizeAdvanced{ .scalar = 2 }, .prefetch_options => unreachable, // missing call to resolveTypeFields .export_options => unreachable, // missing call to resolveTypeFields @@ -1662,7 +1668,9 @@ pub const Type = struct { .void => return 0, // TODO revisit this when we have the concept of the error tag type - .anyerror => return 16, + .anyerror, + .adhoc_inferred_error_set, + => return 16, .anyopaque => unreachable, .type => unreachable, @@ -2050,21 +2058,19 @@ pub const Type = struct { /// Asserts that the type is an error union. 
pub fn errorUnionSet(ty: Type, mod: *Module) Type { - return mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.error_set_type.toType(); + return mod.intern_pool.errorUnionSet(ty.toIntern()).toType(); } /// Returns false for unresolved inferred error sets. pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; return switch (ty.toIntern()) { .anyerror_type => false, - else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + else => switch (ip.indexToKey(ty.toIntern())) { .error_set_type => |error_set_type| error_set_type.names.len == 0, - .inferred_error_set_type => |index| { - const inferred_error_set = mod.inferredErrorSetPtr(index); - // Can't know for sure. - if (!inferred_error_set.is_resolved) return false; - if (inferred_error_set.is_anyerror) return false; - return inferred_error_set.errors.count() == 0; + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .none, .anyerror_type => false, + else => |t| ip.indexToKey(t).error_set_type.names.len == 0, }, else => unreachable, }, @@ -2075,10 +2081,11 @@ pub const Type = struct { /// Note that the result may be a false negative if the type did not get error set /// resolution prior to this call. pub fn isAnyError(ty: Type, mod: *Module) bool { + const ip = &mod.intern_pool; return switch (ty.toIntern()) { .anyerror_type => true, else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .inferred_error_set_type => |i| mod.inferredErrorSetPtr(i).is_anyerror, + .inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type, else => false, }, }; @@ -2102,13 +2109,11 @@ pub const Type = struct { return switch (ty) { .anyerror_type => true, else => switch (ip.indexToKey(ty)) { - .error_set_type => |error_set_type| { - return error_set_type.nameIndex(ip, name) != null; - }, - .inferred_error_set_type => |index| { - const ies = ip.inferredErrorSetPtrConst(index); - if (ies.is_anyerror) return true; - return ies.errors.contains(name); + .error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null, + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .anyerror_type => true, + .none => false, + else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null, }, else => unreachable, }, @@ -2128,12 +2133,14 @@ pub const Type = struct { const field_name_interned = ip.getString(name).unwrap() orelse return false; return error_set_type.nameIndex(ip, field_name_interned) != null; }, - .inferred_error_set_type => |index| { - const ies = ip.inferredErrorSetPtr(index); - if (ies.is_anyerror) return true; - // If the string is not interned, then the field certainly is not present. - const field_name_interned = ip.getString(name).unwrap() orelse return false; - return ies.errors.contains(field_name_interned); + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .anyerror_type => true, + .none => false, + else => |t| { + // If the string is not interned, then the field certainly is not present. 
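Aside: with `Module.Fn` gone, an inferred error set's resolution is read through `ip.funcIesResolved(i).*`, which is `.none` while unresolved, `.anyerror_type`, or a concrete error-set type. A toy model of that three-way state and of the `errorSetIsEmpty` logic above (`Resolved` and the name list are invented):

    const std = @import("std");

    const Resolved = union(enum) {
        none, // not yet resolved
        anyerror_type, // resolved to anyerror
        error_set: []const []const u8, // resolved to a concrete name list
    };

    fn errorSetIsEmpty(resolved: Resolved) bool {
        return switch (resolved) {
            // Unresolved: can't know for sure. anyerror: never empty.
            .none, .anyerror_type => false,
            .error_set => |names| names.len == 0,
        };
    }

    test "empty only when resolved to a zero-length name list" {
        try std.testing.expect(!errorSetIsEmpty(.none));
        try std.testing.expect(!errorSetIsEmpty(.anyerror_type));
        try std.testing.expect(errorSetIsEmpty(.{ .error_set = &.{} }));
    }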
+ const field_name_interned = ip.getString(name).unwrap() orelse return false; + return ip.indexToKey(t).error_set_type.nameIndex(ip, field_name_interned) != null; + }, }, else => unreachable, }, @@ -2231,7 +2238,7 @@ pub const Type = struct { var ty = starting_ty; while (true) switch (ty.toIntern()) { - .anyerror_type => { + .anyerror_type, .adhoc_inferred_error_set_type => { // TODO revisit this when error sets support custom int types return .{ .signedness = .unsigned, .bits = 16 }; }, @@ -2365,7 +2372,7 @@ pub const Type = struct { /// Asserts the type is a function or a function pointer. pub fn fnReturnType(ty: Type, mod: *Module) Type { - return mod.intern_pool.funcReturnType(ty.toIntern()).toType(); + return mod.intern_pool.funcTypeReturnType(ty.toIntern()).toType(); } /// Asserts the type is a function. @@ -2505,6 +2512,7 @@ pub const Type = struct { .export_options, .extern_options, .type_info, + .adhoc_inferred_error_set, => return null, .void => return Value.void, @@ -2699,6 +2707,7 @@ pub const Type = struct { .bool, .void, .anyerror, + .adhoc_inferred_error_set, .noreturn, .generic_poison, .atomic_order, @@ -2942,14 +2951,15 @@ pub const Type = struct { } // Asserts that `ty` is an error set and not `anyerror`. + // Asserts that `ty` is resolved if it is an inferred error set. pub fn errorSetNames(ty: Type, mod: *Module) []const InternPool.NullTerminatedString { - return switch (mod.intern_pool.indexToKey(ty.toIntern())) { - .error_set_type => |x| x.names, - .inferred_error_set_type => |index| { - const inferred_error_set = mod.inferredErrorSetPtr(index); - assert(inferred_error_set.is_resolved); - assert(!inferred_error_set.is_anyerror); - return inferred_error_set.errors.keys(); + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ty.toIntern())) { + .error_set_type => |x| x.names.get(ip), + .inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) { + .none => unreachable, // unresolved inferred error set + .anyerror_type => unreachable, + else => |t| ip.indexToKey(t).error_set_type.names.get(ip), }, else => unreachable, }; diff --git a/src/value.zig b/src/value.zig index d8536ef2a6..ec0f359671 100644 --- a/src/value.zig +++ b/src/value.zig @@ -262,6 +262,11 @@ pub const Value = struct { return ip.getOrPutTrailingString(gpa, len); } + pub fn intern2(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { + if (val.ip_index != .none) return val.ip_index; + return intern(val, ty, mod); + } + pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { if (val.ip_index != .none) return (try mod.getCoerced(val, ty)).toIntern(); switch (val.tag()) { @@ -473,12 +478,15 @@ pub const Value = struct { }; } - pub fn getFunction(val: Value, mod: *Module) ?*Module.Fn { - return mod.funcPtrUnwrap(val.getFunctionIndex(mod)); + pub fn isFuncBody(val: Value, mod: *Module) bool { + return mod.intern_pool.isFuncBody(val.toIntern()); } - pub fn getFunctionIndex(val: Value, mod: *Module) Module.Fn.OptionalIndex { - return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.toIntern()) else .none; + pub fn getFunction(val: Value, mod: *Module) ?InternPool.Key.Func { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .func => |x| x, + else => null, + }; } pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc { @@ -1462,7 +1470,7 @@ pub const Value = struct { return switch (mod.intern_pool.indexToKey(val.toIntern())) { .variable => |variable| variable.decl, .extern_func => |extern_func| 
diff --git a/src/value.zig b/src/value.zig
index d8536ef2a6..ec0f359671 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -262,6 +262,11 @@ pub const Value = struct {
         return ip.getOrPutTrailingString(gpa, len);
     }

+    pub fn intern2(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index {
+        if (val.ip_index != .none) return val.ip_index;
+        return intern(val, ty, mod);
+    }
+
     pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index {
         if (val.ip_index != .none) return (try mod.getCoerced(val, ty)).toIntern();
         switch (val.tag()) {
@@ -473,12 +478,15 @@ pub const Value = struct {
         };
     }

-    pub fn getFunction(val: Value, mod: *Module) ?*Module.Fn {
-        return mod.funcPtrUnwrap(val.getFunctionIndex(mod));
+    pub fn isFuncBody(val: Value, mod: *Module) bool {
+        return mod.intern_pool.isFuncBody(val.toIntern());
     }

-    pub fn getFunctionIndex(val: Value, mod: *Module) Module.Fn.OptionalIndex {
-        return if (val.ip_index != .none) mod.intern_pool.indexToFunc(val.toIntern()) else .none;
+    pub fn getFunction(val: Value, mod: *Module) ?InternPool.Key.Func {
+        return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .func => |x| x,
+            else => null,
+        };
     }

     pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc {
@@ -1462,7 +1470,7 @@ pub const Value = struct {
         return switch (mod.intern_pool.indexToKey(val.toIntern())) {
             .variable => |variable| variable.decl,
             .extern_func => |extern_func| extern_func.decl,
-            .func => |func| mod.funcPtr(func.index).owner_decl,
+            .func => |func| func.owner_decl,
             .ptr => |ptr| switch (ptr.addr) {
                 .decl => |decl| decl,
                 .mut_decl => |mut_decl| mut_decl.decl,
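// A hedged sketch, not compiler source: what distinguishes the new `intern2`
// from the existing `intern` above. An already-interned value keeps its index
// untouched, whereas `intern` still coerces the value to the requested type
// first. `Index` and `Value` below are simplified stand-ins.
const std = @import("std");

const Index = enum(u32) { none = std.math.maxInt(u32), _ };

const Value = struct {
    ip_index: Index,

    // Mirrors `intern2`: hand back the existing index as-is.
    fn intern2(val: Value) Index {
        if (val.ip_index != .none) return val.ip_index;
        // Otherwise a slow path would intern `val`; elided in this sketch.
        return @enumFromInt(0);
    }
};

test "intern2 short-circuits on already-interned values" {
    const v: Value = .{ .ip_index = @enumFromInt(42) };
    try std.testing.expectEqual(@as(Index, @enumFromInt(42)), v.intern2());
}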
diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig
index 7d4a841a62..a8a7d3c0ed 100644
--- a/test/behavior/generics.zig
+++ b/test/behavior/generics.zig
@@ -443,3 +443,16 @@ test "generic function passed as comptime argument" {
     };
     try S.doMath(std.math.add, 5, 6);
 }
+
+test "return type of generic function is function pointer" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+
+    const S = struct {
+        fn b(comptime T: type) ?*const fn () error{}!T {
+            return null;
+        }
+    };
+
+    try expect(null == S.b(void));
+}
diff --git a/test/cases/compile_errors/anytype_param_requires_comptime.zig b/test/cases/compile_errors/anytype_param_requires_comptime.zig
index 3e2b32b408..453bd5bce5 100644
--- a/test/cases/compile_errors/anytype_param_requires_comptime.zig
+++ b/test/cases/compile_errors/anytype_param_requires_comptime.zig
@@ -16,5 +16,7 @@ pub export fn entry() void {
 // backend=stage2
 // target=native
 //
-// :7:14: error: unable to resolve comptime value
-// :7:14: note: argument to parameter with comptime-only type must be comptime-known
+// :7:14: error: runtime-known argument passed to comptime-only type parameter
+// :9:12: note: declared here
+// :4:16: note: struct requires comptime because of this field
+// :4:16: note: types are not available at runtime
diff --git a/test/cases/compile_errors/export_function_with_comptime_parameter.zig b/test/cases/compile_errors/export_function_with_comptime_parameter.zig
index 8d5dbef1c3..948053534d 100644
--- a/test/cases/compile_errors/export_function_with_comptime_parameter.zig
+++ b/test/cases/compile_errors/export_function_with_comptime_parameter.zig
@@ -6,4 +6,4 @@ export fn foo(comptime x: anytype, y: i32) i32 {
 // backend=stage2
 // target=native
 //
-// :1:15: error: comptime parameters not allowed in function with calling convention 'C'
+// :1:27: error: comptime parameters not allowed in function with calling convention 'C'
diff --git a/test/cases/compile_errors/export_generic_function.zig b/test/cases/compile_errors/export_generic_function.zig
index 4ffbad9df7..65469be6d9 100644
--- a/test/cases/compile_errors/export_generic_function.zig
+++ b/test/cases/compile_errors/export_generic_function.zig
@@ -7,4 +7,4 @@ export fn foo(num: anytype) i32 {
 // backend=stage2
 // target=native
 //
-// :1:15: error: generic parameters not allowed in function with calling convention 'C'
+// :1:20: error: generic parameters not allowed in function with calling convention 'C'
diff --git a/test/cases/compile_errors/extern_function_with_comptime_parameter.zig b/test/cases/compile_errors/extern_function_with_comptime_parameter.zig
index fac09cc265..b8f5f0b1b8 100644
--- a/test/cases/compile_errors/extern_function_with_comptime_parameter.zig
+++ b/test/cases/compile_errors/extern_function_with_comptime_parameter.zig
@@ -19,5 +19,5 @@ comptime {
 // target=native
 //
 // :5:30: error: comptime parameters not allowed in function with calling convention 'C'
-// :6:30: error: generic parameters not allowed in function with calling convention 'C'
+// :6:41: error: generic parameters not allowed in function with calling convention 'C'
 // :1:15: error: comptime parameters not allowed in function with calling convention 'C'
diff --git a/test/cases/compile_errors/generic_function_instance_with_non-constant_expression.zig b/test/cases/compile_errors/generic_function_instance_with_non-constant_expression.zig
index 18c60cd4aa..829db19d87 100644
--- a/test/cases/compile_errors/generic_function_instance_with_non-constant_expression.zig
+++ b/test/cases/compile_errors/generic_function_instance_with_non-constant_expression.zig
@@ -13,5 +13,5 @@ export fn entry() usize {
 // backend=stage2
 // target=native
 //
-// :5:16: error: unable to resolve comptime value
-// :5:16: note: parameter is comptime
+// :5:16: error: runtime-known argument passed to comptime parameter
+// :1:17: note: declared comptime here
diff --git a/test/standalone.zig b/test/standalone.zig
index d812669664..cfdb09ea07 100644
--- a/test/standalone.zig
+++ b/test/standalone.zig
@@ -213,10 +213,6 @@ pub const build_cases = [_]BuildCase{
     //    .build_root = "test/standalone/sigpipe",
     //    .import = @import("standalone/sigpipe/build.zig"),
     //},
-    .{
-        .build_root = "test/standalone/issue_13030",
-        .import = @import("standalone/issue_13030/build.zig"),
-    },
     // TODO restore this test
     //.{
     //    .build_root = "test/standalone/options",
diff --git a/test/standalone/issue_13030/build.zig b/test/standalone/issue_13030/build.zig
deleted file mode 100644
index e31863fee2..0000000000
--- a/test/standalone/issue_13030/build.zig
+++ /dev/null
@@ -1,24 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const CrossTarget = std.zig.CrossTarget;
-
-pub fn build(b: *std.Build) void {
-    const test_step = b.step("test", "Test it");
-    b.default_step = test_step;
-
-    add(b, test_step, .Debug);
-    add(b, test_step, .ReleaseFast);
-    add(b, test_step, .ReleaseSmall);
-    add(b, test_step, .ReleaseSafe);
-}
-
-fn add(b: *std.Build, test_step: *std.Build.Step, optimize: std.builtin.OptimizeMode) void {
-    const obj = b.addObject(.{
-        .name = "main",
-        .root_source_file = .{ .path = "main.zig" },
-        .optimize = optimize,
-        .target = .{},
-    });
-
-    test_step.dependOn(&obj.step);
-}
diff --git a/test/standalone/issue_13030/main.zig b/test/standalone/issue_13030/main.zig
deleted file mode 100644
index 5e4c976db3..0000000000
--- a/test/standalone/issue_13030/main.zig
+++ /dev/null
@@ -1,7 +0,0 @@
-fn b(comptime T: type) ?*const fn () error{}!T {
-    return null;
-}
-
-export fn entry() void {
-    _ = b(void);
-}
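// A hedged sketch, not part of the patch: the standalone issue_13030 test
// deleted above guarded a regression where a generic function returning a
// function pointer with an error-union result type failed to compile. The
// same coverage now lives in the "return type of generic function is
// function pointer" behavior test added to test/behavior/generics.zig
// earlier in this diff; instantiating `b` is enough to exercise it.
fn b(comptime T: type) ?*const fn () error{}!T {
    return null;
}

comptime {
    _ = b(void);
}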