diff --git a/build.zig b/build.zig index 847303340d..9cfebebc56 100644 --- a/build.zig +++ b/build.zig @@ -30,6 +30,7 @@ pub fn build(b: *std.Build) !void { const test_step = b.step("test", "Run all the tests"); const skip_install_lib_files = b.option(bool, "no-lib", "skip copying of lib/ files and langref to installation prefix. Useful for development") orelse false; const skip_install_langref = b.option(bool, "no-langref", "skip copying of langref to the installation prefix") orelse skip_install_lib_files; + const no_bin = b.option(bool, "no-bin", "skip emitting compiler binary") orelse false; const docgen_exe = b.addExecutable(.{ .name = "docgen", @@ -166,6 +167,7 @@ pub fn build(b: *std.Build) !void { exe.pie = pie; exe.sanitize_thread = sanitize_thread; exe.entitlements = entitlements; + if (no_bin) exe.emit_bin = .no_emit; exe.build_id = b.option( std.Build.Step.Compile.BuildId, diff --git a/doc/langref.html.in b/doc/langref.html.in index 14dda686a9..6740d147bd 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -10176,7 +10176,7 @@ pub fn main() void { {#header_open|Invalid Error Set Cast#}
At compile-time:
-      {#code_begin|test_err|test_comptime_invalid_error_set_cast|'error.B' not a member of error set 'error{A,C}'#}
+      {#code_begin|test_err|test_comptime_invalid_error_set_cast|'error.B' not a member of error set 'error{C,A}'#}
      const Set1 = error{
          A,
          B,
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index bbfa588d6d..c2a2486dfa 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -459,6 +459,28 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
             return self.items[prev_len..][0..n];
         }
 
+        /// Resize the array, adding `n` new elements, which have `undefined` values.
+        /// The return value is a slice pointing to the newly allocated elements.
+        /// The returned pointer becomes invalid when the list is resized.
+        /// Resizes the list if `self.capacity` is not large enough.
+        pub fn addManyAsSlice(self: *Self, n: usize) Allocator.Error![]T {
+            const prev_len = self.items.len;
+            try self.resize(self.items.len + n);
+            return self.items[prev_len..][0..n];
+        }
+
+        /// Resize the array, adding `n` new elements, which have `undefined` values.
+        /// The return value is a slice pointing to the newly allocated elements.
+        /// Asserts that there is already space for the new items without allocating more.
+        /// **Does not** invalidate element pointers.
+        /// The returned pointer becomes invalid when the list is resized.
+        pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
+            assert(self.items.len + n <= self.capacity);
+            const prev_len = self.items.len;
+            self.items.len += n;
+            return self.items[prev_len..][0..n];
+        }
+
         /// Remove and return the last element from the list.
         /// Asserts the list has at least one item.
         /// Invalidates pointers to the removed element.
@@ -949,6 +971,28 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
             return self.items[prev_len..][0..n];
         }
 
+        /// Resize the array, adding `n` new elements, which have `undefined` values.
+        /// The return value is a slice pointing to the newly allocated elements.
+        /// The returned pointer becomes invalid when the list is resized.
+        /// Resizes the list if `self.capacity` is not large enough.
+        pub fn addManyAsSlice(self: *Self, allocator: Allocator, n: usize) Allocator.Error![]T {
+            const prev_len = self.items.len;
+            try self.resize(allocator, self.items.len + n);
+            return self.items[prev_len..][0..n];
+        }
+
+        /// Resize the array, adding `n` new elements, which have `undefined` values.
+        /// The return value is a slice pointing to the newly allocated elements.
+        /// Asserts that there is already space for the new items without allocating more.
+        /// **Does not** invalidate element pointers.
+        /// The returned pointer becomes invalid when the list is resized.
+        pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
+            assert(self.items.len + n <= self.capacity);
+            const prev_len = self.items.len;
+            self.items.len += n;
+            return self.items[prev_len..][0..n];
+        }
+
         /// Remove and return the last element from the list.
         /// Asserts the list has at least one item.
         /// Invalidates pointers to last element.
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index ec69270d15..ef93bb14ee 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -143,7 +143,7 @@ pub const Mode = OptimizeMode;
 /// This data structure is used by the Zig language code generation and
 /// therefore must be kept in sync with the compiler implementation.
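// A minimal usage sketch of the addManyAsSlice API added above, assuming the
// managed std.ArrayList(u8); the test name and the data are illustrative:
const std = @import("std");

test "addManyAsSlice sketch" {
    var list = std.ArrayList(u8).init(std.testing.allocator);
    defer list.deinit();

    // Allocating variant: may resize, so it can invalidate element pointers.
    const dst = try list.addManyAsSlice(4);
    @memcpy(dst, "abcd");

    // Non-allocating variant: capacity must be reserved up front.
    try list.ensureUnusedCapacity(4);
    @memcpy(list.addManyAsSliceAssumeCapacity(4), "efgh");

    try std.testing.expectEqualStrings("abcdefgh", list.items);
}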
-pub const CallingConvention = enum { +pub const CallingConvention = enum(u8) { /// This is the default Zig calling convention used when not using `export` on `fn` /// and no other calling convention is specified. Unspecified, @@ -190,7 +190,7 @@ pub const CallingConvention = enum { /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. -pub const AddressSpace = enum { +pub const AddressSpace = enum(u5) { generic, gs, fs, @@ -283,7 +283,7 @@ pub const Type = union(enum) { /// This data structure is used by the Zig language code generation and /// therefore must be kept in sync with the compiler implementation. - pub const Size = enum { + pub const Size = enum(u2) { One, Many, Slice, diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index 7cf0f4681a..db85242002 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -530,7 +530,7 @@ pub const ChildProcess = struct { // can fail between fork() and execve(). // Therefore, we do all the allocation for the execve() before the fork(). // This means we must do the null-termination of argv and env vars here. - const argv_buf = try arena.allocSentinel(?[*:0]u8, self.argv.len, null); + const argv_buf = try arena.allocSentinel(?[*:0]const u8, self.argv.len, null); for (self.argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; const envp = m: { @@ -542,7 +542,7 @@ pub const ChildProcess = struct { } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. - break :m @ptrCast([*:null]?[*:0]u8, os.environ.ptr); + break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr); } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process"); diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig index 2745bd4e6f..5b9b00538a 100644 --- a/lib/std/crypto/tls/Client.zig +++ b/lib/std/crypto/tls/Client.zig @@ -1256,10 +1256,8 @@ fn limitedOverlapCopy(frag: []u8, in: usize) void { // A single, non-overlapping memcpy suffices. @memcpy(frag[0..first.len], first); } else { - // Need two memcpy calls because one alone would overlap. - @memcpy(frag[0..in], first[0..in]); - const leftover = first.len - in; - @memcpy(frag[in..][0..leftover], first[in..][0..leftover]); + // One memcpy call would overlap, so just do this instead. + std.mem.copyForwards(u8, frag, first); } } diff --git a/lib/std/dwarf.zig b/lib/std/dwarf.zig index dbe1f6e8b6..4de08b25d7 100644 --- a/lib/std/dwarf.zig +++ b/lib/std/dwarf.zig @@ -936,6 +936,7 @@ pub const DwarfInfo = struct { const ranges_val = compile_unit.die.getAttr(AT.ranges) orelse continue; const ranges_offset = switch (ranges_val.*) { .SecOffset => |off| off, + .Const => |c| try c.asUnsignedLe(), .RangeListOffset => |idx| off: { if (compile_unit.is_64) { const offset_loc = @intCast(usize, compile_unit.rnglists_base + 8 * idx); diff --git a/lib/std/hash.zig b/lib/std/hash.zig index 5c85b38d55..eca7a70159 100644 --- a/lib/std/hash.zig +++ b/lib/std/hash.zig @@ -36,6 +36,20 @@ const xxhash = @import("hash/xxhash.zig"); pub const XxHash64 = xxhash.XxHash64; pub const XxHash32 = xxhash.XxHash32; +/// This is handy if you have a u32 and want a u32 and don't want to take a +/// detour through many layers of abstraction elsewhere in the std.hash +/// namespace. 
+/// Copied from https://nullprogram.com/blog/2018/07/31/ +pub fn uint32(input: u32) u32 { + var x: u32 = input; + x ^= x >> 16; + x *%= 0x7feb352d; + x ^= x >> 15; + x *%= 0x846ca68b; + x ^= x >> 16; + return x; +} + test { _ = adler; _ = auto_hash; diff --git a/lib/std/hash/auto_hash.zig b/lib/std/hash/auto_hash.zig index 0c88caae7e..251ac120f6 100644 --- a/lib/std/hash/auto_hash.zig +++ b/lib/std/hash/auto_hash.zig @@ -91,15 +91,21 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void { // Help the optimizer see that hashing an int is easy by inlining! // TODO Check if the situation is better after #561 is resolved. - .Int => { - if (comptime meta.trait.hasUniqueRepresentation(Key)) { - @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) }); - } else { - // Take only the part containing the key value, the remaining - // bytes are undefined and must not be hashed! - const byte_size = comptime std.math.divCeil(comptime_int, @bitSizeOf(Key), 8) catch unreachable; - @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key)[0..byte_size] }); - } + .Int => |int| switch (int.signedness) { + .signed => hash(hasher, @bitCast(@Type(.{ .Int = .{ + .bits = int.bits, + .signedness = .unsigned, + } }), key), strat), + .unsigned => { + if (comptime meta.trait.hasUniqueRepresentation(Key)) { + @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) }); + } else { + // Take only the part containing the key value, the remaining + // bytes are undefined and must not be hashed! + const byte_size = comptime std.math.divCeil(comptime_int, @bitSizeOf(Key), 8) catch unreachable; + @call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key)[0..byte_size] }); + } + }, }, .Bool => hash(hasher, @boolToInt(key), strat), diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index c4d3ccf077..ec79d843da 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -2158,6 +2158,9 @@ pub const Const = struct { pub fn to(self: Const, comptime T: type) ConvertError!T { switch (@typeInfo(T)) { .Int => |info| { + // Make sure -0 is handled correctly. + if (self.eqZero()) return 0; + const UT = std.meta.Int(.unsigned, info.bits); if (!self.fitsInTwosComp(info.signedness, info.bits)) { @@ -2509,7 +2512,7 @@ pub const Const = struct { return total_limb_lz + bits - total_limb_bits; } - pub fn ctz(a: Const) Limb { + pub fn ctz(a: Const, bits: Limb) Limb { // Limbs are stored in little-endian order. var result: Limb = 0; for (a.limbs) |limb| { @@ -2517,7 +2520,7 @@ pub const Const = struct { result += limb_tz; if (limb_tz != @sizeOf(Limb) * 8) break; } - return result; + return @min(result, bits); } }; diff --git a/lib/std/mem.zig b/lib/std/mem.zig index 212d09a1a8..d6ca4a9ea1 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -4226,7 +4226,8 @@ pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize { /// The alignment must be a power of 2 and greater than 0. /// Asserts that rounding up the address does not cause integer overflow. 
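// A hedged sketch exercising the std.hash.uint32 mixer added above; the
// expected values are illustrative. Each xorshift/odd-multiply round is
// invertible, so the function permutes u32 values: distinct inputs always
// yield distinct outputs.
const std = @import("std");

test "uint32 is a deterministic bijection" {
    try std.testing.expect(std.hash.uint32(0) != std.hash.uint32(1));
    try std.testing.expectEqual(std.hash.uint32(7), std.hash.uint32(7));
}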
pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T { - assert(isValidAlignGeneric(T, alignment)); + assert(alignment > 0); + assert(std.math.isPowerOfTwo(alignment)); return alignBackwardGeneric(T, addr + (alignment - 1), alignment); } diff --git a/lib/std/process.zig b/lib/std/process.zig index 80be705187..6ad0df868e 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -1131,7 +1131,7 @@ pub fn execve( defer arena_allocator.deinit(); const arena = arena_allocator.allocator(); - const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null); + const argv_buf = try arena.allocSentinel(?[*:0]const u8, argv.len, null); for (argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr; const envp = m: { @@ -1143,7 +1143,7 @@ pub fn execve( } else if (builtin.output_mode == .Exe) { // Then we have Zig start code and this works. // TODO type-safety for null-termination of `os.environ`. - break :m @ptrCast([*:null]?[*:0]u8, os.environ.ptr); + break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr); } else { // TODO come up with a solution for this. @compileError("missing std lib enhancement: std.process.execv implementation has no way to collect the environment variables to forward to the child process"); diff --git a/src/Air.zig b/src/Air.zig index 7ee36206f1..b179a3c024 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -5,16 +5,18 @@ const std = @import("std"); const builtin = @import("builtin"); +const assert = std.debug.assert; + +const Air = @This(); const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; -const assert = std.debug.assert; -const Air = @This(); +const InternPool = @import("InternPool.zig"); +const Module = @import("Module.zig"); instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. /// The first few indexes are reserved. See `ExtraIndex` for the values. extra: []const u32, -values: []const Value, pub const ExtraIndex = enum(u32) { /// Payload index of the main `Block` in the `extra` array. @@ -183,6 +185,18 @@ pub const Inst = struct { /// Allocates stack local memory. /// Uses the `ty` field. alloc, + /// This special instruction only exists temporarily during semantic + /// analysis and is guaranteed to be unreachable in machine code + /// backends. It tracks a set of types that have been stored to an + /// inferred allocation. + /// Uses the `inferred_alloc` field. + inferred_alloc, + /// This special instruction only exists temporarily during semantic + /// analysis and is guaranteed to be unreachable in machine code + /// backends. Used to coordinate alloc_inferred, store_to_inferred_ptr, + /// and resolve_inferred_alloc instructions for comptime code. + /// Uses the `inferred_alloc_comptime` field. + inferred_alloc_comptime, /// If the function will pass the result by-ref, this instruction returns the /// result pointer. Otherwise it is equivalent to `alloc`. /// Uses the `ty` field. @@ -394,11 +408,9 @@ pub const Inst = struct { /// was executed on the operand. /// Uses the `ty_pl` field. Payload is `TryPtr`. try_ptr, - /// A comptime-known value. Uses the `ty_pl` field, payload is index of - /// `values` array. - constant, - /// A comptime-known type. Uses the `ty` field. - const_ty, + /// A comptime-known value via an index into the InternPool. + /// Uses the `interned` field. + interned, /// Notes the beginning of a source code statement and marks the line and column. /// Result type is always void. /// Uses the `dbg_stmt` field. 
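// The alignForwardGeneric hunk earlier in this patch makes the nonzero
// power-of-two requirement explicit; a small illustrative check of the
// rounding behavior, assuming the era-appropriate std.mem.alignForwardGeneric:
const std = @import("std");

test "alignForwardGeneric rounds up to a multiple of the alignment" {
    try std.testing.expectEqual(@as(usize, 16), std.mem.alignForwardGeneric(usize, 9, 8));
    try std.testing.expectEqual(@as(usize, 8), std.mem.alignForwardGeneric(usize, 8, 8));
}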
@@ -408,10 +420,10 @@ pub const Inst = struct { /// Marks the end of a semantic scope for debug info variables. dbg_block_end, /// Marks the start of an inline call. - /// Uses `ty_pl` with the payload being the index of a Value.Function in air.values. + /// Uses the `ty_fn` field. dbg_inline_begin, /// Marks the end of an inline call. - /// Uses `ty_pl` with the payload being the index of a Value.Function in air.values. + /// Uses the `ty_fn` field. dbg_inline_end, /// Marks the beginning of a local variable. The operand is a pointer pointing /// to the storage for the variable. The local may be a const or a var. @@ -837,7 +849,96 @@ pub const Inst = struct { /// The position of an AIR instruction within the `Air` instructions array. pub const Index = u32; - pub const Ref = @import("Zir.zig").Inst.Ref; + pub const Ref = enum(u32) { + u1_type = @enumToInt(InternPool.Index.u1_type), + u8_type = @enumToInt(InternPool.Index.u8_type), + i8_type = @enumToInt(InternPool.Index.i8_type), + u16_type = @enumToInt(InternPool.Index.u16_type), + i16_type = @enumToInt(InternPool.Index.i16_type), + u29_type = @enumToInt(InternPool.Index.u29_type), + u32_type = @enumToInt(InternPool.Index.u32_type), + i32_type = @enumToInt(InternPool.Index.i32_type), + u64_type = @enumToInt(InternPool.Index.u64_type), + i64_type = @enumToInt(InternPool.Index.i64_type), + u80_type = @enumToInt(InternPool.Index.u80_type), + u128_type = @enumToInt(InternPool.Index.u128_type), + i128_type = @enumToInt(InternPool.Index.i128_type), + usize_type = @enumToInt(InternPool.Index.usize_type), + isize_type = @enumToInt(InternPool.Index.isize_type), + c_char_type = @enumToInt(InternPool.Index.c_char_type), + c_short_type = @enumToInt(InternPool.Index.c_short_type), + c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type), + c_int_type = @enumToInt(InternPool.Index.c_int_type), + c_uint_type = @enumToInt(InternPool.Index.c_uint_type), + c_long_type = @enumToInt(InternPool.Index.c_long_type), + c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type), + c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type), + c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type), + c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type), + f16_type = @enumToInt(InternPool.Index.f16_type), + f32_type = @enumToInt(InternPool.Index.f32_type), + f64_type = @enumToInt(InternPool.Index.f64_type), + f80_type = @enumToInt(InternPool.Index.f80_type), + f128_type = @enumToInt(InternPool.Index.f128_type), + anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type), + bool_type = @enumToInt(InternPool.Index.bool_type), + void_type = @enumToInt(InternPool.Index.void_type), + type_type = @enumToInt(InternPool.Index.type_type), + anyerror_type = @enumToInt(InternPool.Index.anyerror_type), + comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type), + comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type), + noreturn_type = @enumToInt(InternPool.Index.noreturn_type), + anyframe_type = @enumToInt(InternPool.Index.anyframe_type), + null_type = @enumToInt(InternPool.Index.null_type), + undefined_type = @enumToInt(InternPool.Index.undefined_type), + enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type), + atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type), + atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type), + calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type), + address_space_type = @enumToInt(InternPool.Index.address_space_type), + 
float_mode_type = @enumToInt(InternPool.Index.float_mode_type), + reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type), + call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type), + prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type), + export_options_type = @enumToInt(InternPool.Index.export_options_type), + extern_options_type = @enumToInt(InternPool.Index.extern_options_type), + type_info_type = @enumToInt(InternPool.Index.type_info_type), + manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type), + manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type), + manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type), + single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type), + slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type), + slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type), + anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type), + generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type), + empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type), + undef = @enumToInt(InternPool.Index.undef), + zero = @enumToInt(InternPool.Index.zero), + zero_usize = @enumToInt(InternPool.Index.zero_usize), + zero_u8 = @enumToInt(InternPool.Index.zero_u8), + one = @enumToInt(InternPool.Index.one), + one_usize = @enumToInt(InternPool.Index.one_usize), + one_u8 = @enumToInt(InternPool.Index.one_u8), + four_u8 = @enumToInt(InternPool.Index.four_u8), + negative_one = @enumToInt(InternPool.Index.negative_one), + calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c), + calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline), + void_value = @enumToInt(InternPool.Index.void_value), + unreachable_value = @enumToInt(InternPool.Index.unreachable_value), + null_value = @enumToInt(InternPool.Index.null_value), + bool_true = @enumToInt(InternPool.Index.bool_true), + bool_false = @enumToInt(InternPool.Index.bool_false), + empty_struct = @enumToInt(InternPool.Index.empty_struct), + generic_poison = @enumToInt(InternPool.Index.generic_poison), + + /// This Ref does not correspond to any AIR instruction or constant + /// value. It is used to handle argument types of var args functions. + var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type), + /// This Ref does not correspond to any AIR instruction or constant + /// value and may instead be used as a sentinel to indicate null. + none = @enumToInt(InternPool.Index.none), + _, + }; /// All instructions have an 8-byte payload, which is contained within /// this union. `Tag` determines which union field is active, as well as @@ -845,6 +946,7 @@ pub const Inst = struct { pub const Data = union { no_op: void, un_op: Ref, + interned: InternPool.Index, bin_op: struct { lhs: Ref, @@ -864,6 +966,10 @@ pub const Inst = struct { // Index into a different array. payload: u32, }, + ty_fn: struct { + ty: Ref, + func: Module.Fn.Index, + }, br: struct { block_inst: Index, operand: Ref, @@ -896,6 +1002,19 @@ pub const Inst = struct { // Index into a different array. 
payload: u32, }, + inferred_alloc_comptime: InferredAllocComptime, + inferred_alloc: InferredAlloc, + + pub const InferredAllocComptime = struct { + decl_index: Module.Decl.Index, + alignment: InternPool.Alignment, + is_const: bool, + }; + + pub const InferredAlloc = struct { + alignment: InternPool.Alignment, + is_const: bool, + }; // Make sure we don't accidentally add a field to make this union // bigger than expected. Note that in Debug builds, Zig is allowed @@ -974,8 +1093,7 @@ pub const FieldParentPtr = struct { pub const Shuffle = struct { a: Inst.Ref, b: Inst.Ref, - // index to air_values - mask: u32, + mask: InternPool.Index, mask_len: u32, }; @@ -1064,15 +1182,15 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[extra.end..][0..extra.data.body_len]; } -pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type { +pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: *const InternPool) Type { const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[ref_int].ty; + if (ref_int < InternPool.static_keys.len) { + return InternPool.static_keys[ref_int].typeOf().toType(); } - return air.typeOfIndex(@intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len)); + return air.typeOfIndex(ref_int - ref_start_index, ip); } -pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { +pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: *const InternPool) Type { const datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst]) { .add, @@ -1114,7 +1232,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .div_exact_optimized, .rem_optimized, .mod_optimized, - => return air.typeOf(datas[inst].bin_op.lhs), + => return air.typeOf(datas[inst].bin_op.lhs, ip), .sqrt, .sin, @@ -1132,7 +1250,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .trunc_float, .neg, .neg_optimized, - => return air.typeOf(datas[inst].un_op), + => return air.typeOf(datas[inst].un_op, ip), .cmp_lt, .cmp_lte, @@ -1159,8 +1277,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .error_set_has_value, => return Type.bool, - .const_ty => return Type.type, - .alloc, .ret_ptr, .err_return_trace, @@ -1171,7 +1287,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .assembly, .block, - .constant, .struct_field_ptr, .struct_field_val, .slice_elem_ptr, @@ -1194,6 +1309,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .try_ptr, => return air.getRefType(datas[inst].ty_pl.ty), + .interned => return ip.typeOf(datas[inst].interned).toType(), + .not, .bitcast, .load, @@ -1243,7 +1360,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .ret_load, .unreach, .trap, - => return Type.initTag(.noreturn), + => return Type.noreturn, .breakpoint, .dbg_stmt, @@ -1280,63 +1397,67 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .wasm_memory_grow => return Type.i32, .wasm_memory_size => return Type.u32, - .bool_to_int => return Type.initTag(.u1), + .bool_to_int => return Type.u1, - .tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0), + .tag_name, .error_name => return Type.slice_const_u8_sentinel_0, .call, .call_always_tail, .call_never_tail, .call_never_inline => { - const callee_ty = air.typeOf(datas[inst].pl_op.operand); - switch (callee_ty.zigTypeTag()) { - .Fn => return callee_ty.fnReturnType(), - .Pointer => return callee_ty.childType().fnReturnType(), - else => unreachable, - } + const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip); + 
return callee_ty.fnReturnTypeIp(ip); }, .slice_elem_val, .ptr_elem_val, .array_elem_val => { - const ptr_ty = air.typeOf(datas[inst].bin_op.lhs); - return ptr_ty.elemType(); + const ptr_ty = air.typeOf(datas[inst].bin_op.lhs, ip); + return ptr_ty.childTypeIp(ip); }, .atomic_load => { - const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr); - return ptr_ty.elemType(); + const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr, ip); + return ptr_ty.childTypeIp(ip); }, .atomic_rmw => { - const ptr_ty = air.typeOf(datas[inst].pl_op.operand); - return ptr_ty.elemType(); + const ptr_ty = air.typeOf(datas[inst].pl_op.operand, ip); + return ptr_ty.childTypeIp(ip); }, - .reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand).childType(), + .reduce, .reduce_optimized => { + const operand_ty = air.typeOf(datas[inst].reduce.operand, ip); + return ip.indexToKey(operand_ty.ip_index).vector_type.child.toType(); + }, - .mul_add => return air.typeOf(datas[inst].pl_op.operand), + .mul_add => return air.typeOf(datas[inst].pl_op.operand, ip), .select => { const extra = air.extraData(Air.Bin, datas[inst].pl_op.payload).data; - return air.typeOf(extra.lhs); + return air.typeOf(extra.lhs, ip); }, .@"try" => { - const err_union_ty = air.typeOf(datas[inst].pl_op.operand); - return err_union_ty.errorUnionPayload(); + const err_union_ty = air.typeOf(datas[inst].pl_op.operand, ip); + return ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type.toType(); }, .work_item_id, .work_group_size, .work_group_id, => return Type.u32, + + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, } } pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { const ref_int = @enumToInt(ref); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - var buffer: Value.ToTypeBuffer = undefined; - return Air.Inst.Ref.typed_value_map[ref_int].val.toType(&buffer); + if (ref_int < ref_start_index) { + const ip_index = @intToEnum(InternPool.Index, ref_int); + return ip_index.toType(); } - const inst_index = ref_int - Air.Inst.Ref.typed_value_map.len; + const inst_index = ref_int - ref_start_index; const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); - assert(air_tags[inst_index] == .const_ty); - return air_datas[inst_index].ty; + return switch (air_tags[inst_index]) { + .interned => air_datas[inst_index].interned.toType(), + else => unreachable, + }; } /// Returns the requested data, as well as the new index which is at the start of the @@ -1350,7 +1471,8 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end u32 => air.extra[i], Inst.Ref => @intToEnum(Inst.Ref, air.extra[i]), i32 => @bitCast(i32, air.extra[i]), - else => @compileError("bad field type"), + InternPool.Index => @intToEnum(InternPool.Index, air.extra[i]), + else => @compileError("bad field type: " ++ @typeName(field.type)), }; i += 1; } @@ -1363,17 +1485,17 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end pub fn deinit(air: *Air, gpa: std.mem.Allocator) void { air.instructions.deinit(gpa); gpa.free(air.extra); - gpa.free(air.values); air.* = undefined; } -const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; +pub const ref_start_index: u32 = InternPool.static_len; -pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { - return @intToEnum(Air.Inst.Ref, ref_start_index + inst); +pub fn indexToRef(inst: Inst.Index) Inst.Ref { + return @intToEnum(Inst.Ref, ref_start_index + inst); } -pub fn refToIndex(inst: Air.Inst.Ref) 
?Air.Inst.Index { +pub fn refToIndex(inst: Inst.Ref) ?Inst.Index { + assert(inst != .none); const ref_int = @enumToInt(inst); if (ref_int >= ref_start_index) { return ref_int - ref_start_index; @@ -1382,18 +1504,23 @@ pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { } } +pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index { + if (inst == .none) return null; + return refToIndex(inst); +} + /// Returns `null` if runtime-known. -pub fn value(air: Air, inst: Air.Inst.Ref) ?Value { +pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value { const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[ref_int].val; + if (ref_int < ref_start_index) { + const ip_index = @intToEnum(InternPool.Index, ref_int); + return ip_index.toValue(); } - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index); const air_datas = air.instructions.items(.data); switch (air.instructions.items(.tag)[inst_index]) { - .constant => return air.values[air_datas[inst_index].ty_pl.payload], - .const_ty => unreachable, - else => return air.typeOfIndex(inst_index).onePossibleValue(), + .interned => return air_datas[inst_index].interned.toValue(), + else => return air.typeOfIndex(inst_index, &mod.intern_pool).onePossibleValue(mod), } } @@ -1406,10 +1533,11 @@ pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 { return bytes[0..end :0]; } -/// Returns whether the given instruction must always be lowered, for instance because it can cause -/// side effects. If an instruction does not need to be lowered, and Liveness determines its result -/// is unused, backends should avoid lowering it. -pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { +/// Returns whether the given instruction must always be lowered, for instance +/// because it can cause side effects. If an instruction does not need to be +/// lowered, and Liveness determines its result is unused, backends should +/// avoid lowering it. 
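// A self-contained sketch of the Ref <-> Index arithmetic above; the
// ref_start_index value here is a hypothetical stand-in for
// InternPool.static_len, not the real constant:
const std = @import("std");

const ref_start_index: u32 = 100; // hypothetical stand-in for InternPool.static_len

fn indexToRef(inst: u32) u32 {
    return ref_start_index + inst;
}

fn refToIndex(ref: u32) ?u32 {
    // Refs below the start index name interned constants, not instructions.
    return if (ref >= ref_start_index) ref - ref_start_index else null;
}

test "refToIndex inverts indexToRef" {
    try std.testing.expectEqual(@as(?u32, 42), refToIndex(indexToRef(42)));
    try std.testing.expectEqual(@as(?u32, null), refToIndex(5));
}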
+pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { const data = air.instructions.items(.data)[inst]; return switch (air.instructions.items(.tag)[inst]) { .arg, @@ -1498,6 +1626,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { .mul_with_overflow, .shl_with_overflow, .alloc, + .inferred_alloc, + .inferred_alloc_comptime, .ret_ptr, .bit_and, .bit_or, @@ -1546,8 +1676,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { .cmp_neq_optimized, .cmp_vector, .cmp_vector_optimized, - .constant, - .const_ty, + .interned, .is_null, .is_non_null, .is_null_ptr, @@ -1616,8 +1745,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool { => false, .assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0, - .load => air.typeOf(data.ty_op.operand).isVolatilePtr(), - .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs).isVolatilePtr(), - .atomic_load => air.typeOf(data.atomic_load.ptr).isVolatilePtr(), + .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip), + .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip), + .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip), }; } diff --git a/src/AstGen.zig b/src/AstGen.zig index b38067fd03..17cf2aae64 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -3934,7 +3934,7 @@ fn fnDecl( var section_gz = decl_gz.makeSubBlock(params_scope); defer section_gz.unstack(); const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: { - const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .const_slice_u8_type } }, fn_proto.ast.section_expr); + const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, fn_proto.ast.section_expr); if (section_gz.instructionsSlice().len == 0) { // In this case we will send a len=0 body which can be encoded more efficiently. 
break :inst inst; @@ -4137,7 +4137,7 @@ fn globalVarDecl( break :inst try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .address_space_type } }, var_decl.ast.addrspace_node); }; const section_inst: Zir.Inst.Ref = if (var_decl.ast.section_node == 0) .none else inst: { - break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .const_slice_u8_type } }, var_decl.ast.section_node); + break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .slice_const_u8_type } }, var_decl.ast.section_node); }; const has_section_or_addrspace = section_inst != .none or addrspace_inst != .none; wip_members.nextDecl(is_pub, is_export, align_inst != .none, has_section_or_addrspace); @@ -4497,7 +4497,7 @@ fn testDecl( .cc_gz = null, .align_ref = .none, .align_gz = null, - .ret_ref = .void_type, + .ret_ref = .anyerror_void_error_union_type, .ret_gz = null, .section_ref = .none, .section_gz = null, @@ -4510,7 +4510,7 @@ fn testDecl( .body_gz = &fn_block, .lib_name = 0, .is_var_args = false, - .is_inferred_error = true, + .is_inferred_error = false, .is_test = true, .is_extern = false, .is_noinline = false, @@ -7878,7 +7878,7 @@ fn unionInit( params: []const Ast.Node.Index, ) InnerError!Zir.Inst.Ref { const union_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]); const field_type = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{ .container_type = union_type, .field_name = field_name, @@ -8100,12 +8100,12 @@ fn builtinCall( if (ri.rl == .ref) { return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]), }); } const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .{ .rl = .none }, params[0]), - .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]), + .field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]), }); return rvalue(gz, ri, result, node); }, @@ -8271,11 +8271,11 @@ fn builtinCall( .align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of), .ptr_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ptr_to_int), - .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .compile_error), + .compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .compile_error), .set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota), .enum_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .enum_to_int), .bool_to_int => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .bool_to_int), - .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .embed_file), + .embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .embed_file), .error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = 
.anyerror_type } }, params[0], .error_name), .set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety), .sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt), @@ -8334,7 +8334,7 @@ fn builtinCall( }, .panic => { try emitDbgNode(gz, node); - return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .panic); + return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .panic); }, .trap => { try emitDbgNode(gz, node); @@ -8450,7 +8450,7 @@ fn builtinCall( }, .c_define => { if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{}); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0]); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0]); const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]); const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{ .node = gz.nodeIndexToRelative(node), @@ -8530,7 +8530,7 @@ fn builtinCall( return rvalue(gz, ri, result, node); }, .call => { - const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .modifier_type } }, params[0]); + const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .call_modifier_type } }, params[0]); const callee = try expr(gz, scope, .{ .rl = .none }, params[1]); const args = try expr(gz, scope, .{ .rl = .none }, params[2]); const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{ @@ -8546,7 +8546,7 @@ fn builtinCall( }, .field_parent_ptr => { const parent_type = try typeExpr(gz, scope, params[0]); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]); const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{ .parent_type = parent_type, .field_name = field_name, @@ -8701,7 +8701,7 @@ fn hasDeclOrField( tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const container_type = try typeExpr(gz, scope, lhs_node); - const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); + const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = container_type, .rhs = name, @@ -8851,7 +8851,7 @@ fn simpleCBuiltin( ) InnerError!Zir.Inst.Ref { const name: []const u8 = if (tag == .c_undef) "C undef" else "C include"; if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name}); - const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, operand_node); + const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, operand_node); _ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = operand, @@ -8869,7 +8869,7 @@ fn offsetOf( tag: Zir.Inst.Tag, ) InnerError!Zir.Inst.Ref { const type_inst = try typeExpr(gz, scope, lhs_node); - const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node); + const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node); const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{ .lhs = type_inst, .rhs = field_name, @@ -10271,6 +10271,8 @@ fn rvalue( as_ty | 
@enumToInt(Zir.Inst.Ref.i32_type), as_ty | @enumToInt(Zir.Inst.Ref.u64_type), as_ty | @enumToInt(Zir.Inst.Ref.i64_type), + as_ty | @enumToInt(Zir.Inst.Ref.u128_type), + as_ty | @enumToInt(Zir.Inst.Ref.i128_type), as_ty | @enumToInt(Zir.Inst.Ref.usize_type), as_ty | @enumToInt(Zir.Inst.Ref.isize_type), as_ty | @enumToInt(Zir.Inst.Ref.c_char_type), @@ -10296,15 +10298,30 @@ fn rvalue( as_ty | @enumToInt(Zir.Inst.Ref.comptime_int_type), as_ty | @enumToInt(Zir.Inst.Ref.comptime_float_type), as_ty | @enumToInt(Zir.Inst.Ref.noreturn_type), + as_ty | @enumToInt(Zir.Inst.Ref.anyframe_type), as_ty | @enumToInt(Zir.Inst.Ref.null_type), as_ty | @enumToInt(Zir.Inst.Ref.undefined_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_noreturn_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_void_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_naked_noreturn_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.fn_ccc_void_no_args_type), - as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), - as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type), as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type), + as_ty | @enumToInt(Zir.Inst.Ref.atomic_order_type), + as_ty | @enumToInt(Zir.Inst.Ref.atomic_rmw_op_type), + as_ty | @enumToInt(Zir.Inst.Ref.calling_convention_type), + as_ty | @enumToInt(Zir.Inst.Ref.address_space_type), + as_ty | @enumToInt(Zir.Inst.Ref.float_mode_type), + as_ty | @enumToInt(Zir.Inst.Ref.reduce_op_type), + as_ty | @enumToInt(Zir.Inst.Ref.call_modifier_type), + as_ty | @enumToInt(Zir.Inst.Ref.prefetch_options_type), + as_ty | @enumToInt(Zir.Inst.Ref.export_options_type), + as_ty | @enumToInt(Zir.Inst.Ref.extern_options_type), + as_ty | @enumToInt(Zir.Inst.Ref.type_info_type), + as_ty | @enumToInt(Zir.Inst.Ref.manyptr_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type), + as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type), + as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_type), + as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_sentinel_0_type), + as_ty | @enumToInt(Zir.Inst.Ref.anyerror_void_error_union_type), + as_ty | @enumToInt(Zir.Inst.Ref.generic_poison_type), + as_ty | @enumToInt(Zir.Inst.Ref.empty_struct_type), as_comptime_int | @enumToInt(Zir.Inst.Ref.zero), as_comptime_int | @enumToInt(Zir.Inst.Ref.one), as_bool | @enumToInt(Zir.Inst.Ref.bool_true), @@ -10677,8 +10694,8 @@ fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !u32 { const string_bytes = &astgen.string_bytes; const str_index = @intCast(u32, string_bytes.items.len); try astgen.appendIdentStr(ident_token, string_bytes); - const key = string_bytes.items[str_index..]; - const gop = try astgen.string_table.getOrPutContextAdapted(gpa, @as([]const u8, key), StringIndexAdapter{ + const key: []const u8 = string_bytes.items[str_index..]; + const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{ .bytes = string_bytes, }, StringIndexContext{ .bytes = string_bytes, diff --git a/src/Autodoc.zig b/src/Autodoc.zig index 879f0a6b15..1cdb768311 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -8,6 +8,7 @@ const CompilationModule = @import("Module.zig"); const File = CompilationModule.File; const Module = @import("Package.zig"); const Tokenizer = std.zig.Tokenizer; +const InternPool = @import("InternPool.zig"); const Zir = @import("Zir.zig"); const Ref = Zir.Inst.Ref; const log = std.log.scoped(.autodoc); @@ -95,8 +96,6 @@ pub fn generateZirData(self: *Autodoc) !void { } 
} - log.debug("Ref map size: {}", .{Ref.typed_value_map.len}); - const root_src_dir = self.comp_module.main_pkg.root_src_directory; const root_src_path = self.comp_module.main_pkg.root_src_path; const joined_src_path = try root_src_dir.join(self.arena, &.{root_src_path}); @@ -108,18 +107,20 @@ pub fn generateZirData(self: *Autodoc) !void { const file = self.comp_module.import_table.get(abs_root_src_path).?; // file is expected to be present in the import table // Append all the types in Zir.Inst.Ref. { - try self.types.append(self.arena, .{ - .ComptimeExpr = .{ .name = "ComptimeExpr" }, - }); - - // this skips Ref.none but it's ok becuse we replaced it with ComptimeExpr - var i: u32 = 1; - while (i <= @enumToInt(Ref.anyerror_void_error_union_type)) : (i += 1) { + comptime std.debug.assert(@enumToInt(InternPool.Index.first_type) == 0); + var i: u32 = 0; + while (i <= @enumToInt(InternPool.Index.last_type)) : (i += 1) { + const ip_index = @intToEnum(InternPool.Index, i); var tmpbuf = std.ArrayList(u8).init(self.arena); - try Ref.typed_value_map[i].val.fmtDebug().format("", .{}, tmpbuf.writer()); + if (ip_index == .generic_poison_type) { + // Not a real type, doesn't have a normal name + try tmpbuf.writer().writeAll("(generic poison)"); + } else { + try ip_index.toType().fmt(self.comp_module).format("", .{}, tmpbuf.writer()); + } try self.types.append( self.arena, - switch (@intToEnum(Ref, i)) { + switch (ip_index) { else => blk: { // TODO: map the remaining refs to a correct type // instead of just assinging "array" to them. @@ -1040,7 +1041,7 @@ fn walkInstruction( .ret_load => { const un_node = data[inst_index].un_node; const res_ptr_ref = un_node.operand; - const res_ptr_inst = @enumToInt(res_ptr_ref) - Ref.typed_value_map.len; + const res_ptr_inst = Zir.refToIndex(res_ptr_ref).?; // TODO: this instruction doesn't let us know trivially if there's // branching involved or not. For now here's the strat: // We search backwarts until `ret_ptr` for `store_node`, @@ -2157,11 +2158,10 @@ fn walkInstruction( const lhs_ref = blk: { var lhs_extra = extra; while (true) { - if (@enumToInt(lhs_extra.data.lhs) < Ref.typed_value_map.len) { + const lhs = Zir.refToIndex(lhs_extra.data.lhs) orelse { break :blk lhs_extra.data.lhs; - } + }; - const lhs = @enumToInt(lhs_extra.data.lhs) - Ref.typed_value_map.len; if (tags[lhs] != .field_val and tags[lhs] != .field_ptr and tags[lhs] != .field_type) break :blk lhs_extra.data.lhs; @@ -2188,8 +2188,7 @@ fn walkInstruction( // TODO: double check that we really don't need type info here const wr = blk: { - if (@enumToInt(lhs_ref) >= Ref.typed_value_map.len) { - const lhs_inst = @enumToInt(lhs_ref) - Ref.typed_value_map.len; + if (Zir.refToIndex(lhs_ref)) |lhs_inst| { if (tags[lhs_inst] == .call or tags[lhs_inst] == .field_call) { break :blk DocData.WalkResult{ .expr = .{ @@ -4672,16 +4671,19 @@ fn walkRef( ref: Ref, need_type: bool, // true when the caller needs also a typeRef for the return value ) AutodocErrors!DocData.WalkResult { - const enum_value = @enumToInt(ref); - if (enum_value <= @enumToInt(Ref.anyerror_void_error_union_type)) { + if (ref == .none) { + return .{ .expr = .{ .comptimeExpr = 0 } }; + } else if (@enumToInt(ref) <= @enumToInt(InternPool.Index.last_type)) { // We can just return a type that indexes into `types` with the // enum value because in the beginning we pre-filled `types` with // the types that are listed in `Ref`. 
return DocData.WalkResult{ .typeRef = .{ .type = @enumToInt(std.builtin.TypeId.Type) }, - .expr = .{ .type = enum_value }, + .expr = .{ .type = @enumToInt(ref) }, }; - } else if (enum_value < Ref.typed_value_map.len) { + } else if (Zir.refToIndex(ref)) |zir_index| { + return self.walkInstruction(file, parent_scope, parent_src, zir_index, need_type); + } else { switch (ref) { else => { panicWithContext( @@ -4774,9 +4776,6 @@ fn walkRef( // } }; // }, } - } else { - const zir_index = enum_value - Ref.typed_value_map.len; - return self.walkInstruction(file, parent_scope, parent_src, zir_index, need_type); } } diff --git a/src/Compilation.zig b/src/Compilation.zig index cbdc789d40..9397bc93a9 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -87,6 +87,7 @@ clang_preprocessor_mode: ClangPreprocessorMode, /// Whether to print clang argvs to stdout. verbose_cc: bool, verbose_air: bool, +verbose_intern_pool: bool, verbose_llvm_ir: ?[]const u8, verbose_llvm_bc: ?[]const u8, verbose_cimport: bool, @@ -226,7 +227,7 @@ const Job = union(enum) { /// Write the constant value for a Decl to the output file. codegen_decl: Module.Decl.Index, /// Write the machine code for a function to the output file. - codegen_func: *Module.Fn, + codegen_func: Module.Fn.Index, /// Render the .h file snippet for the Decl. emit_h_decl: Module.Decl.Index, /// The Decl needs to be analyzed and possibly export itself. @@ -593,6 +594,7 @@ pub const InitOptions = struct { verbose_cc: bool = false, verbose_link: bool = false, verbose_air: bool = false, + verbose_intern_pool: bool = false, verbose_llvm_ir: ?[]const u8 = null, verbose_llvm_bc: ?[]const u8 = null, verbose_cimport: bool = false, @@ -1315,9 +1317,9 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { .global_zir_cache = global_zir_cache, .local_zir_cache = local_zir_cache, .emit_h = emit_h, - .error_name_list = .{}, + .tmp_hack_arena = std.heap.ArenaAllocator.init(gpa), }; - try module.error_name_list.append(gpa, "(no error)"); + try module.init(); break :blk module; } else blk: { @@ -1574,6 +1576,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { .clang_preprocessor_mode = options.clang_preprocessor_mode, .verbose_cc = options.verbose_cc, .verbose_air = options.verbose_air, + .verbose_intern_pool = options.verbose_intern_pool, .verbose_llvm_ir = options.verbose_llvm_ir, .verbose_llvm_bc = options.verbose_llvm_bc, .verbose_cimport = options.verbose_cimport, @@ -2026,6 +2029,13 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void try comp.performAllTheWork(main_progress_node); if (comp.bin_file.options.module) |module| { + if (builtin.mode == .Debug and comp.verbose_intern_pool) { + std.debug.print("intern pool stats for '{s}':\n", .{ + comp.bin_file.options.root_name, + }); + module.intern_pool.dump(); + } + if (comp.bin_file.options.is_test and comp.totalErrorCount() == 0) { // The `test_functions` decl has been intentionally postponed until now, // at which point we must populate it with the list of test functions that @@ -2042,7 +2052,7 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void assert(decl.deletion_flag); assert(decl.dependants.count() == 0); const is_anon = if (decl.zir_decl_index == 0) blk: { - break :blk decl.src_namespace.anon_decls.swapRemove(decl_index); + break :blk module.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index); } else false; try module.clearDecl(decl_index, null); @@ -2523,8 +2533,7 @@ pub fn 
totalErrorCount(self: *Compilation) u32 { // the previous parse success, including compile errors, but we cannot // emit them until the file succeeds parsing. for (module.failed_decls.keys()) |key| { - const decl = module.declPtr(key); - if (decl.getFileScope().okToReportErrors()) { + if (module.declFileScope(key).okToReportErrors()) { total += 1; if (module.cimport_errors.get(key)) |errors| { total += errors.len; @@ -2533,8 +2542,7 @@ pub fn totalErrorCount(self: *Compilation) u32 { } if (module.emit_h) |emit_h| { for (emit_h.failed_decls.keys()) |key| { - const decl = module.declPtr(key); - if (decl.getFileScope().okToReportErrors()) { + if (module.declFileScope(key).okToReportErrors()) { total += 1; } } @@ -2618,7 +2626,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { var it = module.failed_files.iterator(); while (it.next()) |entry| { if (entry.value_ptr.*) |msg| { - try addModuleErrorMsg(&bundle, msg.*); + try addModuleErrorMsg(module, &bundle, msg.*); } else { // Must be ZIR errors. Note that this may include AST errors. // addZirErrorMessages asserts that the tree is loaded. @@ -2631,17 +2639,17 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { var it = module.failed_embed_files.iterator(); while (it.next()) |entry| { const msg = entry.value_ptr.*; - try addModuleErrorMsg(&bundle, msg.*); + try addModuleErrorMsg(module, &bundle, msg.*); } } { var it = module.failed_decls.iterator(); while (it.next()) |entry| { - const decl = module.declPtr(entry.key_ptr.*); + const decl_index = entry.key_ptr.*; // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. - if (decl.getFileScope().okToReportErrors()) { - try addModuleErrorMsg(&bundle, entry.value_ptr.*.*); + if (module.declFileScope(decl_index).okToReportErrors()) { + try addModuleErrorMsg(module, &bundle, entry.value_ptr.*.*); if (module.cimport_errors.get(entry.key_ptr.*)) |cimport_errors| for (cimport_errors) |c_error| { try bundle.addRootErrorMessage(.{ .msg = try bundle.addString(std.mem.span(c_error.msg)), @@ -2662,16 +2670,16 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { if (module.emit_h) |emit_h| { var it = emit_h.failed_decls.iterator(); while (it.next()) |entry| { - const decl = module.declPtr(entry.key_ptr.*); + const decl_index = entry.key_ptr.*; // Skip errors for Decls within files that had a parse failure. // We'll try again once parsing succeeds. - if (decl.getFileScope().okToReportErrors()) { - try addModuleErrorMsg(&bundle, entry.value_ptr.*.*); + if (module.declFileScope(decl_index).okToReportErrors()) { + try addModuleErrorMsg(module, &bundle, entry.value_ptr.*.*); } } } for (module.failed_exports.values()) |value| { - try addModuleErrorMsg(&bundle, value.*); + try addModuleErrorMsg(module, &bundle, value.*); } } @@ -2703,7 +2711,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { const values = module.compile_log_decls.values(); // First one will be the error; subsequent ones will be notes. const err_decl = module.declPtr(keys[0]); - const src_loc = err_decl.nodeOffsetSrcLoc(values[0]); + const src_loc = err_decl.nodeOffsetSrcLoc(values[0], module); const err_msg = Module.ErrorMsg{ .src_loc = src_loc, .msg = "found compile log statement", @@ -2714,12 +2722,12 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle { for (keys[1..], 0..) 
|key, i| { const note_decl = module.declPtr(key); err_msg.notes[i] = .{ - .src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1]), + .src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1], module), .msg = "also here", }; } - try addModuleErrorMsg(&bundle, err_msg); + try addModuleErrorMsg(module, &bundle, err_msg); } } @@ -2775,8 +2783,9 @@ pub const ErrorNoteHashContext = struct { } }; -pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void { +pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void { const gpa = eb.gpa; + const ip = &mod.intern_pool; const err_source = module_err_msg.src_loc.file_scope.getSource(gpa) catch |err| { const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa); defer gpa.free(file_path); @@ -2802,7 +2811,7 @@ pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) .src_loc = .none, }); break; - } else if (module_reference.decl == null) { + } else if (module_reference.decl == .none) { try ref_traces.append(gpa, .{ .decl_name = 0, .src_loc = .none, @@ -2815,7 +2824,7 @@ pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) const rt_file_path = try module_reference.src_loc.file_scope.fullPath(gpa); defer gpa.free(rt_file_path); try ref_traces.append(gpa, .{ - .decl_name = try eb.addString(std.mem.sliceTo(module_reference.decl.?, 0)), + .decl_name = try eb.addString(ip.stringToSliceUnwrap(module_reference.decl).?), .src_loc = try eb.addSourceLocation(.{ .src_path = try eb.addString(rt_file_path), .span_start = span.start, @@ -3204,7 +3213,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v // Tests are always emitted in test binaries. The decl_refs are created by // Module.populateTestFunctions, but this will not queue body analysis, so do // that now. 
-                try module.ensureFuncBodyAnalysisQueued(decl.val.castTag(.function).?.data);
+                const func_index = module.intern_pool.indexToFunc(decl.val.ip_index).unwrap().?;
+                try module.ensureFuncBodyAnalysisQueued(func_index);
             }
         },
         .update_embed_file => |embed_file| {
@@ -3228,7 +3238,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
                 try module.failed_decls.ensureUnusedCapacity(gpa, 1);
                 module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
                     gpa,
-                    decl.srcLoc(),
+                    decl.srcLoc(module),
                     "unable to update line number: {s}",
                     .{@errorName(err)},
                 ));
@@ -3841,7 +3851,7 @@ fn reportRetryableEmbedFileError(
     const mod = comp.bin_file.options.module.?;
     const gpa = mod.gpa;
 
-    const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc();
+    const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc(mod);
 
     const err_msg = if (embed_file.pkg.root_src_directory.path) |dir_path|
         try Module.ErrorMsg.create(
@@ -5417,6 +5427,7 @@ fn buildOutputFromZig(
         .verbose_cc = comp.verbose_cc,
         .verbose_link = comp.bin_file.options.verbose_link,
         .verbose_air = comp.verbose_air,
+        .verbose_intern_pool = comp.verbose_intern_pool,
         .verbose_llvm_ir = comp.verbose_llvm_ir,
         .verbose_llvm_bc = comp.verbose_llvm_bc,
         .verbose_cimport = comp.verbose_cimport,
@@ -5495,6 +5506,7 @@ pub fn build_crt_file(
         .verbose_cc = comp.verbose_cc,
         .verbose_link = comp.bin_file.options.verbose_link,
         .verbose_air = comp.verbose_air,
+        .verbose_intern_pool = comp.verbose_intern_pool,
         .verbose_llvm_ir = comp.verbose_llvm_ir,
         .verbose_llvm_bc = comp.verbose_llvm_bc,
         .verbose_cimport = comp.verbose_cimport,
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 74155ca657..c208fcf18a 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -1,75 +1,903 @@
+//! All interned objects have both a value and a type.
+//! This data structure is self-contained, with the following exceptions:
+//! * type_struct via Module.Struct.Index
+//! * type_opaque via Module.Namespace.Index and Module.Decl.Index
+
+/// Maps `Key` to `Index`. `Key` objects are not stored anywhere; they are
+/// constructed lazily.
 map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
 items: std.MultiArrayList(Item) = .{},
 extra: std.ArrayListUnmanaged(u32) = .{},
+/// On 32-bit systems, this array is ignored and extra is used for everything.
+/// On 64-bit systems, this array is used for big integers and associated metadata.
+/// Use the helper methods instead of accessing this directly in order to not
+/// violate the above mechanism.
+limbs: std.ArrayListUnmanaged(u64) = .{},
+/// In order to store references to strings in fewer bytes, we copy all
+/// string bytes into here. String bytes can be null. It is up to whoever
+/// is referencing the data here whether they want to store both index and length,
+/// thus allowing null bytes, or store only index, and use null-termination. The
+/// `string_bytes` array is agnostic to either usage.
+string_bytes: std.ArrayListUnmanaged(u8) = .{},
 
-const InternPool = @This();
+/// Struct objects are stored in this data structure because:
+/// * They contain pointers such as the field maps.
+/// * They need to be mutated after creation.
+allocated_structs: std.SegmentedList(Module.Struct, 0) = .{},
+/// When a Struct object is freed from `allocated_structs`, it is pushed into this stack.
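// A minimal sketch of the string_bytes scheme described above, assuming only
// std; strings live in one shared byte buffer and are referred to by a u32
// offset, optionally with a null terminator so no separate length is stored:
const std = @import("std");

test "string_bytes offsets" {
    const gpa = std.testing.allocator;
    var string_bytes = std.ArrayListUnmanaged(u8){};
    defer string_bytes.deinit(gpa);

    const offset = @intCast(u32, string_bytes.items.len);
    try string_bytes.appendSlice(gpa, "hello");
    try string_bytes.append(gpa, 0); // null terminator

    const s = std.mem.sliceTo(string_bytes.items[offset..], 0);
    try std.testing.expectEqualStrings("hello", s);
}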
+structs_free_list: std.ArrayListUnmanaged(Module.Struct.Index) = .{}, + +/// Union objects are stored in this data structure because: +/// * They contain pointers such as the field maps. +/// * They need to be mutated after creation. +allocated_unions: std.SegmentedList(Module.Union, 0) = .{}, +/// When a Union object is freed from `allocated_unions`, it is pushed into this stack. +unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{}, + +/// Fn objects are stored in this data structure because: +/// * They need to be mutated after creation. +allocated_funcs: std.SegmentedList(Module.Fn, 0) = .{}, +/// When a Fn object is freed from `allocated_funcs`, it is pushed into this stack. +funcs_free_list: std.ArrayListUnmanaged(Module.Fn.Index) = .{}, + +/// InferredErrorSet objects are stored in this data structure because: +/// * They contain pointers such as the errors map and the set of other inferred error sets. +/// * They need to be mutated after creation. +allocated_inferred_error_sets: std.SegmentedList(Module.Fn.InferredErrorSet, 0) = .{}, +/// When an InferredErrorSet object is freed from `allocated_inferred_error_sets`, it is +/// pushed into this stack. +inferred_error_sets_free_list: std.ArrayListUnmanaged(Module.Fn.InferredErrorSet.Index) = .{}, + +/// Some types such as enums, structs, and unions need to store mappings from field names +/// to field index, or value to field index. In such cases, they will store the underlying +/// field names and values directly, relying on one of these maps, stored separately, +/// to provide lookup. +maps: std.ArrayListUnmanaged(std.AutoArrayHashMapUnmanaged(void, void)) = .{}, + +/// Used for finding the index inside `string_bytes`. +string_table: std.HashMapUnmanaged( + u32, + void, + std.hash_map.StringIndexContext, + std.hash_map.default_max_load_percentage, +) = .{}, + +const builtin = @import("builtin"); const std = @import("std"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; +const BigIntConst = std.math.big.int.Const; +const BigIntMutable = std.math.big.int.Mutable; +const Limb = std.math.big.Limb; +const Hash = std.hash.Wyhash; + +const InternPool = @This(); +const Module = @import("Module.zig"); +const Sema = @import("Sema.zig"); const KeyAdapter = struct { intern_pool: *const InternPool, pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool { _ = b_void; - return ctx.intern_pool.indexToKey(@intToEnum(Index, b_map_index)).eql(a); + return ctx.intern_pool.indexToKey(@intToEnum(Index, b_map_index)).eql(a, ctx.intern_pool); } pub fn hash(ctx: @This(), a: Key) u32 { + return a.hash32(ctx.intern_pool); + } +}; + +/// An index into `maps` which might be `none`. +pub const OptionalMapIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn unwrap(oi: OptionalMapIndex) ?MapIndex { + if (oi == .none) return null; + return @intToEnum(MapIndex, @enumToInt(oi)); + } +}; + +/// An index into `maps`. +pub const MapIndex = enum(u32) { + _, + + pub fn toOptional(i: MapIndex) OptionalMapIndex { + return @intToEnum(OptionalMapIndex, @enumToInt(i)); + } +}; + +pub const RuntimeIndex = enum(u32) { + zero = 0, + comptime_field_ptr = std.math.maxInt(u32), + _, + + pub fn increment(ri: *RuntimeIndex) void { + ri.* = @intToEnum(RuntimeIndex, @enumToInt(ri.*) + 1); + } +}; + +/// An index into `string_bytes`. +pub const String = enum(u32) { + _, +};
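+ +// The `(void, void)` map above works together with `KeyAdapter`: keys are +// never stored in the map; they are reconstructed on demand via `indexToKey`. +// A minimal sketch of a lookup, assuming the `get`-style API used elsewhere +// in this commit (illustrative, not the exact implementation): +// +// const adapter: KeyAdapter = .{ .intern_pool = &ip }; +// const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); +// if (gop.found_existing) return @intToEnum(Index, gop.index); +// // ...otherwise append a new Item that encodes `key` into items/extra. + +/// An index into `string_bytes`.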
+pub const NullTerminatedString = enum(u32) { + /// This is distinct from `none` - it is a valid index that represents the empty string. + empty = 0, + _, + + pub fn toString(self: NullTerminatedString) String { + return @intToEnum(String, @enumToInt(self)); + } + + pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString { + return @intToEnum(OptionalNullTerminatedString, @enumToInt(self)); + } + + const Adapter = struct { + strings: []const NullTerminatedString, + + pub fn eql(ctx: @This(), a: NullTerminatedString, b_void: void, b_map_index: usize) bool { + _ = b_void; + return a == ctx.strings[b_map_index]; + } + + pub fn hash(ctx: @This(), a: NullTerminatedString) u32 { + _ = ctx; + return std.hash.uint32(@enumToInt(a)); + } + }; + + /// Compare based on integer value alone, ignoring the string contents. + pub fn indexLessThan(ctx: void, a: NullTerminatedString, b: NullTerminatedString) bool { _ = ctx; - return a.hash(); + return @enumToInt(a) < @enumToInt(b); + } + + pub fn toUnsigned(self: NullTerminatedString, ip: *const InternPool) ?u32 { + const s = ip.stringToSlice(self); + if (s.len > 1 and s[0] == '0') return null; + if (std.mem.indexOfScalar(u8, s, '_')) |_| return null; + return std.fmt.parseUnsigned(u32, s, 10) catch null; + } + + const FormatData = struct { + string: NullTerminatedString, + ip: *const InternPool, + }; + fn format( + data: FormatData, + comptime specifier: []const u8, + _: std.fmt.FormatOptions, + writer: anytype, + ) @TypeOf(writer).Error!void { + const s = data.ip.stringToSlice(data.string); + if (comptime std.mem.eql(u8, specifier, "")) { + try writer.writeAll(s); + } else if (comptime std.mem.eql(u8, specifier, "i")) { + try writer.print("{}", .{std.zig.fmtId(s)}); + } else @compileError("invalid format string '" ++ specifier ++ "' for '" ++ @typeName(NullTerminatedString) ++ "'"); + } + + pub fn fmt(self: NullTerminatedString, ip: *const InternPool) std.fmt.Formatter(format) { + return .{ .data = .{ .string = self, .ip = ip } }; + } +}; + +/// An index into `string_bytes` which might be `none`. +pub const OptionalNullTerminatedString = enum(u32) { + /// This is distinct from `none` - it is a valid index that represents the empty string. + empty = 0, + none = std.math.maxInt(u32), + _, + + pub fn unwrap(oi: OptionalNullTerminatedString) ?NullTerminatedString { + if (oi == .none) return null; + return @intToEnum(NullTerminatedString, @enumToInt(oi)); } }; pub const Key = union(enum) { - int_type: struct { - signedness: std.builtin.Signedness, - bits: u16, - }, - ptr_type: struct { - elem_type: Index, - sentinel: Index, - alignment: u16, - size: std.builtin.Type.Pointer.Size, - is_const: bool, - is_volatile: bool, - is_allowzero: bool, - address_space: std.builtin.AddressSpace, - }, - array_type: struct { - len: u64, - child: Index, - sentinel: Index, - }, - vector_type: struct { - len: u32, - child: Index, - }, - optional_type: struct { - payload_type: Index, - }, - error_union_type: struct { + int_type: IntType, + ptr_type: PtrType, + array_type: ArrayType, + vector_type: VectorType, + opt_type: Index, + /// `anyframe->T`. The payload is the child type, which may be `none` to indicate + /// `anyframe`. + anyframe_type: Index, + error_union_type: ErrorUnionType, + simple_type: SimpleType, + /// This represents a struct that has been explicitly declared in source code, + /// or was created with `@Type`. It is unique and based on a declaration. + /// It may be a tuple, if declared like this: `struct {A, B, C}`.
+ struct_type: StructType, + /// This is an anonymous struct or tuple type which has no corresponding + /// declaration. It is used for types that have no `struct` keyword in the + /// source code, and were not created via `@Type`. + anon_struct_type: AnonStructType, + union_type: UnionType, + opaque_type: OpaqueType, + enum_type: EnumType, + func_type: FuncType, + error_set_type: ErrorSetType, + inferred_error_set_type: Module.Fn.InferredErrorSet.Index, + + /// Typed `undefined`. This will never be `none`; untyped `undefined` is represented + /// via `simple_value` and has a named `Index` tag for it. + undef: Index, + runtime_value: TypeValue, + simple_value: SimpleValue, + variable: Variable, + extern_func: ExternFunc, + func: Func, + int: Key.Int, + err: Error, + error_union: ErrorUnion, + enum_literal: NullTerminatedString, + /// A specific enum tag, indicated by the integer tag value. + enum_tag: EnumTag, + /// An empty enum or union. TODO: this value's existence is strange, because such a type in + /// reality has no values. See #15909. + /// Payload is the type for which we are an empty value. + empty_enum_value: Index, + float: Float, + ptr: Ptr, + opt: Opt, + /// An instance of a struct, array, or vector. + /// Each element/field stored as an `Index`. + /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, + /// so the slice length will be one more than the type's array length. + aggregate: Aggregate, + /// An instance of a union. + un: Union, + + /// A comptime function call with a memoized result. + memoized_call: Key.MemoizedCall, + + pub const TypeValue = extern struct { + ty: Index, + val: Index, + }; + + pub const IntType = std.builtin.Type.Int; + + /// Extern for hashing via memory reinterpretation. + pub const ErrorUnionType = extern struct { error_set_type: Index, payload_type: Index, - }, - simple: Simple, + }; - pub fn hash(key: Key) u32 { - var hasher = std.hash.Wyhash.init(0); - switch (key) { - .int_type => |int_type| { - std.hash.autoHash(&hasher, int_type); - }, - .array_type => |array_type| { - std.hash.autoHash(&hasher, array_type); - }, - else => @panic("TODO"), + pub const ErrorSetType = struct { + /// Set of error names, sorted by null terminated string index. + names: []const NullTerminatedString, + /// This is ignored by `get` but will always be provided by `indexToKey`. + names_map: OptionalMapIndex = .none, + + /// Look up field index based on field name. + pub fn nameIndex(self: ErrorSetType, ip: *const InternPool, name: NullTerminatedString) ?u32 { + const map = &ip.maps.items[@enumToInt(self.names_map.unwrap().?)]; + const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; + const field_index = map.getIndexAdapted(name, adapter) orelse return null; + return @intCast(u32, field_index); } - return @truncate(u32, hasher.final()); + }; + + /// Extern layout so it can be hashed with `std.mem.asBytes`. + pub const PtrType = extern struct { + child: Index, + sentinel: Index = .none, + flags: Flags = .{}, + packed_offset: PackedOffset = .{ .bit_offset = 0, .host_size = 0 }, + + pub const VectorIndex = enum(u16) { + none = std.math.maxInt(u16), + runtime = std.math.maxInt(u16) - 1, + _, + }; + + pub const Flags = packed struct(u32) { + size: Size = .One, + /// `none` indicates the ABI alignment of the pointee_type. In this + /// case, this field *must* be set to `none`, otherwise the + /// `InternPool` equality and hashing functions will return incorrect + /// results. 
+ alignment: Alignment = .none, + is_const: bool = false, + is_volatile: bool = false, + is_allowzero: bool = false, + /// See src/target.zig defaultAddressSpace function for how to obtain + /// an appropriate value for this field. + address_space: AddressSpace = .generic, + vector_index: VectorIndex = .none, + }; + + pub const PackedOffset = packed struct(u32) { + /// If this is non-zero it means the pointer points to a sub-byte + /// range of data, which is backed by a "host integer" with this + /// number of bytes. + /// When host_size=pointee_abi_size and bit_offset=0, this must be + /// represented with host_size=0 instead. + host_size: u16, + bit_offset: u16, + }; + + pub const Size = std.builtin.Type.Pointer.Size; + pub const AddressSpace = std.builtin.AddressSpace; + }; + + /// Extern so that hashing can be done via memory reinterpretation. + pub const ArrayType = extern struct { + len: u64, + child: Index, + sentinel: Index = .none, + }; + + /// Extern so that hashing can be done via memory reinterpretation. + pub const VectorType = extern struct { + len: u32, + child: Index, + }; + + pub const OpaqueType = extern struct { + /// The Decl that corresponds to the opaque itself. + decl: Module.Decl.Index, + /// Represents the declarations inside this opaque. + namespace: Module.Namespace.Index, + }; + + pub const StructType = extern struct { + /// The `none` tag is used to represent a struct with no fields. + index: Module.Struct.OptionalIndex, + /// May be `none` if the struct has no declarations. + namespace: Module.Namespace.OptionalIndex, + }; + + pub const AnonStructType = struct { + types: []const Index, + /// This may be empty, indicating this is a tuple. + names: []const NullTerminatedString, + /// These elements may be `none`, indicating runtime-known. + values: []const Index, + + pub fn isTuple(self: AnonStructType) bool { + return self.names.len == 0; + } + }; + + pub const UnionType = struct { + index: Module.Union.Index, + runtime_tag: RuntimeTag, + + pub const RuntimeTag = enum { none, safety, tagged }; + + pub fn hasTag(self: UnionType) bool { + return switch (self.runtime_tag) { + .none => false, + .tagged, .safety => true, + }; + } + }; + + pub const EnumType = struct { + /// The Decl that corresponds to the enum itself. + decl: Module.Decl.Index, + /// Represents the declarations inside this enum. + namespace: Module.Namespace.OptionalIndex, + /// An integer type which is used for the numerical value of the enum. + /// This field is present regardless of whether the enum has an + /// explicitly provided tag type or an auto-numbered one. + tag_ty: Index, + /// Set of field names in declaration order. + names: []const NullTerminatedString, + /// Maps integer tag value to field index. + /// Entries are in declaration order, same as `names`. + /// If this is empty, it means the enum tags are auto-numbered. + values: []const Index, + tag_mode: TagMode, + /// This is ignored by `get` but will always be provided by `indexToKey`. + names_map: OptionalMapIndex = .none, + /// This is ignored by `get` but will be provided by `indexToKey` when + /// a value map exists. + values_map: OptionalMapIndex = .none, + + pub const TagMode = enum { + /// The integer tag type was auto-numbered by Zig. + auto, + /// The integer tag type was provided by the enum declaration, and the enum + /// is exhaustive. + explicit, + /// The integer tag type was provided by the enum declaration, and the enum + /// is non-exhaustive. + nonexhaustive, + }; + + /// Look up field index based on field name.
+ pub fn nameIndex(self: EnumType, ip: *const InternPool, name: NullTerminatedString) ?u32 { + const map = &ip.maps.items[@enumToInt(self.names_map.unwrap().?)]; + const adapter: NullTerminatedString.Adapter = .{ .strings = self.names }; + const field_index = map.getIndexAdapted(name, adapter) orelse return null; + return @intCast(u32, field_index); + } + + /// Look up field index based on tag value. + /// Asserts that `tag_val` is not `none`. + /// This function returns `null` when `tag_val` does not have the + /// integer tag type of the enum. + pub fn tagValueIndex(self: EnumType, ip: *const InternPool, tag_val: Index) ?u32 { + assert(tag_val != .none); + // TODO: we should probably decide a single interface for this function, but currently + // it's being called with both tag values and underlying ints. Fix this! + const int_tag_val = switch (ip.indexToKey(tag_val)) { + .enum_tag => |enum_tag| enum_tag.int, + .int => tag_val, + else => unreachable, + }; + if (self.values_map.unwrap()) |values_map| { + const map = &ip.maps.items[@enumToInt(values_map)]; + const adapter: Index.Adapter = .{ .indexes = self.values }; + const field_index = map.getIndexAdapted(int_tag_val, adapter) orelse return null; + return @intCast(u32, field_index); + } + // Auto-numbered enum. Convert `int_tag_val` to field index. + switch (ip.indexToKey(int_tag_val).int.storage) { + .u64 => |x| { + if (x >= self.names.len) return null; + return @intCast(u32, x); + }, + .i64, .big_int => return null, // out of range + .lazy_align, .lazy_size => unreachable, + } + } + }; + + pub const IncompleteEnumType = struct { + /// Same as corresponding `EnumType` field. + decl: Module.Decl.Index, + /// Same as corresponding `EnumType` field. + namespace: Module.Namespace.OptionalIndex, + /// The field names and field values are not known yet, but + /// the number of fields must be known ahead of time. + fields_len: u32, + /// This information is needed so that the size does not change + /// later when populating field values. + has_values: bool, + /// Same as corresponding `EnumType` field. + tag_mode: EnumType.TagMode, + /// This may be updated via `setTagType` later. + tag_ty: Index = .none, + + pub fn toEnumType(self: @This()) EnumType { + return .{ + .decl = self.decl, + .namespace = self.namespace, + .tag_ty = self.tag_ty, + .tag_mode = self.tag_mode, + .names = &.{}, + .values = &.{}, + }; + } + + /// Only the decl is used for hashing and equality, so we can construct + /// this minimal key for use with `map`. + pub fn toKey(self: @This()) Key { + return .{ .enum_type = self.toEnumType() }; + } + };
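+ + // Hedged usage sketch for the enum lookups above (`ip`, `some_enum_ty`, and + // `interned_name` are illustrative names): both lookups return the + // declaration-order field index. + // + // const enum_type = ip.indexToKey(some_enum_ty).enum_type; + // const i = enum_type.nameIndex(&ip, interned_name) orelse return null; + // assert(enum_type.names[i] == interned_name); + + pub const FuncType = struct { + param_types: []Index, + return_type: Index, + /// Tells whether a parameter is comptime. See `paramIsComptime` helper + /// method for accessing this. + comptime_bits: u32, + /// Tells whether a parameter is noalias. See `paramIsNoalias` helper + /// method for accessing this. + noalias_bits: u32, + /// `none` indicates the function has the default alignment for + /// function code on the target. In this case, this field *must* be set + /// to `none`, otherwise the `InternPool` equality and hashing + /// functions will return incorrect results.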
+ alignment: Alignment, + cc: std.builtin.CallingConvention, + is_var_args: bool, + is_generic: bool, + is_noinline: bool, + align_is_generic: bool, + cc_is_generic: bool, + section_is_generic: bool, + addrspace_is_generic: bool, + + pub fn paramIsComptime(self: @This(), i: u5) bool { + assert(i < self.param_types.len); + return @truncate(u1, self.comptime_bits >> i) != 0; + } + + pub fn paramIsNoalias(self: @This(), i: u5) bool { + assert(i < self.param_types.len); + return @truncate(u1, self.noalias_bits >> i) != 0; + } + }; + + pub const Variable = struct { + ty: Index, + init: Index, + decl: Module.Decl.Index, + lib_name: OptionalNullTerminatedString = .none, + is_extern: bool = false, + is_const: bool = false, + is_threadlocal: bool = false, + is_weak_linkage: bool = false, + }; + + pub const ExternFunc = struct { + ty: Index, + /// The Decl that corresponds to the function itself. + decl: Module.Decl.Index, + /// Library name if specified. + /// For example `extern "c" fn write(...) usize` would have 'c' as library name. + /// Index into the string table bytes. + lib_name: OptionalNullTerminatedString, + }; + + /// Extern so it can be hashed by reinterpreting memory. + pub const Func = extern struct { + ty: Index, + index: Module.Fn.Index, + }; + + pub const Int = struct { + ty: Index, + storage: Storage, + + pub const Storage = union(enum) { + u64: u64, + i64: i64, + big_int: BigIntConst, + lazy_align: Index, + lazy_size: Index, + + /// Big enough to fit any non-BigInt value + pub const BigIntSpace = struct { + /// The +1 is headroom so that operations such as incrementing once + /// or decrementing once are possible without using an allocator. + limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb, + }; + + pub fn toBigInt(storage: Storage, space: *BigIntSpace) BigIntConst { + return switch (storage) { + .big_int => |x| x, + inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(), + .lazy_align, .lazy_size => unreachable, + }; + } + }; + }; + + pub const Error = extern struct { + ty: Index, + name: NullTerminatedString, + }; + + pub const ErrorUnion = struct { + ty: Index, + val: Value, + + pub const Value = union(enum) { + err_name: NullTerminatedString, + payload: Index, + }; + }; + + pub const EnumTag = extern struct { + /// The enum type. + ty: Index, + /// The integer tag value which has the integer tag type of the enum. + int: Index, + }; + + pub const Float = struct { + ty: Index, + /// The storage used must match the size of the float type being represented. + storage: Storage, + + pub const Storage = union(enum) { + f16: f16, + f32: f32, + f64: f64, + f80: f80, + f128: f128, + }; + }; + + pub const Ptr = struct { + /// This is the pointer type, not the element type. + ty: Index, + /// The value of the address that the pointer points to. + addr: Addr, + /// This could be `none` if size is not a slice. + len: Index = .none, + + pub const Addr = union(enum) { + decl: Module.Decl.Index, + mut_decl: MutDecl, + comptime_field: Index, + int: Index, + eu_payload: Index, + opt_payload: Index, + elem: BaseIndex, + field: BaseIndex, + + pub const MutDecl = struct { + decl: Module.Decl.Index, + runtime_index: RuntimeIndex, + }; + pub const BaseIndex = struct { + base: Index, + index: u64, + }; + }; + }; + + /// `null` is represented by the `val` field being `none`. + pub const Opt = extern struct { + /// This is the optional type; not the payload type. + ty: Index, + /// This could be `none`, indicating the optional is `null`. 
+ val: Index, + }; + + pub const Union = extern struct { + /// This is the union type; not the field type. + ty: Index, + /// Indicates the active field. + tag: Index, + /// The value of the active field. + val: Index, + }; + + pub const Aggregate = struct { + ty: Index, + storage: Storage, + + pub const Storage = union(enum) { + bytes: []const u8, + elems: []const Index, + repeated_elem: Index, + + pub fn values(self: *const Storage) []const Index { + return switch (self.*) { + .bytes => &.{}, + .elems => |elems| elems, + .repeated_elem => |*elem| @as(*const [1]Index, elem), + }; + } + }; + }; + + pub const MemoizedCall = struct { + func: Module.Fn.Index, + arg_values: []const Index, + result: Index, + }; + + pub fn hash32(key: Key, ip: *const InternPool) u32 { + return @truncate(u32, key.hash64(ip)); } - pub fn eql(a: Key, b: Key) bool { - const KeyTag = std.meta.Tag(Key); + pub fn hash64(key: Key, ip: *const InternPool) u64 { + const asBytes = std.mem.asBytes; + const KeyTag = @typeInfo(Key).Union.tag_type.?; + const seed = @enumToInt(@as(KeyTag, key)); + return switch (key) { + // TODO: assert no padding in these types + inline .ptr_type, + .func, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .simple_value, + .opt, + .struct_type, + .undef, + .err, + .enum_literal, + .enum_tag, + .empty_enum_value, + .inferred_error_set_type, + .un, + => |x| Hash.hash(seed, asBytes(&x)), + + .int_type => |x| Hash.hash(seed + @enumToInt(x.signedness), asBytes(&x.bits)), + .union_type => |x| Hash.hash(seed + @enumToInt(x.runtime_tag), asBytes(&x.index)), + + .error_union => |x| switch (x.val) { + .err_name => |y| Hash.hash(seed + 0, asBytes(&x.ty) ++ asBytes(&y)), + .payload => |y| Hash.hash(seed + 1, asBytes(&x.ty) ++ asBytes(&y)), + }, + + .runtime_value => |x| Hash.hash(seed, asBytes(&x.val)), + .opaque_type => |x| Hash.hash(seed, asBytes(&x.decl)), + + .enum_type => |enum_type| { + var hasher = Hash.init(seed); + std.hash.autoHash(&hasher, enum_type.decl); + return hasher.final(); + }, + + .variable => |variable| { + var hasher = Hash.init(seed); + std.hash.autoHash(&hasher, variable.decl); + return hasher.final(); + }, + .extern_func => |x| Hash.hash(seed, asBytes(&x.ty) ++ asBytes(&x.decl)), + + .int => |int| { + var hasher = Hash.init(seed); + // Canonicalize all integers by converting them to BigIntConst. + switch (int.storage) { + .u64, .i64, .big_int => { + var buffer: Key.Int.Storage.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + + std.hash.autoHash(&hasher, int.ty); + std.hash.autoHash(&hasher, big_int.positive); + for (big_int.limbs) |limb| std.hash.autoHash(&hasher, limb); + }, + .lazy_align, .lazy_size => |lazy_ty| { + std.hash.autoHash( + &hasher, + @as(@typeInfo(Key.Int.Storage).Union.tag_type.?, int.storage), + ); + std.hash.autoHash(&hasher, lazy_ty); + }, + } + return hasher.final(); + }, + + .float => |float| { + var hasher = Hash.init(seed); + std.hash.autoHash(&hasher, float.ty); + switch (float.storage) { + inline else => |val| std.hash.autoHash( + &hasher, + @bitCast(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(val))), val), + ), + } + return hasher.final(); + }, + + .ptr => |ptr| { + // Int-to-ptr pointers are hashed separately from decl-referencing pointers. + // This is sound due to pointer provenance rules. + const addr: @typeInfo(Key.Ptr.Addr).Union.tag_type.?
= ptr.addr; + const seed2 = seed + @enumToInt(addr); + const common = asBytes(&ptr.ty) ++ asBytes(&ptr.len); + return switch (ptr.addr) { + .decl => |x| Hash.hash(seed2, common ++ asBytes(&x)), + + .mut_decl => |x| Hash.hash( + seed2, + asBytes(&x.decl) ++ asBytes(&x.runtime_index), + ), + + .int, .eu_payload, .opt_payload, .comptime_field => |int| Hash.hash( + seed2, + asBytes(&int), + ), + + .elem, .field => |x| Hash.hash( + seed2, + asBytes(&x.base) ++ asBytes(&x.index), + ), + }; + }, + + .aggregate => |aggregate| { + var hasher = Hash.init(seed); + std.hash.autoHash(&hasher, aggregate.ty); + const len = ip.aggregateTypeLen(aggregate.ty); + const child = switch (ip.indexToKey(aggregate.ty)) { + .array_type => |array_type| array_type.child, + .vector_type => |vector_type| vector_type.child, + .anon_struct_type, .struct_type => .none, + else => unreachable, + }; + + if (child == .u8_type) { + switch (aggregate.storage) { + .bytes => |bytes| for (bytes[0..@intCast(usize, len)]) |byte| { + std.hash.autoHash(&hasher, KeyTag.int); + std.hash.autoHash(&hasher, byte); + }, + .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| { + const elem_key = ip.indexToKey(elem); + std.hash.autoHash(&hasher, @as(KeyTag, elem_key)); + switch (elem_key) { + .undef => {}, + .int => |int| std.hash.autoHash( + &hasher, + @intCast(u8, int.storage.u64), + ), + else => unreachable, + } + }, + .repeated_elem => |elem| { + const elem_key = ip.indexToKey(elem); + var remaining = len; + while (remaining > 0) : (remaining -= 1) { + std.hash.autoHash(&hasher, @as(KeyTag, elem_key)); + switch (elem_key) { + .undef => {}, + .int => |int| std.hash.autoHash( + &hasher, + @intCast(u8, int.storage.u64), + ), + else => unreachable, + } + } + }, + } + return hasher.final(); + } + + switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| for (elems[0..@intCast(usize, len)]) |elem| + std.hash.autoHash(&hasher, elem), + .repeated_elem => |elem| { + var remaining = len; + while (remaining > 0) : (remaining -= 1) std.hash.autoHash(&hasher, elem); + }, + } + return hasher.final(); + }, + + .error_set_type => |error_set_type| { + var hasher = Hash.init(seed); + for (error_set_type.names) |elem| std.hash.autoHash(&hasher, elem); + return hasher.final(); + }, + + .anon_struct_type => |anon_struct_type| { + var hasher = Hash.init(seed); + for (anon_struct_type.types) |elem| std.hash.autoHash(&hasher, elem); + for (anon_struct_type.values) |elem| std.hash.autoHash(&hasher, elem); + for (anon_struct_type.names) |elem| std.hash.autoHash(&hasher, elem); + return hasher.final(); + }, + + .func_type => |func_type| { + var hasher = Hash.init(seed); + for (func_type.param_types) |param_type| std.hash.autoHash(&hasher, param_type); + std.hash.autoHash(&hasher, func_type.return_type); + std.hash.autoHash(&hasher, func_type.comptime_bits); + std.hash.autoHash(&hasher, func_type.noalias_bits); + std.hash.autoHash(&hasher, func_type.alignment); + std.hash.autoHash(&hasher, func_type.cc); + std.hash.autoHash(&hasher, func_type.is_var_args); + std.hash.autoHash(&hasher, func_type.is_generic); + std.hash.autoHash(&hasher, func_type.is_noinline); + return hasher.final(); + }, + + .memoized_call => |memoized_call| { + var hasher = Hash.init(seed); + std.hash.autoHash(&hasher, memoized_call.func); + for (memoized_call.arg_values) |arg| std.hash.autoHash(&hasher, arg); + return hasher.final(); + }, + }; + } + + pub fn eql(a: Key, b: Key, ip: *const InternPool) bool { + const KeyTag = @typeInfo(Key).Union.tag_type.?; const a_tag: 
KeyTag = a; const b_tag: KeyTag = b; if (a_tag != b_tag) return false; @@ -78,13 +906,327 @@ pub const Key = union(enum) { const b_info = b.int_type; return std.meta.eql(a_info, b_info); }, + .ptr_type => |a_info| { + const b_info = b.ptr_type; + return std.meta.eql(a_info, b_info); + }, .array_type => |a_info| { const b_info = b.array_type; return std.meta.eql(a_info, b_info); }, - else => @panic("TODO"), + .vector_type => |a_info| { + const b_info = b.vector_type; + return std.meta.eql(a_info, b_info); + }, + .opt_type => |a_info| { + const b_info = b.opt_type; + return a_info == b_info; + }, + .anyframe_type => |a_info| { + const b_info = b.anyframe_type; + return a_info == b_info; + }, + .error_union_type => |a_info| { + const b_info = b.error_union_type; + return std.meta.eql(a_info, b_info); + }, + .simple_type => |a_info| { + const b_info = b.simple_type; + return a_info == b_info; + }, + .simple_value => |a_info| { + const b_info = b.simple_value; + return a_info == b_info; + }, + .undef => |a_info| { + const b_info = b.undef; + return a_info == b_info; + }, + .runtime_value => |a_info| { + const b_info = b.runtime_value; + return a_info.val == b_info.val; + }, + .opt => |a_info| { + const b_info = b.opt; + return std.meta.eql(a_info, b_info); + }, + .struct_type => |a_info| { + const b_info = b.struct_type; + return std.meta.eql(a_info, b_info); + }, + .union_type => |a_info| { + const b_info = b.union_type; + return std.meta.eql(a_info, b_info); + }, + .un => |a_info| { + const b_info = b.un; + return std.meta.eql(a_info, b_info); + }, + .err => |a_info| { + const b_info = b.err; + return std.meta.eql(a_info, b_info); + }, + .error_union => |a_info| { + const b_info = b.error_union; + return std.meta.eql(a_info, b_info); + }, + .enum_literal => |a_info| { + const b_info = b.enum_literal; + return a_info == b_info; + }, + .enum_tag => |a_info| { + const b_info = b.enum_tag; + return std.meta.eql(a_info, b_info); + }, + .empty_enum_value => |a_info| { + const b_info = b.empty_enum_value; + return a_info == b_info; + }, + + .variable => |a_info| { + const b_info = b.variable; + return a_info.decl == b_info.decl; + }, + .extern_func => |a_info| { + const b_info = b.extern_func; + return a_info.ty == b_info.ty and a_info.decl == b_info.decl; + }, + .func => |a_info| { + const b_info = b.func; + return a_info.ty == b_info.ty and a_info.index == b_info.index; + }, + + .ptr => |a_info| { + const b_info = b.ptr; + if (a_info.ty != b_info.ty or a_info.len != b_info.len) return false; + + const AddrTag = @typeInfo(Key.Ptr.Addr).Union.tag_type.?; + if (@as(AddrTag, a_info.addr) != @as(AddrTag, b_info.addr)) return false; + + return switch (a_info.addr) { + .decl => |a_decl| a_decl == b_info.addr.decl, + .mut_decl => |a_mut_decl| std.meta.eql(a_mut_decl, b_info.addr.mut_decl), + .int => |a_int| a_int == b_info.addr.int, + .eu_payload => |a_eu_payload| a_eu_payload == b_info.addr.eu_payload, + .opt_payload => |a_opt_payload| a_opt_payload == b_info.addr.opt_payload, + .comptime_field => |a_comptime_field| a_comptime_field == b_info.addr.comptime_field, + .elem => |a_elem| std.meta.eql(a_elem, b_info.addr.elem), + .field => |a_field| std.meta.eql(a_field, b_info.addr.field), + }; + }, + + .int => |a_info| { + const b_info = b.int; + + if (a_info.ty != b_info.ty) + return false; + + return switch (a_info.storage) { + .u64 => |aa| switch (b_info.storage) { + .u64 => |bb| aa == bb, + .i64 => |bb| aa == bb, + .big_int => |bb| bb.orderAgainstScalar(aa) == .eq, + .lazy_align, .lazy_size => false, + 
}, + .i64 => |aa| switch (b_info.storage) { + .u64 => |bb| aa == bb, + .i64 => |bb| aa == bb, + .big_int => |bb| bb.orderAgainstScalar(aa) == .eq, + .lazy_align, .lazy_size => false, + }, + .big_int => |aa| switch (b_info.storage) { + .u64 => |bb| aa.orderAgainstScalar(bb) == .eq, + .i64 => |bb| aa.orderAgainstScalar(bb) == .eq, + .big_int => |bb| aa.eq(bb), + .lazy_align, .lazy_size => false, + }, + .lazy_align => |aa| switch (b_info.storage) { + .u64, .i64, .big_int, .lazy_size => false, + .lazy_align => |bb| aa == bb, + }, + .lazy_size => |aa| switch (b_info.storage) { + .u64, .i64, .big_int, .lazy_align => false, + .lazy_size => |bb| aa == bb, + }, + }; + }, + + .float => |a_info| { + const b_info = b.float; + + if (a_info.ty != b_info.ty) + return false; + + if (a_info.ty == .c_longdouble_type and a_info.storage != .f80) { + // These are strange: we'll sometimes represent them as f128, even if the + // underlying type is smaller. f80 is an exception: see float_c_longdouble_f80. + const a_val = switch (a_info.storage) { + inline else => |val| @floatCast(f128, val), + }; + const b_val = switch (b_info.storage) { + inline else => |val| @floatCast(f128, val), + }; + return a_val == b_val; + } + + const StorageTag = @typeInfo(Key.Float.Storage).Union.tag_type.?; + assert(@as(StorageTag, a_info.storage) == @as(StorageTag, b_info.storage)); + + return switch (a_info.storage) { + inline else => |val, tag| val == @field(b_info.storage, @tagName(tag)), + }; + }, + + .opaque_type => |a_info| { + const b_info = b.opaque_type; + return a_info.decl == b_info.decl; + }, + .enum_type => |a_info| { + const b_info = b.enum_type; + return a_info.decl == b_info.decl; + }, + .aggregate => |a_info| { + const b_info = b.aggregate; + if (a_info.ty != b_info.ty) return false; + + const len = ip.aggregateTypeLen(a_info.ty); + const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?; + if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) { + for (0..@intCast(usize, len)) |elem_index| { + const a_elem = switch (a_info.storage) { + .bytes => |bytes| ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[elem_index] }, + } }) orelse return false, + .elems => |elems| elems[elem_index], + .repeated_elem => |elem| elem, + }; + const b_elem = switch (b_info.storage) { + .bytes => |bytes| ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[elem_index] }, + } }) orelse return false, + .elems => |elems| elems[elem_index], + .repeated_elem => |elem| elem, + }; + if (a_elem != b_elem) return false; + } + return true; + } + + switch (a_info.storage) { + .bytes => |a_bytes| { + const b_bytes = b_info.storage.bytes; + return std.mem.eql( + u8, + a_bytes[0..@intCast(usize, len)], + b_bytes[0..@intCast(usize, len)], + ); + }, + .elems => |a_elems| { + const b_elems = b_info.storage.elems; + return std.mem.eql( + Index, + a_elems[0..@intCast(usize, len)], + b_elems[0..@intCast(usize, len)], + ); + }, + .repeated_elem => |a_elem| { + const b_elem = b_info.storage.repeated_elem; + return a_elem == b_elem; + }, + } + }, + .anon_struct_type => |a_info| { + const b_info = b.anon_struct_type; + return std.mem.eql(Index, a_info.types, b_info.types) and + std.mem.eql(Index, a_info.values, b_info.values) and + std.mem.eql(NullTerminatedString, a_info.names, b_info.names); + }, + .error_set_type => |a_info| { + const b_info = b.error_set_type; + return std.mem.eql(NullTerminatedString, a_info.names, b_info.names); + }, + .inferred_error_set_type => |a_info| { + 
const b_info = b.inferred_error_set_type; + return a_info == b_info; + }, + + .func_type => |a_info| { + const b_info = b.func_type; + + return std.mem.eql(Index, a_info.param_types, b_info.param_types) and + a_info.return_type == b_info.return_type and + a_info.comptime_bits == b_info.comptime_bits and + a_info.noalias_bits == b_info.noalias_bits and + a_info.alignment == b_info.alignment and + a_info.cc == b_info.cc and + a_info.is_var_args == b_info.is_var_args and + a_info.is_generic == b_info.is_generic and + a_info.is_noinline == b_info.is_noinline; + }, + + .memoized_call => |a_info| { + const b_info = b.memoized_call; + return a_info.func == b_info.func and + std.mem.eql(Index, a_info.arg_values, b_info.arg_values); + }, } } + + pub fn typeOf(key: Key) Index { + return switch (key) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .error_set_type, + .inferred_error_set_type, + .simple_type, + .struct_type, + .union_type, + .opaque_type, + .enum_type, + .anon_struct_type, + .func_type, + => .type_type, + + inline .runtime_value, + .ptr, + .int, + .float, + .opt, + .variable, + .extern_func, + .func, + .err, + .error_union, + .enum_tag, + .aggregate, + .un, + => |x| x.ty, + + .enum_literal => .enum_literal_type, + + .undef => |x| x, + .empty_enum_value => |x| x, + + .simple_value => |s| switch (s) { + .undefined => .undefined_type, + .void => .void_type, + .null => .null_type, + .false, .true => .bool_type, + .empty_struct => .empty_struct_type, + .@"unreachable" => .noreturn_type, + .generic_poison => .generic_poison_type, + }, + + .memoized_call => unreachable, + }; + } }; pub const Item = struct { @@ -98,11 +1240,539 @@ pub const Item = struct { /// Two values which have the same type can be equality compared simply /// by checking if their indexes are equal, provided they are both in /// the same `InternPool`.
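+ +// A hedged sketch of the property documented just above: since `get` returns +// the existing `Index` for an equal `Key`, equality of interned values is a +// single integer comparison (`ip`, `gpa`, and `some_key` are illustrative): +// +// const lhs = try ip.get(gpa, some_key); +// const rhs = try ip.get(gpa, some_key); +// assert(lhs == rhs); // same Key, same Index + +/// When adding a tag to this enum, consider adding a corresponding entry to +/// `primitives` in AstGen.zig.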
pub const Index = enum(u32) { + pub const first_type: Index = .u1_type; + pub const last_type: Index = .empty_struct_type; + pub const first_value: Index = .undef; + pub const last_value: Index = .empty_struct; + + u1_type, + u8_type, + i8_type, + u16_type, + i16_type, + u29_type, + u32_type, + i32_type, + u64_type, + i64_type, + u80_type, + u128_type, + i128_type, + usize_type, + isize_type, + c_char_type, + c_short_type, + c_ushort_type, + c_int_type, + c_uint_type, + c_long_type, + c_ulong_type, + c_longlong_type, + c_ulonglong_type, + c_longdouble_type, + f16_type, + f32_type, + f64_type, + f80_type, + f128_type, + anyopaque_type, + bool_type, + void_type, + type_type, + anyerror_type, + comptime_int_type, + comptime_float_type, + noreturn_type, + anyframe_type, + null_type, + undefined_type, + enum_literal_type, + atomic_order_type, + atomic_rmw_op_type, + calling_convention_type, + address_space_type, + float_mode_type, + reduce_op_type, + call_modifier_type, + prefetch_options_type, + export_options_type, + extern_options_type, + type_info_type, + manyptr_u8_type, + manyptr_const_u8_type, + manyptr_const_u8_sentinel_0_type, + single_const_pointer_to_comptime_int_type, + slice_const_u8_type, + slice_const_u8_sentinel_0_type, + anyerror_void_error_union_type, + generic_poison_type, + /// `@TypeOf(.{})` + empty_struct_type, + + /// `undefined` (untyped) + undef, + /// `0` (comptime_int) + zero, + /// `0` (usize) + zero_usize, + /// `0` (u8) + zero_u8, + /// `1` (comptime_int) + one, + /// `1` (usize) + one_usize, + /// `1` (u8) + one_u8, + /// `4` (u8) + four_u8, + /// `-1` (comptime_int) + negative_one, + /// `std.builtin.CallingConvention.C` + calling_convention_c, + /// `std.builtin.CallingConvention.Inline` + calling_convention_inline, + /// `{}` + void_value, + /// `unreachable` (noreturn type) + unreachable_value, + /// `null` (untyped) + null_value, + /// `true` + bool_true, + /// `false` + bool_false, + /// `.{}` (untyped) + empty_struct, + + /// Used for generic parameters where the type and value + /// are not known until generic function instantiation. + generic_poison, + + /// Used by Air/Sema only. + var_args_param_type = std.math.maxInt(u32) - 1, none = std.math.maxInt(u32), + _, + + pub fn toType(i: Index) @import("type.zig").Type { + assert(i != .none); + return .{ .ip_index = i }; + } + + pub fn toValue(i: Index) @import("value.zig").Value { + assert(i != .none); + return .{ + .ip_index = i, + .legacy = undefined, + }; + } + + /// Used for a map of `Index` values to the index within a list of `Index` values. + const Adapter = struct { + indexes: []const Index, + + pub fn eql(ctx: @This(), a: Index, b_void: void, b_map_index: usize) bool { + _ = b_void; + return a == ctx.indexes[b_map_index]; + } + + pub fn hash(ctx: @This(), a: Index) u32 { + _ = ctx; + return std.hash.uint32(@enumToInt(a)); + } + };
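+ + // Hedged illustration of the tags above: the first `static_len` entries + // are pre-interned at pool initialization in exactly this order (see + // `static_keys` below), so well-known types and values need no lookup: + // + // const u8_ty = try ip.get(gpa, .{ .int_type = .{ + // .signedness = .unsigned, + // .bits = 8, + // } }); + // assert(u8_ty == .u8_type); // deduplicated against the static entry + + /// This function is used in the debugger pretty formatters in tools/ to fetch the + /// Tag-to-encoding mapping to facilitate fancy debug printing for this type.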
+ fn dbHelper(self: *Index, tag_to_encoding_map: *struct { + const DataIsIndex = struct { data: Index }; + const DataIsExtraIndexOfEnumExplicit = struct { + const @"data.fields_len" = opaque {}; + data: *EnumExplicit, + @"trailing.names.len": *@"data.fields_len", + @"trailing.values.len": *@"data.fields_len", + trailing: struct { + names: []NullTerminatedString, + values: []Index, + }, + }; + const DataIsExtraIndexOfTypeStructAnon = struct { + const @"data.fields_len" = opaque {}; + data: *TypeStructAnon, + @"trailing.types.len": *@"data.fields_len", + @"trailing.values.len": *@"data.fields_len", + @"trailing.names.len": *@"data.fields_len", + trailing: struct { + types: []Index, + values: []Index, + names: []NullTerminatedString, + }, + }; + + type_int_signed: struct { data: u32 }, + type_int_unsigned: struct { data: u32 }, + type_array_big: struct { data: *Array }, + type_array_small: struct { data: *Vector }, + type_vector: struct { data: *Vector }, + type_pointer: struct { data: *Tag.TypePointer }, + type_slice: DataIsIndex, + type_optional: DataIsIndex, + type_anyframe: DataIsIndex, + type_error_union: struct { data: *Key.ErrorUnionType }, + type_error_set: struct { + const @"data.names_len" = opaque {}; + data: *ErrorSet, + @"trailing.names.len": *@"data.names_len", + trailing: struct { names: []NullTerminatedString }, + }, + type_inferred_error_set: struct { data: Module.Fn.InferredErrorSet.Index }, + type_enum_auto: struct { + const @"data.fields_len" = opaque {}; + data: *EnumAuto, + @"trailing.names.len": *@"data.fields_len", + trailing: struct { names: []NullTerminatedString }, + }, + type_enum_explicit: DataIsExtraIndexOfEnumExplicit, + type_enum_nonexhaustive: DataIsExtraIndexOfEnumExplicit, + simple_type: struct { data: SimpleType }, + type_opaque: struct { data: *Key.OpaqueType }, + type_struct: struct { data: Module.Struct.OptionalIndex }, + type_struct_ns: struct { data: Module.Namespace.Index }, + type_struct_anon: DataIsExtraIndexOfTypeStructAnon, + type_tuple_anon: DataIsExtraIndexOfTypeStructAnon, + type_union_tagged: struct { data: Module.Union.Index }, + type_union_untagged: struct { data: Module.Union.Index }, + type_union_safety: struct { data: Module.Union.Index }, + type_function: struct { + const @"data.params_len" = opaque {}; + data: *TypeFunction, + @"trailing.param_types.len": *@"data.params_len", + trailing: struct { param_types: []Index }, + }, + + undef: DataIsIndex, + runtime_value: struct { data: *Tag.TypeValue }, + simple_value: struct { data: SimpleValue }, + ptr_decl: struct { data: *PtrDecl }, + ptr_mut_decl: struct { data: *PtrMutDecl }, + ptr_comptime_field: struct { data: *PtrComptimeField }, + ptr_int: struct { data: *PtrBase }, + ptr_eu_payload: struct { data: *PtrBase }, + ptr_opt_payload: struct { data: *PtrBase }, + ptr_elem: struct { data: *PtrBaseIndex }, + ptr_field: struct { data: *PtrBaseIndex }, + ptr_slice: struct { data: *PtrSlice }, + opt_payload: struct { data: *Tag.TypeValue }, + opt_null: DataIsIndex, + int_u8: struct { data: u8 }, + int_u16: struct { data: u16 }, + int_u32: struct { data: u32 }, + int_i32: struct { data: i32 }, + int_usize: struct { data: u32 }, + int_comptime_int_u32: struct { data: u32 }, + int_comptime_int_i32: struct { data: i32 }, + int_small: struct { data: *IntSmall }, + int_positive: struct { data: u32 }, + int_negative: struct { data: u32 }, + int_lazy_align: struct { data: *IntLazy }, + int_lazy_size: struct { data: *IntLazy }, + error_set_error: struct { data: *Key.Error }, + error_union_error: struct 
{ data: *Key.Error }, + error_union_payload: struct { data: *Tag.TypeValue }, + enum_literal: struct { data: NullTerminatedString }, + enum_tag: struct { data: *Tag.EnumTag }, + float_f16: struct { data: f16 }, + float_f32: struct { data: f32 }, + float_f64: struct { data: *Float64 }, + float_f80: struct { data: *Float80 }, + float_f128: struct { data: *Float128 }, + float_c_longdouble_f80: struct { data: *Float80 }, + float_c_longdouble_f128: struct { data: *Float128 }, + float_comptime_float: struct { data: *Float128 }, + variable: struct { data: *Tag.Variable }, + extern_func: struct { data: *Key.ExternFunc }, + func: struct { data: *Tag.Func }, + only_possible_value: DataIsIndex, + union_value: struct { data: *Key.Union }, + bytes: struct { data: *Bytes }, + aggregate: struct { + const @"data.ty.data.len orelse data.ty.data.fields_len" = opaque {}; + data: *Tag.Aggregate, + @"trailing.element_values.len": *@"data.ty.data.len orelse data.ty.data.fields_len", + trailing: struct { element_values: []Index }, + }, + repeated: struct { data: *Repeated }, + + memoized_call: struct { + const @"data.args_len" = opaque {}; + data: *MemoizedCall, + @"trailing.arg_values.len": *@"data.args_len", + trailing: struct { arg_values: []Index }, + }, + }) void { + _ = self; + const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).Pointer.child).Struct.fields; + @setEvalBranchQuota(2_000); + inline for (@typeInfo(Tag).Enum.fields, 0..) |tag, start| { + inline for (0..map_fields.len) |offset| { + if (comptime std.mem.eql(u8, tag.name, map_fields[(start + offset) % map_fields.len].name)) break; + } else { + @compileError(@typeName(Tag) ++ "." ++ tag.name ++ " missing dbHelper tag_to_encoding_map entry"); + } + } + } + + comptime { + if (builtin.mode == .Debug) { + _ = &dbHelper; + } + } }; +pub const static_keys = [_]Key{ + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 1, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 8, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 8, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 16, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 16, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 29, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 32, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 32, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 64, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 64, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 80, + } }, + + .{ .int_type = .{ + .signedness = .unsigned, + .bits = 128, + } }, + + .{ .int_type = .{ + .signedness = .signed, + .bits = 128, + } }, + + .{ .simple_type = .usize }, + .{ .simple_type = .isize }, + .{ .simple_type = .c_char }, + .{ .simple_type = .c_short }, + .{ .simple_type = .c_ushort }, + .{ .simple_type = .c_int }, + .{ .simple_type = .c_uint }, + .{ .simple_type = .c_long }, + .{ .simple_type = .c_ulong }, + .{ .simple_type = .c_longlong }, + .{ .simple_type = .c_ulonglong }, + .{ .simple_type = .c_longdouble }, + .{ .simple_type = .f16 }, + .{ .simple_type = .f32 }, + .{ .simple_type = .f64 }, + .{ .simple_type = .f80 }, + .{ .simple_type = .f128 }, + .{ .simple_type = .anyopaque }, + .{ .simple_type = .bool }, + .{ .simple_type = .void }, + .{ .simple_type = .type }, + .{ .simple_type = .anyerror }, + .{ .simple_type = .comptime_int }, + .{ .simple_type = .comptime_float }, + .{ .simple_type = .noreturn }, + .{ 
.anyframe_type = .none }, + .{ .simple_type = .null }, + .{ .simple_type = .undefined }, + .{ .simple_type = .enum_literal }, + .{ .simple_type = .atomic_order }, + .{ .simple_type = .atomic_rmw_op }, + .{ .simple_type = .calling_convention }, + .{ .simple_type = .address_space }, + .{ .simple_type = .float_mode }, + .{ .simple_type = .reduce_op }, + .{ .simple_type = .call_modifier }, + .{ .simple_type = .prefetch_options }, + .{ .simple_type = .export_options }, + .{ .simple_type = .extern_options }, + .{ .simple_type = .type_info }, + + // manyptr_u8_type + .{ .ptr_type = .{ + .child = .u8_type, + .flags = .{ + .size = .Many, + }, + } }, + + // manyptr_const_u8_type + .{ .ptr_type = .{ + .child = .u8_type, + .flags = .{ + .size = .Many, + .is_const = true, + }, + } }, + + // manyptr_const_u8_sentinel_0_type + .{ .ptr_type = .{ + .child = .u8_type, + .sentinel = .zero_u8, + .flags = .{ + .size = .Many, + .is_const = true, + }, + } }, + + // single_const_pointer_to_comptime_int_type + .{ .ptr_type = .{ + .child = .comptime_int_type, + .flags = .{ + .size = .One, + .is_const = true, + }, + } }, + + // slice_const_u8_type + .{ .ptr_type = .{ + .child = .u8_type, + .flags = .{ + .size = .Slice, + .is_const = true, + }, + } }, + + // slice_const_u8_sentinel_0_type + .{ .ptr_type = .{ + .child = .u8_type, + .sentinel = .zero_u8, + .flags = .{ + .size = .Slice, + .is_const = true, + }, + } }, + + // anyerror_void_error_union_type + .{ .error_union_type = .{ + .error_set_type = .anyerror_type, + .payload_type = .void_type, + } }, + + // generic_poison_type + .{ .simple_type = .generic_poison }, + + // empty_struct_type + .{ .anon_struct_type = .{ + .types = &.{}, + .names = &.{}, + .values = &.{}, + } }, + + // undef + .{ .simple_value = .undefined }, + + // zero + .{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .u64 = 0 }, + } }, + + // zero_usize + .{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = 0 }, + } }, + + // zero_u8 + .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = 0 }, + } }, + + // one + .{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .u64 = 1 }, + } }, + + // one_usize + .{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = 1 }, + } }, + + // one_u8 + .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = 1 }, + } }, + // four_u8 + .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = 4 }, + } }, + // negative_one + .{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .i64 = -1 }, + } }, + // calling_convention_c + .{ .enum_tag = .{ + .ty = .calling_convention_type, + .int = .one_u8, + } }, + // calling_convention_inline + .{ .enum_tag = .{ + .ty = .calling_convention_type, + .int = .four_u8, + } }, + + .{ .simple_value = .void }, + .{ .simple_value = .@"unreachable" }, + .{ .simple_value = .null }, + .{ .simple_value = .true }, + .{ .simple_value = .false }, + .{ .simple_value = .empty_struct }, + .{ .simple_value = .generic_poison }, +}; + +/// How many items in the InternPool are statically known. +pub const static_len: u32 = static_keys.len; + pub const Tag = enum(u8) { /// An integer type. /// data is number of bits @@ -110,36 +1780,418 @@ pub const Tag = enum(u8) { /// An integer type. /// data is number of bits type_int_unsigned, - /// An array type. + /// An array type whose length requires 64 bits or which has a sentinel. /// data is payload to Array. - type_array, - /// A type or value that can be represented with only an enum tag. - /// data is Simple enum value - simple, - /// An unsigned integer value that can be represented by u32. + type_array_big, + /// An array type that has no sentinel and whose length fits in 32 bits. + /// data is payload to Vector.
+ type_array_small, + /// A vector type. + /// data is payload to Vector. + type_vector, + /// A fully explicitly specified pointer type. + type_pointer, + /// A slice type. + /// data is Index of underlying pointer type. + type_slice, + /// An optional type. + /// data is the child type. + type_optional, + /// The type `anyframe->T`. + /// data is the child type. + /// If the child type is `none`, the type is `anyframe`. + type_anyframe, + /// An error union type. + /// data is payload to `Key.ErrorUnionType`. + type_error_union, + /// An error set type. + /// data is payload to `ErrorSet`. + type_error_set, + /// The inferred error set type of a function. + /// data is `Module.Fn.InferredErrorSet.Index`. + type_inferred_error_set, + /// An enum type with auto-numbered tag values. + /// The enum is exhaustive. + /// data is payload index to `EnumAuto`. + type_enum_auto, + /// An enum type with an explicitly provided integer tag type. + /// The enum is exhaustive. + /// data is payload index to `EnumExplicit`. + type_enum_explicit, + /// An enum type with an explicitly provided integer tag type. + /// The enum is non-exhaustive. + /// data is payload index to `EnumExplicit`. + type_enum_nonexhaustive, + /// A type that can be represented with only an enum tag. + /// data is SimpleType enum value. + simple_type, + /// An opaque type. + /// data is index of Key.OpaqueType in extra. + type_opaque, + /// A struct type. + /// data is Module.Struct.OptionalIndex + /// The `none` tag is used to represent `@TypeOf(.{})`. + type_struct, + /// A struct type that has only a namespace; no fields, and there is no + /// Module.Struct object allocated for it. + /// data is Module.Namespace.Index. + type_struct_ns, + /// An AnonStructType which stores types, names, and values for fields. + /// data is extra index of `TypeStructAnon`. + type_struct_anon, + /// An AnonStructType which has only types and values for fields. + /// data is extra index of `TypeStructAnon`. + type_tuple_anon, + /// A tagged union type. + /// `data` is `Module.Union.Index`. + type_union_tagged, + /// An untagged union type. It also has no safety tag. + /// `data` is `Module.Union.Index`. + type_union_untagged, + /// An untagged union type which has a safety tag. + /// `data` is `Module.Union.Index`. + type_union_safety, + /// A function body type. + /// `data` is extra index to `TypeFunction`. + type_function, + + /// Typed `undefined`. + /// `data` is `Index` of the type. + /// Untyped `undefined` is stored instead via `simple_value`. + undef, + /// A wrapper for values which are comptime-known but should + /// semantically be runtime-known. + /// data is extra index of `TypeValue`. + runtime_value, + /// A value that can be represented with only an enum tag. + /// data is SimpleValue enum value. + simple_value, + /// A pointer to a decl. + /// data is extra index of `PtrDecl`, which contains the type and address. + ptr_decl, + /// A pointer to a decl that can be mutated at comptime. + /// data is extra index of `PtrMutDecl`, which contains the type and address. + ptr_mut_decl, + /// data is extra index of `PtrComptimeField`, which contains the pointer type and field value. + ptr_comptime_field, + /// A pointer with an integer value. + /// data is extra index of `PtrBase`, which contains the type and address. + /// Only pointer types are allowed to have this encoding. Optional types must use + /// `opt_payload` or `opt_null`. + ptr_int, + /// A pointer to the payload of an error union. 
+ /// data is extra index of `PtrBase`, which contains the type and base pointer. + ptr_eu_payload, + /// A pointer to the payload of an optional. + /// data is extra index of `PtrBase`, which contains the type and base pointer. + ptr_opt_payload, + /// A pointer to an array element. + /// data is extra index of PtrBaseIndex, which contains the base array and element index. + /// In order to use this encoding, one must ensure that the `InternPool` + /// already contains the elem pointer type corresponding to this payload. + ptr_elem, + /// A pointer to a container field. + /// data is extra index of PtrBaseIndex, which contains the base container and field index. + ptr_field, + /// A slice. + /// data is extra index of PtrSlice, which contains the ptr and len values + ptr_slice, + /// An optional value that is non-null. + /// data is extra index of `TypeValue`. + /// The type is the optional type (not the payload type). + opt_payload, + /// An optional value that is null. + /// data is Index of the optional type. + opt_null, + /// Type: u8 + /// data is integer value + int_u8, + /// Type: u16 + /// data is integer value + int_u16, + /// Type: u32 /// data is integer value int_u32, - /// An unsigned integer value that can be represented by i32. + /// Type: i32 /// data is integer value bitcasted to u32. int_i32, - /// A positive integer value that does not fit in 32 bits. - /// data is a extra index to BigInt. - int_big_positive, - /// A negative integer value that does not fit in 32 bits. - /// data is a extra index to BigInt. - int_big_negative, - /// A float value that can be represented by f32. + /// A usize that fits in 32 bits. + /// data is integer value. + int_usize, + /// A comptime_int that fits in a u32. + /// data is integer value. + int_comptime_int_u32, + /// A comptime_int that fits in an i32. + /// data is integer value bitcasted to u32. + int_comptime_int_i32, + /// An integer value that fits in 32 bits with an explicitly provided type. + /// data is extra index of `IntSmall`. + int_small, + /// A positive integer value. + /// data is a limbs index to `Int`. + int_positive, + /// A negative integer value. + /// data is a limbs index to `Int`. + int_negative, + /// The ABI alignment of a lazy type. + /// data is extra index of `IntLazy`. + int_lazy_align, + /// The ABI size of a lazy type. + /// data is extra index of `IntLazy`. + int_lazy_size, + /// An error value. + /// data is extra index of `Key.Error`. + error_set_error, + /// An error union error. + /// data is extra index of `Key.Error`. + error_union_error, + /// An error union payload. + /// data is extra index of `TypeValue`. + error_union_payload, + /// An enum literal value. + /// data is `NullTerminatedString` of the error name. + enum_literal, + /// An enum tag value. + /// data is extra index of `EnumTag`. + enum_tag, + /// An f16 value. + /// data is float value bitcasted to u16 and zero-extended. + float_f16, + /// An f32 value. /// data is float value bitcasted to u32. float_f32, - /// A float value that can be represented by f64. - /// data is payload index to Float64. + /// An f64 value. + /// data is extra index to Float64. float_f64, - /// A float value that can be represented by f128. - /// data is payload index to Float128. + /// An f80 value. + /// data is extra index to Float80. + float_f80, + /// An f128 value. + /// data is extra index to Float128. float_f128, + /// A c_longdouble value of 80 bits. + /// data is extra index to Float80. 
+ /// This is used when a c_longdouble value is provided as an f80, because f80 has unnormalized
+ /// values which cannot be losslessly represented as f128. It should only be used when the type
+ /// underlying c_longdouble for the target is 80 bits.
+ float_c_longdouble_f80,
+ /// A c_longdouble value of 128 bits.
+ /// data is extra index to Float128.
+ /// This is used when a c_longdouble value is provided as any type other than an f80, since all
+ /// other float types can be losslessly converted to and from f128.
+ float_c_longdouble_f128,
+ /// A comptime_float value.
+ /// data is extra index to Float128.
+ float_comptime_float,
+ /// A global variable.
+ /// data is extra index to Variable.
+ variable,
+ /// An extern function.
+ /// data is extra index to Key.ExternFunc.
+ extern_func,
+ /// A regular function.
+ /// data is extra index to Func.
+ func,
+ /// This represents the only possible value for *some* types which have
+ /// only one possible value. Not all only-possible-values are encoded this way;
+ /// for example structs which have all comptime fields are not encoded this way.
+ /// The set of values that are encoded this way is:
+ /// * An array or vector which has length 0.
+ /// * A tuple or anonymous struct which has all fields comptime-known.
+ /// * A struct which has no fields.
+ /// * An empty enum or union. TODO: this value's existence is strange, because such a type in reality has no values. See #15909
+ /// data is Index of the type, which is known to be zero bits at runtime.
+ only_possible_value,
+ /// A union value.
+ /// data is extra index to Key.Union.
+ union_value,
+ /// An array of bytes.
+ /// data is extra index to `Bytes`.
+ bytes,
+ /// An instance of a struct, array, or vector.
+ /// data is extra index to `Aggregate`.
+ aggregate,
+ /// An instance of an array or vector with every element being the same value.
+ /// data is extra index to `Repeated`.
+ repeated,
+
+ /// A memoized comptime function call result.
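+ /// Caches the result of a comptime call so that calling the same function
+ /// with the same arguments again returns the stored result.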
+ /// data is extra index to `MemoizedCall` + memoized_call, + + const ErrorUnionType = Key.ErrorUnionType; + const OpaqueType = Key.OpaqueType; + const TypeValue = Key.TypeValue; + const Error = Key.Error; + const EnumTag = Key.EnumTag; + const ExternFunc = Key.ExternFunc; + const Func = Key.Func; + const Union = Key.Union; + const TypePointer = Key.PtrType; + + fn Payload(comptime tag: Tag) type { + return switch (tag) { + .type_int_signed => unreachable, + .type_int_unsigned => unreachable, + .type_array_big => Array, + .type_array_small => Vector, + .type_vector => Vector, + .type_pointer => TypePointer, + .type_slice => unreachable, + .type_optional => unreachable, + .type_anyframe => unreachable, + .type_error_union => ErrorUnionType, + .type_error_set => ErrorSet, + .type_inferred_error_set => unreachable, + .type_enum_auto => EnumAuto, + .type_enum_explicit => EnumExplicit, + .type_enum_nonexhaustive => EnumExplicit, + .simple_type => unreachable, + .type_opaque => OpaqueType, + .type_struct => unreachable, + .type_struct_ns => unreachable, + .type_struct_anon => TypeStructAnon, + .type_tuple_anon => TypeStructAnon, + .type_union_tagged => unreachable, + .type_union_untagged => unreachable, + .type_union_safety => unreachable, + .type_function => TypeFunction, + + .undef => unreachable, + .runtime_value => TypeValue, + .simple_value => unreachable, + .ptr_decl => PtrDecl, + .ptr_mut_decl => PtrMutDecl, + .ptr_comptime_field => PtrComptimeField, + .ptr_int => PtrBase, + .ptr_eu_payload => PtrBase, + .ptr_opt_payload => PtrBase, + .ptr_elem => PtrBaseIndex, + .ptr_field => PtrBaseIndex, + .ptr_slice => PtrSlice, + .opt_payload => TypeValue, + .opt_null => unreachable, + .int_u8 => unreachable, + .int_u16 => unreachable, + .int_u32 => unreachable, + .int_i32 => unreachable, + .int_usize => unreachable, + .int_comptime_int_u32 => unreachable, + .int_comptime_int_i32 => unreachable, + .int_small => IntSmall, + .int_positive => unreachable, + .int_negative => unreachable, + .int_lazy_align => IntLazy, + .int_lazy_size => IntLazy, + .error_set_error => Error, + .error_union_error => Error, + .error_union_payload => TypeValue, + .enum_literal => unreachable, + .enum_tag => EnumTag, + .float_f16 => unreachable, + .float_f32 => unreachable, + .float_f64 => unreachable, + .float_f80 => unreachable, + .float_f128 => unreachable, + .float_c_longdouble_f80 => unreachable, + .float_c_longdouble_f128 => unreachable, + .float_comptime_float => unreachable, + .variable => Variable, + .extern_func => ExternFunc, + .func => Func, + .only_possible_value => unreachable, + .union_value => Union, + .bytes => Bytes, + .aggregate => Aggregate, + .repeated => Repeated, + .memoized_call => MemoizedCall, + }; + } + + pub const Variable = struct { + ty: Index, + /// May be `none`. + init: Index, + decl: Module.Decl.Index, + /// Library name if specified. + /// For example `extern "c" var stderrp = ...` would have 'c' as library name. + lib_name: OptionalNullTerminatedString, + flags: Flags, + + pub const Flags = packed struct(u32) { + is_extern: bool, + is_const: bool, + is_threadlocal: bool, + is_weak_linkage: bool, + _: u28 = 0, + }; + }; + + /// Trailing: + /// 0. element: Index for each len + /// len is determined by the aggregate type. + pub const Aggregate = struct { + /// The type of the aggregate. + ty: Index, + }; }; -pub const Simple = enum(u32) { +/// Trailing: +/// 0. 
name: NullTerminatedString for each names_len +pub const ErrorSet = struct { + names_len: u32, + /// Maps error names to declaration index. + names_map: MapIndex, +}; + +/// Trailing: +/// 0. param_type: Index for each params_len +pub const TypeFunction = struct { + params_len: u32, + return_type: Index, + comptime_bits: u32, + noalias_bits: u32, + flags: Flags, + + pub const Flags = packed struct(u32) { + alignment: Alignment, + cc: std.builtin.CallingConvention, + is_var_args: bool, + is_generic: bool, + is_noinline: bool, + align_is_generic: bool, + cc_is_generic: bool, + section_is_generic: bool, + addrspace_is_generic: bool, + _: u11 = 0, + }; +}; + +pub const Bytes = struct { + /// The type of the aggregate + ty: Index, + /// Index into string_bytes, of len ip.aggregateTypeLen(ty) + bytes: String, +}; + +pub const Repeated = struct { + /// The type of the aggregate. + ty: Index, + /// The value of every element. + elem_val: Index, +}; + +/// Trailing: +/// 0. type: Index for each fields_len +/// 1. value: Index for each fields_len +/// 2. name: NullTerminatedString for each fields_len +/// The set of field names is omitted when the `Tag` is `type_tuple_anon`. +pub const TypeStructAnon = struct { + fields_len: u32, +}; + +/// Having `SimpleType` and `SimpleValue` in separate enums makes it easier to +/// implement logic that only wants to deal with types because the logic can +/// ignore all simple values. Note that technically, types are values. +pub const SimpleType = enum(u32) { f16, f32, f64, @@ -147,6 +2199,7 @@ pub const Simple = enum(u32) { f128, usize, isize, + c_char, c_short, c_ushort, c_int, @@ -164,29 +2217,339 @@ pub const Simple = enum(u32) { comptime_int, comptime_float, noreturn, - @"anyframe", - null_type, - undefined_type, - enum_literal_type, - undefined, - void_value, null, - bool_true, - bool_false, + undefined, + enum_literal, + + atomic_order, + atomic_rmw_op, + calling_convention, + address_space, + float_mode, + reduce_op, + call_modifier, + prefetch_options, + export_options, + extern_options, + type_info, + + generic_poison, }; -pub const Array = struct { +pub const SimpleValue = enum(u32) { + /// This is untyped `undefined`. + undefined, + void, + /// This is untyped `null`. + null, + /// This is the untyped empty struct literal: `.{}` + empty_struct, + true, + false, + @"unreachable", + + generic_poison, +}; + +/// Stored as a power-of-two, with one special value to indicate none. +pub const Alignment = enum(u6) { + none = std.math.maxInt(u6), + _, + + pub fn toByteUnitsOptional(a: Alignment) ?u64 { + return switch (a) { + .none => null, + _ => @as(u64, 1) << @enumToInt(a), + }; + } + + pub fn toByteUnits(a: Alignment, default: u64) u64 { + return switch (a) { + .none => default, + _ => @as(u64, 1) << @enumToInt(a), + }; + } + + pub fn fromByteUnits(n: u64) Alignment { + if (n == 0) return .none; + assert(std.math.isPowerOfTwo(n)); + return @intToEnum(Alignment, @ctz(n)); + } + + pub fn fromNonzeroByteUnits(n: u64) Alignment { + assert(n != 0); + return fromByteUnits(n); + } + + pub fn min(a: Alignment, b: Alignment) Alignment { + return @intToEnum(Alignment, @min(@enumToInt(a), @enumToInt(b))); + } +}; + +/// Used for non-sentineled arrays that have length fitting in u32, as well as +/// vectors. 
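+ /// For example, `[4]u8` (no sentinel) is stored as `.{ .len = 4, .child = .u8_type }`
+ /// under the `type_array_small` tag, and `@Vector(4, u8)` reuses the same
+ /// payload under `type_vector`.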
+pub const Vector = struct { len: u32, child: Index, }; +pub const Array = struct { + len0: u32, + len1: u32, + child: Index, + sentinel: Index, + + pub const Length = PackedU64; + + pub fn getLength(a: Array) u64 { + return (PackedU64{ + .a = a.len0, + .b = a.len1, + }).get(); + } +}; + +/// Trailing: +/// 0. field name: NullTerminatedString for each fields_len; declaration order +/// 1. tag value: Index for each fields_len; declaration order +pub const EnumExplicit = struct { + /// The Decl that corresponds to the enum itself. + decl: Module.Decl.Index, + /// This may be `none` if there are no declarations. + namespace: Module.Namespace.OptionalIndex, + /// An integer type which is used for the numerical value of the enum, which + /// has been explicitly provided by the enum declaration. + int_tag_type: Index, + fields_len: u32, + /// Maps field names to declaration index. + names_map: MapIndex, + /// Maps field values to declaration index. + /// If this is `none`, it means the trailing tag values are absent because + /// they are auto-numbered. + values_map: OptionalMapIndex, +}; + +/// Trailing: +/// 0. field name: NullTerminatedString for each fields_len; declaration order +pub const EnumAuto = struct { + /// The Decl that corresponds to the enum itself. + decl: Module.Decl.Index, + /// This may be `none` if there are no declarations. + namespace: Module.Namespace.OptionalIndex, + /// An integer type which is used for the numerical value of the enum, which + /// was inferred by Zig based on the number of tags. + int_tag_type: Index, + fields_len: u32, + /// Maps field names to declaration index. + names_map: MapIndex, +}; + +pub const PackedU64 = packed struct(u64) { + a: u32, + b: u32, + + pub fn get(x: PackedU64) u64 { + return @bitCast(u64, x); + } + + pub fn init(x: u64) PackedU64 { + return @bitCast(PackedU64, x); + } +}; + +pub const PtrDecl = struct { + ty: Index, + decl: Module.Decl.Index, +}; + +pub const PtrMutDecl = struct { + ty: Index, + decl: Module.Decl.Index, + runtime_index: RuntimeIndex, +}; + +pub const PtrComptimeField = struct { + ty: Index, + field_val: Index, +}; + +pub const PtrBase = struct { + ty: Index, + base: Index, +}; + +pub const PtrBaseIndex = struct { + ty: Index, + base: Index, + index: Index, +}; + +pub const PtrSlice = struct { + /// The slice type. + ty: Index, + /// A many pointer value. + ptr: Index, + /// A usize value. + len: Index, +}; + +/// Trailing: Limb for every limbs_len +pub const Int = struct { + ty: Index, + limbs_len: u32, +}; + +pub const IntSmall = struct { + ty: Index, + value: u32, +}; + +pub const IntLazy = struct { + ty: Index, + lazy_ty: Index, +}; + +/// A f64 value, broken up into 2 u32 parts. +pub const Float64 = struct { + piece0: u32, + piece1: u32, + + pub fn get(self: Float64) f64 { + const int_bits = @as(u64, self.piece0) | (@as(u64, self.piece1) << 32); + return @bitCast(f64, int_bits); + } + + fn pack(val: f64) Float64 { + const bits = @bitCast(u64, val); + return .{ + .piece0 = @truncate(u32, bits), + .piece1 = @truncate(u32, bits >> 32), + }; + } +}; + +/// A f80 value, broken up into 2 u32 parts and a u16 part zero-padded to a u32. 
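+ /// `pack`/`get` below round-trip all 80 bits verbatim, so unnormalized
+ /// values survive interning unchanged.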
+pub const Float80 = struct { + piece0: u32, + piece1: u32, + piece2: u32, // u16 part, top bits + + pub fn get(self: Float80) f80 { + const int_bits = @as(u80, self.piece0) | + (@as(u80, self.piece1) << 32) | + (@as(u80, self.piece2) << 64); + return @bitCast(f80, int_bits); + } + + fn pack(val: f80) Float80 { + const bits = @bitCast(u80, val); + return .{ + .piece0 = @truncate(u32, bits), + .piece1 = @truncate(u32, bits >> 32), + .piece2 = @truncate(u16, bits >> 64), + }; + } +}; + +/// A f128 value, broken up into 4 u32 parts. +pub const Float128 = struct { + piece0: u32, + piece1: u32, + piece2: u32, + piece3: u32, + + pub fn get(self: Float128) f128 { + const int_bits = @as(u128, self.piece0) | + (@as(u128, self.piece1) << 32) | + (@as(u128, self.piece2) << 64) | + (@as(u128, self.piece3) << 96); + return @bitCast(f128, int_bits); + } + + fn pack(val: f128) Float128 { + const bits = @bitCast(u128, val); + return .{ + .piece0 = @truncate(u32, bits), + .piece1 = @truncate(u32, bits >> 32), + .piece2 = @truncate(u32, bits >> 64), + .piece3 = @truncate(u32, bits >> 96), + }; + } +}; + +/// Trailing: +/// 0. arg value: Index for each args_len +pub const MemoizedCall = struct { + func: Module.Fn.Index, + args_len: u32, + result: Index, +}; + +pub fn init(ip: *InternPool, gpa: Allocator) !void { + assert(ip.items.len == 0); + + // Reserve string index 0 for an empty string. + assert((try ip.getOrPutString(gpa, "")) == .empty); + + // So that we can use `catch unreachable` below. + try ip.items.ensureUnusedCapacity(gpa, static_keys.len); + try ip.map.ensureUnusedCapacity(gpa, static_keys.len); + try ip.extra.ensureUnusedCapacity(gpa, static_keys.len); + + // This inserts all the statically-known values into the intern pool in the + // order expected. + for (static_keys) |key| _ = ip.get(gpa, key) catch unreachable; + + if (std.debug.runtime_safety) { + // Sanity check. 
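+ // Spot-check that the static keys landed at their expected fixed
+ // indices (e.g. `.bool_true`, `.bool_false`).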
+ assert(ip.indexToKey(.bool_true).simple_value == .true); + assert(ip.indexToKey(.bool_false).simple_value == .false); + + const cc_inline = ip.indexToKey(.calling_convention_inline).enum_tag.int; + const cc_c = ip.indexToKey(.calling_convention_c).enum_tag.int; + + assert(ip.indexToKey(cc_inline).int.storage.u64 == + @enumToInt(std.builtin.CallingConvention.Inline)); + + assert(ip.indexToKey(cc_c).int.storage.u64 == + @enumToInt(std.builtin.CallingConvention.C)); + + assert(ip.indexToKey(ip.typeOf(cc_inline)).int_type.bits == + @typeInfo(@typeInfo(std.builtin.CallingConvention).Enum.tag_type).Int.bits); + } + + assert(ip.items.len == static_keys.len); +} + pub fn deinit(ip: *InternPool, gpa: Allocator) void { ip.map.deinit(gpa); ip.items.deinit(gpa); ip.extra.deinit(gpa); + ip.limbs.deinit(gpa); + ip.string_bytes.deinit(gpa); + + ip.structs_free_list.deinit(gpa); + ip.allocated_structs.deinit(gpa); + + ip.unions_free_list.deinit(gpa); + ip.allocated_unions.deinit(gpa); + + ip.funcs_free_list.deinit(gpa); + ip.allocated_funcs.deinit(gpa); + + ip.inferred_error_sets_free_list.deinit(gpa); + ip.allocated_inferred_error_sets.deinit(gpa); + + for (ip.maps.items) |*map| map.deinit(gpa); + ip.maps.deinit(gpa); + + ip.string_table.deinit(gpa); + + ip.* = undefined; } -pub fn indexToKey(ip: InternPool, index: Index) Key { +pub fn indexToKey(ip: *const InternPool, index: Index) Key { + assert(index != .none); const item = ip.items.get(@enumToInt(index)); const data = item.data; return switch (item.tag) { @@ -202,89 +2565,1930 @@ pub fn indexToKey(ip: InternPool, index: Index) Key { .bits = @intCast(u16, data), }, }, - .type_array => { + .type_array_big => { const array_info = ip.extraData(Array, data); + return .{ .array_type = .{ + .len = array_info.getLength(), + .child = array_info.child, + .sentinel = array_info.sentinel, + } }; + }, + .type_array_small => { + const array_info = ip.extraData(Vector, data); return .{ .array_type = .{ .len = array_info.len, .child = array_info.child, .sentinel = .none, } }; }, - .simple => .{ .simple = @intToEnum(Simple, data) }, + .simple_type => .{ .simple_type = @intToEnum(SimpleType, data) }, + .simple_value => .{ .simple_value = @intToEnum(SimpleValue, data) }, - else => @panic("TODO"), + .type_vector => { + const vector_info = ip.extraData(Vector, data); + return .{ .vector_type = .{ + .len = vector_info.len, + .child = vector_info.child, + } }; + }, + + .type_pointer => .{ .ptr_type = ip.extraData(Tag.TypePointer, data) }, + + .type_slice => { + assert(ip.items.items(.tag)[data] == .type_pointer); + var ptr_info = ip.extraData(Tag.TypePointer, ip.items.items(.data)[data]); + ptr_info.flags.size = .Slice; + return .{ .ptr_type = ptr_info }; + }, + + .type_optional => .{ .opt_type = @intToEnum(Index, data) }, + .type_anyframe => .{ .anyframe_type = @intToEnum(Index, data) }, + + .type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) }, + .type_error_set => { + const error_set = ip.extraDataTrail(ErrorSet, data); + const names_len = error_set.data.names_len; + const names = ip.extra.items[error_set.end..][0..names_len]; + return .{ .error_set_type = .{ + .names = @ptrCast([]const NullTerminatedString, names), + .names_map = error_set.data.names_map.toOptional(), + } }; + }, + .type_inferred_error_set => .{ + .inferred_error_set_type = @intToEnum(Module.Fn.InferredErrorSet.Index, data), + }, + + .type_opaque => .{ .opaque_type = ip.extraData(Key.OpaqueType, data) }, + .type_struct => { + const struct_index = 
@intToEnum(Module.Struct.OptionalIndex, data); + const namespace = if (struct_index.unwrap()) |i| + ip.structPtrConst(i).namespace.toOptional() + else + .none; + return .{ .struct_type = .{ + .index = struct_index, + .namespace = namespace, + } }; + }, + .type_struct_ns => .{ .struct_type = .{ + .index = .none, + .namespace = @intToEnum(Module.Namespace.Index, data).toOptional(), + } }, + + .type_struct_anon => { + const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data); + const fields_len = type_struct_anon.data.fields_len; + const types = ip.extra.items[type_struct_anon.end..][0..fields_len]; + const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + const names = ip.extra.items[type_struct_anon.end + 2 * fields_len ..][0..fields_len]; + return .{ .anon_struct_type = .{ + .types = @ptrCast([]const Index, types), + .values = @ptrCast([]const Index, values), + .names = @ptrCast([]const NullTerminatedString, names), + } }; + }, + .type_tuple_anon => { + const type_struct_anon = ip.extraDataTrail(TypeStructAnon, data); + const fields_len = type_struct_anon.data.fields_len; + const types = ip.extra.items[type_struct_anon.end..][0..fields_len]; + const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + return .{ .anon_struct_type = .{ + .types = @ptrCast([]const Index, types), + .values = @ptrCast([]const Index, values), + .names = &.{}, + } }; + }, + + .type_union_untagged => .{ .union_type = .{ + .index = @intToEnum(Module.Union.Index, data), + .runtime_tag = .none, + } }, + .type_union_tagged => .{ .union_type = .{ + .index = @intToEnum(Module.Union.Index, data), + .runtime_tag = .tagged, + } }, + .type_union_safety => .{ .union_type = .{ + .index = @intToEnum(Module.Union.Index, data), + .runtime_tag = .safety, + } }, + + .type_enum_auto => { + const enum_auto = ip.extraDataTrail(EnumAuto, data); + const names = @ptrCast( + []const NullTerminatedString, + ip.extra.items[enum_auto.end..][0..enum_auto.data.fields_len], + ); + return .{ .enum_type = .{ + .decl = enum_auto.data.decl, + .namespace = enum_auto.data.namespace, + .tag_ty = enum_auto.data.int_tag_type, + .names = names, + .values = &.{}, + .tag_mode = .auto, + .names_map = enum_auto.data.names_map.toOptional(), + .values_map = .none, + } }; + }, + .type_enum_explicit => ip.indexToKeyEnum(data, .explicit), + .type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive), + .type_function => .{ .func_type = ip.indexToKeyFuncType(data) }, + + .undef => .{ .undef = @intToEnum(Index, data) }, + .runtime_value => .{ .runtime_value = ip.extraData(Tag.TypeValue, data) }, + .opt_null => .{ .opt = .{ + .ty = @intToEnum(Index, data), + .val = .none, + } }, + .opt_payload => { + const extra = ip.extraData(Tag.TypeValue, data); + return .{ .opt = .{ + .ty = extra.ty, + .val = extra.val, + } }; + }, + .ptr_decl => { + const info = ip.extraData(PtrDecl, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .decl = info.decl }, + } }; + }, + .ptr_mut_decl => { + const info = ip.extraData(PtrMutDecl, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .mut_decl = .{ + .decl = info.decl, + .runtime_index = info.runtime_index, + } }, + } }; + }, + .ptr_comptime_field => { + const info = ip.extraData(PtrComptimeField, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .comptime_field = info.field_val }, + } }; + }, + .ptr_int => { + const info = ip.extraData(PtrBase, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .int = info.base }, + } }; + }, + 
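+ // The remaining pointer prongs follow the same pattern: decode the
+ // `PtrBase`/`PtrBaseIndex`/`PtrSlice` payload and wrap it in the
+ // matching `Key.Ptr.addr` (and, for slices, `len`) field.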
.ptr_eu_payload => { + const info = ip.extraData(PtrBase, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .eu_payload = info.base }, + } }; + }, + .ptr_opt_payload => { + const info = ip.extraData(PtrBase, data); + return .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .opt_payload = info.base }, + } }; + }, + .ptr_elem => { + // Avoid `indexToKey` recursion by asserting the tag encoding. + const info = ip.extraData(PtrBaseIndex, data); + const index_item = ip.items.get(@enumToInt(info.index)); + return switch (index_item.tag) { + .int_usize => .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .elem = .{ + .base = info.base, + .index = index_item.data, + } }, + } }, + .int_positive => @panic("TODO"), // implement along with behavior test coverage + else => unreachable, + }; + }, + .ptr_field => { + // Avoid `indexToKey` recursion by asserting the tag encoding. + const info = ip.extraData(PtrBaseIndex, data); + const index_item = ip.items.get(@enumToInt(info.index)); + return switch (index_item.tag) { + .int_usize => .{ .ptr = .{ + .ty = info.ty, + .addr = .{ .field = .{ + .base = info.base, + .index = index_item.data, + } }, + } }, + .int_positive => @panic("TODO"), // implement along with behavior test coverage + else => unreachable, + }; + }, + .ptr_slice => { + const info = ip.extraData(PtrSlice, data); + const ptr_item = ip.items.get(@enumToInt(info.ptr)); + return .{ + .ptr = .{ + .ty = info.ty, + .addr = switch (ptr_item.tag) { + .ptr_decl => .{ + .decl = ip.extraData(PtrDecl, ptr_item.data).decl, + }, + .ptr_mut_decl => b: { + const sub_info = ip.extraData(PtrMutDecl, ptr_item.data); + break :b .{ .mut_decl = .{ + .decl = sub_info.decl, + .runtime_index = sub_info.runtime_index, + } }; + }, + .ptr_comptime_field => .{ + .comptime_field = ip.extraData(PtrComptimeField, ptr_item.data).field_val, + }, + .ptr_int => .{ + .int = ip.extraData(PtrBase, ptr_item.data).base, + }, + .ptr_eu_payload => .{ + .eu_payload = ip.extraData(PtrBase, ptr_item.data).base, + }, + .ptr_opt_payload => .{ + .opt_payload = ip.extraData(PtrBase, ptr_item.data).base, + }, + .ptr_elem => b: { + // Avoid `indexToKey` recursion by asserting the tag encoding. + const sub_info = ip.extraData(PtrBaseIndex, ptr_item.data); + const index_item = ip.items.get(@enumToInt(sub_info.index)); + break :b switch (index_item.tag) { + .int_usize => .{ .elem = .{ + .base = sub_info.base, + .index = index_item.data, + } }, + .int_positive => @panic("TODO"), // implement along with behavior test coverage + else => unreachable, + }; + }, + .ptr_field => b: { + // Avoid `indexToKey` recursion by asserting the tag encoding. 
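+ // Reading the index item's tag directly avoids re-entering
+ // `indexToKey` for the element index.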
+ const sub_info = ip.extraData(PtrBaseIndex, ptr_item.data); + const index_item = ip.items.get(@enumToInt(sub_info.index)); + break :b switch (index_item.tag) { + .int_usize => .{ .field = .{ + .base = sub_info.base, + .index = index_item.data, + } }, + .int_positive => @panic("TODO"), // implement along with behavior test coverage + else => unreachable, + }; + }, + else => unreachable, + }, + .len = info.len, + }, + }; + }, + .int_u8 => .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = data }, + } }, + .int_u16 => .{ .int = .{ + .ty = .u16_type, + .storage = .{ .u64 = data }, + } }, + .int_u32 => .{ .int = .{ + .ty = .u32_type, + .storage = .{ .u64 = data }, + } }, + .int_i32 => .{ .int = .{ + .ty = .i32_type, + .storage = .{ .i64 = @bitCast(i32, data) }, + } }, + .int_usize => .{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = data }, + } }, + .int_comptime_int_u32 => .{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .u64 = data }, + } }, + .int_comptime_int_i32 => .{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .i64 = @bitCast(i32, data) }, + } }, + .int_positive => ip.indexToKeyBigInt(data, true), + .int_negative => ip.indexToKeyBigInt(data, false), + .int_small => { + const info = ip.extraData(IntSmall, data); + return .{ .int = .{ + .ty = info.ty, + .storage = .{ .u64 = info.value }, + } }; + }, + .int_lazy_align, .int_lazy_size => |tag| { + const info = ip.extraData(IntLazy, data); + return .{ .int = .{ + .ty = info.ty, + .storage = switch (tag) { + .int_lazy_align => .{ .lazy_align = info.lazy_ty }, + .int_lazy_size => .{ .lazy_size = info.lazy_ty }, + else => unreachable, + }, + } }; + }, + .float_f16 => .{ .float = .{ + .ty = .f16_type, + .storage = .{ .f16 = @bitCast(f16, @intCast(u16, data)) }, + } }, + .float_f32 => .{ .float = .{ + .ty = .f32_type, + .storage = .{ .f32 = @bitCast(f32, data) }, + } }, + .float_f64 => .{ .float = .{ + .ty = .f64_type, + .storage = .{ .f64 = ip.extraData(Float64, data).get() }, + } }, + .float_f80 => .{ .float = .{ + .ty = .f80_type, + .storage = .{ .f80 = ip.extraData(Float80, data).get() }, + } }, + .float_f128 => .{ .float = .{ + .ty = .f128_type, + .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + } }, + .float_c_longdouble_f80 => .{ .float = .{ + .ty = .c_longdouble_type, + .storage = .{ .f80 = ip.extraData(Float80, data).get() }, + } }, + .float_c_longdouble_f128 => .{ .float = .{ + .ty = .c_longdouble_type, + .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + } }, + .float_comptime_float => .{ .float = .{ + .ty = .comptime_float_type, + .storage = .{ .f128 = ip.extraData(Float128, data).get() }, + } }, + .variable => { + const extra = ip.extraData(Tag.Variable, data); + return .{ .variable = .{ + .ty = extra.ty, + .init = extra.init, + .decl = extra.decl, + .lib_name = extra.lib_name, + .is_extern = extra.flags.is_extern, + .is_const = extra.flags.is_const, + .is_threadlocal = extra.flags.is_threadlocal, + .is_weak_linkage = extra.flags.is_weak_linkage, + } }; + }, + .extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) }, + .func => .{ .func = ip.extraData(Tag.Func, data) }, + .only_possible_value => { + const ty = @intToEnum(Index, data); + const ty_item = ip.items.get(@enumToInt(ty)); + return switch (ty_item.tag) { + .type_array_big => { + const sentinel = @ptrCast( + *const [1]Index, + &ip.extra.items[ty_item.data + std.meta.fieldIndex(Array, "sentinel").?], + ); + return .{ .aggregate = .{ + .ty = ty, + .storage = .{ .elems = sentinel[0..@boolToInt(sentinel[0] != 
.none)] }, + } }; + }, + .type_array_small, .type_vector => .{ .aggregate = .{ + .ty = ty, + .storage = .{ .elems = &.{} }, + } }, + // TODO: migrate structs to properly use the InternPool rather + // than using the SegmentedList trick, then the struct type will + // have a slice of comptime values that can be used here for when + // the struct has one possible value due to all fields comptime (same + // as the tuple case below). + .type_struct, .type_struct_ns => .{ .aggregate = .{ + .ty = ty, + .storage = .{ .elems = &.{} }, + } }, + + // There is only one possible value precisely due to the + // fact that this values slice is fully populated! + .type_struct_anon, .type_tuple_anon => { + const type_struct_anon = ip.extraDataTrail(TypeStructAnon, ty_item.data); + const fields_len = type_struct_anon.data.fields_len; + const values = ip.extra.items[type_struct_anon.end + fields_len ..][0..fields_len]; + return .{ .aggregate = .{ + .ty = ty, + .storage = .{ .elems = @ptrCast([]const Index, values) }, + } }; + }, + + .type_enum_auto, + .type_enum_explicit, + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + => .{ .empty_enum_value = ty }, + + else => unreachable, + }; + }, + .bytes => { + const extra = ip.extraData(Bytes, data); + const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.ty)); + return .{ .aggregate = .{ + .ty = extra.ty, + .storage = .{ .bytes = ip.string_bytes.items[@enumToInt(extra.bytes)..][0..len] }, + } }; + }, + .aggregate => { + const extra = ip.extraDataTrail(Tag.Aggregate, data); + const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(extra.data.ty)); + const fields = @ptrCast([]const Index, ip.extra.items[extra.end..][0..len]); + return .{ .aggregate = .{ + .ty = extra.data.ty, + .storage = .{ .elems = fields }, + } }; + }, + .repeated => { + const extra = ip.extraData(Repeated, data); + return .{ .aggregate = .{ + .ty = extra.ty, + .storage = .{ .repeated_elem = extra.elem_val }, + } }; + }, + .union_value => .{ .un = ip.extraData(Key.Union, data) }, + .error_set_error => .{ .err = ip.extraData(Key.Error, data) }, + .error_union_error => { + const extra = ip.extraData(Key.Error, data); + return .{ .error_union = .{ + .ty = extra.ty, + .val = .{ .err_name = extra.name }, + } }; + }, + .error_union_payload => { + const extra = ip.extraData(Tag.TypeValue, data); + return .{ .error_union = .{ + .ty = extra.ty, + .val = .{ .payload = extra.val }, + } }; + }, + .enum_literal => .{ .enum_literal = @intToEnum(NullTerminatedString, data) }, + .enum_tag => .{ .enum_tag = ip.extraData(Tag.EnumTag, data) }, + + .memoized_call => { + const extra = ip.extraDataTrail(MemoizedCall, data); + return .{ .memoized_call = .{ + .func = extra.data.func, + .arg_values = @ptrCast([]const Index, ip.extra.items[extra.end..][0..extra.data.args_len]), + .result = extra.data.result, + } }; + }, }; } +fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType { + const type_function = ip.extraDataTrail(TypeFunction, data); + const param_types = @ptrCast( + []Index, + ip.extra.items[type_function.end..][0..type_function.data.params_len], + ); + return .{ + .param_types = param_types, + .return_type = type_function.data.return_type, + .comptime_bits = type_function.data.comptime_bits, + .noalias_bits = type_function.data.noalias_bits, + .alignment = type_function.data.flags.alignment, + .cc = type_function.data.flags.cc, + .is_var_args = type_function.data.flags.is_var_args, + .is_generic = type_function.data.flags.is_generic, + .is_noinline = 
type_function.data.flags.is_noinline, + .align_is_generic = type_function.data.flags.align_is_generic, + .cc_is_generic = type_function.data.flags.cc_is_generic, + .section_is_generic = type_function.data.flags.section_is_generic, + .addrspace_is_generic = type_function.data.flags.addrspace_is_generic, + }; +} + +fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key { + const enum_explicit = ip.extraDataTrail(EnumExplicit, data); + const names = @ptrCast( + []const NullTerminatedString, + ip.extra.items[enum_explicit.end..][0..enum_explicit.data.fields_len], + ); + const values = if (enum_explicit.data.values_map != .none) @ptrCast( + []const Index, + ip.extra.items[enum_explicit.end + names.len ..][0..enum_explicit.data.fields_len], + ) else &[0]Index{}; + + return .{ .enum_type = .{ + .decl = enum_explicit.data.decl, + .namespace = enum_explicit.data.namespace, + .tag_ty = enum_explicit.data.int_tag_type, + .names = names, + .values = values, + .tag_mode = tag_mode, + .names_map = enum_explicit.data.names_map.toOptional(), + .values_map = enum_explicit.data.values_map, + } }; +} + +fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key { + const int_info = ip.limbData(Int, limb_index); + return .{ .int = .{ + .ty = int_info.ty, + .storage = .{ .big_int = .{ + .limbs = ip.limbSlice(Int, limb_index, int_info.limbs_len), + .positive = positive, + } }, + } }; +} + pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { const adapter: KeyAdapter = .{ .intern_pool = ip }; const gop = try ip.map.getOrPutAdapted(gpa, key, adapter); - if (gop.found_existing) { - return @intToEnum(Index, gop.index); - } + if (gop.found_existing) return @intToEnum(Index, gop.index); + try ip.items.ensureUnusedCapacity(gpa, 1); switch (key) { .int_type => |int_type| { - const tag: Tag = switch (int_type.signedness) { + const t: Tag = switch (int_type.signedness) { .signed => .type_int_signed, .unsigned => .type_int_unsigned, }; - try ip.items.append(gpa, .{ - .tag = tag, + ip.items.appendAssumeCapacity(.{ + .tag = t, .data = int_type.bits, }); }, + .ptr_type => |ptr_type| { + assert(ptr_type.child != .none); + assert(ptr_type.sentinel == .none or ip.typeOf(ptr_type.sentinel) == ptr_type.child); + + if (ptr_type.flags.size == .Slice) { + _ = ip.map.pop(); + var new_key = key; + new_key.ptr_type.flags.size = .Many; + const ptr_type_index = try ip.get(gpa, new_key); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + try ip.items.ensureUnusedCapacity(gpa, 1); + ip.items.appendAssumeCapacity(.{ + .tag = .type_slice, + .data = @enumToInt(ptr_type_index), + }); + return @intToEnum(Index, ip.items.len - 1); + } + + var ptr_type_adjusted = ptr_type; + if (ptr_type.flags.size == .C) ptr_type_adjusted.flags.is_allowzero = true; + + ip.items.appendAssumeCapacity(.{ + .tag = .type_pointer, + .data = try ip.addExtra(gpa, ptr_type_adjusted), + }); + }, .array_type => |array_type| { - const len = @intCast(u32, array_type.len); // TODO have a big_array encoding - assert(array_type.sentinel == .none); // TODO have a sentinel_array encoding - try ip.items.append(gpa, .{ - .tag = .type_array, + assert(array_type.child != .none); + assert(array_type.sentinel == .none or ip.typeOf(array_type.sentinel) == array_type.child); + + if (std.math.cast(u32, array_type.len)) |len| { + if (array_type.sentinel == .none) { + ip.items.appendAssumeCapacity(.{ + .tag = .type_array_small, + .data = try ip.addExtra(gpa, Vector{ + .len = len, + 
.child = array_type.child, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } + } + + const length = Array.Length.init(array_type.len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_array_big, .data = try ip.addExtra(gpa, Array{ - .len = len, + .len0 = length.a, + .len1 = length.b, .child = array_type.child, + .sentinel = array_type.sentinel, }), }); }, - else => @panic("TODO"), + .vector_type => |vector_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_vector, + .data = try ip.addExtra(gpa, Vector{ + .len = vector_type.len, + .child = vector_type.child, + }), + }); + }, + .opt_type => |payload_type| { + assert(payload_type != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .type_optional, + .data = @enumToInt(payload_type), + }); + }, + .anyframe_type => |payload_type| { + // payload_type might be none, indicating the type is `anyframe`. + ip.items.appendAssumeCapacity(.{ + .tag = .type_anyframe, + .data = @enumToInt(payload_type), + }); + }, + .error_union_type => |error_union_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_error_union, + .data = try ip.addExtra(gpa, error_union_type), + }); + }, + .error_set_type => |error_set_type| { + assert(error_set_type.names_map == .none); + assert(std.sort.isSorted(NullTerminatedString, error_set_type.names, {}, NullTerminatedString.indexLessThan)); + const names_map = try ip.addMap(gpa); + try addStringsToMap(ip, gpa, names_map, error_set_type.names); + const names_len = @intCast(u32, error_set_type.names.len); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(ErrorSet).Struct.fields.len + names_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_error_set, + .data = ip.addExtraAssumeCapacity(ErrorSet{ + .names_len = names_len, + .names_map = names_map, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, error_set_type.names)); + }, + .inferred_error_set_type => |ies_index| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_inferred_error_set, + .data = @enumToInt(ies_index), + }); + }, + .simple_type => |simple_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .simple_type, + .data = @enumToInt(simple_type), + }); + }, + .simple_value => |simple_value| { + ip.items.appendAssumeCapacity(.{ + .tag = .simple_value, + .data = @enumToInt(simple_value), + }); + }, + .undef => |ty| { + assert(ty != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .undef, + .data = @enumToInt(ty), + }); + }, + .runtime_value => |runtime_value| { + assert(runtime_value.ty == ip.typeOf(runtime_value.val)); + ip.items.appendAssumeCapacity(.{ + .tag = .runtime_value, + .data = try ip.addExtra(gpa, runtime_value), + }); + }, + + .struct_type => |struct_type| { + ip.items.appendAssumeCapacity(if (struct_type.index.unwrap()) |i| .{ + .tag = .type_struct, + .data = @enumToInt(i), + } else if (struct_type.namespace.unwrap()) |i| .{ + .tag = .type_struct_ns, + .data = @enumToInt(i), + } else .{ + .tag = .type_struct, + .data = @enumToInt(Module.Struct.OptionalIndex.none), + }); + }, + + .anon_struct_type => |anon_struct_type| { + assert(anon_struct_type.types.len == anon_struct_type.values.len); + for (anon_struct_type.types) |elem| assert(elem != .none); + + const fields_len = @intCast(u32, anon_struct_type.types.len); + if (anon_struct_type.names.len == 0) { + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 2), + ); + ip.items.appendAssumeCapacity(.{ + .tag = .type_tuple_anon, + .data = ip.addExtraAssumeCapacity(TypeStructAnon{ + .fields_len = fields_len, 
+ }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values)); + return @intToEnum(Index, ip.items.len - 1); + } + + assert(anon_struct_type.names.len == anon_struct_type.types.len); + + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(TypeStructAnon).Struct.fields.len + (fields_len * 3), + ); + ip.items.appendAssumeCapacity(.{ + .tag = .type_struct_anon, + .data = ip.addExtraAssumeCapacity(TypeStructAnon{ + .fields_len = fields_len, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.types)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.values)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, anon_struct_type.names)); + return @intToEnum(Index, ip.items.len - 1); + }, + + .union_type => |union_type| { + ip.items.appendAssumeCapacity(.{ + .tag = switch (union_type.runtime_tag) { + .none => .type_union_untagged, + .safety => .type_union_safety, + .tagged => .type_union_tagged, + }, + .data = @enumToInt(union_type.index), + }); + }, + + .opaque_type => |opaque_type| { + ip.items.appendAssumeCapacity(.{ + .tag = .type_opaque, + .data = try ip.addExtra(gpa, opaque_type), + }); + }, + + .enum_type => |enum_type| { + assert(enum_type.tag_ty == .noreturn_type or ip.isIntegerType(enum_type.tag_ty)); + for (enum_type.values) |value| assert(ip.typeOf(value) == enum_type.tag_ty); + assert(enum_type.names_map == .none); + assert(enum_type.values_map == .none); + + switch (enum_type.tag_mode) { + .auto => { + const names_map = try ip.addMap(gpa); + try addStringsToMap(ip, gpa, names_map, enum_type.names); + + const fields_len = @intCast(u32, enum_type.names.len); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumAuto).Struct.fields.len + + fields_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_enum_auto, + .data = ip.addExtraAssumeCapacity(EnumAuto{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .int_tag_type = enum_type.tag_ty, + .names_map = names_map, + .fields_len = fields_len, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); + return @intToEnum(Index, ip.items.len - 1); + }, + .explicit => return finishGetEnum(ip, gpa, enum_type, .type_enum_explicit), + .nonexhaustive => return finishGetEnum(ip, gpa, enum_type, .type_enum_nonexhaustive), + } + }, + + .func_type => |func_type| { + assert(func_type.return_type != .none); + for (func_type.param_types) |param_type| assert(param_type != .none); + + const params_len = @intCast(u32, func_type.param_types.len); + + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(TypeFunction).Struct.fields.len + + params_len); + ip.items.appendAssumeCapacity(.{ + .tag = .type_function, + .data = ip.addExtraAssumeCapacity(TypeFunction{ + .params_len = params_len, + .return_type = func_type.return_type, + .comptime_bits = func_type.comptime_bits, + .noalias_bits = func_type.noalias_bits, + .flags = .{ + .alignment = func_type.alignment, + .cc = func_type.cc, + .is_var_args = func_type.is_var_args, + .is_generic = func_type.is_generic, + .is_noinline = func_type.is_noinline, + .align_is_generic = func_type.align_is_generic, + .cc_is_generic = func_type.cc_is_generic, + .section_is_generic = func_type.section_is_generic, + .addrspace_is_generic = func_type.addrspace_is_generic, + }, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, func_type.param_types)); + }, + + .variable => 
|variable| { + const has_init = variable.init != .none; + if (has_init) assert(variable.ty == ip.typeOf(variable.init)); + ip.items.appendAssumeCapacity(.{ + .tag = .variable, + .data = try ip.addExtra(gpa, Tag.Variable{ + .ty = variable.ty, + .init = variable.init, + .decl = variable.decl, + .lib_name = variable.lib_name, + .flags = .{ + .is_extern = variable.is_extern, + .is_const = variable.is_const, + .is_threadlocal = variable.is_threadlocal, + .is_weak_linkage = variable.is_weak_linkage, + }, + }), + }); + }, + + .extern_func => |extern_func| ip.items.appendAssumeCapacity(.{ + .tag = .extern_func, + .data = try ip.addExtra(gpa, @as(Tag.ExternFunc, extern_func)), + }), + + .func => |func| ip.items.appendAssumeCapacity(.{ + .tag = .func, + .data = try ip.addExtra(gpa, @as(Tag.Func, func)), + }), + + .ptr => |ptr| { + const ptr_type = ip.indexToKey(ptr.ty).ptr_type; + switch (ptr.len) { + .none => { + assert(ptr_type.flags.size != .Slice); + switch (ptr.addr) { + .decl => |decl| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_decl, + .data = try ip.addExtra(gpa, PtrDecl{ + .ty = ptr.ty, + .decl = decl, + }), + }), + .mut_decl => |mut_decl| ip.items.appendAssumeCapacity(.{ + .tag = .ptr_mut_decl, + .data = try ip.addExtra(gpa, PtrMutDecl{ + .ty = ptr.ty, + .decl = mut_decl.decl, + .runtime_index = mut_decl.runtime_index, + }), + }), + .comptime_field => |field_val| { + assert(field_val != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .ptr_comptime_field, + .data = try ip.addExtra(gpa, PtrComptimeField{ + .ty = ptr.ty, + .field_val = field_val, + }), + }); + }, + .int, .eu_payload, .opt_payload => |base| { + switch (ptr.addr) { + .int => assert(ip.typeOf(base) == .usize_type), + .eu_payload => assert(ip.indexToKey( + ip.indexToKey(ip.typeOf(base)).ptr_type.child, + ) == .error_union_type), + .opt_payload => assert(ip.indexToKey( + ip.indexToKey(ip.typeOf(base)).ptr_type.child, + ) == .opt_type), + else => unreachable, + } + ip.items.appendAssumeCapacity(.{ + .tag = switch (ptr.addr) { + .int => .ptr_int, + .eu_payload => .ptr_eu_payload, + .opt_payload => .ptr_opt_payload, + else => unreachable, + }, + .data = try ip.addExtra(gpa, PtrBase{ + .ty = ptr.ty, + .base = base, + }), + }); + }, + .elem, .field => |base_index| { + const base_ptr_type = ip.indexToKey(ip.typeOf(base_index.base)).ptr_type; + switch (ptr.addr) { + .elem => assert(base_ptr_type.flags.size == .Many), + .field => { + assert(base_ptr_type.flags.size == .One); + switch (ip.indexToKey(base_ptr_type.child)) { + .anon_struct_type => |anon_struct_type| { + assert(ptr.addr == .field); + assert(base_index.index < anon_struct_type.types.len); + }, + .struct_type => |struct_type| { + assert(ptr.addr == .field); + assert(base_index.index < ip.structPtrUnwrapConst(struct_type.index).?.fields.count()); + }, + .union_type => |union_type| { + assert(ptr.addr == .field); + assert(base_index.index < ip.unionPtrConst(union_type.index).fields.count()); + }, + .ptr_type => |slice_type| { + assert(ptr.addr == .field); + assert(slice_type.flags.size == .Slice); + assert(base_index.index < 2); + }, + else => unreachable, + } + }, + else => unreachable, + } + _ = ip.map.pop(); + const index_index = try ip.get(gpa, .{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = base_index.index }, + } }); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + try ip.items.ensureUnusedCapacity(gpa, 1); + ip.items.appendAssumeCapacity(.{ + .tag = switch (ptr.addr) { + .elem => .ptr_elem, + .field => .ptr_field, + else => 
unreachable, + }, + .data = try ip.addExtra(gpa, PtrBaseIndex{ + .ty = ptr.ty, + .base = base_index.base, + .index = index_index, + }), + }); + }, + } + }, + else => { + // TODO: change Key.Ptr for slices to reference the manyptr value + // rather than having an addr field directly. Then we can avoid + // these problematic calls to pop(), get(), and getOrPutAdapted(). + assert(ptr_type.flags.size == .Slice); + _ = ip.map.pop(); + var new_key = key; + new_key.ptr.ty = ip.slicePtrType(ptr.ty); + new_key.ptr.len = .none; + assert(ip.indexToKey(new_key.ptr.ty).ptr_type.flags.size == .Many); + const ptr_index = try ip.get(gpa, new_key); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + try ip.items.ensureUnusedCapacity(gpa, 1); + ip.items.appendAssumeCapacity(.{ + .tag = .ptr_slice, + .data = try ip.addExtra(gpa, PtrSlice{ + .ty = ptr.ty, + .ptr = ptr_index, + .len = ptr.len, + }), + }); + }, + } + assert(ptr.ty == ip.indexToKey(@intToEnum(Index, ip.items.len - 1)).ptr.ty); + }, + + .opt => |opt| { + assert(ip.isOptionalType(opt.ty)); + assert(opt.val == .none or ip.indexToKey(opt.ty).opt_type == ip.typeOf(opt.val)); + ip.items.appendAssumeCapacity(if (opt.val == .none) .{ + .tag = .opt_null, + .data = @enumToInt(opt.ty), + } else .{ + .tag = .opt_payload, + .data = try ip.addExtra(gpa, Tag.TypeValue{ + .ty = opt.ty, + .val = opt.val, + }), + }); + }, + + .int => |int| b: { + assert(ip.isIntegerType(int.ty)); + switch (int.storage) { + .u64, .i64, .big_int => {}, + .lazy_align, .lazy_size => |lazy_ty| { + ip.items.appendAssumeCapacity(.{ + .tag = switch (int.storage) { + else => unreachable, + .lazy_align => .int_lazy_align, + .lazy_size => .int_lazy_size, + }, + .data = try ip.addExtra(gpa, IntLazy{ + .ty = int.ty, + .lazy_ty = lazy_ty, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + }, + } + switch (int.ty) { + .u8_type => switch (int.storage) { + .big_int => |big_int| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u8, + .data = big_int.to(u8) catch unreachable, + }); + break :b; + }, + inline .u64, .i64 => |x| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u8, + .data = @intCast(u8, x), + }); + break :b; + }, + .lazy_align, .lazy_size => unreachable, + }, + .u16_type => switch (int.storage) { + .big_int => |big_int| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u16, + .data = big_int.to(u16) catch unreachable, + }); + break :b; + }, + inline .u64, .i64 => |x| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u16, + .data = @intCast(u16, x), + }); + break :b; + }, + .lazy_align, .lazy_size => unreachable, + }, + .u32_type => switch (int.storage) { + .big_int => |big_int| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u32, + .data = big_int.to(u32) catch unreachable, + }); + break :b; + }, + inline .u64, .i64 => |x| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_u32, + .data = @intCast(u32, x), + }); + break :b; + }, + .lazy_align, .lazy_size => unreachable, + }, + .i32_type => switch (int.storage) { + .big_int => |big_int| { + const casted = big_int.to(i32) catch unreachable; + ip.items.appendAssumeCapacity(.{ + .tag = .int_i32, + .data = @bitCast(u32, casted), + }); + break :b; + }, + inline .u64, .i64 => |x| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_i32, + .data = @bitCast(u32, @intCast(i32, x)), + }); + break :b; + }, + .lazy_align, .lazy_size => unreachable, + }, + .usize_type => switch (int.storage) { + .big_int => |big_int| { + if (big_int.to(u32)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_usize, + 
.data = casted, + }); + break :b; + } else |_| {} + }, + inline .u64, .i64 => |x| { + if (std.math.cast(u32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_usize, + .data = casted, + }); + break :b; + } + }, + .lazy_align, .lazy_size => unreachable, + }, + .comptime_int_type => switch (int.storage) { + .big_int => |big_int| { + if (big_int.to(u32)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_comptime_int_u32, + .data = casted, + }); + break :b; + } else |_| {} + if (big_int.to(i32)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_comptime_int_i32, + .data = @bitCast(u32, casted), + }); + break :b; + } else |_| {} + }, + inline .u64, .i64 => |x| { + if (std.math.cast(u32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_comptime_int_u32, + .data = casted, + }); + break :b; + } + if (std.math.cast(i32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_comptime_int_i32, + .data = @bitCast(u32, casted), + }); + break :b; + } + }, + .lazy_align, .lazy_size => unreachable, + }, + else => {}, + } + switch (int.storage) { + .big_int => |big_int| { + if (big_int.to(u32)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small, + .data = try ip.addExtra(gpa, IntSmall{ + .ty = int.ty, + .value = casted, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } else |_| {} + + const tag: Tag = if (big_int.positive) .int_positive else .int_negative; + try addInt(ip, gpa, int.ty, tag, big_int.limbs); + }, + inline .u64, .i64 => |x| { + if (std.math.cast(u32, x)) |casted| { + ip.items.appendAssumeCapacity(.{ + .tag = .int_small, + .data = try ip.addExtra(gpa, IntSmall{ + .ty = int.ty, + .value = casted, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } + + var buf: [2]Limb = undefined; + const big_int = BigIntMutable.init(&buf, x).toConst(); + const tag: Tag = if (big_int.positive) .int_positive else .int_negative; + try addInt(ip, gpa, int.ty, tag, big_int.limbs); + }, + .lazy_align, .lazy_size => unreachable, + } + }, + + .err => |err| { + assert(ip.isErrorSetType(err.ty)); + ip.items.appendAssumeCapacity(.{ + .tag = .error_set_error, + .data = try ip.addExtra(gpa, err), + }); + }, + + .error_union => |error_union| { + assert(ip.isErrorUnionType(error_union.ty)); + ip.items.appendAssumeCapacity(switch (error_union.val) { + .err_name => |err_name| .{ + .tag = .error_union_error, + .data = try ip.addExtra(gpa, Key.Error{ + .ty = error_union.ty, + .name = err_name, + }), + }, + .payload => |payload| .{ + .tag = .error_union_payload, + .data = try ip.addExtra(gpa, Tag.TypeValue{ + .ty = error_union.ty, + .val = payload, + }), + }, + }); + }, + + .enum_literal => |enum_literal| ip.items.appendAssumeCapacity(.{ + .tag = .enum_literal, + .data = @enumToInt(enum_literal), + }), + + .enum_tag => |enum_tag| { + assert(ip.isEnumType(enum_tag.ty)); + switch (ip.indexToKey(enum_tag.ty)) { + .simple_type => assert(ip.isIntegerType(ip.typeOf(enum_tag.int))), + .enum_type => |enum_type| assert(ip.typeOf(enum_tag.int) == enum_type.tag_ty), + else => unreachable, + } + ip.items.appendAssumeCapacity(.{ + .tag = .enum_tag, + .data = try ip.addExtra(gpa, enum_tag), + }); + }, + + .empty_enum_value => |enum_or_union_ty| ip.items.appendAssumeCapacity(.{ + .tag = .only_possible_value, + .data = @enumToInt(enum_or_union_ty), + }), + + .float => |float| { + switch (float.ty) { + .f16_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f16, + .data = @bitCast(u16, float.storage.f16), + }), + .f32_type => 
ip.items.appendAssumeCapacity(.{ + .tag = .float_f32, + .data = @bitCast(u32, float.storage.f32), + }), + .f64_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f64, + .data = try ip.addExtra(gpa, Float64.pack(float.storage.f64)), + }), + .f80_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f80, + .data = try ip.addExtra(gpa, Float80.pack(float.storage.f80)), + }), + .f128_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_f128, + .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), + }), + .c_longdouble_type => switch (float.storage) { + .f80 => |x| ip.items.appendAssumeCapacity(.{ + .tag = .float_c_longdouble_f80, + .data = try ip.addExtra(gpa, Float80.pack(x)), + }), + inline .f16, .f32, .f64, .f128 => |x| ip.items.appendAssumeCapacity(.{ + .tag = .float_c_longdouble_f128, + .data = try ip.addExtra(gpa, Float128.pack(x)), + }), + }, + .comptime_float_type => ip.items.appendAssumeCapacity(.{ + .tag = .float_comptime_float, + .data = try ip.addExtra(gpa, Float128.pack(float.storage.f128)), + }), + else => unreachable, + } + }, + + .aggregate => |aggregate| { + const ty_key = ip.indexToKey(aggregate.ty); + const len = ip.aggregateTypeLen(aggregate.ty); + const child = switch (ty_key) { + .array_type => |array_type| array_type.child, + .vector_type => |vector_type| vector_type.child, + .anon_struct_type, .struct_type => .none, + else => unreachable, + }; + const sentinel = switch (ty_key) { + .array_type => |array_type| array_type.sentinel, + .vector_type, .anon_struct_type, .struct_type => .none, + else => unreachable, + }; + const len_including_sentinel = len + @boolToInt(sentinel != .none); + switch (aggregate.storage) { + .bytes => |bytes| { + assert(child == .u8_type); + if (bytes.len != len) { + assert(bytes.len == len_including_sentinel); + assert(bytes[@intCast(usize, len)] == ip.indexToKey(sentinel).int.storage.u64); + } + }, + .elems => |elems| { + if (elems.len != len) { + assert(elems.len == len_including_sentinel); + assert(elems[@intCast(usize, len)] == sentinel); + } + }, + .repeated_elem => |elem| { + assert(sentinel == .none or elem == sentinel); + }, + } + switch (ty_key) { + .array_type, .vector_type => { + for (aggregate.storage.values()) |elem| { + assert(ip.typeOf(elem) == child); + } + }, + .struct_type => |struct_type| { + for ( + aggregate.storage.values(), + ip.structPtrUnwrapConst(struct_type.index).?.fields.values(), + ) |elem, field| { + assert(ip.typeOf(elem) == field.ty.toIntern()); + } + }, + .anon_struct_type => |anon_struct_type| { + for (aggregate.storage.values(), anon_struct_type.types) |elem, ty| { + assert(ip.typeOf(elem) == ty); + } + }, + else => unreachable, + } + + if (len == 0) { + ip.items.appendAssumeCapacity(.{ + .tag = .only_possible_value, + .data = @enumToInt(aggregate.ty), + }); + return @intToEnum(Index, ip.items.len - 1); + } + + switch (ty_key) { + .anon_struct_type => |anon_struct_type| opv: { + switch (aggregate.storage) { + .bytes => |bytes| for (anon_struct_type.values, bytes) |value, byte| { + if (value != ip.getIfExists(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = byte }, + } })) break :opv; + }, + .elems => |elems| if (!std.mem.eql( + Index, + anon_struct_type.values, + elems, + )) break :opv, + .repeated_elem => |elem| for (anon_struct_type.values) |value| { + if (value != elem) break :opv; + }, + } + // This encoding works thanks to the fact that, as we just verified, + // the type itself contains a slice of values that can be provided + // in the aggregate fields. 
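+ // (When decoded, `indexToKey` rebuilds the elems from the anon
+ // struct type's trailing values; see `only_possible_value` above.)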
+ ip.items.appendAssumeCapacity(.{ + .tag = .only_possible_value, + .data = @enumToInt(aggregate.ty), + }); + return @intToEnum(Index, ip.items.len - 1); + }, + else => {}, + } + + repeated: { + switch (aggregate.storage) { + .bytes => |bytes| for (bytes[1..@intCast(usize, len)]) |byte| + if (byte != bytes[0]) break :repeated, + .elems => |elems| for (elems[1..@intCast(usize, len)]) |elem| + if (elem != elems[0]) break :repeated, + .repeated_elem => {}, + } + const elem = switch (aggregate.storage) { + .bytes => |bytes| elem: { + _ = ip.map.pop(); + const elem = try ip.get(gpa, .{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[0] }, + } }); + assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing); + try ip.items.ensureUnusedCapacity(gpa, 1); + break :elem elem; + }, + .elems => |elems| elems[0], + .repeated_elem => |elem| elem, + }; + + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Repeated).Struct.fields.len, + ); + ip.items.appendAssumeCapacity(.{ + .tag = .repeated, + .data = ip.addExtraAssumeCapacity(Repeated{ + .ty = aggregate.ty, + .elem_val = elem, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } + + if (child == .u8_type) bytes: { + const string_bytes_index = ip.string_bytes.items.len; + try ip.string_bytes.ensureUnusedCapacity(gpa, @intCast(usize, len_including_sentinel + 1)); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len); + switch (aggregate.storage) { + .bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes), + .elems => |elems| for (elems) |elem| switch (ip.indexToKey(elem)) { + .undef => { + ip.string_bytes.shrinkRetainingCapacity(string_bytes_index); + break :bytes; + }, + .int => |int| ip.string_bytes.appendAssumeCapacity( + @intCast(u8, int.storage.u64), + ), + else => unreachable, + }, + .repeated_elem => |elem| switch (ip.indexToKey(elem)) { + .undef => break :bytes, + .int => |int| @memset( + ip.string_bytes.addManyAsSliceAssumeCapacity(@intCast(usize, len)), + @intCast(u8, int.storage.u64), + ), + else => unreachable, + }, + } + const has_internal_null = + std.mem.indexOfScalar(u8, ip.string_bytes.items[string_bytes_index..], 0) != null; + if (sentinel != .none) ip.string_bytes.appendAssumeCapacity( + @intCast(u8, ip.indexToKey(sentinel).int.storage.u64), + ); + const string = if (has_internal_null) + @intToEnum(String, string_bytes_index) + else + (try ip.getOrPutTrailingString(gpa, @intCast(usize, len_including_sentinel))).toString(); + ip.items.appendAssumeCapacity(.{ + .tag = .bytes, + .data = ip.addExtraAssumeCapacity(Bytes{ + .ty = aggregate.ty, + .bytes = string, + }), + }); + return @intToEnum(Index, ip.items.len - 1); + } + + try ip.extra.ensureUnusedCapacity( + gpa, + @typeInfo(Tag.Aggregate).Struct.fields.len + @intCast(usize, len_including_sentinel), + ); + ip.items.appendAssumeCapacity(.{ + .tag = .aggregate, + .data = ip.addExtraAssumeCapacity(Tag.Aggregate{ + .ty = aggregate.ty, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, aggregate.storage.elems)); + if (sentinel != .none) ip.extra.appendAssumeCapacity(@enumToInt(sentinel)); + }, + + .un => |un| { + assert(un.ty != .none); + assert(un.tag != .none); + assert(un.val != .none); + ip.items.appendAssumeCapacity(.{ + .tag = .union_value, + .data = try ip.addExtra(gpa, un), + }); + }, + + .memoized_call => |memoized_call| { + for (memoized_call.arg_values) |arg| assert(arg != .none); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(MemoizedCall).Struct.fields.len + + 
memoized_call.arg_values.len); + ip.items.appendAssumeCapacity(.{ + .tag = .memoized_call, + .data = ip.addExtraAssumeCapacity(MemoizedCall{ + .func = memoized_call.func, + .args_len = @intCast(u32, memoized_call.arg_values.len), + .result = memoized_call.result, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, memoized_call.arg_values)); + }, } return @intToEnum(Index, ip.items.len - 1); } +/// Provides API for completing an enum type after calling `getIncompleteEnum`. +pub const IncompleteEnumType = struct { + index: Index, + tag_ty_index: u32, + names_map: MapIndex, + names_start: u32, + values_map: OptionalMapIndex, + values_start: u32, + + pub fn setTagType(self: @This(), ip: *InternPool, tag_ty: Index) void { + assert(tag_ty == .noreturn_type or ip.isIntegerType(tag_ty)); + ip.extra.items[self.tag_ty_index] = @enumToInt(tag_ty); + } + + /// Returns the already-existing field with the same name, if any. + pub fn addFieldName( + self: @This(), + ip: *InternPool, + gpa: Allocator, + name: NullTerminatedString, + ) Allocator.Error!?u32 { + const map = &ip.maps.items[@enumToInt(self.names_map)]; + const field_index = map.count(); + const strings = ip.extra.items[self.names_start..][0..field_index]; + const adapter: NullTerminatedString.Adapter = .{ + .strings = @ptrCast([]const NullTerminatedString, strings), + }; + const gop = try map.getOrPutAdapted(gpa, name, adapter); + if (gop.found_existing) return @intCast(u32, gop.index); + ip.extra.items[self.names_start + field_index] = @enumToInt(name); + return null; + } + + /// Returns the already-existing field with the same value, if any. + /// Make sure the type of the value has the integer tag type of the enum. + pub fn addFieldValue( + self: @This(), + ip: *InternPool, + gpa: Allocator, + value: Index, + ) Allocator.Error!?u32 { + assert(ip.typeOf(value) == @intToEnum(Index, ip.extra.items[self.tag_ty_index])); + const map = &ip.maps.items[@enumToInt(self.values_map.unwrap().?)]; + const field_index = map.count(); + const indexes = ip.extra.items[self.values_start..][0..field_index]; + const adapter: Index.Adapter = .{ + .indexes = @ptrCast([]const Index, indexes), + }; + const gop = try map.getOrPutAdapted(gpa, value, adapter); + if (gop.found_existing) return @intCast(u32, gop.index); + ip.extra.items[self.values_start + field_index] = @enumToInt(value); + return null; + } +}; + +/// This is used to create an enum type in the `InternPool`, with the ability +/// to update the tag type, field names, and field values later. +pub fn getIncompleteEnum( + ip: *InternPool, + gpa: Allocator, + enum_type: Key.IncompleteEnumType, +) Allocator.Error!IncompleteEnumType { + switch (enum_type.tag_mode) { + .auto => return getIncompleteEnumAuto(ip, gpa, enum_type), + .explicit => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_explicit), + .nonexhaustive => return getIncompleteEnumExplicit(ip, gpa, enum_type, .type_enum_nonexhaustive), + } +} + +fn getIncompleteEnumAuto( + ip: *InternPool, + gpa: Allocator, + enum_type: Key.IncompleteEnumType, +) Allocator.Error!IncompleteEnumType { + const int_tag_type = if (enum_type.tag_ty != .none) + enum_type.tag_ty + else + try ip.get(gpa, .{ .int_type = .{ + .bits = if (enum_type.fields_len == 0) 0 else std.math.log2_int_ceil(u32, enum_type.fields_len), + .signedness = .unsigned, + } }); + + // We must keep the map in sync with `items`. The hash and equality functions + // for enum types only look at the decl field, which is present even in + // an `IncompleteEnumType`. 
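// A hedged usage sketch of this two-phase API, assuming an initialized
// `ip`/`gpa` and hypothetical `decl`, `name_a`, `name_b` values; the key
// fields shown are the ones read by the `getIncompleteEnum*` functions here:
//
//     const wip = try ip.getIncompleteEnum(gpa, .{
//         .decl = decl,
//         .namespace = .none,
//         .tag_ty = .u8_type,
//         .fields_len = 2,
//         .has_values = false,
//         .tag_mode = .explicit,
//     });
//     wip.setTagType(ip, .u8_type);
//     assert((try wip.addFieldName(ip, gpa, name_a)) == null); // newly added
//     assert((try wip.addFieldName(ip, gpa, name_b)) == null);
//     assert((try wip.addFieldName(ip, gpa, name_a)).? == 0); // duplicate of field 0
//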
+ const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter); + assert(!gop.found_existing); + + const names_map = try ip.addMap(gpa); + + const extra_fields_len: u32 = @typeInfo(EnumAuto).Struct.fields.len; + try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + enum_type.fields_len); + try ip.items.ensureUnusedCapacity(gpa, 1); + + const extra_index = ip.addExtraAssumeCapacity(EnumAuto{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .int_tag_type = int_tag_type, + .names_map = names_map, + .fields_len = enum_type.fields_len, + }); + + ip.items.appendAssumeCapacity(.{ + .tag = .type_enum_auto, + .data = extra_index, + }); + ip.extra.appendNTimesAssumeCapacity(@enumToInt(Index.none), enum_type.fields_len); + return .{ + .index = @intToEnum(Index, ip.items.len - 1), + .tag_ty_index = extra_index + std.meta.fieldIndex(EnumAuto, "int_tag_type").?, + .names_map = names_map, + .names_start = extra_index + extra_fields_len, + .values_map = .none, + .values_start = undefined, + }; +} + +fn getIncompleteEnumExplicit( + ip: *InternPool, + gpa: Allocator, + enum_type: Key.IncompleteEnumType, + tag: Tag, +) Allocator.Error!IncompleteEnumType { + // We must keep the map in sync with `items`. The hash and equality functions + // for enum types only look at the decl field, which is present even in + // an `IncompleteEnumType`. + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const gop = try ip.map.getOrPutAdapted(gpa, enum_type.toKey(), adapter); + assert(!gop.found_existing); + + const names_map = try ip.addMap(gpa); + const values_map: OptionalMapIndex = if (!enum_type.has_values) .none else m: { + const values_map = try ip.addMap(gpa); + break :m values_map.toOptional(); + }; + + const reserved_len = enum_type.fields_len + + if (enum_type.has_values) enum_type.fields_len else 0; + + const extra_fields_len: u32 = @typeInfo(EnumExplicit).Struct.fields.len; + try ip.extra.ensureUnusedCapacity(gpa, extra_fields_len + reserved_len); + try ip.items.ensureUnusedCapacity(gpa, 1); + + const extra_index = ip.addExtraAssumeCapacity(EnumExplicit{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .int_tag_type = enum_type.tag_ty, + .fields_len = enum_type.fields_len, + .names_map = names_map, + .values_map = values_map, + }); + + ip.items.appendAssumeCapacity(.{ + .tag = tag, + .data = extra_index, + }); + // This is both fields and values (if present). 
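// Sketch of the `extra` layout reserved by this function, assuming the
// offsets returned at the end: a fixed `EnumExplicit` header, then
// `fields_len` name slots, then `fields_len` value slots when present.
//
//     extra: [ EnumExplicit header ][ names: fields_len ][ values: fields_len? ]
//
//     names_start  = extra_index + @typeInfo(EnumExplicit).Struct.fields.len
//     values_start = names_start + fields_len
//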
+ ip.extra.appendNTimesAssumeCapacity(@enumToInt(Index.none), reserved_len); + return .{ + .index = @intToEnum(Index, ip.items.len - 1), + .tag_ty_index = extra_index + std.meta.fieldIndex(EnumExplicit, "int_tag_type").?, + .names_map = names_map, + .names_start = extra_index + extra_fields_len, + .values_map = values_map, + .values_start = extra_index + extra_fields_len + enum_type.fields_len, + }; +} + +pub fn finishGetEnum( + ip: *InternPool, + gpa: Allocator, + enum_type: Key.EnumType, + tag: Tag, +) Allocator.Error!Index { + const names_map = try ip.addMap(gpa); + try addStringsToMap(ip, gpa, names_map, enum_type.names); + + const values_map: OptionalMapIndex = if (enum_type.values.len == 0) .none else m: { + const values_map = try ip.addMap(gpa); + try addIndexesToMap(ip, gpa, values_map, enum_type.values); + break :m values_map.toOptional(); + }; + const fields_len = @intCast(u32, enum_type.names.len); + try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(EnumExplicit).Struct.fields.len + + fields_len); + ip.items.appendAssumeCapacity(.{ + .tag = tag, + .data = ip.addExtraAssumeCapacity(EnumExplicit{ + .decl = enum_type.decl, + .namespace = enum_type.namespace, + .int_tag_type = enum_type.tag_ty, + .fields_len = fields_len, + .names_map = names_map, + .values_map = values_map, + }), + }); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.names)); + ip.extra.appendSliceAssumeCapacity(@ptrCast([]const u32, enum_type.values)); + return @intToEnum(Index, ip.items.len - 1); +} + +pub fn getIfExists(ip: *const InternPool, key: Key) ?Index { + const adapter: KeyAdapter = .{ .intern_pool = ip }; + const index = ip.map.getIndexAdapted(key, adapter) orelse return null; + return @intToEnum(Index, index); +} + +pub fn getAssumeExists(ip: *const InternPool, key: Key) Index { + return ip.getIfExists(key).?; +} + +fn addStringsToMap( + ip: *InternPool, + gpa: Allocator, + map_index: MapIndex, + strings: []const NullTerminatedString, +) Allocator.Error!void { + const map = &ip.maps.items[@enumToInt(map_index)]; + const adapter: NullTerminatedString.Adapter = .{ .strings = strings }; + for (strings) |string| { + const gop = try map.getOrPutAdapted(gpa, string, adapter); + assert(!gop.found_existing); + } +} + +fn addIndexesToMap( + ip: *InternPool, + gpa: Allocator, + map_index: MapIndex, + indexes: []const Index, +) Allocator.Error!void { + const map = &ip.maps.items[@enumToInt(map_index)]; + const adapter: Index.Adapter = .{ .indexes = indexes }; + for (indexes) |index| { + const gop = try map.getOrPutAdapted(gpa, index, adapter); + assert(!gop.found_existing); + } +} + +fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex { + const ptr = try ip.maps.addOne(gpa); + ptr.* = .{}; + return @intToEnum(MapIndex, ip.maps.items.len - 1); +} + +/// This operation only happens under compile error conditions. +/// Leak the index until the next garbage collection. +/// TODO: this is a bit problematic to implement, can we get away without it? 
+pub const remove = @compileError("InternPool.remove is not currently a supported operation; put a TODO there instead"); + +fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void { + const limbs_len = @intCast(u32, limbs.len); + try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len); + ip.items.appendAssumeCapacity(.{ + .tag = tag, + .data = ip.addLimbsExtraAssumeCapacity(Int{ + .ty = ty, + .limbs_len = limbs_len, + }), + }); + ip.addLimbsAssumeCapacity(limbs); +} + fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32 { - const fields = std.meta.fields(@TypeOf(extra)); + const fields = @typeInfo(@TypeOf(extra)).Struct.fields; try ip.extra.ensureUnusedCapacity(gpa, fields.len); return ip.addExtraAssumeCapacity(extra); } fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { - const fields = std.meta.fields(@TypeOf(extra)); const result = @intCast(u32, ip.extra.items.len); - inline for (fields) |field| { + inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| { ip.extra.appendAssumeCapacity(switch (field.type) { u32 => @field(extra, field.name), Index => @enumToInt(@field(extra, field.name)), + Module.Decl.Index => @enumToInt(@field(extra, field.name)), + Module.Namespace.Index => @enumToInt(@field(extra, field.name)), + Module.Namespace.OptionalIndex => @enumToInt(@field(extra, field.name)), + Module.Fn.Index => @enumToInt(@field(extra, field.name)), + MapIndex => @enumToInt(@field(extra, field.name)), + OptionalMapIndex => @enumToInt(@field(extra, field.name)), + RuntimeIndex => @enumToInt(@field(extra, field.name)), + String => @enumToInt(@field(extra, field.name)), + NullTerminatedString => @enumToInt(@field(extra, field.name)), + OptionalNullTerminatedString => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), - else => @compileError("bad field type"), + Tag.TypePointer.Flags => @bitCast(u32, @field(extra, field.name)), + TypeFunction.Flags => @bitCast(u32, @field(extra, field.name)), + Tag.TypePointer.PackedOffset => @bitCast(u32, @field(extra, field.name)), + Tag.TypePointer.VectorIndex => @enumToInt(@field(extra, field.name)), + Tag.Variable.Flags => @bitCast(u32, @field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), }); } return result; } -fn extraData(ip: InternPool, comptime T: type, index: usize) T { - const fields = std.meta.fields(T); - var i: usize = index; - var result: T = undefined; - inline for (fields) |field| { - @field(result, field.name) = switch (field.type) { - u32 => ip.extra.items[i], - Index => @intToEnum(Index, ip.extra.items[i]), - i32 => @bitCast(i32, ip.extra.items[i]), - else => @compileError("bad field type"), +fn reserveLimbs(ip: *InternPool, gpa: Allocator, n: usize) !void { + switch (@sizeOf(Limb)) { + @sizeOf(u32) => try ip.extra.ensureUnusedCapacity(gpa, n), + @sizeOf(u64) => try ip.limbs.ensureUnusedCapacity(gpa, n), + else => @compileError("unsupported host"), + } +} + +fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { + switch (@sizeOf(Limb)) { + @sizeOf(u32) => return addExtraAssumeCapacity(ip, extra), + @sizeOf(u64) => {}, + else => @compileError("unsupported host"), + } + const result = @intCast(u32, ip.limbs.items.len); + inline for (@typeInfo(@TypeOf(extra)).Struct.fields, 0..) 
|field, i| { + const new: u32 = switch (field.type) { + u32 => @field(extra, field.name), + Index => @enumToInt(@field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), }; - i += 1; + if (i % 2 == 0) { + ip.limbs.appendAssumeCapacity(new); + } else { + ip.limbs.items[ip.limbs.items.len - 1] |= @as(u64, new) << 32; + } } return result; } +fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void { + switch (@sizeOf(Limb)) { + @sizeOf(u32) => ip.extra.appendSliceAssumeCapacity(limbs), + @sizeOf(u64) => ip.limbs.appendSliceAssumeCapacity(limbs), + else => @compileError("unsupported host"), + } +} + +fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: usize } { + var result: T = undefined; + const fields = @typeInfo(T).Struct.fields; + inline for (fields, 0..) |field, i| { + const int32 = ip.extra.items[i + index]; + @field(result, field.name) = switch (field.type) { + u32 => int32, + Index => @intToEnum(Index, int32), + Module.Decl.Index => @intToEnum(Module.Decl.Index, int32), + Module.Namespace.Index => @intToEnum(Module.Namespace.Index, int32), + Module.Namespace.OptionalIndex => @intToEnum(Module.Namespace.OptionalIndex, int32), + Module.Fn.Index => @intToEnum(Module.Fn.Index, int32), + MapIndex => @intToEnum(MapIndex, int32), + OptionalMapIndex => @intToEnum(OptionalMapIndex, int32), + RuntimeIndex => @intToEnum(RuntimeIndex, int32), + String => @intToEnum(String, int32), + NullTerminatedString => @intToEnum(NullTerminatedString, int32), + OptionalNullTerminatedString => @intToEnum(OptionalNullTerminatedString, int32), + i32 => @bitCast(i32, int32), + Tag.TypePointer.Flags => @bitCast(Tag.TypePointer.Flags, int32), + TypeFunction.Flags => @bitCast(TypeFunction.Flags, int32), + Tag.TypePointer.PackedOffset => @bitCast(Tag.TypePointer.PackedOffset, int32), + Tag.TypePointer.VectorIndex => @intToEnum(Tag.TypePointer.VectorIndex, int32), + Tag.Variable.Flags => @bitCast(Tag.Variable.Flags, int32), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }; + } + return .{ + .data = result, + .end = index + fields.len, + }; +} + +fn extraData(ip: *const InternPool, comptime T: type, index: usize) T { + return extraDataTrail(ip, T, index).data; +} + +/// Asserts the struct has 32-bit fields and the number of fields is evenly divisible by 2. +fn limbData(ip: *const InternPool, comptime T: type, index: usize) T { + switch (@sizeOf(Limb)) { + @sizeOf(u32) => return extraData(ip, T, index), + @sizeOf(u64) => {}, + else => @compileError("unsupported host"), + } + var result: T = undefined; + inline for (@typeInfo(T).Struct.fields, 0..) |field, i| { + const host_int = ip.limbs.items[index + i / 2]; + const int32 = if (i % 2 == 0) + @truncate(u32, host_int) + else + @truncate(u32, host_int >> 32); + + @field(result, field.name) = switch (field.type) { + u32 => int32, + Index => @intToEnum(Index, int32), + else => @compileError("bad field type: " ++ @typeName(field.type)), + }; + } + return result; +} + +/// This function returns the Limb slice that is trailing data after a payload. 
+fn limbSlice(ip: *const InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb { + const field_count = @typeInfo(S).Struct.fields.len; + switch (@sizeOf(Limb)) { + @sizeOf(u32) => { + const start = limb_index + field_count; + return ip.extra.items[start..][0..len]; + }, + @sizeOf(u64) => { + const start = limb_index + @divExact(field_count, 2); + return ip.limbs.items[start..][0..len]; + }, + else => @compileError("unsupported host"), + } +} + +const LimbsAsIndexes = struct { + start: u32, + len: u32, +}; + +fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes { + const host_slice = switch (@sizeOf(Limb)) { + @sizeOf(u32) => ip.extra.items, + @sizeOf(u64) => ip.limbs.items, + else => @compileError("unsupported host"), + }; + // TODO: https://github.com/ziglang/zig/issues/1738 + return .{ + .start = @intCast(u32, @divExact(@ptrToInt(limbs.ptr) - @ptrToInt(host_slice.ptr), @sizeOf(Limb))), + .len = @intCast(u32, limbs.len), + }; +} + +/// This function converts Limb array indexes to a primitive slice type. +fn limbsIndexToSlice(ip: *const InternPool, limbs: LimbsAsIndexes) []const Limb { + return switch (@sizeOf(Limb)) { + @sizeOf(u32) => ip.extra.items[limbs.start..][0..limbs.len], + @sizeOf(u64) => ip.limbs.items[limbs.start..][0..limbs.len], + else => @compileError("unsupported host"), + }; +} + test "basic usage" { const gpa = std.testing.allocator; @@ -314,3 +4518,1275 @@ test "basic usage" { } }); try std.testing.expect(another_array_i32 == array_i32); } + +pub fn childType(ip: *const InternPool, i: Index) Index { + return switch (ip.indexToKey(i)) { + .ptr_type => |ptr_type| ptr_type.child, + .vector_type => |vector_type| vector_type.child, + .array_type => |array_type| array_type.child, + .opt_type, .anyframe_type => |child| child, + else => unreachable, + }; +} + +/// Given a slice type, returns the type of the ptr field. +pub fn slicePtrType(ip: *const InternPool, i: Index) Index { + switch (i) { + .slice_const_u8_type => return .manyptr_const_u8_type, + .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type, + else => {}, + } + const item = ip.items.get(@enumToInt(i)); + switch (item.tag) { + .type_slice => return @intToEnum(Index, item.data), + else => unreachable, // not a slice type + } +} + +/// Given a slice value, returns the value of the ptr field. +pub fn slicePtr(ip: *const InternPool, i: Index) Index { + const item = ip.items.get(@enumToInt(i)); + switch (item.tag) { + .ptr_slice => return ip.extraData(PtrSlice, item.data).ptr, + else => unreachable, // not a slice value + } +} + +/// Given a slice value, returns the value of the len field. +pub fn sliceLen(ip: *const InternPool, i: Index) Index { + const item = ip.items.get(@enumToInt(i)); + switch (item.tag) { + .ptr_slice => return ip.extraData(PtrSlice, item.data).len, + else => unreachable, // not a slice value + } +} + +/// Given an existing value, returns the same value but with the supplied type. 
+/// Only some combinations are allowed: +/// * identity coercion +/// * undef => any +/// * int <=> int +/// * int <=> enum +/// * enum_literal => enum +/// * ptr <=> ptr +/// * opt ptr <=> ptr +/// * opt ptr <=> opt ptr +/// * int <=> ptr +/// * null_value => opt +/// * payload => opt +/// * error set <=> error set +/// * error union <=> error union +/// * error set => error union +/// * payload => error union +/// * fn <=> fn +pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Allocator.Error!Index { + const old_ty = ip.typeOf(val); + if (old_ty == new_ty) return val; + switch (val) { + .undef => return ip.get(gpa, .{ .undef = new_ty }), + .null_value => if (ip.isOptionalType(new_ty)) + return ip.get(gpa, .{ .opt = .{ + .ty = new_ty, + .val = .none, + } }) + else if (ip.isPointerType(new_ty)) + return ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = .{ .int = .zero_usize }, + .len = switch (ip.indexToKey(new_ty).ptr_type.flags.size) { + .One, .Many, .C => .none, + .Slice => try ip.get(gpa, .{ .undef = .usize_type }), + }, + } }), + else => switch (ip.indexToKey(val)) { + .undef => return ip.get(gpa, .{ .undef = new_ty }), + .extern_func => |extern_func| if (ip.isFunctionType(new_ty)) + return ip.get(gpa, .{ .extern_func = .{ + .ty = new_ty, + .decl = extern_func.decl, + .lib_name = extern_func.lib_name, + } }), + .func => |func| if (ip.isFunctionType(new_ty)) + return ip.get(gpa, .{ .func = .{ + .ty = new_ty, + .index = func.index, + } }), + .int => |int| switch (ip.indexToKey(new_ty)) { + .enum_type => |enum_type| return ip.get(gpa, .{ .enum_tag = .{ + .ty = new_ty, + .int = try ip.getCoerced(gpa, val, enum_type.tag_ty), + } }), + .ptr_type => return ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = .{ .int = try ip.getCoerced(gpa, val, .usize_type) }, + } }), + else => if (ip.isIntegerType(new_ty)) + return getCoercedInts(ip, gpa, int, new_ty), + }, + .enum_tag => |enum_tag| if (ip.isIntegerType(new_ty)) + return getCoercedInts(ip, gpa, ip.indexToKey(enum_tag.int).int, new_ty), + .enum_literal => |enum_literal| switch (ip.indexToKey(new_ty)) { + .enum_type => |enum_type| { + const index = enum_type.nameIndex(ip, enum_literal).?; + return ip.get(gpa, .{ .enum_tag = .{ + .ty = new_ty, + .int = if (enum_type.values.len != 0) + enum_type.values[index] + else + try ip.get(gpa, .{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = index }, + } }), + } }); + }, + else => {}, + }, + .ptr => |ptr| if (ip.isPointerType(new_ty)) + return ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = ptr.addr, + .len = ptr.len, + } }) + else if (ip.isIntegerType(new_ty)) + switch (ptr.addr) { + .int => |int| return ip.getCoerced(gpa, int, new_ty), + else => {}, + }, + .opt => |opt| switch (ip.indexToKey(new_ty)) { + .ptr_type => |ptr_type| return switch (opt.val) { + .none => try ip.get(gpa, .{ .ptr = .{ + .ty = new_ty, + .addr = .{ .int = .zero_usize }, + .len = switch (ptr_type.flags.size) { + .One, .Many, .C => .none, + .Slice => try ip.get(gpa, .{ .undef = .usize_type }), + }, + } }), + else => |payload| try ip.getCoerced(gpa, payload, new_ty), + }, + .opt_type => |child_type| return try ip.get(gpa, .{ .opt = .{ + .ty = new_ty, + .val = switch (opt.val) { + .none => .none, + else => try ip.getCoerced(gpa, opt.val, child_type), + }, + } }), + else => {}, + }, + .err => |err| if (ip.isErrorSetType(new_ty)) + return ip.get(gpa, .{ .err = .{ + .ty = new_ty, + .name = err.name, + } }) + else if (ip.isErrorUnionType(new_ty)) + return ip.get(gpa, .{ .error_union = .{ + .ty 
= new_ty, + .val = .{ .err_name = err.name }, + } }), + .error_union => |error_union| if (ip.isErrorUnionType(new_ty)) + return ip.get(gpa, .{ .error_union = .{ + .ty = new_ty, + .val = error_union.val, + } }), + else => {}, + }, + } + switch (ip.indexToKey(new_ty)) { + .opt_type => |child_type| switch (val) { + .null_value => return ip.get(gpa, .{ .opt = .{ + .ty = new_ty, + .val = .none, + } }), + else => return ip.get(gpa, .{ .opt = .{ + .ty = new_ty, + .val = try ip.getCoerced(gpa, val, child_type), + } }), + }, + .error_union_type => |error_union_type| return ip.get(gpa, .{ .error_union = .{ + .ty = new_ty, + .val = .{ .payload = try ip.getCoerced(gpa, val, error_union_type.payload_type) }, + } }), + else => {}, + } + if (std.debug.runtime_safety) { + std.debug.panic("InternPool.getCoerced of {s} not implemented from {s} to {s}", .{ + @tagName(ip.indexToKey(val)), + @tagName(ip.indexToKey(old_ty)), + @tagName(ip.indexToKey(new_ty)), + }); + } + unreachable; +} + +/// Asserts `val` has an integer type. +/// Assumes `new_ty` is an integer type. +pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Index) Allocator.Error!Index { + // The key cannot be passed directly to `get`, otherwise in the case of + // big_int storage, the limbs would be invalidated before they are read. + // Here we pre-reserve the limbs to ensure that the logic in `addInt` will + // not use an invalidated limbs pointer. + const new_storage: Key.Int.Storage = switch (int.storage) { + .u64, .i64, .lazy_align, .lazy_size => int.storage, + .big_int => |big_int| storage: { + const positive = big_int.positive; + const limbs = ip.limbsSliceToIndex(big_int.limbs); + // This line invalidates the limbs slice, but the indexes computed in the + // previous line are still correct. 
+ try reserveLimbs(ip, gpa, @typeInfo(Int).Struct.fields.len + big_int.limbs.len); + break :storage .{ .big_int = .{ + .limbs = ip.limbsIndexToSlice(limbs), + .positive = positive, + } }; + }, + }; + return ip.get(gpa, .{ .int = .{ + .ty = new_ty, + .storage = new_storage, + } }); +} + +pub fn indexToStructType(ip: *const InternPool, val: Index) Module.Struct.OptionalIndex { + assert(val != .none); + const tags = ip.items.items(.tag); + if (tags[@enumToInt(val)] != .type_struct) return .none; + const datas = ip.items.items(.data); + return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional(); +} + +pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.OptionalIndex { + assert(val != .none); + const tags = ip.items.items(.tag); + switch (tags[@enumToInt(val)]) { + .type_union_tagged, .type_union_untagged, .type_union_safety => {}, + else => return .none, + } + const datas = ip.items.items(.data); + return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional(); +} + +pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType { + assert(val != .none); + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + switch (tags[@enumToInt(val)]) { + .type_function => return indexToKeyFuncType(ip, datas[@enumToInt(val)]), + else => return null, + } +} + +pub fn indexToFunc(ip: *const InternPool, val: Index) Module.Fn.OptionalIndex { + assert(val != .none); + const tags = ip.items.items(.tag); + if (tags[@enumToInt(val)] != .func) return .none; + const datas = ip.items.items(.data); + return ip.extraData(Tag.Func, datas[@enumToInt(val)]).index.toOptional(); +} + +pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex { + assert(val != .none); + const tags = ip.items.items(.tag); + if (tags[@enumToInt(val)] != .type_inferred_error_set) return .none; + const datas = ip.items.items(.data); + return @intToEnum(Module.Fn.InferredErrorSet.Index, datas[@enumToInt(val)]).toOptional(); +} + +/// includes .comptime_int_type +pub fn isIntegerType(ip: *const InternPool, ty: Index) bool { + return switch (ty) { + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .comptime_int_type, + => true, + else => ip.indexToKey(ty) == .int_type, + }; +} + +/// does not include .enum_literal_type +pub fn isEnumType(ip: *const InternPool, ty: Index) bool { + return switch (ty) { + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + => true, + else => ip.indexToKey(ty) == .enum_type, + }; +} + +pub fn isFunctionType(ip: *const InternPool, ty: Index) bool { + return ip.indexToKey(ty) == .func_type; +} + +pub fn isPointerType(ip: *const InternPool, ty: Index) bool { + return ip.indexToKey(ty) == .ptr_type; +} + +pub fn isOptionalType(ip: *const InternPool, ty: Index) bool { + return ip.indexToKey(ty) == .opt_type; +} + +/// includes .inferred_error_set_type +pub fn isErrorSetType(ip: *const InternPool, ty: Index) bool { + return ty == .anyerror_type or switch (ip.indexToKey(ty)) { + .error_set_type, .inferred_error_set_type => true, + else => false, + }; +} + +pub fn isInferredErrorSetType(ip: *const InternPool, ty: Index) bool { + return ip.indexToKey(ty) == .inferred_error_set_type; +} + +pub fn isErrorUnionType(ip: *const InternPool, 
ty: Index) bool { + return ip.indexToKey(ty) == .error_union_type; +} + +pub fn isAggregateType(ip: *const InternPool, ty: Index) bool { + return switch (ip.indexToKey(ty)) { + .array_type, .vector_type, .anon_struct_type, .struct_type => true, + else => false, + }; +} + +/// This is only legal because the initializer is not part of the hash. +pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void { + const item = ip.items.get(@enumToInt(index)); + assert(item.tag == .variable); + ip.extra.items[item.data + std.meta.fieldIndex(Tag.Variable, "init").?] = @enumToInt(init_index); +} + +pub fn dump(ip: *const InternPool) void { + dumpStatsFallible(ip, std.heap.page_allocator) catch return; + dumpAllFallible(ip) catch return; +} + +fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { + const items_size = (1 + 4) * ip.items.len; + const extra_size = 4 * ip.extra.items.len; + const limbs_size = 8 * ip.limbs.items.len; + // TODO: fields size is not taken into account + const structs_size = ip.allocated_structs.len * + (@sizeOf(Module.Struct) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); + const unions_size = ip.allocated_unions.len * + (@sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl)); + const funcs_size = ip.allocated_funcs.len * + (@sizeOf(Module.Fn) + @sizeOf(Module.Decl)); + + // TODO: map overhead size is not taken into account + const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size + + structs_size + unions_size + funcs_size; + + std.debug.print( + \\InternPool size: {d} bytes + \\ {d} items: {d} bytes + \\ {d} extra: {d} bytes + \\ {d} limbs: {d} bytes + \\ {d} structs: {d} bytes + \\ {d} unions: {d} bytes + \\ {d} funcs: {d} bytes + \\ + , .{ + total_size, + ip.items.len, + items_size, + ip.extra.items.len, + extra_size, + ip.limbs.items.len, + limbs_size, + ip.allocated_structs.len, + structs_size, + ip.allocated_unions.len, + unions_size, + ip.allocated_funcs.len, + funcs_size, + }); + + const tags = ip.items.items(.tag); + const datas = ip.items.items(.data); + const TagStats = struct { + count: usize = 0, + bytes: usize = 0, + }; + var counts = std.AutoArrayHashMap(Tag, TagStats).init(arena); + for (tags, datas) |tag, data| { + const gop = try counts.getOrPut(tag); + if (!gop.found_existing) gop.value_ptr.* = .{}; + gop.value_ptr.count += 1; + gop.value_ptr.bytes += 1 + 4 + @as(usize, switch (tag) { + .type_int_signed => 0, + .type_int_unsigned => 0, + .type_array_small => @sizeOf(Vector), + .type_array_big => @sizeOf(Array), + .type_vector => @sizeOf(Vector), + .type_pointer => @sizeOf(Tag.TypePointer), + .type_slice => 0, + .type_optional => 0, + .type_anyframe => 0, + .type_error_union => @sizeOf(Key.ErrorUnionType), + .type_error_set => b: { + const info = ip.extraData(ErrorSet, data); + break :b @sizeOf(ErrorSet) + (@sizeOf(u32) * info.names_len); + }, + .type_inferred_error_set => @sizeOf(Module.Fn.InferredErrorSet), + .type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit), + .type_enum_auto => @sizeOf(EnumAuto), + .type_opaque => @sizeOf(Key.OpaqueType), + .type_struct => b: { + const struct_index = @intToEnum(Module.Struct.Index, data); + const struct_obj = ip.structPtrConst(struct_index); + break :b @sizeOf(Module.Struct) + + @sizeOf(Module.Namespace) + + @sizeOf(Module.Decl) + + (struct_obj.fields.count() * @sizeOf(Module.Struct.Field)); + }, + .type_struct_ns => @sizeOf(Module.Namespace), + .type_struct_anon => b: { + const info = ip.extraData(TypeStructAnon,
data); + break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len); + }, + .type_tuple_anon => b: { + const info = ip.extraData(TypeStructAnon, data); + break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len); + }, + + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + => @sizeOf(Module.Union) + @sizeOf(Module.Namespace) + @sizeOf(Module.Decl), + + .type_function => b: { + const info = ip.extraData(TypeFunction, data); + break :b @sizeOf(TypeFunction) + (@sizeOf(Index) * info.params_len); + }, + + .undef => 0, + .runtime_value => @sizeOf(Tag.TypeValue), + .simple_type => 0, + .simple_value => 0, + .ptr_decl => @sizeOf(PtrDecl), + .ptr_mut_decl => @sizeOf(PtrMutDecl), + .ptr_comptime_field => @sizeOf(PtrComptimeField), + .ptr_int => @sizeOf(PtrBase), + .ptr_eu_payload => @sizeOf(PtrBase), + .ptr_opt_payload => @sizeOf(PtrBase), + .ptr_elem => @sizeOf(PtrBaseIndex), + .ptr_field => @sizeOf(PtrBaseIndex), + .ptr_slice => @sizeOf(PtrSlice), + .opt_null => 0, + .opt_payload => @sizeOf(Tag.TypeValue), + .int_u8 => 0, + .int_u16 => 0, + .int_u32 => 0, + .int_i32 => 0, + .int_usize => 0, + .int_comptime_int_u32 => 0, + .int_comptime_int_i32 => 0, + .int_small => @sizeOf(IntSmall), + + .int_positive, + .int_negative, + => b: { + const int = ip.limbData(Int, data); + break :b @sizeOf(Int) + int.limbs_len * 8; + }, + + .int_lazy_align, .int_lazy_size => @sizeOf(IntLazy), + + .error_set_error, .error_union_error => @sizeOf(Key.Error), + .error_union_payload => @sizeOf(Tag.TypeValue), + .enum_literal => 0, + .enum_tag => @sizeOf(Tag.EnumTag), + + .bytes => b: { + const info = ip.extraData(Bytes, data); + const len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty)); + break :b @sizeOf(Bytes) + len + + @boolToInt(ip.string_bytes.items[@enumToInt(info.bytes) + len - 1] != 0); + }, + .aggregate => b: { + const info = ip.extraData(Tag.Aggregate, data); + const fields_len = @intCast(u32, ip.aggregateTypeLenIncludingSentinel(info.ty)); + break :b @sizeOf(Tag.Aggregate) + (@sizeOf(Index) * fields_len); + }, + .repeated => @sizeOf(Repeated), + + .float_f16 => 0, + .float_f32 => 0, + .float_f64 => @sizeOf(Float64), + .float_f80 => @sizeOf(Float80), + .float_f128 => @sizeOf(Float128), + .float_c_longdouble_f80 => @sizeOf(Float80), + .float_c_longdouble_f128 => @sizeOf(Float128), + .float_comptime_float => @sizeOf(Float128), + .variable => @sizeOf(Tag.Variable) + @sizeOf(Module.Decl), + .extern_func => @sizeOf(Tag.ExternFunc) + @sizeOf(Module.Decl), + .func => @sizeOf(Tag.Func) + @sizeOf(Module.Fn) + @sizeOf(Module.Decl), + .only_possible_value => 0, + .union_value => @sizeOf(Key.Union), + + .memoized_call => b: { + const info = ip.extraData(MemoizedCall, data); + break :b @sizeOf(MemoizedCall) + (@sizeOf(Index) * info.args_len); + }, + }); + } + const SortContext = struct { + map: *std.AutoArrayHashMap(Tag, TagStats), + pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { + const values = ctx.map.values(); + return values[a_index].bytes > values[b_index].bytes; + //return values[a_index].count > values[b_index].count; + } + }; + counts.sort(SortContext{ .map = &counts }); + const len = @min(50, counts.count()); + std.debug.print(" top 50 tags:\n", .{}); + for (counts.keys()[0..len], counts.values()[0..len]) |tag, stats| { + std.debug.print(" {s}: {d} occurrences, {d} total bytes\n", .{ + @tagName(tag), stats.count, stats.bytes, + }); + } +} + +fn dumpAllFallible(ip: *const InternPool) anyerror!void { + const tags = ip.items.items(.tag); + 
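// The per-item accounting above, as a hedged formula (assuming one tag byte
// plus four data bytes per item, plus the tag's fixed payload and trailing
// array, which is how the switch above sizes each tag):
//
//     item_bytes = 1 + 4 + payload_bytes + trailing_len * trailing_elem_size
//
// For example, an `.aggregate` with three fields and no sentinel costs
// 1 + 4 + @sizeOf(Tag.Aggregate) + 3 * @sizeOf(Index) bytes.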
const datas = ip.items.items(.data); + var bw = std.io.bufferedWriter(std.io.getStdErr().writer()); + const w = bw.writer(); + for (tags, datas, 0..) |tag, data, i| { + try w.print("${d} = {s}(", .{ i, @tagName(tag) }); + switch (tag) { + .simple_type => try w.print("{s}", .{@tagName(@intToEnum(SimpleType, data))}), + .simple_value => try w.print("{s}", .{@tagName(@intToEnum(SimpleValue, data))}), + + .type_int_signed, + .type_int_unsigned, + .type_array_small, + .type_array_big, + .type_vector, + .type_pointer, + .type_optional, + .type_anyframe, + .type_error_union, + .type_error_set, + .type_inferred_error_set, + .type_enum_explicit, + .type_enum_nonexhaustive, + .type_enum_auto, + .type_opaque, + .type_struct, + .type_struct_ns, + .type_struct_anon, + .type_tuple_anon, + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + .type_function, + .undef, + .runtime_value, + .ptr_decl, + .ptr_mut_decl, + .ptr_comptime_field, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .int_u8, + .int_u16, + .int_u32, + .int_i32, + .int_usize, + .int_comptime_int_u32, + .int_comptime_int_i32, + .int_small, + .int_positive, + .int_negative, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .error_union_payload, + .enum_literal, + .enum_tag, + .bytes, + .aggregate, + .repeated, + .float_f16, + .float_f32, + .float_f64, + .float_f80, + .float_f128, + .float_c_longdouble_f80, + .float_c_longdouble_f128, + .float_comptime_float, + .variable, + .extern_func, + .func, + .union_value, + .memoized_call, + => try w.print("{d}", .{data}), + + .opt_null, + .type_slice, + .only_possible_value, + => try w.print("${d}", .{data}), + } + try w.writeAll(")\n"); + } + try bw.flush(); +} + +pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct { + return ip.allocated_structs.at(@enumToInt(index)); +} + +pub fn structPtrConst(ip: *const InternPool, index: Module.Struct.Index) *const Module.Struct { + return ip.allocated_structs.at(@enumToInt(index)); +} + +pub fn structPtrUnwrapConst(ip: *const InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct { + return structPtrConst(ip, index.unwrap() orelse return null); +} + +pub fn unionPtr(ip: *InternPool, index: Module.Union.Index) *Module.Union { + return ip.allocated_unions.at(@enumToInt(index)); +} + +pub fn unionPtrConst(ip: *const InternPool, index: Module.Union.Index) *const Module.Union { + return ip.allocated_unions.at(@enumToInt(index)); +} + +pub fn funcPtr(ip: *InternPool, index: Module.Fn.Index) *Module.Fn { + return ip.allocated_funcs.at(@enumToInt(index)); +} + +pub fn funcPtrConst(ip: *const InternPool, index: Module.Fn.Index) *const Module.Fn { + return ip.allocated_funcs.at(@enumToInt(index)); +} + +pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.Index) *Module.Fn.InferredErrorSet { + return ip.allocated_inferred_error_sets.at(@enumToInt(index)); +} + +pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet { + return ip.allocated_inferred_error_sets.at(@enumToInt(index)); +} + +pub fn createStruct( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Struct, +) Allocator.Error!Module.Struct.Index { + if (ip.structs_free_list.popOrNull()) |index| { + ip.allocated_structs.at(@enumToInt(index)).* = initialization; + return index; + } + const ptr = try ip.allocated_structs.addOne(gpa); + ptr.* = initialization; + 
return @intToEnum(Module.Struct.Index, ip.allocated_structs.len - 1); +} + +pub fn destroyStruct(ip: *InternPool, gpa: Allocator, index: Module.Struct.Index) void { + ip.structPtr(index).* = undefined; + ip.structs_free_list.append(gpa, index) catch { + // In order to keep `destroyStruct` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Struct until garbage collection. + }; +} + +pub fn createUnion( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Union, +) Allocator.Error!Module.Union.Index { + if (ip.unions_free_list.popOrNull()) |index| { + ip.allocated_unions.at(@enumToInt(index)).* = initialization; + return index; + } + const ptr = try ip.allocated_unions.addOne(gpa); + ptr.* = initialization; + return @intToEnum(Module.Union.Index, ip.allocated_unions.len - 1); +} + +pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index) void { + ip.unionPtr(index).* = undefined; + ip.unions_free_list.append(gpa, index) catch { + // In order to keep `destroyUnion` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Union until garbage collection. + }; +} + +pub fn createFunc( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Fn, +) Allocator.Error!Module.Fn.Index { + if (ip.funcs_free_list.popOrNull()) |index| { + ip.allocated_funcs.at(@enumToInt(index)).* = initialization; + return index; + } + const ptr = try ip.allocated_funcs.addOne(gpa); + ptr.* = initialization; + return @intToEnum(Module.Fn.Index, ip.allocated_funcs.len - 1); +} + +pub fn destroyFunc(ip: *InternPool, gpa: Allocator, index: Module.Fn.Index) void { + ip.funcPtr(index).* = undefined; + ip.funcs_free_list.append(gpa, index) catch { + // In order to keep `destroyFunc` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Fn until garbage collection. + }; +} + +pub fn createInferredErrorSet( + ip: *InternPool, + gpa: Allocator, + initialization: Module.Fn.InferredErrorSet, +) Allocator.Error!Module.Fn.InferredErrorSet.Index { + if (ip.inferred_error_sets_free_list.popOrNull()) |index| { + ip.allocated_inferred_error_sets.at(@enumToInt(index)).* = initialization; + return index; + } + const ptr = try ip.allocated_inferred_error_sets.addOne(gpa); + ptr.* = initialization; + return @intToEnum(Module.Fn.InferredErrorSet.Index, ip.allocated_inferred_error_sets.len - 1); +} + +pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.Fn.InferredErrorSet.Index) void { + ip.inferredErrorSetPtr(index).* = undefined; + ip.inferred_error_sets_free_list.append(gpa, index) catch { + // In order to keep `destroyInferredErrorSet` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the InferredErrorSet until garbage collection. 
+ }; +} + +pub fn getOrPutString( + ip: *InternPool, + gpa: Allocator, + s: []const u8, +) Allocator.Error!NullTerminatedString { + try ip.string_bytes.ensureUnusedCapacity(gpa, s.len + 1); + ip.string_bytes.appendSliceAssumeCapacity(s); + ip.string_bytes.appendAssumeCapacity(0); + return ip.getOrPutTrailingString(gpa, s.len + 1); +} + +pub fn getOrPutStringFmt( + ip: *InternPool, + gpa: Allocator, + comptime format: []const u8, + args: anytype, +) Allocator.Error!NullTerminatedString { + // ensure that references to string_bytes in args do not get invalidated + const len = @intCast(usize, std.fmt.count(format, args) + 1); + try ip.string_bytes.ensureUnusedCapacity(gpa, len); + ip.string_bytes.writer(undefined).print(format, args) catch unreachable; + ip.string_bytes.appendAssumeCapacity(0); + return ip.getOrPutTrailingString(gpa, len); +} + +pub fn getOrPutStringOpt( + ip: *InternPool, + gpa: Allocator, + optional_string: ?[]const u8, +) Allocator.Error!OptionalNullTerminatedString { + const s = optional_string orelse return .none; + const interned = try getOrPutString(ip, gpa, s); + return interned.toOptional(); +} + +/// Uses the last len bytes of ip.string_bytes as the key. +pub fn getOrPutTrailingString( + ip: *InternPool, + gpa: Allocator, + len: usize, +) Allocator.Error!NullTerminatedString { + const string_bytes = &ip.string_bytes; + const str_index = @intCast(u32, string_bytes.items.len - len); + if (len > 0 and string_bytes.getLast() == 0) { + _ = string_bytes.pop(); + } else { + try string_bytes.ensureUnusedCapacity(gpa, 1); + } + const key: []const u8 = string_bytes.items[str_index..]; + const gop = try ip.string_table.getOrPutContextAdapted(gpa, key, std.hash_map.StringIndexAdapter{ + .bytes = string_bytes, + }, std.hash_map.StringIndexContext{ + .bytes = string_bytes, + }); + if (gop.found_existing) { + string_bytes.shrinkRetainingCapacity(str_index); + return @intToEnum(NullTerminatedString, gop.key_ptr.*); + } else { + gop.key_ptr.* = str_index; + string_bytes.appendAssumeCapacity(0); + return @intToEnum(NullTerminatedString, str_index); + } +} + +pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString { + if (ip.string_table.getKeyAdapted(s, std.hash_map.StringIndexAdapter{ + .bytes = &ip.string_bytes, + })) |index| { + return @intToEnum(NullTerminatedString, index).toOptional(); + } else { + return .none; + } +} + +pub fn stringToSlice(ip: *const InternPool, s: NullTerminatedString) [:0]const u8 { + const string_bytes = ip.string_bytes.items; + const start = @enumToInt(s); + var end: usize = start; + while (string_bytes[end] != 0) end += 1; + return string_bytes[start..end :0]; +} + +pub fn stringToSliceUnwrap(ip: *const InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 { + return ip.stringToSlice(s.unwrap() orelse return null); +} + +pub fn stringEqlSlice(ip: *const InternPool, a: NullTerminatedString, b: []const u8) bool { + return std.mem.eql(u8, stringToSlice(ip, a), b); +} + +pub fn typeOf(ip: *const InternPool, index: Index) Index { + // This optimization of static keys is required so that typeOf can be called + // on static keys that haven't been added yet during static key initialization. + // An alternative would be to topological sort the static keys, but this would + // mean that the range of type indices would not be dense. 
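// A hedged illustration of that fast path (the pool's dynamic state is
// irrelevant for static indexes, so these hold even on a fresh pool):
//
//     assert(ip.typeOf(.bool_true) == .bool_type);
//     assert(ip.typeOf(.zero_usize) == .usize_type);
//     assert(ip.typeOf(.u8_type) == .type_type);
//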
+ return switch (index) { + .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .anyopaque_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .anyframe_type, + .null_type, + .undefined_type, + .enum_literal_type, + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + .prefetch_options_type, + .export_options_type, + .extern_options_type, + .type_info_type, + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + .anyerror_void_error_union_type, + .generic_poison_type, + .empty_struct_type, + => .type_type, + + .undef => .undefined_type, + .zero, .one, .negative_one => .comptime_int_type, + .zero_usize, .one_usize => .usize_type, + .zero_u8, .one_u8, .four_u8 => .u8_type, + .calling_convention_c, .calling_convention_inline => .calling_convention_type, + .void_value => .void_type, + .unreachable_value => .noreturn_type, + .null_value => .null_type, + .bool_true, .bool_false => .bool_type, + .empty_struct => .empty_struct_type, + .generic_poison => .generic_poison_type, + + // This optimization on tags is needed so that indexToKey can call + // typeOf without being recursive. + _ => switch (ip.items.items(.tag)[@enumToInt(index)]) { + .type_int_signed, + .type_int_unsigned, + .type_array_big, + .type_array_small, + .type_vector, + .type_pointer, + .type_slice, + .type_optional, + .type_anyframe, + .type_error_union, + .type_error_set, + .type_inferred_error_set, + .type_enum_auto, + .type_enum_explicit, + .type_enum_nonexhaustive, + .simple_type, + .type_opaque, + .type_struct, + .type_struct_ns, + .type_struct_anon, + .type_tuple_anon, + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + .type_function, + => .type_type, + + .undef, + .opt_null, + .only_possible_value, + => @intToEnum(Index, ip.items.items(.data)[@enumToInt(index)]), + + .simple_value => unreachable, // handled via Index above + + inline .ptr_decl, + .ptr_mut_decl, + .ptr_comptime_field, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .error_union_payload, + .runtime_value, + .int_small, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .enum_tag, + .variable, + .extern_func, + .func, + .union_value, + .bytes, + .aggregate, + .repeated, + => |t| { + const extra_index = ip.items.items(.data)[@enumToInt(index)]; + const field_index = std.meta.fieldIndex(t.Payload(), "ty").?; + return @intToEnum(Index, ip.extra.items[extra_index + field_index]); + }, + + .int_u8 => .u8_type, + .int_u16 => .u16_type, + .int_u32 => .u32_type, + .int_i32 => .i32_type, + .int_usize => .usize_type, + + .int_comptime_int_u32, + .int_comptime_int_i32, + => .comptime_int_type, + + // Note these are stored in limbs data, not extra data. 
+ .int_positive, + .int_negative, + => ip.limbData(Int, ip.items.items(.data)[@enumToInt(index)]).ty, + + .enum_literal => .enum_literal_type, + .float_f16 => .f16_type, + .float_f32 => .f32_type, + .float_f64 => .f64_type, + .float_f80 => .f80_type, + .float_f128 => .f128_type, + + .float_c_longdouble_f80, + .float_c_longdouble_f128, + => .c_longdouble_type, + + .float_comptime_float => .comptime_float_type, + + .memoized_call => unreachable, + }, + + .var_args_param_type => unreachable, + .none => unreachable, + }; +} + +/// Assumes that the enum's field indexes equal its value tags. +pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E { + const int = ip.indexToKey(i).enum_tag.int; + return @intToEnum(E, ip.indexToKey(int).int.storage.u64); +} + +pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 { + return switch (ip.indexToKey(ty)) { + .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .array_type => |array_type| array_type.len, + .vector_type => |vector_type| vector_type.len, + else => unreachable, + }; +} + +pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 { + return switch (ip.indexToKey(ty)) { + .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(), + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .array_type => |array_type| array_type.len + @boolToInt(array_type.sentinel != .none), + .vector_type => |vector_type| vector_type.len, + else => unreachable, + }; +} + +pub fn isNoReturn(ip: *const InternPool, ty: Index) bool { + return switch (ty) { + .noreturn_type => true, + else => switch (ip.indexToKey(ty)) { + .error_set_type => |error_set_type| error_set_type.names.len == 0, + else => false, + }, + }; +} + +/// This is a particularly hot function, so we operate directly on encodings +/// rather than the more straightforward implementation of calling `indexToKey`. 
+pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId { + return switch (index) { + .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + => .Int, + + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + => .Float, + + .anyopaque_type => .Opaque, + .bool_type => .Bool, + .void_type => .Void, + .type_type => .Type, + .anyerror_type => .ErrorSet, + .comptime_int_type => .ComptimeInt, + .comptime_float_type => .ComptimeFloat, + .noreturn_type => .NoReturn, + .anyframe_type => .AnyFrame, + .null_type => .Null, + .undefined_type => .Undefined, + .enum_literal_type => .EnumLiteral, + + .atomic_order_type, + .atomic_rmw_op_type, + .calling_convention_type, + .address_space_type, + .float_mode_type, + .reduce_op_type, + .call_modifier_type, + => .Enum, + + .prefetch_options_type, + .export_options_type, + .extern_options_type, + => .Struct, + + .type_info_type => .Union, + + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + => .Pointer, + + .anyerror_void_error_union_type => .ErrorUnion, + .empty_struct_type => .Struct, + + .generic_poison_type => return error.GenericPoison, + + // values, not types + .undef => unreachable, + .zero => unreachable, + .zero_usize => unreachable, + .zero_u8 => unreachable, + .one => unreachable, + .one_usize => unreachable, + .one_u8 => unreachable, + .four_u8 => unreachable, + .negative_one => unreachable, + .calling_convention_c => unreachable, + .calling_convention_inline => unreachable, + .void_value => unreachable, + .unreachable_value => unreachable, + .null_value => unreachable, + .bool_true => unreachable, + .bool_false => unreachable, + .empty_struct => unreachable, + .generic_poison => unreachable, + + .var_args_param_type => unreachable, // special tag + + _ => switch (ip.items.items(.tag)[@enumToInt(index)]) { + .type_int_signed, + .type_int_unsigned, + => .Int, + + .type_array_big, + .type_array_small, + => .Array, + + .type_vector => .Vector, + + .type_pointer, + .type_slice, + => .Pointer, + + .type_optional => .Optional, + .type_anyframe => .AnyFrame, + .type_error_union => .ErrorUnion, + + .type_error_set, + .type_inferred_error_set, + => .ErrorSet, + + .type_enum_auto, + .type_enum_explicit, + .type_enum_nonexhaustive, + => .Enum, + + .simple_type => unreachable, // handled via Index tag above + + .type_opaque => .Opaque, + + .type_struct, + .type_struct_ns, + .type_struct_anon, + .type_tuple_anon, + => .Struct, + + .type_union_tagged, + .type_union_untagged, + .type_union_safety, + => .Union, + + .type_function => .Fn, + + // values, not types + .undef, + .runtime_value, + .simple_value, + .ptr_decl, + .ptr_mut_decl, + .ptr_comptime_field, + .ptr_int, + .ptr_eu_payload, + .ptr_opt_payload, + .ptr_elem, + .ptr_field, + .ptr_slice, + .opt_payload, + .opt_null, + .int_u8, + .int_u16, + .int_u32, + .int_i32, + .int_usize, + .int_comptime_int_u32, + .int_comptime_int_i32, + .int_small, + .int_positive, + .int_negative, + .int_lazy_align, + .int_lazy_size, + .error_set_error, + .error_union_error, + .error_union_payload, + .enum_literal, + 
.enum_tag, + .float_f16, + .float_f32, + .float_f64, + .float_f80, + .float_f128, + .float_c_longdouble_f80, + .float_c_longdouble_f128, + .float_comptime_float, + .variable, + .extern_func, + .func, + .only_possible_value, + .union_value, + .bytes, + .aggregate, + .repeated, + // memoization, not types + .memoized_call, + => unreachable, + }, + .none => unreachable, // special tag + }; +} diff --git a/src/Liveness.zig b/src/Liveness.zig index 59135ef5c8..b12b638208 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -5,15 +5,17 @@ //! Some instructions are special, such as: //! * Conditional Branches //! * Switch Branches -const Liveness = @This(); const std = @import("std"); -const trace = @import("tracy.zig").trace; const log = std.log.scoped(.liveness); const assert = std.debug.assert; const Allocator = std.mem.Allocator; -const Air = @import("Air.zig"); const Log2Int = std.math.Log2Int; +const Liveness = @This(); +const trace = @import("tracy.zig").trace; +const Air = @import("Air.zig"); +const InternPool = @import("InternPool.zig"); + pub const Verify = @import("Liveness/Verify.zig"); /// This array is split into sets of 4 bits per AIR instruction. @@ -129,7 +131,7 @@ fn LivenessPassData(comptime pass: LivenessPass) type { }; } -pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness { +pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocator.Error!Liveness { const tracy = trace(@src()); defer tracy.end(); @@ -142,6 +144,7 @@ pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness { ), .extra = .{}, .special = .{}, + .intern_pool = intern_pool, }; errdefer gpa.free(a.tomb_bits); errdefer a.special.deinit(gpa); @@ -222,6 +225,7 @@ pub fn categorizeOperand( air: Air, inst: Air.Inst.Index, operand: Air.Inst.Index, + ip: *const InternPool, ) OperandCategory { const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); @@ -317,9 +321,10 @@ pub fn categorizeOperand( .arg, .alloc, + .inferred_alloc, + .inferred_alloc_comptime, .ret_ptr, - .constant, - .const_ty, + .interned, .trap, .breakpoint, .dbg_stmt, @@ -530,7 +535,7 @@ pub fn categorizeOperand( .aggregate_init => { const ty_pl = air_datas[inst].ty_pl; const aggregate_ty = air.getRefType(ty_pl.ty); - const len = @intCast(usize, aggregate_ty.arrayLen()); + const len = @intCast(usize, aggregate_ty.arrayLenIp(ip)); const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]); if (elements.len <= bpi - 1) { @@ -621,7 +626,7 @@ pub fn categorizeOperand( var operand_live: bool = true; for (air.extra[cond_extra.end..][0..2]) |cond_inst| { - if (l.categorizeOperand(air, cond_inst, operand) == .tomb) + if (l.categorizeOperand(air, cond_inst, operand, ip) == .tomb) operand_live = false; switch (air_tags[cond_inst]) { @@ -818,6 +823,7 @@ pub const BigTomb = struct { const Analysis = struct { gpa: Allocator, air: Air, + intern_pool: *const InternPool, tomb_bits: []usize, special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), @@ -867,6 +873,7 @@ fn analyzeInst( data: *LivenessPassData(pass), inst: Air.Inst.Index, ) Allocator.Error!void { + const ip = a.intern_pool; const inst_tags = a.air.instructions.items(.tag); const inst_datas = a.air.instructions.items(.data); @@ -967,9 +974,7 @@ fn analyzeInst( .work_group_id, => return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }), - .constant, - .const_ty, - => unreachable, + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, 
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 59135ef5c8..b12b638208 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -5,15 +5,17 @@
 //! Some instructions are special, such as:
 //! * Conditional Branches
 //! * Switch Branches
-const Liveness = @This();
 const std = @import("std");
-const trace = @import("tracy.zig").trace;
 const log = std.log.scoped(.liveness);
 const assert = std.debug.assert;
 const Allocator = std.mem.Allocator;
-const Air = @import("Air.zig");
 const Log2Int = std.math.Log2Int;
 
+const Liveness = @This();
+const trace = @import("tracy.zig").trace;
+const Air = @import("Air.zig");
+const InternPool = @import("InternPool.zig");
+
 pub const Verify = @import("Liveness/Verify.zig");
 
 /// This array is split into sets of 4 bits per AIR instruction.
@@ -129,7 +131,7 @@ fn LivenessPassData(comptime pass: LivenessPass) type {
     };
 }
 
-pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness {
+pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocator.Error!Liveness {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -142,6 +144,7 @@ pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness {
         ),
         .extra = .{},
         .special = .{},
+        .intern_pool = intern_pool,
     };
     errdefer gpa.free(a.tomb_bits);
     errdefer a.special.deinit(gpa);
@@ -222,6 +225,7 @@ pub fn categorizeOperand(
     air: Air,
     inst: Air.Inst.Index,
     operand: Air.Inst.Index,
+    ip: *const InternPool,
 ) OperandCategory {
     const air_tags = air.instructions.items(.tag);
     const air_datas = air.instructions.items(.data);
@@ -317,9 +321,10 @@ pub fn categorizeOperand(
 
         .arg,
         .alloc,
+        .inferred_alloc,
+        .inferred_alloc_comptime,
         .ret_ptr,
-        .constant,
-        .const_ty,
+        .interned,
         .trap,
         .breakpoint,
         .dbg_stmt,
@@ -530,7 +535,7 @@ pub fn categorizeOperand(
         .aggregate_init => {
            const ty_pl = air_datas[inst].ty_pl;
            const aggregate_ty = air.getRefType(ty_pl.ty);
-            const len = @intCast(usize, aggregate_ty.arrayLen());
+            const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
            const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]);
 
            if (elements.len <= bpi - 1) {
@@ -621,7 +626,7 @@ pub fn categorizeOperand(
 
            var operand_live: bool = true;
            for (air.extra[cond_extra.end..][0..2]) |cond_inst| {
-                if (l.categorizeOperand(air, cond_inst, operand) == .tomb)
+                if (l.categorizeOperand(air, cond_inst, operand, ip) == .tomb)
                    operand_live = false;
 
                switch (air_tags[cond_inst]) {
@@ -818,6 +823,7 @@ pub const BigTomb = struct {
 const Analysis = struct {
    gpa: Allocator,
    air: Air,
+    intern_pool: *const InternPool,
    tomb_bits: []usize,
    special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
    extra: std.ArrayListUnmanaged(u32),
@@ -867,6 +873,7 @@ fn analyzeInst(
    data: *LivenessPassData(pass),
    inst: Air.Inst.Index,
 ) Allocator.Error!void {
+    const ip = a.intern_pool;
    const inst_tags = a.air.instructions.items(.tag);
    const inst_datas = a.air.instructions.items(.data);
 
@@ -967,9 +974,7 @@ fn analyzeInst(
        .work_group_id,
        => return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }),
 
-        .constant,
-        .const_ty,
-        => unreachable,
+        .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,
 
        .trap,
        .unreach,
@@ -1134,7 +1139,7 @@ fn analyzeInst(
        .aggregate_init => {
            const ty_pl = inst_datas[inst].ty_pl;
            const aggregate_ty = a.air.getRefType(ty_pl.ty);
-            const len = @intCast(usize, aggregate_ty.arrayLen());
+            const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
            const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]);
 
            if (elements.len <= bpi - 1) {
@@ -1253,19 +1258,17 @@ fn analyzeOperands(
 ) Allocator.Error!void {
    const gpa = a.gpa;
    const inst_tags = a.air.instructions.items(.tag);
+    const ip = a.intern_pool;
 
    switch (pass) {
        .loop_analysis => {
            _ = data.live_set.remove(inst);
 
            for (operands) |op_ref| {
-                const operand = Air.refToIndex(op_ref) orelse continue;
+                const operand = Air.refToIndexAllowNone(op_ref) orelse continue;
 
                // Don't compute any liveness for constants
-                switch (inst_tags[operand]) {
-                    .constant, .const_ty => continue,
-                    else => {},
-                }
+                if (inst_tags[operand] == .interned) continue;
 
                _ = try data.live_set.put(gpa, operand, {});
            }
@@ -1288,20 +1291,17 @@ fn analyzeOperands(
            // If our result is unused and the instruction doesn't need to be lowered, backends will
            // skip the lowering of this instruction, so we don't want to record uses of operands.
            // That way, we can mark as many instructions as possible unused.
-            if (!immediate_death or a.air.mustLower(inst)) {
+            if (!immediate_death or a.air.mustLower(inst, ip)) {
                // Note that it's important we iterate over the operands backwards, so that if a dying
                // operand is used multiple times we mark its last use as its death.
                var i = operands.len;
                while (i > 0) {
                    i -= 1;
                    const op_ref = operands[i];
-                    const operand = Air.refToIndex(op_ref) orelse continue;
+                    const operand = Air.refToIndexAllowNone(op_ref) orelse continue;
 
                    // Don't compute any liveness for constants
-                    switch (inst_tags[operand]) {
-                        .constant, .const_ty => continue,
-                        else => {},
-                    }
+                    if (inst_tags[operand] == .interned) continue;
 
                    const mask = @as(Bpi, 1) << @intCast(OperandInt, i);
@@ -1407,7 +1407,7 @@ fn analyzeInstBlock(
 
    // If the block is noreturn, block deaths not only aren't useful, they're impossible to
    // find: there could be more stuff alive after the block than before it!
-    if (!a.air.getRefType(ty_pl.ty).isNoReturn()) {
+    if (!a.intern_pool.isNoReturn(a.air.getRefType(ty_pl.ty).ip_index)) {
        // The block kills the difference in the live sets
        const block_scope = data.block_scopes.get(inst).?;
        const num_deaths = data.live_set.count() - block_scope.live_set.count();
@@ -1819,6 +1819,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
 
        /// Must be called with operands in reverse order.
        fn feed(big: *Self, op_ref: Air.Inst.Ref) !void {
+            const ip = big.a.intern_pool;
            // Note that after this, `operands_remaining` becomes the index of the current operand
            big.operands_remaining -= 1;
 
@@ -1831,15 +1832,12 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
 
            // Don't compute any liveness for constants
            const inst_tags = big.a.air.instructions.items(.tag);
-            switch (inst_tags[operand]) {
-                .constant, .const_ty => return,
-                else => {},
-            }
+            if (inst_tags[operand] == .interned) return;
 
            // If our result is unused and the instruction doesn't need to be lowered, backends will
            // skip the lowering of this instruction, so we don't want to record uses of operands.
            // That way, we can mark as many instructions as possible unused.
-            if (big.will_die_immediately and !big.a.air.mustLower(big.inst)) return;
+            if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip)) return;
 
            const extra_byte = (big.operands_remaining - (bpi - 1)) / 31;
            const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31);
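Both passes lean on the structure-of-arrays layout of `air.instructions`: `.items(.tag)` yields one dense tag array that can be scanned without loading instruction payloads, which is what makes the cheap `== .interned` constant check viable. A self-contained sketch of the idiom with `std.MultiArrayList` (the `Inst` struct here is a stand-in, not the real AIR type):

    const std = @import("std");

    const Inst = struct {
        tag: enum { add, interned, store },
        data: u64,
    };

    test "scan tags without touching payloads" {
        const gpa = std.testing.allocator;
        var insts = std.MultiArrayList(Inst){};
        defer insts.deinit(gpa);

        try insts.append(gpa, .{ .tag = .add, .data = 1 });
        try insts.append(gpa, .{ .tag = .interned, .data = 2 });

        // One contiguous tag slice; analogous to `air.instructions.items(.tag)`.
        var constants: usize = 0;
        for (insts.items(.tag)) |tag| {
            if (tag == .interned) constants += 1; // constants carry no liveness
        }
        try std.testing.expectEqual(@as(usize, 1), constants);
    }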
diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig
index a55ebe52a6..a5fc592894 100644
--- a/src/Liveness/Verify.zig
+++ b/src/Liveness/Verify.zig
@@ -5,6 +5,7 @@ air: Air,
 liveness: Liveness,
 live: LiveMap = .{},
 blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{},
+intern_pool: *const InternPool,
 
 pub const Error = error{ LivenessInvalid, OutOfMemory };
 
@@ -27,10 +28,11 @@ pub fn verify(self: *Verify) Error!void {
 const LiveMap = std.AutoHashMapUnmanaged(Air.Inst.Index, void);
 
 fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
+    const ip = self.intern_pool;
    const tag = self.air.instructions.items(.tag);
    const data = self.air.instructions.items(.data);
    for (body) |inst| {
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) {
            // This instruction will not be lowered and should be ignored.
            continue;
        }
@@ -39,9 +41,10 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
            // no operands
            .arg,
            .alloc,
+            .inferred_alloc,
+            .inferred_alloc_comptime,
            .ret_ptr,
-            .constant,
-            .const_ty,
+            .interned,
            .breakpoint,
            .dbg_stmt,
            .dbg_inline_begin,
@@ -58,10 +61,10 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
            .work_item_id,
            .work_group_size,
            .work_group_id,
-            => try self.verifyInst(inst, .{ .none, .none, .none }),
+            => try self.verifyInstOperands(inst, .{ .none, .none, .none }),
 
            .trap, .unreach => {
-                try self.verifyInst(inst, .{ .none, .none, .none });
+                try self.verifyInstOperands(inst, .{ .none, .none, .none });
                // This instruction terminates the function, so everything should be dead
                if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst});
            },
@@ -110,7 +113,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
            .c_va_copy,
            => {
                const ty_op = data[inst].ty_op;
-                try self.verifyInst(inst, .{ ty_op.operand, .none, .none });
+                try self.verifyInstOperands(inst, .{ ty_op.operand, .none, .none });
            },
            .is_null,
            .is_non_null,
@@ -146,13 +149,13 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
            .c_va_end,
            => {
                const un_op = data[inst].un_op;
-                try self.verifyInst(inst, .{ un_op, .none, .none });
+                try self.verifyInstOperands(inst, .{ un_op, .none, .none });
            },
            .ret,
            .ret_load,
            => {
                const un_op = data[inst].un_op;
-                try self.verifyInst(inst, .{ un_op, .none, .none });
+                try self.verifyInstOperands(inst, .{ un_op, .none, .none });
                // This instruction terminates the function, so everything should be dead
                if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst});
            },
@@ -161,36 +164,36 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
            .wasm_memory_grow,
            => {
                const pl_op = data[inst].pl_op;
-                try self.verifyInst(inst, .{ pl_op.operand, .none, .none });
+                try self.verifyInstOperands(inst, .{ pl_op.operand, .none, .none });
            },
            .prefetch => {
                const prefetch = data[inst].prefetch;
-                try self.verifyInst(inst, .{ prefetch.ptr, .none, .none });
+                try self.verifyInstOperands(inst, .{ prefetch.ptr, .none, .none });
            },
            .reduce,
            .reduce_optimized,
            => {
                const reduce = data[inst].reduce;
-                try self.verifyInst(inst, .{ reduce.operand, .none, .none });
+                try self.verifyInstOperands(inst, .{ reduce.operand, .none, .none });
            },
            .union_init => {
                const ty_pl = data[inst].ty_pl;
                const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
-                try self.verifyInst(inst, .{ extra.init, .none, .none });
+                try self.verifyInstOperands(inst, .{ extra.init, .none, .none });
            },
            .struct_field_ptr, .struct_field_val => {
                const ty_pl = data[inst].ty_pl;
                const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
-                try self.verifyInst(inst, .{ extra.struct_operand, .none, .none });
+                try self.verifyInstOperands(inst, .{ extra.struct_operand, .none, .none });
            },
            .field_parent_ptr => {
                const ty_pl = data[inst].ty_pl;
                const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
-                try self.verifyInst(inst, .{ extra.field_ptr, .none, .none });
+                try self.verifyInstOperands(inst, .{ extra.field_ptr, .none, .none });
            },
            .atomic_load => {
                const atomic_load = data[inst].atomic_load;
-                try self.verifyInst(inst, .{ atomic_load.ptr, .none, .none });
+                try self.verifyInstOperands(inst, .{ atomic_load.ptr, .none, .none });
            },
 
            // binary
@@ -260,7 +263,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
            .memcpy,
            => {
                const bin_op = data[inst].bin_op;
-                try self.verifyInst(inst, .{ bin_op.lhs, bin_op.rhs, .none });
+                try self.verifyInstOperands(inst, .{ bin_op.lhs, bin_op.rhs, .none });
            },
            .add_with_overflow,
            .sub_with_overflow,
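A reminder on the bit-twiddling that `Liveness` and this verifier share: as the doc comment at the top of Liveness.zig says, the tomb array packs sets of `bpi` (4) bits per AIR instruction, three operand-death bits plus a flag for whether the instruction itself is unused. The extraction arithmetic in isolation, assuming only that 4-bits-per-instruction layout (the real field names and extra-word encoding differ):

    const std = @import("std");

    // Pull out one instruction's 4-bit tomb set from a packed usize array.
    fn tombBits(bits: []const usize, inst: usize) u4 {
        const per_word = @bitSizeOf(usize) / 4; // 16 sets per word on 64-bit
        const word = bits[inst / per_word];
        const shift = @intCast(std.math.Log2Int(usize), (inst % per_word) * 4);
        return @truncate(u4, word >> shift);
    }

    test "extracting a 4-bit set" {
        var bits = [_]usize{0};
        bits[0] |= @as(usize, 0b1010) << 8; // instruction #2's nibble
        try std.testing.expectEqual(@as(u4, 0b1010), tombBits(&bits, 2));
    }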
@@ -274,62 +277,62 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
            => {
                const ty_pl = data[inst].ty_pl;
                const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
-                try self.verifyInst(inst, .{ extra.lhs, extra.rhs, .none });
+                try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, .none });
            },
            .shuffle => {
                const ty_pl = data[inst].ty_pl;
                const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
-                try self.verifyInst(inst, .{ extra.a, extra.b, .none });
+                try self.verifyInstOperands(inst, .{ extra.a, extra.b, .none });
            },
            .cmp_vector,
            .cmp_vector_optimized,
            => {
                const ty_pl = data[inst].ty_pl;
                const extra = self.air.extraData(Air.VectorCmp, ty_pl.payload).data;
-                try self.verifyInst(inst, .{ extra.lhs, extra.rhs, .none });
+                try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, .none });
            },
            .atomic_rmw => {
                const pl_op = data[inst].pl_op;
                const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
-                try self.verifyInst(inst, .{ pl_op.operand, extra.operand, .none });
+                try self.verifyInstOperands(inst, .{ pl_op.operand, extra.operand, .none });
            },
 
            // ternary
            .select => {
                const pl_op = data[inst].pl_op;
                const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
-                try self.verifyInst(inst, .{ pl_op.operand, extra.lhs, extra.rhs });
+                try self.verifyInstOperands(inst, .{ pl_op.operand, extra.lhs, extra.rhs });
            },
            .mul_add => {
                const pl_op = data[inst].pl_op;
                const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
-                try self.verifyInst(inst, .{ extra.lhs, extra.rhs, pl_op.operand });
+                try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, pl_op.operand });
            },
            .vector_store_elem => {
                const vector_store_elem = data[inst].vector_store_elem;
                const extra = self.air.extraData(Air.Bin, vector_store_elem.payload).data;
-                try self.verifyInst(inst, .{ vector_store_elem.vector_ptr, extra.lhs, extra.rhs });
+                try self.verifyInstOperands(inst, .{ vector_store_elem.vector_ptr, extra.lhs, extra.rhs });
            },
            .cmpxchg_strong,
            .cmpxchg_weak,
            => {
                const ty_pl = data[inst].ty_pl;
                const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
-                try self.verifyInst(inst, .{ extra.ptr, extra.expected_value, extra.new_value });
+                try self.verifyInstOperands(inst, .{ extra.ptr, extra.expected_value, extra.new_value });
            },
 
            // big tombs
            .aggregate_init => {
                const ty_pl = data[inst].ty_pl;
                const aggregate_ty = self.air.getRefType(ty_pl.ty);
-                const len = @intCast(usize, aggregate_ty.arrayLen());
+                const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
                const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
 
                var bt = self.liveness.iterateBigTomb(inst);
                for (elements) |element| {
                    try self.verifyOperand(inst, element, bt.feed());
                }
-                try self.verifyInst(inst, .{ .none, .none, .none });
+                try self.verifyInst(inst);
            },
            .call, .call_always_tail, .call_never_tail, .call_never_inline => {
                const pl_op = data[inst].pl_op;
@@ -344,7 +347,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
                for (args) |arg| {
                    try self.verifyOperand(inst, arg, bt.feed());
                }
-                try self.verifyInst(inst, .{ .none, .none, .none });
+                try self.verifyInst(inst);
            },
            .assembly => {
                const ty_pl = data[inst].ty_pl;
@@ -370,7 +373,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
                for (inputs) |input| {
                    try self.verifyOperand(inst, input, bt.feed());
                }
-                try self.verifyInst(inst, .{ .none, .none, .none });
+                try self.verifyInst(inst);
            },
 
            // control flow
@@ -394,7 +397,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
 
                for (cond_br_liveness.then_deaths) |death| try self.verifyDeath(inst, death);
 
-                try self.verifyInst(inst, .{ .none, .none, .none });
+                try self.verifyInst(inst);
            },
            .try_ptr => {
                const ty_pl = data[inst].ty_pl;
@@ -416,7 +419,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
 
                for (cond_br_liveness.then_deaths) |death| try self.verifyDeath(inst, death);
 
-                try self.verifyInst(inst, .{ .none, .none, .none });
+                try self.verifyInst(inst);
            },
            .br => {
                const br = data[inst].br;
@@ -428,7 +431,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
                } else {
                    gop.value_ptr.* = try self.live.clone(self.gpa);
                }
-                try self.verifyInst(inst, .{ .none, .none, .none });
+                try self.verifyInst(inst);
            },
            .block => {
                const ty_pl = data[inst].ty_pl;
@@ -450,7 +453,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
 
                for (block_liveness.deaths) |death| try self.verifyDeath(inst, death);
 
-                if (block_ty.isNoReturn()) {
+                if (ip.isNoReturn(block_ty.toIntern())) {
                    assert(!self.blocks.contains(inst));
                } else {
                    var live = self.blocks.fetchRemove(inst).?.value;
@@ -459,7 +462,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
                    try self.verifyMatchingLiveness(inst, live);
                }
 
-                try self.verifyInst(inst, .{ .none, .none, .none });
+                try self.verifyInstOperands(inst, .{ .none, .none, .none });
            },
            .loop => {
                const ty_pl = data[inst].ty_pl;
@@ -474,7 +477,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
                // The same stuff should be alive after the loop as before it
                try self.verifyMatchingLiveness(inst, live);
 
-                try self.verifyInst(inst, .{ .none, .none, .none });
+                try self.verifyInstOperands(inst, .{ .none, .none, .none });
            },
            .cond_br => {
                const pl_op = data[inst].pl_op;
@@ -497,7 +500,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
                for (cond_br_liveness.else_deaths) |death| try self.verifyDeath(inst, death);
                try self.verifyBody(else_body);
 
-                try self.verifyInst(inst, .{ .none, .none, .none });
+                try self.verifyInst(inst);
            },
            .switch_br => {
                const pl_op = data[inst].pl_op;
@@ -541,7 +544,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
                    try self.verifyBody(else_body);
                }
 
-                try self.verifyInst(inst, .{ .none, .none, .none });
+                try self.verifyInst(inst);
            },
        }
    }
@@ -552,20 +555,22 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err
 }
 
 fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void {
-    const operand = Air.refToIndex(op_ref) orelse return;
-    switch (self.air.instructions.items(.tag)[operand]) {
-        .constant, .const_ty => {},
-        else => {
-            if (dies) {
-                if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
-            } else {
-                if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
-            }
-        },
+    const operand = Air.refToIndexAllowNone(op_ref) orelse {
+        assert(!dies);
+        return;
+    };
+    if (self.air.instructions.items(.tag)[operand] == .interned) {
+        assert(!dies);
+        return;
+    }
+    if (dies) {
+        if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
+    } else {
+        if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
    }
 }
 
-fn verifyInst(
+fn verifyInstOperands(
    self: *Verify,
    inst: Air.Inst.Index,
    operands: [Liveness.bpi - 1]Air.Inst.Ref,
@@ -574,16 +579,15 @@ fn verifyInst(
        const dies = self.liveness.operandDies(inst, @intCast(Liveness.OperandInt, operand_index));
        try self.verifyOperand(inst, operand, dies);
    }
-    const tag = self.air.instructions.items(.tag);
-    switch (tag[inst]) {
-        .constant, .const_ty => unreachable,
-        else => {
-            if (self.liveness.isUnused(inst)) {
-                assert(!self.live.contains(inst));
-            } else {
-                try self.live.putNoClobber(self.gpa, inst, {});
-            }
-        },
+    try self.verifyInst(inst);
+}
+
+fn verifyInst(self: *Verify, inst: Air.Inst.Index) Error!void {
+    if (self.air.instructions.items(.tag)[inst] == .interned) return;
+    if (self.liveness.isUnused(inst)) {
+        assert(!self.live.contains(inst));
+    } else {
+        try self.live.putNoClobber(self.gpa, inst, {});
    }
 }
 
@@ -604,4 +608,5 @@ const log = std.log.scoped(.liveness_verify);
 const Air = @import("../Air.zig");
 const Liveness = @import("../Liveness.zig");
+const InternPool = @import("../InternPool.zig");
 const Verify = @This();
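Conceptually the verifier replays liveness: defining an instruction adds it to a live set, a dying operand must remove exactly one live entry, and a second death or a use-after-death is a `LivenessInvalid` report. The invariant in miniature, runnable on its own:

    const std = @import("std");

    // Toy version of the verifier's bookkeeping: an operand may be killed
    // exactly once, and only while it is still live.
    fn kill(live: *std.AutoHashMapUnmanaged(u32, void), operand: u32) error{LivenessInvalid}!void {
        if (!live.remove(operand)) return error.LivenessInvalid;
    }

    test "an operand cannot die twice" {
        const gpa = std.testing.allocator;
        var live = std.AutoHashMapUnmanaged(u32, void){};
        defer live.deinit(gpa);

        try live.putNoClobber(gpa, 42, {}); // %42 becomes live
        try kill(&live, 42); // first death: fine
        try std.testing.expectError(error.LivenessInvalid, kill(&live, 42));
    }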
diff --git a/src/Module.zig b/src/Module.zig
index a8f2281c4f..61f39a327a 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -32,6 +32,19 @@ const build_options = @import("build_options");
 const Liveness = @import("Liveness.zig");
 const isUpDir = @import("introspect.zig").isUpDir;
 const clang = @import("clang.zig");
+const InternPool = @import("InternPool.zig");
+
+comptime {
+    @setEvalBranchQuota(4000);
+    for (
+        @typeInfo(Zir.Inst.Ref).Enum.fields,
+        @typeInfo(Air.Inst.Ref).Enum.fields,
+        @typeInfo(InternPool.Index).Enum.fields,
+    ) |zir_field, air_field, ip_field| {
+        assert(mem.eql(u8, zir_field.name, ip_field.name));
+        assert(mem.eql(u8, air_field.name, ip_field.name));
+    }
+}
 
 /// General-purpose allocator. Used for both temporary and long-term storage.
 gpa: Allocator,
@@ -72,28 +85,29 @@ import_table: std.StringArrayHashMapUnmanaged(*File) = .{},
 /// Keys are fully resolved file paths. This table owns the keys and values.
 embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{},
 
-/// This is a temporary addition to stage2 in order to match legacy behavior,
-/// however the end-game once the lang spec is settled will be to use a global
-/// InternPool for comptime memoized objects, making this behavior consistent across all types,
-/// not only string literals. Or, we might decide to not guarantee string literals
-/// to have equal comptime pointers, in which case this field can be deleted (perhaps
-/// the commit that introduced it can simply be reverted).
-/// This table uses an optional index so that when a Decl is destroyed, the string literal
-/// is still reclaimable by a future Decl.
-string_literal_table: std.HashMapUnmanaged(StringLiteralContext.Key, Decl.OptionalIndex, StringLiteralContext, std.hash_map.default_max_load_percentage) = .{},
-string_literal_bytes: ArrayListUnmanaged(u8) = .{},
+/// Stores all Type and Value objects; periodically garbage collected.
+intern_pool: InternPool = .{},
+/// To be eliminated in a future commit by moving more data into InternPool.
+/// Current uses that must be eliminated:
+/// * Struct comptime_args
+/// * Struct optimized_order
+/// * Union fields
+/// This memory lives until the Module is destroyed.
+tmp_hack_arena: std.heap.ArenaAllocator,
+
+/// This is currently only used for string literals.
+memoized_decls: std.AutoHashMapUnmanaged(InternPool.Index, Decl.Index) = .{},
+
+monomorphed_func_keys: std.ArrayListUnmanaged(InternPool.Index) = .{},
 /// The set of all the generic function instantiations. This is used so that when a generic
 /// function is called twice with the same comptime parameter arguments, both calls dispatch
 /// to the same function.
 monomorphed_funcs: MonomorphedFuncsSet = .{},
-/// The set of all comptime function calls that have been cached so that future calls
-/// with the same parameters will get the same return value.
-memoized_calls: MemoizedCallSet = .{},
 /// Contains the values from `@setAlignStack`. A sparse table is used here
 /// instead of a field of `Fn` because usage of `@setAlignStack` is rare, while
 /// functions are many.
-align_stack_fns: std.AutoHashMapUnmanaged(*const Fn, SetAlignStack) = .{},
+align_stack_fns: std.AutoHashMapUnmanaged(Fn.Index, SetAlignStack) = .{},
 
 /// We optimize memory usage for a compilation with no compile errors by storing the
 /// error messages and mapping outside of `Decl`.
@@ -120,13 +134,8 @@ cimport_errors: std.AutoArrayHashMapUnmanaged(Decl.Index, []CImportError) = .{},
 /// contains Decls that need to be deleted if they end up having no references to them.
 deletion_set: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
 
-/// Error tags and their values, tag names are duped with mod.gpa.
-/// Corresponds with `error_name_list`.
-global_error_set: std.StringHashMapUnmanaged(ErrorInt) = .{},
-
-/// ErrorInt -> []const u8 for fast lookups for @intToError at comptime
-/// Corresponds with `global_error_set`.
-error_name_list: ArrayListUnmanaged([]const u8),
+/// Key is the error name, index is the error tag value. Index 0 has a length-0 string.
+global_error_set: GlobalErrorSet = .{},
 
 /// Incrementing integer used to compare against the corresponding Decl
 /// field to determine whether a Decl's status applies to an ongoing update, or a
@@ -165,6 +174,11 @@ allocated_decls: std.SegmentedList(Decl, 0) = .{},
 /// When a Decl object is freed from `allocated_decls`, it is pushed into this stack.
 decls_free_list: ArrayListUnmanaged(Decl.Index) = .{},
 
+/// Same pattern as with `allocated_decls`.
+allocated_namespaces: std.SegmentedList(Namespace, 0) = .{},
+/// Same pattern as with `decls_free_list`.
+namespaces_free_list: ArrayListUnmanaged(Namespace.Index) = .{},
+
 global_assembly: std.AutoHashMapUnmanaged(Decl.Index, []u8) = .{},
 
 reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct {
@@ -172,6 +186,8 @@ reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct {
     src: LazySrcLoc,
 }) = .{},
 
+pub const GlobalErrorSet = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);
+
 pub const CImportError = struct {
     offset: u32,
     line: u32,
@@ -187,108 +203,40 @@ pub const CImportError = struct {
     }
 };
 
-pub const StringLiteralContext = struct {
-    bytes: *ArrayListUnmanaged(u8),
+pub const MonomorphedFuncKey = struct { func: Fn.Index, args_index: u32, args_len: u32 };
 
-    pub const Key = struct {
-        index: u32,
-        len: u32,
-    };
+pub const MonomorphedFuncAdaptedKey = struct { func: Fn.Index, args: []const InternPool.Index };
 
-    pub fn eql(self: @This(), a: Key, b: Key) bool {
-        _ = self;
-        return a.index == b.index and a.len == b.len;
-    }
-
-    pub fn hash(self: @This(), x: Key) u64 {
-        const x_slice = self.bytes.items[x.index..][0..x.len];
-        return std.hash_map.hashString(x_slice);
-    }
-};
-
-pub const StringLiteralAdapter = struct {
-    bytes: *ArrayListUnmanaged(u8),
-
-    pub fn eql(self: @This(), a_slice: []const u8, b: StringLiteralContext.Key) bool {
-        const b_slice = self.bytes.items[b.index..][0..b.len];
-        return mem.eql(u8, a_slice, b_slice);
-    }
-
-    pub fn hash(self: @This(), adapted_key: []const u8) u64 {
-        _ = self;
-        return std.hash_map.hashString(adapted_key);
-    }
-};
-
-const MonomorphedFuncsSet = std.HashMapUnmanaged(
-    *Fn,
-    void,
+pub const MonomorphedFuncsSet = std.HashMapUnmanaged(
+    MonomorphedFuncKey,
+    InternPool.Index,
     MonomorphedFuncsContext,
     std.hash_map.default_max_load_percentage,
 );
 
-const MonomorphedFuncsContext = struct {
-    pub fn eql(ctx: @This(), a: *Fn, b: *Fn) bool {
-        _ = ctx;
-        return a == b;
+pub const MonomorphedFuncsContext = struct {
+    mod: *Module,
+
+    pub fn eql(_: @This(), a: MonomorphedFuncKey, b: MonomorphedFuncKey) bool {
+        return std.meta.eql(a, b);
    }
 
-    /// Must match `Sema.GenericCallAdapter.hash`.
-    pub fn hash(ctx: @This(), key: *Fn) u64 {
-        _ = ctx;
-        return key.hash;
+    pub fn hash(ctx: @This(), key: MonomorphedFuncKey) u64 {
+        const key_args = ctx.mod.monomorphed_func_keys.items[key.args_index..][0..key.args_len];
+        return std.hash.Wyhash.hash(@enumToInt(key.func), std.mem.sliceAsBytes(key_args));
    }
 };
 
-pub const MemoizedCallSet = std.HashMapUnmanaged(
-    MemoizedCall.Key,
-    MemoizedCall.Result,
-    MemoizedCall,
-    std.hash_map.default_max_load_percentage,
-);
+pub const MonomorphedFuncsAdaptedContext = struct {
+    mod: *Module,
 
-pub const MemoizedCall = struct {
-    module: *Module,
-
-    pub const Key = struct {
-        func: *Fn,
-        args: []TypedValue,
-    };
-
-    pub const Result = struct {
-        val: Value,
-        arena: std.heap.ArenaAllocator.State,
-    };
-
-    pub fn eql(ctx: @This(), a: Key, b: Key) bool {
-        if (a.func != b.func) return false;
-
-        assert(a.args.len == b.args.len);
-        for (a.args, 0..) |a_arg, arg_i| {
-            const b_arg = b.args[arg_i];
-            if (!a_arg.eql(b_arg, ctx.module)) {
-                return false;
-            }
-        }
-
-        return true;
+    pub fn eql(ctx: @This(), adapted_key: MonomorphedFuncAdaptedKey, other_key: MonomorphedFuncKey) bool {
+        const other_key_args = ctx.mod.monomorphed_func_keys.items[other_key.args_index..][0..other_key.args_len];
+        return adapted_key.func == other_key.func and std.mem.eql(InternPool.Index, adapted_key.args, other_key_args);
    }
 
-    /// Must match `Sema.GenericCallAdapter.hash`.
-    pub fn hash(ctx: @This(), key: Key) u64 {
-        var hasher = std.hash.Wyhash.init(0);
-
-        // The generic function Decl is guaranteed to be the first dependency
-        // of each of its instantiations.
-        std.hash.autoHash(&hasher, key.func);
-
-        // This logic must be kept in sync with the logic in `analyzeCall` that
-        // computes the hash.
-        for (key.args) |arg| {
-            arg.hash(&hasher, ctx.module);
-        }
-
-        return hasher.final();
+    pub fn hash(_: @This(), adapted_key: MonomorphedFuncAdaptedKey) u64 {
+        return std.hash.Wyhash.hash(@enumToInt(adapted_key.func), std.mem.sliceAsBytes(adapted_key.args));
    }
 };
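The new instantiation cache avoids owning an argument slice per entry: each key is a `(args_index, args_len)` window into the shared `monomorphed_func_keys` buffer, and hashing slices that buffer on demand, seeding `Wyhash` with the function index so equal argument lists under different functions do not collide. A standalone sketch of that scheme (the `MiniIndex` enum and values are illustrative):

    const std = @import("std");

    const MiniIndex = enum(u32) { zero, one, two, _ };

    // Hash a window of a shared key buffer, seeded by the function's index,
    // mirroring the shape of MonomorphedFuncsContext.hash above.
    fn hashKey(func: u32, all_args: []const MiniIndex, args_index: u32, args_len: u32) u64 {
        const args = all_args[args_index..][0..args_len];
        return std.hash.Wyhash.hash(func, std.mem.sliceAsBytes(args));
    }

    test "equal windows hash equally" {
        const buf = [_]MiniIndex{ .zero, .one, .two, .zero, .one, .two };
        try std.testing.expectEqual(hashKey(7, &buf, 0, 3), hashKey(7, &buf, 3, 3));
    }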
@@ -322,7 +270,7 @@ pub const GlobalEmitH = struct {
 pub const ErrorInt = u32;
 
 pub const Export = struct {
-    options: std.builtin.ExportOptions,
+    opts: Options,
     src: LazySrcLoc,
     /// The Decl that performs the export. Note that this is *not* the Decl being exported.
     owner_decl: Decl.Index,
@@ -340,10 +288,17 @@ pub const Export = struct {
         complete,
     },
 
+    pub const Options = struct {
+        name: InternPool.NullTerminatedString,
+        linkage: std.builtin.GlobalLinkage = .Strong,
+        section: InternPool.OptionalNullTerminatedString = .none,
+        visibility: std.builtin.SymbolVisibility = .default,
+    };
+
     pub fn getSrcLoc(exp: Export, mod: *Module) SrcLoc {
         const src_decl = mod.declPtr(exp.src_decl);
         return .{
-            .file_scope = src_decl.getFileScope(),
+            .file_scope = src_decl.getFileScope(mod),
             .parent_decl_node = src_decl.src_node,
             .lazy = exp.src,
         };
@@ -351,61 +306,76 @@ pub const Export = struct {
 };
 
 pub const CaptureScope = struct {
+    refs: u32,
     parent: ?*CaptureScope,
 
     /// Values from this decl's evaluation that will be closed over in
-    /// child decls. Values stored in the value_arena of the linked decl.
-    /// During sema, this map is backed by the gpa. Once sema completes,
-    /// it is reallocated using the value_arena.
-    captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, TypedValue) = .{},
+    /// child decls. This map is backed by the gpa, and deinited when
+    /// the refcount reaches 0.
+    captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Capture) = .{},
 
-    pub fn failed(noalias self: *const @This()) bool {
+    pub const Capture = union(enum) {
+        comptime_val: InternPool.Index, // index of value
+        runtime_val: InternPool.Index, // index of type
+    };
+
+    pub fn failed(noalias self: *const CaptureScope) bool {
         return self.captures.available == 0 and self.captures.size == std.math.maxInt(u32);
    }
 
-    pub fn fail(noalias self: *@This()) void {
+    pub fn fail(noalias self: *CaptureScope, gpa: Allocator) void {
+        self.captures.deinit(gpa);
         self.captures.available = 0;
         self.captures.size = std.math.maxInt(u32);
    }
+
+    pub fn incRef(self: *CaptureScope) void {
+        self.refs += 1;
+    }
+
+    pub fn decRef(self: *CaptureScope, gpa: Allocator) void {
+        self.refs -= 1;
+        if (self.refs > 0) return;
+        if (self.parent) |p| p.decRef(gpa);
+        if (!self.failed()) {
+            self.captures.deinit(gpa);
+        }
+        gpa.destroy(self);
+    }
 };
 
 pub const WipCaptureScope = struct {
     scope: *CaptureScope,
     finalized: bool,
     gpa: Allocator,
-    perm_arena: Allocator,
 
-    pub fn init(gpa: Allocator, perm_arena: Allocator, parent: ?*CaptureScope) !@This() {
-        const scope = try perm_arena.create(CaptureScope);
-        scope.* = .{ .parent = parent };
-        return @This(){
+    pub fn init(gpa: Allocator, parent: ?*CaptureScope) !WipCaptureScope {
+        const scope = try gpa.create(CaptureScope);
+        if (parent) |p| p.incRef();
+        scope.* = .{ .refs = 1, .parent = parent };
+        return .{
             .scope = scope,
             .finalized = false,
             .gpa = gpa,
-            .perm_arena = perm_arena,
         };
    }
 
-    pub fn finalize(noalias self: *@This()) !void {
-        assert(!self.finalized);
-        // use a temp to avoid unintentional aliasing due to RLS
-        const tmp = try self.scope.captures.clone(self.perm_arena);
-        self.scope.captures.deinit(self.gpa);
-        self.scope.captures = tmp;
+    pub fn finalize(noalias self: *WipCaptureScope) !void {
         self.finalized = true;
    }
 
-    pub fn reset(noalias self: *@This(), parent: ?*CaptureScope) !void {
-        if (!self.finalized) try self.finalize();
-        self.scope = try self.perm_arena.create(CaptureScope);
-        self.scope.* = .{ .parent = parent };
-        self.finalized = false;
+    pub fn reset(noalias self: *WipCaptureScope, parent: ?*CaptureScope) !void {
+        self.scope.decRef(self.gpa);
+        self.scope = try self.gpa.create(CaptureScope);
+        if (parent) |p| p.incRef();
+        self.scope.* = .{ .refs = 1, .parent = parent };
    }
 
-    pub fn deinit(noalias self: *@This()) void {
-        if (!self.finalized) {
-            self.scope.captures.deinit(self.gpa);
-            self.scope.fail();
+    pub fn deinit(noalias self: *WipCaptureScope) void {
+        if (self.finalized) {
+            self.scope.decRef(self.gpa);
+        } else {
+            self.scope.fail(self.gpa);
        }
        self.* = undefined;
    }
 };
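`CaptureScope` moves from arena ownership to manual reference counting: a child scope pins its parent, and `decRef` tears the chain down once the last reference drops. The same ownership scheme stripped to its essentials (no captures, just the counting), runnable under the leak-checking test allocator:

    const std = @import("std");

    const Scope = struct {
        refs: u32,
        parent: ?*Scope,

        fn create(gpa: std.mem.Allocator, parent: ?*Scope) !*Scope {
            const s = try gpa.create(Scope);
            if (parent) |p| p.refs += 1; // a child keeps its parent alive
            s.* = .{ .refs = 1, .parent = parent };
            return s;
        }

        fn decRef(self: *Scope, gpa: std.mem.Allocator) void {
            self.refs -= 1;
            if (self.refs > 0) return;
            // Last reference dropped: release our hold on the parent chain.
            if (self.parent) |p| p.decRef(gpa);
            gpa.destroy(self);
        }
    };

    test "dropping the leaf releases the whole chain" {
        const gpa = std.testing.allocator;
        const root = try Scope.create(gpa, null);
        const leaf = try Scope.create(gpa, root);
        root.decRef(gpa); // root stays alive; leaf still references it
        leaf.decRef(gpa); // frees leaf, then root
    }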
@@ -452,8 +422,7 @@ const ValueArena = struct {
 };
 
 pub const Decl = struct {
-    /// Allocated with Module's allocator; outlives the ZIR code.
-    name: [*:0]const u8,
+    name: InternPool.NullTerminatedString,
     /// The most recent Type of the Decl after a successful semantic analysis.
     /// Populated when `has_tv`.
     ty: Type,
@@ -461,20 +430,16 @@ pub const Decl = struct {
     /// Populated when `has_tv`.
     val: Value,
     /// Populated when `has_tv`.
-    /// Points to memory inside value_arena.
-    @"linksection": ?[*:0]const u8,
+    @"linksection": InternPool.OptionalNullTerminatedString,
     /// Populated when `has_tv`.
     @"align": u32,
     /// Populated when `has_tv`.
     @"addrspace": std.builtin.AddressSpace,
-    /// The memory for ty, val, align, linksection, and captures.
-    /// If this is `null` then there is no memory management needed.
-    value_arena: ?*ValueArena = null,
     /// The direct parent namespace of the Decl.
     /// Reference to externally owned memory.
     /// In the case of the Decl corresponding to a file, this is
     /// the namespace of the struct, since there is no parent.
-    src_namespace: *Namespace,
+    src_namespace: Namespace.Index,
 
     /// The scope which lexically contains this decl. A decl must depend
     /// on its lexical parent, in order to ensure that this pointer is valid.
@@ -624,55 +589,17 @@ pub const Decl = struct {
         function_body,
     };
 
-    pub fn clearName(decl: *Decl, gpa: Allocator) void {
-        gpa.free(mem.sliceTo(decl.name, 0));
-        decl.name = undefined;
-    }
-
     pub fn clearValues(decl: *Decl, mod: *Module) void {
-        const gpa = mod.gpa;
-        if (decl.getExternFn()) |extern_fn| {
-            extern_fn.deinit(gpa);
-            gpa.destroy(extern_fn);
-        }
-        if (decl.getFunction()) |func| {
+        if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| {
             _ = mod.align_stack_fns.remove(func);
-            if (func.comptime_args != null) {
-                _ = mod.monomorphed_funcs.remove(func);
-            }
-            func.deinit(gpa);
-            gpa.destroy(func);
+            mod.destroyFunc(func);
         }
-        if (decl.getVariable()) |variable| {
-            variable.deinit(gpa);
-            gpa.destroy(variable);
-        }
-        if (decl.value_arena) |value_arena| {
-            if (decl.owns_tv) {
-                if (decl.val.castTag(.str_lit)) |str_lit| {
-                    mod.string_literal_table.getPtrContext(str_lit.data, .{
-                        .bytes = &mod.string_literal_bytes,
-                    }).?.* = .none;
-                }
-            }
-            value_arena.deinit(gpa);
-            decl.value_arena = null;
-            decl.has_tv = false;
-            decl.owns_tv = false;
-        }
-    }
-
-    pub fn finalizeNewArena(decl: *Decl, arena: *std.heap.ArenaAllocator) !void {
-        assert(decl.value_arena == null);
-        const value_arena = try arena.allocator().create(ValueArena);
-        value_arena.* = .{ .state = arena.state };
-        decl.value_arena = value_arena;
     }
 
     /// This name is relative to the containing namespace of the decl.
     /// The memory is owned by the containing File ZIR.
-    pub fn getName(decl: Decl) ?[:0]const u8 {
-        const zir = decl.getFileScope().zir;
+    pub fn getName(decl: Decl, mod: *Module) ?[:0]const u8 {
+        const zir = decl.getFileScope(mod).zir;
         return decl.getNameZir(zir);
    }
@@ -683,8 +610,8 @@ pub const Decl = struct {
         return zir.nullTerminatedString(name_index);
    }
 
-    pub fn contentsHash(decl: Decl) std.zig.SrcHash {
-        const zir = decl.getFileScope().zir;
+    pub fn contentsHash(decl: Decl, mod: *Module) std.zig.SrcHash {
+        const zir = decl.getFileScope(mod).zir;
         return decl.contentsHashZir(zir);
    }
@@ -695,31 +622,31 @@ pub const Decl = struct {
         return contents_hash;
    }
 
-    pub fn zirBlockIndex(decl: *const Decl) Zir.Inst.Index {
+    pub fn zirBlockIndex(decl: *const Decl, mod: *Module) Zir.Inst.Index {
         assert(decl.zir_decl_index != 0);
-        const zir = decl.getFileScope().zir;
+        const zir = decl.getFileScope(mod).zir;
         return zir.extra[decl.zir_decl_index + 6];
    }
 
-    pub fn zirAlignRef(decl: Decl) Zir.Inst.Ref {
+    pub fn zirAlignRef(decl: Decl, mod: *Module) Zir.Inst.Ref {
         if (!decl.has_align) return .none;
         assert(decl.zir_decl_index != 0);
-        const zir = decl.getFileScope().zir;
+        const zir = decl.getFileScope(mod).zir;
         return @intToEnum(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 8]);
    }
 
-    pub fn zirLinksectionRef(decl: Decl) Zir.Inst.Ref {
+    pub fn zirLinksectionRef(decl: Decl, mod: *Module) Zir.Inst.Ref {
         if (!decl.has_linksection_or_addrspace) return .none;
         assert(decl.zir_decl_index != 0);
-        const zir = decl.getFileScope().zir;
+        const zir = decl.getFileScope(mod).zir;
         const extra_index = decl.zir_decl_index + 8 + @boolToInt(decl.has_align);
         return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
    }
 
-    pub fn zirAddrspaceRef(decl: Decl) Zir.Inst.Ref {
+    pub fn zirAddrspaceRef(decl: Decl, mod: *Module) Zir.Inst.Ref {
         if (!decl.has_linksection_or_addrspace) return .none;
         assert(decl.zir_decl_index != 0);
-        const zir = decl.getFileScope().zir;
+        const zir = decl.getFileScope(mod).zir;
         const extra_index = decl.zir_decl_index + 8 + @boolToInt(decl.has_align) + 1;
         return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]);
    }
@@ -744,154 +671,167 @@ pub const Decl = struct {
         return LazySrcLoc.nodeOffset(decl.nodeIndexToRelative(node_index));
    }
 
-    pub fn srcLoc(decl: Decl) SrcLoc {
-        return decl.nodeOffsetSrcLoc(0);
+    pub fn srcLoc(decl: Decl, mod: *Module) SrcLoc {
+        return decl.nodeOffsetSrcLoc(0, mod);
    }
 
-    pub fn nodeOffsetSrcLoc(decl: Decl, node_offset: i32) SrcLoc {
+    pub fn nodeOffsetSrcLoc(decl: Decl, node_offset: i32, mod: *Module) SrcLoc {
         return .{
-            .file_scope = decl.getFileScope(),
+            .file_scope = decl.getFileScope(mod),
             .parent_decl_node = decl.src_node,
             .lazy = LazySrcLoc.nodeOffset(node_offset),
         };
    }
 
-    pub fn srcToken(decl: Decl) Ast.TokenIndex {
-        const tree = &decl.getFileScope().tree;
+    pub fn srcToken(decl: Decl, mod: *Module) Ast.TokenIndex {
+        const tree = &decl.getFileScope(mod).tree;
         return tree.firstToken(decl.src_node);
    }
 
-    pub fn srcByteOffset(decl: Decl) u32 {
-        const tree = &decl.getFileScope().tree;
-        return tree.tokens.items(.start)[decl.srcToken()];
+    pub fn srcByteOffset(decl: Decl, mod: *Module) u32 {
+        const tree = &decl.getFileScope(mod).tree;
+        return tree.tokens.items(.start)[decl.srcToken(mod)];
    }
 
     pub fn renderFullyQualifiedName(decl: Decl, mod: *Module, writer: anytype) !void {
-        const unqualified_name = mem.sliceTo(decl.name, 0);
         if (decl.name_fully_qualified) {
-            return writer.writeAll(unqualified_name);
+            try writer.print("{}", .{decl.name.fmt(&mod.intern_pool)});
+        } else {
+            try mod.namespacePtr(decl.src_namespace).renderFullyQualifiedName(mod, decl.name, writer);
         }
-        return decl.src_namespace.renderFullyQualifiedName(mod, unqualified_name, writer);
    }
 
     pub fn renderFullyQualifiedDebugName(decl: Decl, mod: *Module, writer: anytype) !void {
-        const unqualified_name = mem.sliceTo(decl.name, 0);
-        return decl.src_namespace.renderFullyQualifiedDebugName(mod, unqualified_name, writer);
+        return mod.namespacePtr(decl.src_namespace).renderFullyQualifiedDebugName(mod, decl.name, writer);
    }
 
-    pub fn getFullyQualifiedName(decl: Decl, mod: *Module) ![:0]u8 {
-        var buffer = std.ArrayList(u8).init(mod.gpa);
-        defer buffer.deinit();
-        try decl.renderFullyQualifiedName(mod, buffer.writer());
+    pub fn getFullyQualifiedName(decl: Decl, mod: *Module) !InternPool.NullTerminatedString {
+        if (decl.name_fully_qualified) return decl.name;
+
+        const ip = &mod.intern_pool;
+        const count = count: {
+            var count: usize = ip.stringToSlice(decl.name).len + 1;
+            var ns: Namespace.Index = decl.src_namespace;
+            while (true) {
+                const namespace = mod.namespacePtr(ns);
+                const ns_decl = mod.declPtr(namespace.getDeclIndex(mod));
+                count += ip.stringToSlice(ns_decl.name).len + 1;
+                ns = namespace.parent.unwrap() orelse {
+                    count += namespace.file_scope.sub_file_path.len;
+                    break :count count;
+                };
+            }
+        };
+
+        const gpa = mod.gpa;
+        const start = ip.string_bytes.items.len;
+        // Protects reads of interned strings from being reallocated during the call to
+        // renderFullyQualifiedName.
+        try ip.string_bytes.ensureUnusedCapacity(gpa, count);
+        decl.renderFullyQualifiedName(mod, ip.string_bytes.writer(gpa)) catch unreachable;
 
         // Sanitize the name for nvptx which is more restrictive.
+        // TODO This should be handled by the backend, not the frontend. Have a
+        // look at how the C backend does it for inspiration.
         if (mod.comp.bin_file.options.target.cpu.arch.isNvptx()) {
-            for (buffer.items) |*byte| switch (byte.*) {
+            for (ip.string_bytes.items[start..]) |*byte| switch (byte.*) {
                 '{', '}', '*', '[', ']', '(', ')', ',', ' ', '\'' => byte.* = '_',
                 else => {},
             };
        }
 
-        return buffer.toOwnedSliceSentinel(0);
+        return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start);
    }
 
     pub fn typedValue(decl: Decl) error{AnalysisFail}!TypedValue {
         if (!decl.has_tv) return error.AnalysisFail;
-        return TypedValue{
-            .ty = decl.ty,
-            .val = decl.val,
-        };
+        return TypedValue{ .ty = decl.ty, .val = decl.val };
    }
 
-    pub fn value(decl: *Decl) error{AnalysisFail}!Value {
-        return (try decl.typedValue()).val;
+    pub fn internValue(decl: *Decl, mod: *Module) Allocator.Error!InternPool.Index {
+        assert(decl.has_tv);
+        const ip_index = try decl.val.intern(decl.ty, mod);
+        decl.val = ip_index.toValue();
+        return ip_index;
    }
 
-    pub fn isFunction(decl: Decl) !bool {
+    pub fn isFunction(decl: Decl, mod: *const Module) !bool {
         const tv = try decl.typedValue();
-        return tv.ty.zigTypeTag() == .Fn;
+        return tv.ty.zigTypeTag(mod) == .Fn;
    }
 
-    /// If the Decl has a value and it is a struct, return it,
+    /// If the Decl owns its value and it is a struct, return it,
     /// otherwise null.
-    pub fn getStruct(decl: *Decl) ?*Struct {
-        if (!decl.owns_tv) return null;
-        const ty = (decl.val.castTag(.ty) orelse return null).data;
-        const struct_obj = (ty.castTag(.@"struct") orelse return null).data;
-        return struct_obj;
+    pub fn getOwnedStruct(decl: Decl, mod: *Module) ?*Struct {
+        return mod.structPtrUnwrap(decl.getOwnedStructIndex(mod));
    }
 
-    /// If the Decl has a value and it is a union, return it,
-    /// otherwise null.
-    pub fn getUnion(decl: *Decl) ?*Union {
-        if (!decl.owns_tv) return null;
-        const ty = (decl.val.castTag(.ty) orelse return null).data;
-        const union_obj = (ty.cast(Type.Payload.Union) orelse return null).data;
-        return union_obj;
+    pub fn getOwnedStructIndex(decl: Decl, mod: *Module) Struct.OptionalIndex {
+        if (!decl.owns_tv) return .none;
+        if (decl.val.ip_index == .none) return .none;
+        return mod.intern_pool.indexToStructType(decl.val.toIntern());
    }
 
-    /// If the Decl has a value and it is a function, return it,
+    /// If the Decl owns its value and it is a union, return it,
     /// otherwise null.
-    pub fn getFunction(decl: *const Decl) ?*Fn {
+    pub fn getOwnedUnion(decl: Decl, mod: *Module) ?*Union {
         if (!decl.owns_tv) return null;
-        const func = (decl.val.castTag(.function) orelse return null).data;
-        return func;
+        if (decl.val.ip_index == .none) return null;
+        return mod.typeToUnion(decl.val.toType());
    }
 
-    /// If the Decl has a value and it is an extern function, returns it,
+    /// If the Decl owns its value and it is a function, return it,
     /// otherwise null.
-    pub fn getExternFn(decl: *const Decl) ?*ExternFn {
-        if (!decl.owns_tv) return null;
-        const extern_fn = (decl.val.castTag(.extern_fn) orelse return null).data;
-        return extern_fn;
+    pub fn getOwnedFunction(decl: Decl, mod: *Module) ?*Fn {
+        return mod.funcPtrUnwrap(decl.getOwnedFunctionIndex(mod));
    }
 
-    /// If the Decl has a value and it is a variable, returns it,
+    pub fn getOwnedFunctionIndex(decl: Decl, mod: *Module) Fn.OptionalIndex {
+        return if (decl.owns_tv) decl.val.getFunctionIndex(mod) else .none;
+    }
+
+    /// If the Decl owns its value and it is an extern function, returns it,
     /// otherwise null.
-    pub fn getVariable(decl: *const Decl) ?*Var {
-        if (!decl.owns_tv) return null;
-        const variable = (decl.val.castTag(.variable) orelse return null).data;
-        return variable;
+    pub fn getOwnedExternFunc(decl: Decl, mod: *Module) ?InternPool.Key.ExternFunc {
+        return if (decl.owns_tv) decl.val.getExternFunc(mod) else null;
+    }
+
+    /// If the Decl owns its value and it is a variable, returns it,
+    /// otherwise null.
+    pub fn getOwnedVariable(decl: Decl, mod: *Module) ?InternPool.Key.Variable {
+        return if (decl.owns_tv) decl.val.getVariable(mod) else null;
    }
 
     /// Gets the namespace that this Decl creates by being a struct, union,
     /// enum, or opaque.
     /// Only returns it if the Decl is the owner.
-    pub fn getInnerNamespace(decl: *Decl) ?*Namespace {
-        if (!decl.owns_tv) return null;
-        const ty = (decl.val.castTag(.ty) orelse return null).data;
-        switch (ty.tag()) {
-            .@"struct" => {
-                const struct_obj = ty.castTag(.@"struct").?.data;
-                return &struct_obj.namespace;
-            },
-            .enum_full, .enum_nonexhaustive => {
-                const enum_obj = ty.cast(Type.Payload.EnumFull).?.data;
-                return &enum_obj.namespace;
-            },
-            .empty_struct => {
-                return ty.castTag(.empty_struct).?.data;
-            },
-            .@"opaque" => {
-                const opaque_obj = ty.cast(Type.Payload.Opaque).?.data;
-                return &opaque_obj.namespace;
-            },
-            .@"union", .union_safety_tagged, .union_tagged => {
-                const union_obj = ty.cast(Type.Payload.Union).?.data;
-                return &union_obj.namespace;
+    pub fn getOwnedInnerNamespaceIndex(decl: Decl, mod: *Module) Namespace.OptionalIndex {
+        if (!decl.owns_tv) return .none;
+        return switch (decl.val.ip_index) {
+            .empty_struct_type => .none,
+            .none => .none,
+            else => switch (mod.intern_pool.indexToKey(decl.val.toIntern())) {
+                .opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
+                .struct_type => |struct_type| struct_type.namespace,
+                .union_type => |union_type| mod.unionPtr(union_type.index).namespace.toOptional(),
+                .enum_type => |enum_type| enum_type.namespace,
+                else => .none,
             },
+        };
+    }
 
-            else => return null,
-        }
+    /// Same as `getOwnedInnerNamespaceIndex` but additionally obtains the pointer.
+    pub fn getOwnedInnerNamespace(decl: Decl, mod: *Module) ?*Namespace {
+        return mod.namespacePtrUnwrap(decl.getOwnedInnerNamespaceIndex(mod));
    }
 
     pub fn dump(decl: *Decl) void {
         const loc = std.zig.findLineColumn(decl.scope.source.bytes, decl.src);
-        std.debug.print("{s}:{d}:{d} name={s} status={s}", .{
+        std.debug.print("{s}:{d}:{d} name={d} status={s}", .{
             decl.scope.sub_file_path,
             loc.line + 1,
             loc.column + 1,
-            mem.sliceTo(decl.name, 0),
+            @enumToInt(decl.name),
             @tagName(decl.analysis),
         });
         if (decl.has_tv) {
@@ -900,8 +840,8 @@ pub const Decl = struct {
         std.debug.print("\n", .{});
    }
 
-    pub fn getFileScope(decl: Decl) *File {
-        return decl.src_namespace.file_scope;
+    pub fn getFileScope(decl: Decl, mod: *Module) *File {
+        return mod.namespacePtr(decl.src_namespace).file_scope;
    }
 
     pub fn removeDependant(decl: *Decl, other: Decl.Index) void {
@@ -912,25 +852,29 @@ pub const Decl = struct {
         assert(decl.dependencies.swapRemove(other));
    }
 
-    pub fn isExtern(decl: Decl) bool {
+    pub fn isExtern(decl: Decl, mod: *Module) bool {
         assert(decl.has_tv);
-        return switch (decl.val.tag()) {
-            .extern_fn => true,
-            .variable => decl.val.castTag(.variable).?.data.init.tag() == .unreachable_value,
+        return switch (mod.intern_pool.indexToKey(decl.val.toIntern())) {
+            .variable => |variable| variable.is_extern,
+            .extern_func => true,
             else => false,
         };
    }
 
-    pub fn getAlignment(decl: Decl, target: Target) u32 {
+    pub fn getAlignment(decl: Decl, mod: *Module) u32 {
         assert(decl.has_tv);
         if (decl.@"align" != 0) {
             // Explicit alignment.
             return decl.@"align";
         } else {
             // Natural alignment.
-            return decl.ty.abiAlignment(target);
+            return decl.ty.abiAlignment(mod);
        }
    }
+
+    pub fn intern(decl: *Decl, mod: *Module) Allocator.Error!void {
+        decl.val = (try decl.val.intern(decl.ty, mod)).toValue();
+    }
 };
 
 /// This state is attached to every Decl when Module emit_h is non-null.
@@ -938,38 +882,6 @@ pub const EmitH = struct {
     fwd_decl: ArrayListUnmanaged(u8) = .{},
 };
 
-/// Represents the data that an explicit error set syntax provides.
-pub const ErrorSet = struct {
-    /// The Decl that corresponds to the error set itself.
-    owner_decl: Decl.Index,
-    /// The string bytes are stored in the owner Decl arena.
-    /// These must be in sorted order. See sortNames.
-    names: NameMap,
-
-    pub const NameMap = std.StringArrayHashMapUnmanaged(void);
-
-    pub fn srcLoc(self: ErrorSet, mod: *Module) SrcLoc {
-        const owner_decl = mod.declPtr(self.owner_decl);
-        return .{
-            .file_scope = owner_decl.getFileScope(),
-            .parent_decl_node = owner_decl.src_node,
-            .lazy = LazySrcLoc.nodeOffset(0),
-        };
-    }
-
-    /// sort the NameMap. This should be called whenever the map is modified.
-    /// alloc should be the allocator used for the NameMap data.
-    pub fn sortNames(names: *NameMap) void {
-        const Context = struct {
-            keys: [][]const u8,
-            pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
-                return std.mem.lessThan(u8, ctx.keys[a_index], ctx.keys[b_index]);
-            }
-        };
-        names.sort(Context{ .keys = names.keys() });
-    }
-};
-
 pub const PropertyBoolean = enum { no, yes, unknown, wip };
 
 /// Represents the data that a struct declaration provides.
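A detail in `getFullyQualifiedName` above that is easy to miss: the total byte count is computed first so `string_bytes` grows exactly once, after which the writer cannot allocate, making the `catch unreachable` sound and guaranteeing that reads of interned strings are not invalidated mid-render. The reserve-then-write-infallibly pattern in a self-contained form:

    const std = @import("std");

    test "reserve once, then write infallibly" {
        const gpa = std.testing.allocator;
        var bytes = std.ArrayListUnmanaged(u8){};
        defer bytes.deinit(gpa);

        const name = "foo";
        const ext = ".zig";
        // Grow the buffer up front; the subsequent writes can no longer fail,
        // and no pointers into `bytes` move while we render.
        try bytes.ensureUnusedCapacity(gpa, name.len + ext.len);
        bytes.writer(gpa).print("{s}{s}", .{ name, ext }) catch unreachable;

        try std.testing.expectEqualStrings("foo.zig", bytes.items);
    }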
@@ -977,7 +889,7 @@
     /// Set of field names in declaration order.
     fields: Fields,
     /// Represents the declarations inside this struct.
-    namespace: Namespace,
+    namespace: Namespace.Index,
     /// The Decl that corresponds to the struct itself.
     owner_decl: Decl.Index,
     /// Index of the struct_decl ZIR instruction.
@@ -989,7 +901,7 @@
     /// If the layout is packed, this is the backing integer type of the packed struct.
     /// Whether zig chooses this type or the user specifies it, it is stored here.
     /// This will be set to the noreturn type until status is `have_layout`.
-    backing_int_ty: Type = Type.initTag(.noreturn),
+    backing_int_ty: Type = Type.noreturn,
     status: enum {
         none,
         field_types_wip,
@@ -1011,15 +923,37 @@
     is_tuple: bool,
     assumed_runtime_bits: bool = false,
 
-    pub const Fields = std.StringArrayHashMapUnmanaged(Field);
+    pub const Index = enum(u32) {
+        _,
+
+        pub fn toOptional(i: Index) OptionalIndex {
+            return @intToEnum(OptionalIndex, @enumToInt(i));
+        }
+    };
+
+    pub const OptionalIndex = enum(u32) {
+        none = std.math.maxInt(u32),
+        _,
+
+        pub fn init(oi: ?Index) OptionalIndex {
+            return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+        }
+
+        pub fn unwrap(oi: OptionalIndex) ?Index {
+            if (oi == .none) return null;
+            return @intToEnum(Index, @enumToInt(oi));
+        }
+    };
+
+    pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field);
 
     /// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl.
     pub const Field = struct {
         /// Uses `noreturn` to indicate `anytype`.
         /// undefined until `status` is >= `have_field_types`.
         ty: Type,
-        /// Uses `unreachable_value` to indicate no default.
-        default_val: Value,
+        /// Uses `none` to indicate no default.
+        default_val: InternPool.Index,
         /// Zero means to use the ABI alignment of the type.
         abi_align: u32,
         /// undefined until `status` is `have_layout`.
@@ -1030,7 +964,7 @@
         /// Returns the field alignment. If the struct is packed, returns 0.
         pub fn alignment(
             field: Field,
-            target: Target,
+            mod: *Module,
             layout: std.builtin.Type.ContainerLayout,
         ) u32 {
             if (field.abi_align != 0) {
@@ -1038,24 +972,26 @@
                 return field.abi_align;
             }
 
+            const target = mod.getTarget();
+
             switch (layout) {
                 .Packed => return 0,
                 .Auto => {
                     if (target.ofmt == .c) {
-                        return alignmentExtern(field, target);
+                        return alignmentExtern(field, mod);
                    } else {
-                        return field.ty.abiAlignment(target);
+                        return field.ty.abiAlignment(mod);
                    }
                },
-                .Extern => return alignmentExtern(field, target),
+                .Extern => return alignmentExtern(field, mod),
            }
        }
 
-        pub fn alignmentExtern(field: Field, target: Target) u32 {
+        pub fn alignmentExtern(field: Field, mod: *Module) u32 {
             // This logic is duplicated in Type.abiAlignmentAdvanced.
-            const ty_abi_align = field.ty.abiAlignment(target);
+            const ty_abi_align = field.ty.abiAlignment(mod);
 
-            if (field.ty.isAbiInt() and field.ty.intInfo(target).bits >= 128) {
+            if (field.ty.isAbiInt(mod) and field.ty.intInfo(mod).bits >= 128) {
                 // The C ABI requires 128 bit integer fields of structs
                 // to be 16-bytes aligned.
                 return @max(ty_abi_align, 16);
@@ -1069,39 +1005,12 @@
     /// runtime version of the struct.
     pub const omitted_field = std.math.maxInt(u32);
 
-    pub fn getFullyQualifiedName(s: *Struct, mod: *Module) ![:0]u8 {
+    pub fn getFullyQualifiedName(s: *Struct, mod: *Module) !InternPool.NullTerminatedString {
         return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
    }
 
     pub fn srcLoc(s: Struct, mod: *Module) SrcLoc {
-        const owner_decl = mod.declPtr(s.owner_decl);
-        return .{
-            .file_scope = owner_decl.getFileScope(),
-            .parent_decl_node = owner_decl.src_node,
-            .lazy = LazySrcLoc.nodeOffset(0),
-        };
-    }
-
-    pub fn fieldSrcLoc(s: Struct, mod: *Module, query: FieldSrcQuery) SrcLoc {
-        @setCold(true);
-        const owner_decl = mod.declPtr(s.owner_decl);
-        const file = owner_decl.getFileScope();
-        const tree = file.getTree(mod.gpa) catch |err| {
-            // In this case we emit a warning + a less precise source location.
-            log.warn("unable to load {s}: {s}", .{
-                file.sub_file_path, @errorName(err),
-            });
-            return s.srcLoc(mod);
-        };
-        const node = owner_decl.relativeToNodeIndex(0);
-
-        var buf: [2]Ast.Node.Index = undefined;
-        if (tree.fullContainerDecl(&buf, node)) |container_decl| {
-            return queryFieldSrc(tree.*, query, file, container_decl);
-        } else {
-            // This struct was generated using @Type
-            return s.srcLoc(mod);
-        }
+        return mod.declPtr(s.owner_decl).srcLoc(mod);
    }
 
     pub fn haveFieldTypes(s: Struct) bool {
@@ -1132,7 +1041,7 @@
         };
    }
 
-    pub fn packedFieldBitOffset(s: Struct, target: Target, index: usize) u16 {
+    pub fn packedFieldBitOffset(s: Struct, mod: *Module, index: usize) u16 {
         assert(s.layout == .Packed);
         assert(s.haveLayout());
         var bit_sum: u64 = 0;
@@ -1140,12 +1049,13 @@
             if (i == index) {
                 return @intCast(u16, bit_sum);
             }
-            bit_sum += field.ty.bitSize(target);
+            bit_sum += field.ty.bitSize(mod);
        }
        unreachable; // index out of bounds
    }
 
     pub const RuntimeFieldIterator = struct {
+        module: *Module,
         struct_obj: *const Struct,
         index: u32 = 0,
@@ -1155,6 +1065,7 @@
         };
 
         pub fn next(it: *RuntimeFieldIterator) ?FieldAndIndex {
+            const mod = it.module;
             while (true) {
                 var i = it.index;
                 it.index += 1;
@@ -1167,122 +1078,21 @@
                 }
 
                 const field = it.struct_obj.fields.values()[i];
-                if (!field.is_comptime and field.ty.hasRuntimeBits()) {
+                if (!field.is_comptime and field.ty.hasRuntimeBits(mod)) {
                     return FieldAndIndex{ .index = i, .field = field };
                }
            }
        }
    };
 
-    pub fn runtimeFieldIterator(s: *const Struct) RuntimeFieldIterator {
-        return .{ .struct_obj = s };
-    }
-};
-
-/// Represents the data that an enum declaration provides, when the fields
-/// are auto-numbered, and there are no declarations. The integer tag type
-/// is inferred to be the smallest power of two unsigned int that fits
-/// the number of fields.
-pub const EnumSimple = struct {
-    /// The Decl that corresponds to the enum itself.
-    owner_decl: Decl.Index,
-    /// Set of field names in declaration order.
-    fields: NameMap,
-
-    pub const NameMap = EnumFull.NameMap;
-
-    pub fn srcLoc(self: EnumSimple, mod: *Module) SrcLoc {
-        const owner_decl = mod.declPtr(self.owner_decl);
+    pub fn runtimeFieldIterator(s: *const Struct, module: *Module) RuntimeFieldIterator {
         return .{
-            .file_scope = owner_decl.getFileScope(),
-            .parent_decl_node = owner_decl.src_node,
-            .lazy = LazySrcLoc.nodeOffset(0),
+            .struct_obj = s,
+            .module = module,
         };
    }
 };
 
-/// Represents the data that an enum declaration provides, when there are no
-/// declarations. However an integer tag type is provided, and the enum tag values
-/// are explicitly provided.
-pub const EnumNumbered = struct {
-    /// The Decl that corresponds to the enum itself.
-    owner_decl: Decl.Index,
-    /// An integer type which is used for the numerical value of the enum.
-    /// Whether zig chooses this type or the user specifies it, it is stored here.
-    tag_ty: Type,
-    /// Set of field names in declaration order.
-    fields: NameMap,
-    /// Maps integer tag value to field index.
-    /// Entries are in declaration order, same as `fields`.
-    /// If this hash map is empty, it means the enum tags are auto-numbered.
-    values: ValueMap,
-
-    pub const NameMap = EnumFull.NameMap;
-    pub const ValueMap = EnumFull.ValueMap;
-
-    pub fn srcLoc(self: EnumNumbered, mod: *Module) SrcLoc {
-        const owner_decl = mod.declPtr(self.owner_decl);
-        return .{
-            .file_scope = owner_decl.getFileScope(),
-            .parent_decl_node = owner_decl.src_node,
-            .lazy = LazySrcLoc.nodeOffset(0),
-        };
-    }
-};
-
-/// Represents the data that an enum declaration provides, when there is
-/// at least one tag value explicitly specified, or at least one declaration.
-pub const EnumFull = struct {
-    /// The Decl that corresponds to the enum itself.
-    owner_decl: Decl.Index,
-    /// An integer type which is used for the numerical value of the enum.
-    /// Whether zig chooses this type or the user specifies it, it is stored here.
-    tag_ty: Type,
-    /// Set of field names in declaration order.
-    fields: NameMap,
-    /// Maps integer tag value to field index.
-    /// Entries are in declaration order, same as `fields`.
-    /// If this hash map is empty, it means the enum tags are auto-numbered.
-    values: ValueMap,
-    /// Represents the declarations inside this enum.
-    namespace: Namespace,
-    /// true if zig inferred this tag type, false if user specified it
-    tag_ty_inferred: bool,
-
-    pub const NameMap = std.StringArrayHashMapUnmanaged(void);
-    pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false);
-
-    pub fn srcLoc(self: EnumFull, mod: *Module) SrcLoc {
-        const owner_decl = mod.declPtr(self.owner_decl);
-        return .{
-            .file_scope = owner_decl.getFileScope(),
-            .parent_decl_node = owner_decl.src_node,
-            .lazy = LazySrcLoc.nodeOffset(0),
-        };
-    }
-
-    pub fn fieldSrcLoc(e: EnumFull, mod: *Module, query: FieldSrcQuery) SrcLoc {
-        @setCold(true);
-        const owner_decl = mod.declPtr(e.owner_decl);
-        const file = owner_decl.getFileScope();
-        const tree = file.getTree(mod.gpa) catch |err| {
-            // In this case we emit a warning + a less precise source location.
-            log.warn("unable to load {s}: {s}", .{
-                file.sub_file_path, @errorName(err),
-            });
-            return e.srcLoc(mod);
-        };
-        const node = owner_decl.relativeToNodeIndex(0);
-        var buf: [2]Ast.Node.Index = undefined;
-        if (tree.fullContainerDecl(&buf, node)) |container_decl| {
-            return queryFieldSrc(tree.*, query, file, container_decl);
-        } else {
-            // This enum was generated using @Type
-            return e.srcLoc(mod);
-        }
-    }
-};
-
 pub const Union = struct {
     /// An enum type which is used for the tag of the union.
     /// This type is created even for untagged unions, even when the memory
@@ -1293,7 +1103,7 @@
     /// Set of field names in declaration order.
     fields: Fields,
     /// Represents the declarations inside this union.
-    namespace: Namespace,
+    namespace: Namespace.Index,
     /// The Decl that corresponds to the union itself.
     owner_decl: Decl.Index,
     /// Index of the union_decl ZIR instruction.
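`Struct` above, and `Union` and `Fn` below, all gain the same `Index`/`OptionalIndex` pair: instead of `?u32`, the null state is folded into `maxInt(u32)`, so an optional handle stays four bytes. The encoding, condensed from those definitions into a runnable demonstration:

    const std = @import("std");

    const Index = enum(u32) {
        _,

        fn toOptional(i: Index) OptionalIndex {
            return @intToEnum(OptionalIndex, @enumToInt(i));
        }
    };

    const OptionalIndex = enum(u32) {
        none = std.math.maxInt(u32),
        _,

        fn unwrap(oi: OptionalIndex) ?Index {
            if (oi == .none) return null;
            return @intToEnum(Index, @enumToInt(oi));
        }
    };

    test "null packed into the integer's niche" {
        // No separate bool flag: the optional is the same size as the index.
        try std.testing.expectEqual(@sizeOf(Index), @sizeOf(OptionalIndex));
        try std.testing.expect(@sizeOf(OptionalIndex) < @sizeOf(?u32));

        const i = @intToEnum(Index, 3);
        try std.testing.expectEqual(@as(?Index, i), i.toOptional().unwrap());
    }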
@@ -1314,6 +1124,28 @@ pub const Union = struct {
     requires_comptime: PropertyBoolean = .unknown,
     assumed_runtime_bits: bool = false,
 
+    pub const Index = enum(u32) {
+        _,
+
+        pub fn toOptional(i: Index) OptionalIndex {
+            return @intToEnum(OptionalIndex, @enumToInt(i));
+        }
+    };
+
+    pub const OptionalIndex = enum(u32) {
+        none = std.math.maxInt(u32),
+        _,
+
+        pub fn init(oi: ?Index) OptionalIndex {
+            return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
+        }
+
+        pub fn unwrap(oi: OptionalIndex) ?Index {
+            if (oi == .none) return null;
+            return @intToEnum(Index, @enumToInt(oi));
+        }
+    };
+
     pub const Field = struct {
         /// undefined until `status` is `have_field_types` or `have_layout`.
         ty: Type,
@@ -1323,52 +1155,30 @@ pub const Union = struct {
         /// Returns the field alignment, assuming the union is not packed.
         /// Keep implementation in sync with `Sema.unionFieldAlignment`.
         /// Prefer to call that function instead of this one during Sema.
-        pub fn normalAlignment(field: Field, target: Target) u32 {
+        pub fn normalAlignment(field: Field, mod: *Module) u32 {
             if (field.abi_align == 0) {
-                return field.ty.abiAlignment(target);
+                return field.ty.abiAlignment(mod);
             } else {
                 return field.abi_align;
            }
        }
    };
 
-    pub const Fields = std.StringArrayHashMapUnmanaged(Field);
+    pub const Fields = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, Field);
 
-    pub fn getFullyQualifiedName(s: *Union, mod: *Module) ![:0]u8 {
+    pub fn getFullyQualifiedName(s: *Union, mod: *Module) !InternPool.NullTerminatedString {
         return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod);
    }
 
     pub fn srcLoc(self: Union, mod: *Module) SrcLoc {
         const owner_decl = mod.declPtr(self.owner_decl);
         return .{
-            .file_scope = owner_decl.getFileScope(),
+            .file_scope = owner_decl.getFileScope(mod),
             .parent_decl_node = owner_decl.src_node,
             .lazy = LazySrcLoc.nodeOffset(0),
         };
    }
 
-    pub fn fieldSrcLoc(u: Union, mod: *Module, query: FieldSrcQuery) SrcLoc {
-        @setCold(true);
-        const owner_decl = mod.declPtr(u.owner_decl);
-        const file = owner_decl.getFileScope();
-        const tree = file.getTree(mod.gpa) catch |err| {
-            // In this case we emit a warning + a less precise source location.
-            log.warn("unable to load {s}: {s}", .{
-                file.sub_file_path, @errorName(err),
-            });
-            return u.srcLoc(mod);
-        };
-        const node = owner_decl.relativeToNodeIndex(0);
-
-        var buf: [2]Ast.Node.Index = undefined;
-        if (tree.fullContainerDecl(&buf, node)) |container_decl| {
-            return queryFieldSrc(tree.*, query, file, container_decl);
-        } else {
-            // This union was generated using @Type
-            return u.srcLoc(mod);
-        }
-    }
-
     pub fn haveFieldTypes(u: Union) bool {
         return switch (u.status) {
             .none,
@@ -1383,22 +1193,22 @@ pub const Union = struct {
         };
    }
 
-    pub fn hasAllZeroBitFieldTypes(u: Union) bool {
+    pub fn hasAllZeroBitFieldTypes(u: Union, mod: *Module) bool {
         assert(u.haveFieldTypes());
         for (u.fields.values()) |field| {
-            if (field.ty.hasRuntimeBits()) return false;
+            if (field.ty.hasRuntimeBits(mod)) return false;
        }
        return true;
    }
 
-    pub fn mostAlignedField(u: Union, target: Target) u32 {
+    pub fn mostAlignedField(u: Union, mod: *Module) u32 {
         assert(u.haveFieldTypes());
         var most_alignment: u32 = 0;
         var most_index: usize = undefined;
         for (u.fields.values(), 0..) |field, i| {
-            if (!field.ty.hasRuntimeBits()) continue;
+            if (!field.ty.hasRuntimeBits(mod)) continue;
 
-            const field_align = field.normalAlignment(target);
+            const field_align = field.normalAlignment(mod);
             if (field_align > most_alignment) {
                 most_alignment = field_align;
                 most_index = i;
@@ -1408,20 +1218,20 @@ pub const Union = struct {
    }
 
     /// Returns 0 if the union is represented with 0 bits at runtime.
-    pub fn abiAlignment(u: Union, target: Target, have_tag: bool) u32 {
+    pub fn abiAlignment(u: Union, mod: *Module, have_tag: bool) u32 {
         var max_align: u32 = 0;
-        if (have_tag) max_align = u.tag_ty.abiAlignment(target);
+        if (have_tag) max_align = u.tag_ty.abiAlignment(mod);
         for (u.fields.values()) |field| {
-            if (!field.ty.hasRuntimeBits()) continue;
+            if (!field.ty.hasRuntimeBits(mod)) continue;
 
-            const field_align = field.normalAlignment(target);
+            const field_align = field.normalAlignment(mod);
             max_align = @max(max_align, field_align);
        }
        return max_align;
    }
 
-    pub fn abiSize(u: Union, target: Target, have_tag: bool) u64 {
-        return u.getLayout(target, have_tag).abi_size;
+    pub fn abiSize(u: Union, mod: *Module, have_tag: bool) u64 {
+        return u.getLayout(mod, have_tag).abi_size;
    }
 
     pub const Layout = struct {
@@ -1451,7 +1261,7 @@ pub const Union = struct {
         };
    }
 
-    pub fn getLayout(u: Union, target: Target, have_tag: bool) Layout {
+    pub fn getLayout(u: Union, mod: *Module, have_tag: bool) Layout {
         assert(u.haveLayout());
         var most_aligned_field: u32 = undefined;
         var most_aligned_field_size: u64 = undefined;
@@ -1460,16 +1270,16 @@ pub const Union = struct {
         var payload_align: u32 = 0;
         const fields = u.fields.values();
         for (fields, 0..) |field, i| {
-            if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+            if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
             const field_align = a: {
                 if (field.abi_align == 0) {
-                    break :a field.ty.abiAlignment(target);
+                    break :a field.ty.abiAlignment(mod);
                 } else {
                     break :a field.abi_align;
                }
            };
-            const field_size = field.ty.abiSize(target);
+            const field_size = field.ty.abiSize(mod);
             if (field_size > payload_size) {
                 payload_size = field_size;
                 biggest_field = @intCast(u32, i);
@@ -1481,7 +1291,7 @@ pub const Union = struct {
            }
        }
        payload_align = @max(payload_align, 1);
-        if (!have_tag or !u.tag_ty.hasRuntimeBits()) {
+        if (!have_tag or !u.tag_ty.hasRuntimeBits(mod)) {
            return .{
                .abi_size = std.mem.alignForwardGeneric(u64, payload_size, payload_align),
                .abi_align = payload_align,
@@ -1497,8 +1307,8 @@ pub const Union = struct {
        }
        // Put the tag before or after the payload depending on which one's
        // alignment is greater.
-        const tag_size = u.tag_ty.abiSize(target);
-        const tag_align = @max(1, u.tag_ty.abiAlignment(target));
+        const tag_size = u.tag_ty.abiSize(mod);
+        const tag_align = @max(1, u.tag_ty.abiAlignment(mod));
        var size: u64 = 0;
        var padding: u32 = undefined;
        if (tag_align >= payload_align) {
- namespace: Namespace, - - pub fn srcLoc(self: Opaque, mod: *Module) SrcLoc { - const owner_decl = mod.declPtr(self.owner_decl); - return .{ - .file_scope = owner_decl.getFileScope(), - .parent_decl_node = owner_decl.src_node, - .lazy = LazySrcLoc.nodeOffset(0), - }; - } - - pub fn getFullyQualifiedName(s: *Opaque, mod: *Module) ![:0]u8 { - return mod.declPtr(s.owner_decl).getFullyQualifiedName(mod); - } -}; - /// Some extern function struct memory is owned by the Decl's TypedValue.Managed /// arena allocator. pub const ExternFn = struct { @@ -1630,12 +1420,27 @@ pub const Fn = struct { is_noinline: bool, calls_or_awaits_errorable_fn: bool = false, - /// Any inferred error sets that this function owns, both its own inferred error set and - /// inferred error sets of any inline/comptime functions called. Not to be confused - /// with inferred error sets of generic instantiations of this function, which are - /// *not* tracked here - they are tracked in the new `Fn` object created for the - /// instantiations. - inferred_error_sets: InferredErrorSetList = .{}, + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; pub const Analysis = enum { /// This function has not yet undergone analysis, because we have not @@ -1662,16 +1467,16 @@ pub const Fn = struct { /// or comptime functions. pub const InferredErrorSet = struct { /// The function from which this error set originates. - func: *Fn, + func: Fn.Index, /// All currently known errors that this error set contains. This includes /// direct additions via `return error.Foo;`, and possibly also errors that /// are returned from any dependent functions. When the inferred error set is /// fully resolved, this map contains all the errors that the function might return. - errors: ErrorSet.NameMap = .{}, + errors: NameMap = .{}, /// Other inferred error sets which this inferred error set should include. - inferred_error_sets: std.AutoArrayHashMapUnmanaged(*InferredErrorSet, void) = .{}, + inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{}, /// Whether the function returned anyerror. This is true if either of /// the dependent functions returns anyerror. @@ -1681,52 +1486,57 @@ pub const Fn = struct { /// can skip resolving any dependents of this inferred error set. 
is_resolved: bool = false, - pub fn addErrorSet(self: *InferredErrorSet, gpa: Allocator, err_set_ty: Type) !void { - switch (err_set_ty.tag()) { - .error_set => { - const names = err_set_ty.castTag(.error_set).?.data.names.keys(); - for (names) |name| { - try self.errors.put(gpa, name, {}); - } - }, - .error_set_single => { - const name = err_set_ty.castTag(.error_set_single).?.data; - try self.errors.put(gpa, name, {}); - }, - .error_set_inferred => { - const ies = err_set_ty.castTag(.error_set_inferred).?.data; - try self.inferred_error_sets.put(gpa, ies, {}); - }, - .error_set_merged => { - const names = err_set_ty.castTag(.error_set_merged).?.data.keys(); - for (names) |name| { - try self.errors.put(gpa, name, {}); - } - }, - .anyerror => { + pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void); + + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex { + return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex { + return @intToEnum(InferredErrorSet.OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index { + if (oi == .none) return null; + return @intToEnum(InferredErrorSet.Index, @enumToInt(oi)); + } + }; + + pub fn addErrorSet( + self: *InferredErrorSet, + err_set_ty: Type, + ip: *InternPool, + gpa: Allocator, + ) !void { + switch (err_set_ty.toIntern()) { + .anyerror_type => { self.is_anyerror = true; }, - else => unreachable, + else => switch (ip.indexToKey(err_set_ty.toIntern())) { + .error_set_type => |error_set_type| { + for (error_set_type.names) |name| { + try self.errors.put(gpa, name, {}); + } + }, + .inferred_error_set_type => |ies_index| { + try self.inferred_error_sets.put(gpa, ies_index, {}); + }, + else => unreachable, + }, } } }; - pub const InferredErrorSetList = std.SinglyLinkedList(InferredErrorSet); - pub const InferredErrorSetListNode = InferredErrorSetList.Node; - - pub fn deinit(func: *Fn, gpa: Allocator) void { - var it = func.inferred_error_sets.first; - while (it) |node| { - const next = node.next; - node.data.errors.deinit(gpa); - node.data.inferred_error_sets.deinit(gpa); - gpa.destroy(node); - it = next; - } - } - pub fn isAnytypeParam(func: Fn, mod: *Module, index: u32) bool { - const file = mod.declPtr(func.owner_decl).getFileScope(); + const file = mod.declPtr(func.owner_decl).getFileScope(mod); const tags = file.zir.instructions.items(.tag); @@ -1741,7 +1551,7 @@ pub const Fn = struct { } pub fn getParamName(func: Fn, mod: *Module, index: u32) [:0]const u8 { - const file = mod.declPtr(func.owner_decl).getFileScope(); + const file = mod.declPtr(func.owner_decl).getFileScope(mod); const tags = file.zir.instructions.items(.tag); const data = file.zir.instructions.items(.data); @@ -1764,7 +1574,7 @@ pub const Fn = struct { pub fn hasInferredErrorSet(func: Fn, mod: *Module) bool { const owner_decl = mod.declPtr(func.owner_decl); - const zir = owner_decl.getFileScope().zir; + const zir = owner_decl.getFileScope(mod).zir; const zir_tags = zir.instructions.items(.tag); switch (zir_tags[func.zir_body_inst]) { .func => return false, @@ -1779,46 +1589,24 @@ pub const Fn = struct { } }; -pub const Var = struct { - /// if is_extern == true this is undefined - init: Value, - owner_decl: Decl.Index, - - /// Library name if 
specified. - /// For example `extern "c" var stderrp = ...` would have 'c' as library name. - /// Allocated with Module's allocator; outlives the ZIR code. - lib_name: ?[*:0]const u8, - - is_extern: bool, - is_mutable: bool, - is_threadlocal: bool, - is_weak_linkage: bool, - - pub fn deinit(variable: *Var, gpa: Allocator) void { - if (variable.lib_name) |lib_name| { - gpa.free(mem.sliceTo(lib_name, 0)); - } - } -}; - pub const DeclAdapter = struct { mod: *Module, - pub fn hash(self: @This(), s: []const u8) u32 { + pub fn hash(self: @This(), s: InternPool.NullTerminatedString) u32 { _ = self; - return @truncate(u32, std.hash.Wyhash.hash(0, s)); + return std.hash.uint32(@enumToInt(s)); } - pub fn eql(self: @This(), a: []const u8, b_decl_index: Decl.Index, b_index: usize) bool { + pub fn eql(self: @This(), a: InternPool.NullTerminatedString, b_decl_index: Decl.Index, b_index: usize) bool { _ = b_index; const b_decl = self.mod.declPtr(b_decl_index); - return mem.eql(u8, a, mem.sliceTo(b_decl.name, 0)); + return a == b_decl.name; } }; /// The container that structs, enums, unions, and opaques have. pub const Namespace = struct { - parent: ?*Namespace, + parent: OptionalIndex, file_scope: *File, /// Will be a struct, enum, union, or opaque. ty: Type, @@ -1836,21 +1624,41 @@ pub const Namespace = struct { /// Value is whether the usingnamespace decl is marked `pub`. usingnamespace_set: std.AutoHashMapUnmanaged(Decl.Index, bool) = .{}, + pub const Index = enum(u32) { + _, + + pub fn toOptional(i: Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(i)); + } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn init(oi: ?Index) OptionalIndex { + return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none)); + } + + pub fn unwrap(oi: OptionalIndex) ?Index { + if (oi == .none) return null; + return @intToEnum(Index, @enumToInt(oi)); + } + }; + const DeclContext = struct { module: *Module, pub fn hash(ctx: @This(), decl_index: Decl.Index) u32 { const decl = ctx.module.declPtr(decl_index); - return @truncate(u32, std.hash.Wyhash.hash(0, mem.sliceTo(decl.name, 0))); + return std.hash.uint32(@enumToInt(decl.name)); } pub fn eql(ctx: @This(), a_decl_index: Decl.Index, b_decl_index: Decl.Index, b_index: usize) bool { _ = b_index; const a_decl = ctx.module.declPtr(a_decl_index); const b_decl = ctx.module.declPtr(b_decl_index); - const a_name = mem.sliceTo(a_decl.name, 0); - const b_name = mem.sliceTo(b_decl.name, 0); - return mem.eql(u8, a_name, b_name); + return a_decl.name == b_decl.name; } }; @@ -1862,8 +1670,6 @@ pub const Namespace = struct { pub fn destroyDecls(ns: *Namespace, mod: *Module) void { const gpa = mod.gpa; - log.debug("destroyDecls {*}", .{ns}); - var decls = ns.decls; ns.decls = .{}; @@ -1889,8 +1695,6 @@ pub const Namespace = struct { ) !void { const gpa = mod.gpa; - log.debug("deleteAllDecls {*}", .{ns}); - var decls = ns.decls; ns.decls = .{}; @@ -1919,46 +1723,38 @@ pub const Namespace = struct { pub fn renderFullyQualifiedName( ns: Namespace, mod: *Module, - name: []const u8, + name: InternPool.NullTerminatedString, writer: anytype, ) @TypeOf(writer).Error!void { - if (ns.parent) |parent| { - const decl_index = ns.getDeclIndex(); - const decl = mod.declPtr(decl_index); - try parent.renderFullyQualifiedName(mod, mem.sliceTo(decl.name, 0), writer); + if (ns.parent.unwrap()) |parent| { + const decl = mod.declPtr(ns.getDeclIndex(mod)); + try mod.namespacePtr(parent).renderFullyQualifiedName(mod, decl.name, writer); } else { 
try ns.file_scope.renderFullyQualifiedName(writer); } - if (name.len != 0) { - try writer.writeAll("."); - try writer.writeAll(name); - } + if (name != .empty) try writer.print(".{}", .{name.fmt(&mod.intern_pool)}); } /// This renders e.g. "std/fs.zig:Dir.OpenOptions" pub fn renderFullyQualifiedDebugName( ns: Namespace, mod: *Module, - name: []const u8, + name: InternPool.NullTerminatedString, writer: anytype, ) @TypeOf(writer).Error!void { - var separator_char: u8 = '.'; - if (ns.parent) |parent| { - const decl_index = ns.getDeclIndex(); - const decl = mod.declPtr(decl_index); - try parent.renderFullyQualifiedDebugName(mod, mem.sliceTo(decl.name, 0), writer); - } else { + const separator_char: u8 = if (ns.parent.unwrap()) |parent| sep: { + const decl = mod.declPtr(ns.getDeclIndex(mod)); + try mod.namespacePtr(parent).renderFullyQualifiedDebugName(mod, decl.name, writer); + break :sep '.'; + } else sep: { try ns.file_scope.renderFullyQualifiedDebugName(writer); - separator_char = ':'; - } - if (name.len != 0) { - try writer.writeByte(separator_char); - try writer.writeAll(name); - } + break :sep ':'; + }; + if (name != .empty) try writer.print("{c}{}", .{ separator_char, name.fmt(&mod.intern_pool) }); } - pub fn getDeclIndex(ns: Namespace) Decl.Index { - return ns.ty.getOwnerDecl(); + pub fn getDeclIndex(ns: Namespace, mod: *Module) Decl.Index { + return ns.ty.getOwnerDecl(mod); } }; @@ -2140,11 +1936,11 @@ pub const File = struct { }; } - pub fn fullyQualifiedNameZ(file: File, gpa: Allocator) ![:0]u8 { - var buf = std.ArrayList(u8).init(gpa); - defer buf.deinit(); - try file.renderFullyQualifiedName(buf.writer()); - return buf.toOwnedSliceSentinel(0); + pub fn fullyQualifiedName(file: File, mod: *Module) !InternPool.NullTerminatedString { + const ip = &mod.intern_pool; + const start = ip.string_bytes.items.len; + try file.renderFullyQualifiedName(ip.string_bytes.writer(mod.gpa)); + return ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start); } /// Returns the full path to this file relative to its package. @@ -2268,7 +2064,7 @@ pub const ErrorMsg = struct { reference_trace: []Trace = &.{}, pub const Trace = struct { - decl: ?[*:0]const u8, + decl: InternPool.OptionalNullTerminatedString, src_loc: SrcLoc, hidden: u32 = 0, }; @@ -2281,7 +2077,7 @@ pub const ErrorMsg = struct { ) !*ErrorMsg { const err_msg = try gpa.create(ErrorMsg); errdefer gpa.destroy(err_msg); - err_msg.* = try init(gpa, src_loc, format, args); + err_msg.* = try ErrorMsg.init(gpa, src_loc, format, args); return err_msg; } @@ -3287,7 +3083,7 @@ pub const LazySrcLoc = union(enum) { } /// Upgrade to a `SrcLoc` based on the `Decl` provided. 
- pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl) SrcLoc { + pub fn toSrcLoc(lazy: LazySrcLoc, decl: *Decl, mod: *Module) SrcLoc { return switch (lazy) { .unneeded, .entire_file, @@ -3295,7 +3091,7 @@ pub const LazySrcLoc = union(enum) { .token_abs, .node_abs, => .{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = 0, .lazy = lazy, }, @@ -3361,7 +3157,7 @@ pub const LazySrcLoc = union(enum) { .for_input, .for_capture_from_input, => .{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, .lazy = lazy, }, @@ -3391,6 +3187,12 @@ pub const CompileError = error{ ComptimeBreak, }; +pub fn init(mod: *Module) !void { + const gpa = mod.gpa; + try mod.intern_pool.init(gpa); + try mod.global_error_set.put(gpa, .empty, {}); +} + pub fn deinit(mod: *Module) void { const gpa = mod.gpa; @@ -3489,42 +3291,29 @@ pub fn deinit(mod: *Module) void { } mod.export_owners.deinit(gpa); - { - var it = mod.global_error_set.keyIterator(); - while (it.next()) |key| { - gpa.free(key.*); - } - mod.global_error_set.deinit(gpa); - } + mod.global_error_set.deinit(gpa); - mod.error_name_list.deinit(gpa); mod.test_functions.deinit(gpa); mod.align_stack_fns.deinit(gpa); mod.monomorphed_funcs.deinit(gpa); - { - var it = mod.memoized_calls.iterator(); - while (it.next()) |entry| { - gpa.free(entry.key_ptr.args); - entry.value_ptr.arena.promote(gpa).deinit(); - } - mod.memoized_calls.deinit(gpa); - } - mod.decls_free_list.deinit(gpa); mod.allocated_decls.deinit(gpa); mod.global_assembly.deinit(gpa); mod.reference_table.deinit(gpa); - mod.string_literal_table.deinit(gpa); - mod.string_literal_bytes.deinit(gpa); + mod.namespaces_free_list.deinit(gpa); + mod.allocated_namespaces.deinit(gpa); + + mod.memoized_decls.deinit(gpa); + mod.intern_pool.deinit(gpa); + mod.tmp_hack_arena.deinit(); } pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { const gpa = mod.gpa; { const decl = mod.declPtr(decl_index); - log.debug("destroy {*} ({s})", .{ decl, decl.name }); _ = mod.test_functions.swapRemove(decl_index); if (decl.deletion_flag) { assert(mod.deletion_set.swapRemove(decl_index)); @@ -3533,14 +3322,15 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { gpa.free(kv.value); } if (decl.has_tv) { - if (decl.getInnerNamespace()) |namespace| { - namespace.destroyDecls(mod); + if (decl.getOwnedInnerNamespaceIndex(mod).unwrap()) |i| { + mod.namespacePtr(i).destroyDecls(mod); + mod.destroyNamespace(i); } } + if (decl.src_scope) |scope| scope.decRef(gpa); decl.clearValues(mod); decl.dependants.deinit(gpa); decl.dependencies.deinit(gpa); - decl.clearName(gpa); decl.* = undefined; } mod.decls_free_list.append(gpa, decl_index) catch { @@ -3554,24 +3344,55 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void { } } -pub fn declPtr(mod: *Module, decl_index: Decl.Index) *Decl { - return mod.allocated_decls.at(@enumToInt(decl_index)); +pub fn declPtr(mod: *Module, index: Decl.Index) *Decl { + return mod.allocated_decls.at(@enumToInt(index)); +} + +pub fn namespacePtr(mod: *Module, index: Namespace.Index) *Namespace { + return mod.allocated_namespaces.at(@enumToInt(index)); +} + +pub fn unionPtr(mod: *Module, index: Union.Index) *Union { + return mod.intern_pool.unionPtr(index); +} + +pub fn structPtr(mod: *Module, index: Struct.Index) *Struct { + return mod.intern_pool.structPtr(index); +} + +pub fn funcPtr(mod: *Module, index: Fn.Index) *Fn { + return mod.intern_pool.funcPtr(index); +} + +pub fn 
inferredErrorSetPtr(mod: *Module, index: Fn.InferredErrorSet.Index) *Fn.InferredErrorSet { + return mod.intern_pool.inferredErrorSetPtr(index); +} + +pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace { + return mod.namespacePtr(index.unwrap() orelse return null); +} + +/// This one accepts an index from the InternPool and asserts that it is not +/// the anonymous empty struct type. +pub fn structPtrUnwrap(mod: *Module, index: Struct.OptionalIndex) ?*Struct { + return mod.structPtr(index.unwrap() orelse return null); +} + +pub fn funcPtrUnwrap(mod: *Module, index: Fn.OptionalIndex) ?*Fn { + return mod.funcPtr(index.unwrap() orelse return null); } /// Returns true if and only if the Decl is the top level struct associated with a File. pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool { const decl = mod.declPtr(decl_index); - if (decl.src_namespace.parent != null) + const namespace = mod.namespacePtr(decl.src_namespace); + if (namespace.parent != .none) return false; - return decl_index == decl.src_namespace.getDeclIndex(); + return decl_index == namespace.getDeclIndex(mod); } fn freeExportList(gpa: Allocator, export_list: *ArrayListUnmanaged(*Export)) void { - for (export_list.items) |exp| { - gpa.free(exp.options.name); - if (exp.options.section) |s| gpa.free(s); - gpa.destroy(exp); - } + for (export_list.items) |exp| gpa.destroy(exp); export_list.deinit(gpa); } @@ -3990,9 +3811,6 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { if (decl.zir_decl_index != 0) { const old_zir_decl_index = decl.zir_decl_index; const new_zir_decl_index = extra_map.get(old_zir_decl_index) orelse { - log.debug("updateZirRefs {s}: delete {*} ({s})", .{ - file.sub_file_path, decl, decl.name, - }); try file.deleted_decls.append(gpa, decl_index); continue; }; @@ -4000,41 +3818,34 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void { decl.zir_decl_index = new_zir_decl_index; const new_hash = decl.contentsHashZir(new_zir); if (!std.zig.srcHashEql(old_hash, new_hash)) { - log.debug("updateZirRefs {s}: outdated {*} ({s}) {d} => {d}", .{ - file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index, - }); try file.outdated_decls.append(gpa, decl_index); - } else { - log.debug("updateZirRefs {s}: unchanged {*} ({s}) {d} => {d}", .{ - file.sub_file_path, decl, decl.name, old_zir_decl_index, new_zir_decl_index, - }); } } if (!decl.owns_tv) continue; - if (decl.getStruct()) |struct_obj| { + if (decl.getOwnedStruct(mod)) |struct_obj| { struct_obj.zir_index = inst_map.get(struct_obj.zir_index) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; } - if (decl.getUnion()) |union_obj| { + if (decl.getOwnedUnion(mod)) |union_obj| { union_obj.zir_index = inst_map.get(union_obj.zir_index) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; } - if (decl.getFunction()) |func| { + if (decl.getOwnedFunction(mod)) |func| { func.zir_body_inst = inst_map.get(func.zir_body_inst) orelse { try file.deleted_decls.append(gpa, decl_index); continue; }; } - if (decl.getInnerNamespace()) |namespace| { + if (decl.getOwnedInnerNamespace(mod)) |namespace| { for (namespace.decls.keys()) |sub_decl| { try decl_stack.append(gpa, sub_decl); } @@ -4207,14 +4018,12 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { .complete => return, .outdated => blk: { - log.debug("re-analyzing {*} ({s})", .{ decl, decl.name }); - // The exports this Decl performs will be re-discovered, so we remove them here 
// prior to re-analysis. try mod.deleteDeclExports(decl_index); // Similarly, `@setAlignStack` invocations will be re-discovered. - if (decl.getFunction()) |func| { + if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { _ = mod.align_stack_fns.remove(func); } @@ -4223,9 +4032,6 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { const dep = mod.declPtr(dep_index); dep.removeDependant(decl_index); if (dep.dependants.count() == 0 and !dep.deletion_flag) { - log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{ - decl, decl.name, dep, dep.name, - }); try mod.markDeclForDeletion(dep_index); } } @@ -4237,7 +4043,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { .unreferenced => false, }; - var decl_prog_node = mod.sema_prog_node.start(mem.sliceTo(decl.name, 0), 0); + var decl_prog_node = mod.sema_prog_node.start("", 0); decl_prog_node.activate(); defer decl_prog_node.end(); @@ -4264,7 +4070,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( mod.gpa, - decl.srcLoc(), + decl.srcLoc(mod), "unable to analyze: {s}", .{@errorName(e)}, )); @@ -4277,7 +4083,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { // Update all dependents which have at least this level of dependency. // If our type remained the same and we're a function, only update // decls which depend on our body; otherwise, update all dependents. - const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag() == .Fn) .function_body else .normal; + const update_level: Decl.DepType = if (!type_changed and decl.ty.zigTypeTag(mod) == .Fn) .function_body else .normal; for (decl.dependants.keys(), decl.dependants.values()) |dep_index, dep_type| { if (@enumToInt(dep_type) < @enumToInt(update_level)) continue; @@ -4304,10 +4110,11 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void { } } -pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { +pub fn ensureFuncBodyAnalyzed(mod: *Module, func_index: Fn.Index) SemaError!void { const tracy = trace(@src()); defer tracy.end(); + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -4339,7 +4146,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { defer tmp_arena.deinit(); const sema_arena = tmp_arena.allocator(); - var air = mod.analyzeFnBody(func, sema_arena) catch |err| switch (err) { + var air = mod.analyzeFnBody(func_index, sema_arena) catch |err| switch (err) { error.AnalysisFail => { if (func.state == .in_progress) { // If this decl caused the compile error, the analysis field would @@ -4365,17 +4172,14 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { if (no_bin_file and !dump_air and !dump_llvm_ir) return; - log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air); + var liveness = try Liveness.analyze(gpa, air, &mod.intern_pool); defer liveness.deinit(gpa); if (dump_air) { const fqn = try decl.getFullyQualifiedName(mod); - defer mod.gpa.free(fqn); - - std.debug.print("# Begin Function AIR: {s}:\n", .{fqn}); + std.debug.print("# Begin Function AIR: {}:\n", .{fqn.fmt(&mod.intern_pool)}); @import("print_air.zig").dump(mod, air, liveness); - std.debug.print("# End Function AIR: {s}\n\n", .{fqn}); + 
std.debug.print("# End Function AIR: {}\n\n", .{fqn.fmt(&mod.intern_pool)}); } if (std.debug.runtime_safety) { @@ -4383,6 +4187,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { .gpa = gpa, .air = air, .liveness = liveness, + .intern_pool = &mod.intern_pool, }; defer verify.deinit(); @@ -4394,7 +4199,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { decl_index, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "invalid liveness: {s}", .{@errorName(err)}, ), @@ -4407,7 +4212,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { if (no_bin_file and !dump_llvm_ir) return; - comp.bin_file.updateFunc(mod, func, air, liveness) catch |err| switch (err) { + comp.bin_file.updateFunc(mod, func_index, air, liveness) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.AnalysisFail => { decl.analysis = .codegen_failure; @@ -4417,7 +4222,7 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { try mod.failed_decls.ensureUnusedCapacity(gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -4437,7 +4242,8 @@ pub fn ensureFuncBodyAnalyzed(mod: *Module, func: *Fn) SemaError!void { /// analyzed, and for ensuring it can exist at runtime (see /// `sema.fnHasRuntimeBits`). This function does *not* guarantee that the body /// will be analyzed when it returns: for that, see `ensureFuncBodyAnalyzed`. -pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void { +pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func_index: Fn.Index) !void { + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); @@ -4475,7 +4281,7 @@ pub fn ensureFuncBodyAnalysisQueued(mod: *Module, func: *Fn) !void { // Decl itself is safely analyzed, and body analysis is not yet queued - try mod.comp.work_queue.writeItem(.{ .codegen_func = func }); + try mod.comp.work_queue.writeItem(.{ .codegen_func = func_index }); if (mod.emit_h != null) { // TODO: we ideally only want to do this if the function's type changed // since the last update @@ -4527,42 +4333,54 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { if (file.root_decl != .none) return; const gpa = mod.gpa; - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const struct_obj = try new_decl_arena_allocator.create(Module.Struct); - const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); - const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); - const ty_ty = comptime Type.initTag(.type); - struct_obj.* = .{ - .owner_decl = undefined, // set below + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the struct type gains an + // InternPool index. 
+ const new_namespace_index = try mod.createNamespace(.{ + .parent = .none, + .ty = undefined, + .file_scope = file, + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); + + const new_decl_index = try mod.allocateNewDecl(new_namespace_index, 0, null); + const new_decl = mod.declPtr(new_decl_index); + errdefer @panic("TODO error handling"); + + const struct_index = try mod.createStruct(.{ + .owner_decl = new_decl_index, .fields = .{}, .zir_index = undefined, // set below .layout = .Auto, .status = .none, .known_non_opv = undefined, .is_tuple = undefined, // set below - .namespace = .{ - .parent = null, - .ty = struct_ty, - .file_scope = file, - }, - }; - const new_decl_index = try mod.allocateNewDecl(&struct_obj.namespace, 0, null); - const new_decl = mod.declPtr(new_decl_index); + .namespace = new_namespace_index, + }); + errdefer mod.destroyStruct(struct_index); + + const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ + .index = struct_index.toOptional(), + .namespace = new_namespace_index.toOptional(), + } }); + // TODO: figure out InternPool removals for incremental compilation + //errdefer mod.intern_pool.remove(struct_ty); + + new_namespace.ty = struct_ty.toType(); file.root_decl = new_decl_index.toOptional(); - struct_obj.owner_decl = new_decl_index; - new_decl.name = try file.fullyQualifiedNameZ(gpa); + + new_decl.name = try file.fullyQualifiedName(mod); new_decl.src_line = 0; new_decl.is_pub = true; new_decl.is_exported = false; new_decl.has_align = false; new_decl.has_linksection_or_addrspace = false; - new_decl.ty = ty_ty; - new_decl.val = struct_val; + new_decl.ty = Type.type; + new_decl.val = struct_ty.toValue(); new_decl.@"align" = 0; - new_decl.@"linksection" = null; + new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.owns_tv = true; new_decl.alive = true; // This Decl corresponds to a File and is therefore always alive. 
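// A minimal sketch of the interning contract relied on above, assuming `gpa`
// and `mod` remain in scope (the assert is illustrative only):
//
//     const a = try mod.intern_pool.get(gpa, .{ .struct_type = .{
//         .index = struct_index.toOptional(),
//         .namespace = new_namespace_index.toOptional(),
//     } });
//     const b = try mod.intern_pool.get(gpa, .{ .struct_type = .{
//         .index = struct_index.toOptional(),
//         .namespace = new_namespace_index.toOptional(),
//     } });
//     std.debug.assert(a == b); // equal keys intern to the same Index
//
// The resulting Index converts cheaply in both directions, which is how one
// interned struct type serves as both `new_namespace.ty` (via `toType()`)
// and `new_decl.val` (via `toValue()`).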
@@ -4573,6 +4391,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { if (file.status == .success_zir) { assert(file.zir_loaded); const main_struct_inst = Zir.main_struct_inst; + const struct_obj = mod.structPtr(struct_index); struct_obj.zir_index = main_struct_inst; const extended = file.zir.instructions.items(.data)[main_struct_inst].extended; const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -4582,25 +4401,34 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { defer sema_arena.deinit(); const sema_arena_allocator = sema_arena.allocator(); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = sema_arena_allocator, - .perm_arena = new_decl_arena_allocator, .code = file.zir, .owner_decl = new_decl, .owner_decl_index = new_decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, new_decl_arena_allocator, null); + var wip_captures = try WipCaptureScope.init(gpa, null); defer wip_captures.deinit(); - if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_obj)) |_| { + if (sema.analyzeStructDecl(new_decl, main_struct_inst, struct_index)) |_| { try wip_captures.finalize(); + for (comptime_mutable_decls.items) |decl_index| { + const decl = mod.declPtr(decl_index); + try decl.intern(mod); + } new_decl.analysis = .complete; } else |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, @@ -4632,8 +4460,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { } else { new_decl.analysis = .file_failure; } - - try new_decl.finalizeNewArena(&new_decl_arena); } /// Returns `true` if the Decl type changed. 
@@ -4645,68 +4471,52 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { const decl = mod.declPtr(decl_index); - if (decl.getFileScope().status != .success_zir) { + if (decl.getFileScope(mod).status != .success_zir) { return error.AnalysisFail; } const gpa = mod.gpa; - const zir = decl.getFileScope().zir; + const zir = decl.getFileScope(mod).zir; const zir_datas = zir.instructions.items(.data); decl.analysis = .in_progress; - // We need the memory for the Type to go into the arena for the Decl - var decl_arena = std.heap.ArenaAllocator.init(gpa); - const decl_arena_allocator = decl_arena.allocator(); - const decl_value_arena = blk: { - errdefer decl_arena.deinit(); - const s = try decl_arena_allocator.create(ValueArena); - s.* = .{ .state = undefined }; - break :blk s; - }; - defer { - if (decl.value_arena) |value_arena| { - assert(value_arena.state_acquired == null); - decl_value_arena.prev = value_arena; - } - - decl_value_arena.state = decl_arena.state; - decl.value_arena = decl_value_arena; - } - var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); - const analysis_arena_allocator = analysis_arena.allocator(); + + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); var sema: Sema = .{ .mod = mod, .gpa = gpa, - .arena = analysis_arena_allocator, - .perm_arena = decl_arena_allocator, + .arena = analysis_arena.allocator(), .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); if (mod.declIsRoot(decl_index)) { - log.debug("semaDecl root {*} ({s})", .{ decl, decl.name }); const main_struct_inst = Zir.main_struct_inst; - const struct_obj = decl.getStruct().?; + const struct_index = decl.getOwnedStructIndex(mod).unwrap().?; + const struct_obj = mod.structPtr(struct_index); // This might not have gotten set in `semaFile` if the first time had // a ZIR failure, so we set it here in case. 
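// (`Zir.main_struct_inst` is the well-known ZIR instruction index referring
// to a file's top-level struct declaration; `semaFile` above assigns the
// same constant.)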
struct_obj.zir_index = main_struct_inst; - try sema.analyzeStructDecl(decl, main_struct_inst, struct_obj); + try sema.analyzeStructDecl(decl, main_struct_inst, struct_index); decl.analysis = .complete; decl.generation = mod.generation; return false; } - log.debug("semaDecl {*} ({s})", .{ decl, decl.name }); - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var block_scope: Sema.Block = .{ @@ -4724,12 +4534,16 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { block_scope.params.deinit(gpa); } - const zir_block_index = decl.zirBlockIndex(); + const zir_block_index = decl.zirBlockIndex(mod); const inst_data = zir_datas[zir_block_index].pl_node; const extra = zir.extraData(Zir.Inst.Block, inst_data.payload_index); const body = zir.extra[extra.end..][0..extra.data.body_len]; const result_ref = (try sema.analyzeBodyBreak(&block_scope, body)).?.operand; try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = 0 }; const section_src: LazySrcLoc = .{ .node_offset_var_decl_section = 0 }; const address_space_src: LazySrcLoc = .{ .node_offset_var_decl_addrspace = 0 }; @@ -4748,16 +4562,15 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl_tv.ty.fmt(mod), }); } - var buffer: Value.ToTypeBuffer = undefined; - const ty = try decl_tv.val.toType(&buffer).copy(decl_arena_allocator); - if (ty.getNamespace() == null) { + const ty = decl_tv.val.toType(); + if (ty.getNamespace(mod) == null) { return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)}); } - decl.ty = Type.type; - decl.val = try Value.Tag.ty.create(decl_arena_allocator, ty); + decl.ty = InternPool.Index.type_type.toType(); + decl.val = ty.toValue(); decl.@"align" = 0; - decl.@"linksection" = null; + decl.@"linksection" = .none; decl.has_tv = true; decl.owns_tv = false; decl.analysis = .complete; @@ -4766,8 +4579,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { return true; } - if (decl_tv.val.castTag(.function)) |fn_payload| { - const func = fn_payload.data; + if (mod.intern_pool.indexToFunc(decl_tv.val.toIntern()).unwrap()) |func_index| { + const func = mod.funcPtr(func_index); const owns_tv = func.owner_decl == decl_index; if (owns_tv) { var prev_type_has_bits = false; @@ -4775,31 +4588,30 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { var type_changed = true; if (decl.has_tv) { - prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(); + prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits(mod); type_changed = !decl.ty.eql(decl_tv.ty, mod); - if (decl.getFunction()) |prev_func| { + if (decl.getOwnedFunction(mod)) |prev_func| { prev_is_inline = prev_func.state == .inline_only; } } decl.clearValues(mod); - decl.ty = try decl_tv.ty.copy(decl_arena_allocator); - decl.val = try decl_tv.val.copy(decl_arena_allocator); + decl.ty = decl_tv.ty; + decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue(); // linksection, align, and addrspace were already set by Sema decl.has_tv = true; decl.owns_tv = owns_tv; decl.analysis = .complete; decl.generation = mod.generation; - const is_inline = decl.ty.fnCallingConvention() == .Inline; + const is_inline = decl.ty.fnCallingConvention(mod) == .Inline; if (decl.is_exported) { const export_src: LazySrcLoc = .{ .token_offset = 
@boolToInt(decl.is_pub) }; if (is_inline) { return sema.fail(&block_scope, export_src, "export of inline function", .{}); } // The scope needs to have the decl in it. - const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) }; - try sema.analyzeExport(&block_scope, export_src, options, decl_index); + try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); } return type_changed or is_inline != prev_is_inline; } @@ -4813,64 +4625,57 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { decl.owns_tv = false; var queue_linker_work = false; var is_extern = false; - switch (decl_tv.val.tag()) { - .variable => { - const variable = decl_tv.val.castTag(.variable).?.data; - if (variable.owner_decl == decl_index) { + switch (decl_tv.val.toIntern()) { + .generic_poison => unreachable, + .unreachable_value => unreachable, + else => switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { + .variable => |variable| if (variable.decl == decl_index) { decl.owns_tv = true; queue_linker_work = true; + }, - const copied_init = try variable.init.copy(decl_arena_allocator); - variable.init = copied_init; - } - }, - .extern_fn => { - const extern_fn = decl_tv.val.castTag(.extern_fn).?.data; - if (extern_fn.owner_decl == decl_index) { + .extern_func => |extern_fn| if (extern_fn.decl == decl_index) { decl.owns_tv = true; queue_linker_work = true; is_extern = true; - } - }, + }, - .generic_poison => unreachable, - .unreachable_value => unreachable, + .func => {}, - .function => {}, - - else => { - log.debug("send global const to linker: {*} ({s})", .{ decl, decl.name }); - queue_linker_work = true; + else => { + queue_linker_work = true; + }, }, } - decl.ty = try decl_tv.ty.copy(decl_arena_allocator); - decl.val = try decl_tv.val.copy(decl_arena_allocator); + decl.ty = decl_tv.ty; + decl.val = (try decl_tv.val.intern(decl_tv.ty, mod)).toValue(); decl.@"align" = blk: { - const align_ref = decl.zirAlignRef(); + const align_ref = decl.zirAlignRef(mod); if (align_ref == .none) break :blk 0; break :blk try sema.resolveAlign(&block_scope, align_src, align_ref); }; decl.@"linksection" = blk: { - const linksection_ref = decl.zirLinksectionRef(); - if (linksection_ref == .none) break :blk null; + const linksection_ref = decl.zirLinksectionRef(mod); + if (linksection_ref == .none) break :blk .none; const bytes = try sema.resolveConstString(&block_scope, section_src, linksection_ref, "linksection must be comptime-known"); if (mem.indexOfScalar(u8, bytes, 0) != null) { return sema.fail(&block_scope, section_src, "linksection cannot contain null bytes", .{}); } else if (bytes.len == 0) { return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{}); } - break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr; + const section = try mod.intern_pool.getOrPutString(gpa, bytes); + break :blk section.toOptional(); }; decl.@"addrspace" = blk: { - const addrspace_ctx: Sema.AddressSpaceContext = switch (decl_tv.val.tag()) { - .function, .extern_fn => .function, + const addrspace_ctx: Sema.AddressSpaceContext = switch (mod.intern_pool.indexToKey(decl_tv.val.toIntern())) { .variable => .variable, + .extern_func, .func => .function, else => .constant, }; const target = sema.mod.getTarget(); - break :blk switch (decl.zirAddrspaceRef()) { + break :blk switch (decl.zirAddrspaceRef(mod)) { .none => switch (addrspace_ctx) { .function => target_util.defaultAddressSpace(target, .function), .variable => target_util.defaultAddressSpace(target, .global_mutable), 
@@ -4888,7 +4693,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { (queue_linker_work and try sema.typeHasRuntimeBits(decl.ty)); if (has_runtime_bits) { - log.debug("queue linker work for {*} ({s})", .{ decl, decl.name }); // Needed for codegen_decl which will call updateDecl and then the // codegen backend wants full access to the Decl Type. @@ -4904,8 +4708,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { if (decl.is_exported) { const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) }; // The scope needs to have the decl in it. - const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) }; - try sema.analyzeExport(&block_scope, export_src, options, decl_index); + try sema.analyzeExport(&block_scope, export_src, .{ .name = decl.name }, decl_index); } return type_changed; @@ -4930,10 +4733,6 @@ pub fn declareDeclDependencyType(mod: *Module, depender_index: Decl.Index, depen } } - log.debug("{*} ({s}) depends on {*} ({s})", .{ - depender, depender.name, dependee, dependee.name, - }); - if (dependee.deletion_flag) { dependee.deletion_flag = false; assert(mod.deletion_set.swapRemove(dependee_index)); @@ -5222,7 +5021,7 @@ pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void { pub fn scanNamespace( mod: *Module, - namespace: *Namespace, + namespace_index: Namespace.Index, extra_start: usize, decls_len: u32, parent_decl: *Decl, @@ -5231,6 +5030,7 @@ pub fn scanNamespace( defer tracy.end(); const gpa = mod.gpa; + const namespace = mod.namespacePtr(namespace_index); const zir = namespace.file_scope.zir; try mod.comp.work_queue.ensureUnusedCapacity(decls_len); @@ -5243,7 +5043,7 @@ pub fn scanNamespace( var decl_i: u32 = 0; var scan_decl_iter: ScanDeclIter = .{ .module = mod, - .namespace = namespace, + .namespace_index = namespace_index, .parent_decl = parent_decl, }; while (decl_i < decls_len) : (decl_i += 1) { @@ -5266,7 +5066,7 @@ pub fn scanNamespace( const ScanDeclIter = struct { module: *Module, - namespace: *Namespace, + namespace_index: Namespace.Index, parent_decl: *Decl, usingnamespace_index: usize = 0, comptime_index: usize = 0, @@ -5278,9 +5078,11 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err defer tracy.end(); const mod = iter.module; - const namespace = iter.namespace; + const namespace_index = iter.namespace_index; + const namespace = mod.namespacePtr(namespace_index); const gpa = mod.gpa; const zir = namespace.file_scope.zir; + const ip = &mod.intern_pool; // zig fmt: off const is_pub = (flags & 0b0001) != 0; @@ -5300,31 +5102,31 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err // Every Decl needs a name. 
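// Names are now InternPool handles rather than owned allocations: each
// synthesized form below ("usingnamespace_{d}", "comptime_{d}", "test_{d}",
// "decltest.{s}", "test.{s}") is built directly in the pool with
// `getOrPutStringFmt`, so the old `must_free_decl_name` bookkeeping goes
// away.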
var is_named_test = false; var kind: Decl.Kind = .named; - const decl_name: [:0]const u8 = switch (decl_name_index) { + const decl_name: InternPool.NullTerminatedString = switch (decl_name_index) { 0 => name: { if (export_bit) { const i = iter.usingnamespace_index; iter.usingnamespace_index += 1; kind = .@"usingnamespace"; - break :name try std.fmt.allocPrintZ(gpa, "usingnamespace_{d}", .{i}); + break :name try ip.getOrPutStringFmt(gpa, "usingnamespace_{d}", .{i}); } else { const i = iter.comptime_index; iter.comptime_index += 1; kind = .@"comptime"; - break :name try std.fmt.allocPrintZ(gpa, "comptime_{d}", .{i}); + break :name try ip.getOrPutStringFmt(gpa, "comptime_{d}", .{i}); } }, 1 => name: { const i = iter.unnamed_test_index; iter.unnamed_test_index += 1; kind = .@"test"; - break :name try std.fmt.allocPrintZ(gpa, "test_{d}", .{i}); + break :name try ip.getOrPutStringFmt(gpa, "test_{d}", .{i}); }, 2 => name: { is_named_test = true; const test_name = zir.nullTerminatedString(decl_doccomment_index); kind = .@"test"; - break :name try std.fmt.allocPrintZ(gpa, "decltest.{s}", .{test_name}); + break :name try ip.getOrPutStringFmt(gpa, "decltest.{s}", .{test_name}); }, else => name: { const raw_name = zir.nullTerminatedString(decl_name_index); @@ -5332,14 +5134,12 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err is_named_test = true; const test_name = zir.nullTerminatedString(decl_name_index + 1); kind = .@"test"; - break :name try std.fmt.allocPrintZ(gpa, "test.{s}", .{test_name}); + break :name try ip.getOrPutStringFmt(gpa, "test.{s}", .{test_name}); } else { - break :name try gpa.dupeZ(u8, raw_name); + break :name try ip.getOrPutString(gpa, raw_name); } }, }; - var must_free_decl_name = true; - defer if (must_free_decl_name) gpa.free(decl_name); const is_exported = export_bit and decl_name_index != 0; if (kind == .@"usingnamespace") try namespace.usingnamespace_set.ensureUnusedCapacity(gpa, 1); @@ -5347,21 +5147,19 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err // We create a Decl for it regardless of analysis status. 
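// The adapted lookup below never touches string bytes: `DeclAdapter` and
// `Namespace.DeclContext` (earlier in this change) hash the interned name as
// `std.hash.uint32(@enumToInt(name))` and compare with a single integer
// equality test.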
const gop = try namespace.decls.getOrPutContextAdapted( gpa, - @as([]const u8, mem.sliceTo(decl_name, 0)), + decl_name, DeclAdapter{ .mod = mod }, Namespace.DeclContext{ .module = mod }, ); const comp = mod.comp; if (!gop.found_existing) { - const new_decl_index = try mod.allocateNewDecl(namespace, decl_node, iter.parent_decl.src_scope); + const new_decl_index = try mod.allocateNewDecl(namespace_index, decl_node, iter.parent_decl.src_scope); const new_decl = mod.declPtr(new_decl_index); new_decl.kind = kind; new_decl.name = decl_name; - must_free_decl_name = false; if (kind == .@"usingnamespace") { namespace.usingnamespace_set.putAssumeCapacity(new_decl_index, is_pub); } - log.debug("scan new {*} ({s}) into {*}", .{ new_decl, decl_name, namespace }); new_decl.src_line = line; gop.key_ptr.* = new_decl_index; // Exported decls, comptime decls, usingnamespace decls, and @@ -5382,7 +5180,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err if (!comp.bin_file.options.is_test) break :blk false; if (decl_pkg != mod.main_pkg) break :blk false; if (comp.test_filter) |test_filter| { - if (mem.indexOf(u8, decl_name, test_filter) == null) { + if (mem.indexOf(u8, ip.stringToSlice(decl_name), test_filter) == null) { break :blk false; } } @@ -5405,16 +5203,13 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err const decl = mod.declPtr(decl_index); if (kind == .@"test") { const src_loc = SrcLoc{ - .file_scope = decl.getFileScope(), + .file_scope = decl.getFileScope(mod), .parent_decl_node = decl.src_node, .lazy = .{ .token_offset = 1 }, }; - const msg = try ErrorMsg.create( - gpa, - src_loc, - "duplicate test name: {s}", - .{decl_name}, - ); + const msg = try ErrorMsg.create(gpa, src_loc, "duplicate test name: {}", .{ + decl_name.fmt(&mod.intern_pool), + }); errdefer msg.destroy(gpa); try mod.failed_decls.putNoClobber(gpa, decl_index, msg); const other_src_loc = SrcLoc{ @@ -5424,7 +5219,6 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err }; try mod.errNoteNonLazy(other_src_loc, msg, "other test here", .{}); } - log.debug("scan existing {*} ({s}) of {*}", .{ decl, decl.name, namespace }); // Update the AST node of the decl; even if its contents are unchanged, it may // have been re-ordered. decl.src_node = decl_node; @@ -5436,7 +5230,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err decl.has_align = has_align; decl.has_linksection_or_addrspace = has_linksection_or_addrspace; decl.zir_decl_index = @intCast(u32, decl_sub_index); - if (decl.getFunction()) |_| { + if (decl.getOwnedFunctionIndex(mod) != .none) { switch (comp.bin_file.tag) { .coff, .elf, .macho, .plan9 => { // TODO Look into detecting when this would be unnecessary by storing enough state @@ -5458,7 +5252,6 @@ pub fn clearDecl( defer tracy.end(); const decl = mod.declPtr(decl_index); - log.debug("clearing {*} ({s})", .{ decl, decl.name }); const gpa = mod.gpa; try mod.deletion_set.ensureUnusedCapacity(gpa, decl.dependencies.count()); @@ -5473,9 +5266,6 @@ pub fn clearDecl( const dep = mod.declPtr(dep_index); dep.removeDependant(decl_index); if (dep.dependants.count() == 0 and !dep.deletion_flag) { - log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{ - decl, decl.name, dep, dep.name, - }); // We don't recursively perform a deletion here, because during the update, // another reference to it may turn up. 
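// Instead the Decl is only flagged and parked in `mod.deletion_set`;
// anything still unreferenced once the whole update finishes is destroyed
// then.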
dep.deletion_flag = true; @@ -5510,10 +5300,10 @@ pub fn clearDecl( try mod.deleteDeclExports(decl_index); if (decl.has_tv) { - if (decl.ty.isFnOrHasRuntimeBits()) { + if (decl.ty.isFnOrHasRuntimeBits(mod)) { mod.comp.bin_file.freeDecl(decl_index); } - if (decl.getInnerNamespace()) |namespace| { + if (decl.getOwnedInnerNamespace(mod)) |namespace| { try namespace.deleteAllDecls(mod, outdated_decls); } } @@ -5530,10 +5320,9 @@ pub fn clearDecl( /// This function is exclusively called for anonymous decls. pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void { const decl = mod.declPtr(decl_index); - log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name }); assert(!mod.declIsRoot(decl_index)); - assert(decl.src_namespace.anon_decls.swapRemove(decl_index)); + assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); const dependants = decl.dependants.keys(); for (dependants) |dep| { @@ -5558,10 +5347,9 @@ fn markDeclForDeletion(mod: *Module, decl_index: Decl.Index) !void { /// If other decls depend on this decl, they must be aborted first. pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { const decl = mod.declPtr(decl_index); - log.debug("abortAnonDecl {*} ({s})", .{ decl, decl.name }); assert(!mod.declIsRoot(decl_index)); - assert(decl.src_namespace.anon_decls.swapRemove(decl_index)); + assert(mod.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index)); // An aborted decl must not have dependants -- they must have // been aborted first and removed from this list. @@ -5575,6 +5363,17 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void { mod.destroyDecl(decl_index); } +/// Finalize the creation of an anon decl. +pub fn finalizeAnonDecl(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { + // The Decl starts off with alive=false and the codegen backend will set alive=true + // if the Decl is referenced by an instruction or another constant. Otherwise, + // the Decl will be garbage collected by the `codegen_decl` task instead of sent + // to the linker. + if (mod.declPtr(decl_index).ty.isFnOrHasRuntimeBits(mod)) { + try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = decl_index }); + } +} + /// Delete all the Export objects that are caused by this Decl. Re-analysis of /// this Decl will cause them to be re-created (or not). 
fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { @@ -5600,51 +5399,53 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void } } if (mod.comp.bin_file.cast(link.File.Elf)) |elf| { - elf.deleteDeclExport(decl_index, exp.options.name); + elf.deleteDeclExport(decl_index, exp.opts.name); } if (mod.comp.bin_file.cast(link.File.MachO)) |macho| { - try macho.deleteDeclExport(decl_index, exp.options.name); + try macho.deleteDeclExport(decl_index, exp.opts.name); } if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| { wasm.deleteDeclExport(decl_index); } if (mod.comp.bin_file.cast(link.File.Coff)) |coff| { - coff.deleteDeclExport(decl_index, exp.options.name); + coff.deleteDeclExport(decl_index, exp.opts.name); } if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| { failed_kv.value.destroy(mod.gpa); } - mod.gpa.free(exp.options.name); mod.gpa.destroy(exp); } export_owners.deinit(mod.gpa); } -pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { +pub fn analyzeFnBody(mod: *Module, func_index: Fn.Index, arena: Allocator) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); const gpa = mod.gpa; + const func = mod.funcPtr(func_index); const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); - // Use the Decl's arena for captured values. - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + + const fn_ty = decl.ty; var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = arena, - .perm_arena = decl_arena_allocator, - .code = decl.getFileScope().zir, + .code = decl.getFileScope(mod).zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = func, - .fn_ret_ty = decl.ty.fnReturnType(), + .func_index = func_index.toOptional(), + .fn_ret_ty = mod.typeToFunc(fn_ty).?.return_type.toType(), .owner_func = func, + .owner_func_index = func_index.toOptional(), .branch_quota = @max(func.branch_quota, Sema.default_branch_quota), + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); @@ -5656,7 +5457,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { try sema.air_extra.ensureTotalCapacity(gpa, reserved_count); sema.air_extra.items.len += reserved_count; - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var inner_block: Sema.Block = .{ @@ -5680,9 +5481,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. 
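// Concretely, in the parameter loop below: a comptime-known argument is
// bound straight into `sema.inst_map` and skipped with `continue`, so it
// never becomes an AIR `arg` instruction; only the remaining runtime
// parameters do.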
- const fn_ty = decl.ty; - const fn_ty_info = fn_ty.fnInfo(); - const runtime_params_len = @intCast(u32, fn_ty_info.param_types.len); + const runtime_params_len = @intCast(u32, mod.typeToFunc(fn_ty).?.param_types.len); try inner_block.instructions.ensureTotalCapacityPrecise(gpa, runtime_params_len); try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` try sema.inst_map.ensureSpaceForInstructions(gpa, fn_info.param_body); @@ -5697,9 +5496,9 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { const param_ty = if (func.comptime_args) |comptime_args| t: { const arg_tv = comptime_args[total_param_index]; - const arg_val = if (arg_tv.val.tag() != .generic_poison) + const arg_val = if (!arg_tv.val.isGenericPoison()) arg_tv.val - else if (arg_tv.ty.onePossibleValue()) |opv| + else if (try arg_tv.ty.onePossibleValue(mod)) |opv| opv else break :t arg_tv.ty; @@ -5708,7 +5507,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { sema.inst_map.putAssumeCapacityNoClobber(inst, arg); total_param_index += 1; continue; - } else fn_ty_info.param_types[runtime_param_index]; + } else mod.typeToFunc(fn_ty).?.param_types[runtime_param_index].toType(); const opt_opv = sema.typeHasOnePossibleValue(param_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, @@ -5740,7 +5539,6 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { } func.state = .in_progress; - log.debug("set {s} to in_progress", .{decl.name}); const last_arg_index = inner_block.instructions.items.len; @@ -5765,7 +5563,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // is unused so it just has to be a no-op. sema.air_instructions.set(ptr_inst.*, .{ .tag = .alloc, - .data = .{ .ty = Type.initTag(.single_const_pointer_to_comptime_int) }, + .data = .{ .ty = Type.single_const_pointer_to_comptime_int }, }); } } @@ -5773,7 +5571,7 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // If we don't get an error return trace from a caller, create our own. if (func.calls_or_awaits_errorable_fn and mod.comp.bin_file.options.error_return_tracing and - !sema.fn_ret_ty.isError()) + !sema.fn_ret_ty.isError(mod)) { sema.setupErrorReturnTrace(&inner_block, last_arg_index) catch |err| switch (err) { // TODO make these unreachable instead of @panic @@ -5786,6 +5584,10 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { } try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } // Copy the block into place and mark that as the main block. try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + @@ -5797,14 +5599,13 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = main_block_index; func.state = .success; - log.debug("set {s} to success", .{decl.name}); // Finally we must resolve the return type and parameter types so that backends // have full access to type information. // Crucially, this happens *after* we set the function state to success above, // so that dependencies on the function body will now be satisfied rather than // result in circular dependency errors. 
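// (In other words: if a parameter or return type transitively depends on
// this very function's body, full resolution below now observes
// `state == .success` and completes, where resolving any earlier would have
// been reported as a dependency cycle.)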
- sema.resolveFnTypes(fn_ty_info) catch |err| switch (err) { + sema.resolveFnTypes(fn_ty) catch |err| switch (err) { error.NeededSourceLocation => unreachable, error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, @@ -5820,9 +5621,8 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { // Similarly, resolve any queued up types that were requested to be resolved for // the backends. - for (sema.types_to_resolve.items) |inst_ref| { - const ty = sema.getTmpAir().getRefType(inst_ref); - sema.resolveTypeFully(ty) catch |err| switch (err) { + for (sema.types_to_resolve.keys()) |ty| { + sema.resolveTypeFully(ty.toType()) catch |err| switch (err) { error.NeededSourceLocation => unreachable, error.GenericPoison => unreachable, error.ComptimeReturn => unreachable, @@ -5840,13 +5640,11 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air { return Air{ .instructions = sema.air_instructions.toOwnedSlice(), .extra = try sema.air_extra.toOwnedSlice(gpa), - .values = try sema.air_values.toOwnedSlice(gpa), }; } fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { const decl = mod.declPtr(decl_index); - log.debug("mark outdated {*} ({s})", .{ decl, decl.name }); try mod.comp.work_queue.writeItem(.{ .analyze_decl = decl_index }); if (mod.failed_decls.fetchSwapRemove(decl_index)) |kv| { kv.value.destroy(mod.gpa); @@ -5854,11 +5652,8 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { if (mod.cimport_errors.fetchSwapRemove(decl_index)) |kv| { for (kv.value) |err| err.deinit(mod.gpa); } - if (decl.has_tv and decl.owns_tv) { - if (decl.val.castTag(.function)) |payload| { - const func = payload.data; - _ = mod.align_stack_fns.remove(func); - } + if (decl.getOwnedFunctionIndex(mod).unwrap()) |func| { + _ = mod.align_stack_fns.remove(func); } if (mod.emit_h) |emit_h| { if (emit_h.failed_decls.fetchSwapRemove(decl_index)) |kv| { @@ -5869,9 +5664,51 @@ fn markOutdatedDecl(mod: *Module, decl_index: Decl.Index) !void { decl.analysis = .outdated; } +pub fn createNamespace(mod: *Module, initialization: Namespace) !Namespace.Index { + if (mod.namespaces_free_list.popOrNull()) |index| { + mod.allocated_namespaces.at(@enumToInt(index)).* = initialization; + return index; + } + const ptr = try mod.allocated_namespaces.addOne(mod.gpa); + ptr.* = initialization; + return @intToEnum(Namespace.Index, mod.allocated_namespaces.len - 1); +} + +pub fn destroyNamespace(mod: *Module, index: Namespace.Index) void { + mod.namespacePtr(index).* = undefined; + mod.namespaces_free_list.append(mod.gpa, index) catch { + // In order to keep `destroyNamespace` a non-fallible function, we ignore memory + // allocation failures here, instead leaking the Namespace until garbage collection. 
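+        // The index is then simply never recycled by `createNamespace`.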
+ }; +} + +pub fn createStruct(mod: *Module, initialization: Struct) Allocator.Error!Struct.Index { + return mod.intern_pool.createStruct(mod.gpa, initialization); +} + +pub fn destroyStruct(mod: *Module, index: Struct.Index) void { + return mod.intern_pool.destroyStruct(mod.gpa, index); +} + +pub fn createUnion(mod: *Module, initialization: Union) Allocator.Error!Union.Index { + return mod.intern_pool.createUnion(mod.gpa, initialization); +} + +pub fn destroyUnion(mod: *Module, index: Union.Index) void { + return mod.intern_pool.destroyUnion(mod.gpa, index); +} + +pub fn createFunc(mod: *Module, initialization: Fn) Allocator.Error!Fn.Index { + return mod.intern_pool.createFunc(mod.gpa, initialization); +} + +pub fn destroyFunc(mod: *Module, index: Fn.Index) void { + return mod.intern_pool.destroyFunc(mod.gpa, index); +} + pub fn allocateNewDecl( mod: *Module, - namespace: *Namespace, + namespace: Namespace.Index, src_node: Ast.Node.Index, src_scope: ?*CaptureScope, ) !Decl.Index { @@ -5896,6 +5733,7 @@ pub fn allocateNewDecl( }; }; + if (src_scope) |scope| scope.incRef(); decl_and_index.new_decl.* = .{ .name = undefined, .src_namespace = namespace, @@ -5906,7 +5744,7 @@ pub fn allocateNewDecl( .ty = undefined, .val = undefined, .@"align" = undefined, - .@"linksection" = undefined, + .@"linksection" = .none, .@"addrspace" = .generic, .analysis = .unreferenced, .deletion_flag = false, @@ -5924,25 +5762,20 @@ pub fn allocateNewDecl( return decl_and_index.decl_index; } -/// Get error value for error tag `name`. -pub fn getErrorValue(mod: *Module, name: []const u8) !std.StringHashMapUnmanaged(ErrorInt).KV { +pub fn getErrorValue( + mod: *Module, + name: InternPool.NullTerminatedString, +) Allocator.Error!ErrorInt { const gop = try mod.global_error_set.getOrPut(mod.gpa, name); - if (gop.found_existing) { - return std.StringHashMapUnmanaged(ErrorInt).KV{ - .key = gop.key_ptr.*, - .value = gop.value_ptr.*, - }; - } + return @intCast(ErrorInt, gop.index); +} - errdefer assert(mod.global_error_set.remove(name)); - try mod.error_name_list.ensureUnusedCapacity(mod.gpa, 1); - gop.key_ptr.* = try mod.gpa.dupe(u8, name); - gop.value_ptr.* = @intCast(ErrorInt, mod.error_name_list.items.len); - mod.error_name_list.appendAssumeCapacity(gop.key_ptr.*); - return std.StringHashMapUnmanaged(ErrorInt).KV{ - .key = gop.key_ptr.*, - .value = gop.value_ptr.*, - }; +pub fn getErrorValueFromSlice( + mod: *Module, + name: []const u8, +) Allocator.Error!ErrorInt { + const interned_name = try mod.intern_pool.getOrPutString(mod.gpa, name); + return getErrorValue(mod, interned_name); } pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedValue) !Decl.Index { @@ -5953,29 +5786,28 @@ pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedV pub fn createAnonymousDeclFromDecl( mod: *Module, src_decl: *Decl, - namespace: *Namespace, + namespace: Namespace.Index, src_scope: ?*CaptureScope, tv: TypedValue, ) !Decl.Index { const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope); errdefer mod.destroyDecl(new_decl_index); - const name = try std.fmt.allocPrintZ(mod.gpa, "{s}__anon_{d}", .{ - src_decl.name, @enumToInt(new_decl_index), + const name = try mod.intern_pool.getOrPutStringFmt(mod.gpa, "{}__anon_{d}", .{ + src_decl.name.fmt(&mod.intern_pool), @enumToInt(new_decl_index), }); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, tv, name); return new_decl_index; } -/// Takes ownership of `name` even if it returns an error. 
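+/// Asserts that the type of `typed_value` matches the interned type of its value.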
pub fn initNewAnonDecl( mod: *Module, new_decl_index: Decl.Index, src_line: u32, - namespace: *Namespace, + namespace: Namespace.Index, typed_value: TypedValue, - name: [:0]u8, -) !void { - errdefer mod.gpa.free(name); + name: InternPool.NullTerminatedString, +) Allocator.Error!void { + assert(typed_value.ty.toIntern() == mod.intern_pool.typeOf(typed_value.val.toIntern())); const new_decl = mod.declPtr(new_decl_index); @@ -5984,34 +5816,12 @@ pub fn initNewAnonDecl( new_decl.ty = typed_value.ty; new_decl.val = typed_value.val; new_decl.@"align" = 0; - new_decl.@"linksection" = null; + new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; - try namespace.anon_decls.putNoClobber(mod.gpa, new_decl_index, {}); - - // The Decl starts off with alive=false and the codegen backend will set alive=true - // if the Decl is referenced by an instruction or another constant. Otherwise, - // the Decl will be garbage collected by the `codegen_decl` task instead of sent - // to the linker. - if (typed_value.ty.isFnOrHasRuntimeBits()) { - try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index }); - } -} - -pub fn makeIntType(arena: Allocator, signedness: std.builtin.Signedness, bits: u16) !Type { - const int_payload = try arena.create(Type.Payload.Bits); - int_payload.* = .{ - .base = .{ - .tag = switch (signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - }, - }, - .data = bits, - }; - return Type.initPayload(&int_payload.base); + try mod.namespacePtr(namespace).anon_decls.putNoClobber(mod.gpa, new_decl_index, {}); } pub fn errNoteNonLazy( @@ -6073,16 +5883,17 @@ pub const SwitchProngSrc = union(enum) { /// the LazySrcLoc in order to emit a compile error. pub fn resolve( prong_src: SwitchProngSrc, - gpa: Allocator, + mod: *Module, decl: *Decl, switch_node_offset: i32, range_expand: RangeExpand, ) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6166,11 +5977,12 @@ pub const PeerTypeCandidateSrc = union(enum) { pub fn resolve( self: PeerTypeCandidateSrc, - gpa: Allocator, + mod: *Module, decl: *Decl, candidate_i: usize, ) ?LazySrcLoc { @setCold(true); + const gpa = mod.gpa; switch (self) { .none => { @@ -6192,10 +6004,10 @@ pub const PeerTypeCandidateSrc = union(enum) { else => {}, } - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6254,15 +6066,16 @@ fn queryFieldSrc( pub fn paramSrc( func_node_offset: i32, - gpa: Allocator, + mod: *Module, decl: *Decl, param_i: usize, ) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6284,19 +6097,20 @@ pub fn paramSrc( } pub fn argSrc( + mod: *Module, call_node_offset: i32, - gpa: Allocator, decl: *Decl, start_arg_i: usize, bound_arg_src: ?LazySrcLoc, ) LazySrcLoc { + @setCold(true); + const gpa = mod.gpa; if (start_arg_i == 0 and bound_arg_src != null) return bound_arg_src.?; const arg_i = start_arg_i - @boolToInt(bound_arg_src != null); - @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6310,7 +6124,7 @@ pub fn argSrc( const node_datas = tree.nodes.items(.data); const call_args_node = tree.extra_data[node_datas[node].rhs - 1]; const call_args_offset = decl.nodeIndexToRelative(call_args_node); - return initSrc(call_args_offset, gpa, decl, arg_i); + return mod.initSrc(call_args_offset, decl, arg_i); }, else => unreachable, }; @@ -6318,16 +6132,17 @@ pub fn argSrc( } pub fn initSrc( + mod: *Module, init_node_offset: i32, - gpa: Allocator, decl: *Decl, init_index: usize, ) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6363,12 +6178,13 @@ pub fn initSrc( } } -pub fn optionsSrc(gpa: Allocator, decl: *Decl, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { +pub fn optionsSrc(mod: *Module, decl: *Decl, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc { @setCold(true); - const tree = decl.getFileScope().getTree(gpa) catch |err| { + const gpa = mod.gpa; + const tree = decl.getFileScope(mod).getTree(gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ - decl.getFileScope().sub_file_path, @errorName(err), + decl.getFileScope(mod).sub_file_path, @errorName(err), }); return LazySrcLoc.nodeOffset(0); }; @@ -6430,11 +6246,13 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void { // deletion set at this time. for (file.deleted_decls.items) |decl_index| { const decl = mod.declPtr(decl_index); - log.debug("deleted from source: {*} ({s})", .{ decl, decl.name }); // Remove from the namespace it resides in, preserving declaration order. assert(decl.zir_decl_index != 0); - _ = decl.src_namespace.decls.orderedRemoveAdapted(@as([]const u8, mem.sliceTo(decl.name, 0)), DeclAdapter{ .mod = mod }); + _ = mod.namespacePtr(decl.src_namespace).decls.orderedRemoveAdapted( + decl.name, + DeclAdapter{ .mod = mod }, + ); try mod.clearDecl(decl_index, &outdated_decls); mod.destroyDecl(decl_index); @@ -6454,7 +6272,7 @@ pub fn processOutdatedAndDeletedDecls(mod: *Module) !void { pub fn processExports(mod: *Module) !void { const gpa = mod.gpa; // Map symbol names to `Export` for name collision detection. 
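+    // Export names are now interned `NullTerminatedString`s, so the map can hash
+    // and compare them by interned index rather than by string contents.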
- var symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{}; + var symbol_exports: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, *Export) = .{}; defer symbol_exports.deinit(gpa); var it = mod.decl_exports.iterator(); @@ -6462,13 +6280,13 @@ pub fn processExports(mod: *Module) !void { const exported_decl = entry.key_ptr.*; const exports = entry.value_ptr.items; for (exports) |new_export| { - const gop = try symbol_exports.getOrPut(gpa, new_export.options.name); + const gop = try symbol_exports.getOrPut(gpa, new_export.opts.name); if (gop.found_existing) { new_export.status = .failed_retryable; try mod.failed_exports.ensureUnusedCapacity(gpa, 1); const src_loc = new_export.getSrcLoc(mod); - const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {s}", .{ - new_export.options.name, + const msg = try ErrorMsg.create(gpa, src_loc, "exported symbol collision: {}", .{ + new_export.opts.name.fmt(&mod.intern_pool), }); errdefer msg.destroy(gpa); const other_export = gop.value_ptr.*; @@ -6501,11 +6319,16 @@ pub fn populateTestFunctions( main_progress_node: *std.Progress.Node, ) !void { const gpa = mod.gpa; + const ip = &mod.intern_pool; const builtin_pkg = mod.main_pkg.table.get("builtin").?; const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file; const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?); - const builtin_namespace = root_decl.src_namespace; - const decl_index = builtin_namespace.decls.getKeyAdapted(@as([]const u8, "test_functions"), DeclAdapter{ .mod = mod }).?; + const builtin_namespace = mod.namespacePtr(root_decl.src_namespace); + const test_functions_str = try ip.getOrPutString(gpa, "test_functions"); + const decl_index = builtin_namespace.decls.getKeyAdapted( + test_functions_str, + DeclAdapter{ .mod = mod }, + ).?; { // We have to call `ensureDeclAnalyzed` here in case `builtin.test_functions` // was not referenced by start code. @@ -6518,90 +6341,117 @@ pub fn populateTestFunctions( try mod.ensureDeclAnalyzed(decl_index); } const decl = mod.declPtr(decl_index); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType(); + const test_fn_ty = decl.ty.slicePtrFieldType(mod).childType(mod); + const null_usize = try mod.intern(.{ .opt = .{ + .ty = try mod.intern(.{ .opt_type = .usize_type }), + .val = .none, + } }); const array_decl_index = d: { // Add mod.test_functions to an array decl then make the test_functions // decl reference it as a slice. - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const arena = new_decl_arena.allocator(); - - const test_fn_vals = try arena.alloc(Value, mod.test_functions.count()); - const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ - .ty = try Type.Tag.array.create(arena, .{ - .len = test_fn_vals.len, - .elem_type = try tmp_test_fn_ty.copy(arena), - }), - .val = try Value.Tag.aggregate.create(arena, test_fn_vals), - }); - const array_decl = mod.declPtr(array_decl_index); + const test_fn_vals = try gpa.alloc(InternPool.Index, mod.test_functions.count()); + defer gpa.free(test_fn_vals); // Add a dependency on each test name and function pointer. 
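+        // The dependencies are buffered here and registered via
+        // `declareDeclDependency` once the array decl exists (see below).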
- try array_decl.dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2); + var array_decl_dependencies = std.ArrayListUnmanaged(Decl.Index){}; + defer array_decl_dependencies.deinit(gpa); + try array_decl_dependencies.ensureUnusedCapacity(gpa, test_fn_vals.len * 2); - for (mod.test_functions.keys(), 0..) |test_decl_index, i| { + for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| { const test_decl = mod.declPtr(test_decl_index); - const test_name_slice = mem.sliceTo(test_decl.name, 0); + // TODO: write something like getCoercedInts to avoid needing to dupe + const test_decl_name = try gpa.dupe(u8, ip.stringToSlice(test_decl.name)); + defer gpa.free(test_decl_name); const test_name_decl_index = n: { - var name_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer name_decl_arena.deinit(); - const bytes = try name_decl_arena.allocator().dupe(u8, test_name_slice); - const test_name_decl_index = try mod.createAnonymousDeclFromDecl(array_decl, array_decl.src_namespace, null, .{ - .ty = try Type.Tag.array_u8.create(name_decl_arena.allocator(), bytes.len), - .val = try Value.Tag.bytes.create(name_decl_arena.allocator(), bytes), + const test_name_decl_ty = try mod.arrayType(.{ + .len = test_decl_name.len, + .child = .u8_type, + }); + const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ + .ty = test_name_decl_ty, + .val = (try mod.intern(.{ .aggregate = .{ + .ty = test_name_decl_ty.toIntern(), + .storage = .{ .bytes = test_decl_name }, + } })).toValue(), }); - try mod.declPtr(test_name_decl_index).finalizeNewArena(&name_decl_arena); break :n test_name_decl_index; }; - array_decl.dependencies.putAssumeCapacityNoClobber(test_decl_index, .normal); - array_decl.dependencies.putAssumeCapacityNoClobber(test_name_decl_index, .normal); + array_decl_dependencies.appendAssumeCapacity(test_decl_index); + array_decl_dependencies.appendAssumeCapacity(test_name_decl_index); try mod.linkerUpdateDecl(test_name_decl_index); - const field_vals = try arena.create([3]Value); - field_vals.* = .{ - try Value.Tag.slice.create(arena, .{ - .ptr = try Value.Tag.decl_ref.create(arena, test_name_decl_index), - .len = try Value.Tag.int_u64.create(arena, test_name_slice.len), - }), // name - try Value.Tag.decl_ref.create(arena, test_decl_index), // func - Value.initTag(.null_value), // async_frame_size + const test_fn_fields = .{ + // name + try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = test_name_decl_index }, + .len = try mod.intern(.{ .int = .{ + .ty = .usize_type, + .storage = .{ .u64 = test_decl_name.len }, + } }), + } }), + // func + try mod.intern(.{ .ptr = .{ + .ty = try mod.intern(.{ .ptr_type = .{ + .child = test_decl.ty.toIntern(), + .flags = .{ + .is_const = true, + }, + } }), + .addr = .{ .decl = test_decl_index }, + } }), + // async_frame_size + null_usize, }; - test_fn_vals[i] = try Value.Tag.aggregate.create(arena, field_vals); + test_fn_val.* = try mod.intern(.{ .aggregate = .{ + .ty = test_fn_ty.toIntern(), + .storage = .{ .elems = &test_fn_fields }, + } }); + } + + const array_decl_ty = try mod.arrayType(.{ + .len = test_fn_vals.len, + .child = test_fn_ty.toIntern(), + .sentinel = .none, + }); + const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, null, .{ + .ty = array_decl_ty, + .val = (try mod.intern(.{ .aggregate = .{ + .ty = array_decl_ty.toIntern(), + .storage = .{ .elems = test_fn_vals }, + } })).toValue(), + }); + for (array_decl_dependencies.items) 
|array_decl_dependency| { + try mod.declareDeclDependency(array_decl_index, array_decl_dependency); } - try array_decl.finalizeNewArena(&new_decl_arena); break :d array_decl_index; }; try mod.linkerUpdateDecl(array_decl_index); { - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const arena = new_decl_arena.allocator(); + const new_ty = try mod.ptrType(.{ + .child = test_fn_ty.toIntern(), + .flags = .{ + .is_const = true, + .size = .Slice, + }, + }); + const new_val = decl.val; + const new_init = try mod.intern(.{ .ptr = .{ + .ty = new_ty.toIntern(), + .addr = .{ .decl = array_decl_index }, + .len = (try mod.intValue(Type.usize, mod.test_functions.count())).toIntern(), + } }); + ip.mutateVarInit(decl.val.toIntern(), new_init); - { - // This copy accesses the old Decl Type/Value so it must be done before `clearValues`. - const new_ty = try Type.Tag.const_slice.create(arena, try tmp_test_fn_ty.copy(arena)); - const new_var = try gpa.create(Var); - errdefer gpa.destroy(new_var); - new_var.* = decl.val.castTag(.variable).?.data.*; - new_var.init = try Value.Tag.slice.create(arena, .{ - .ptr = try Value.Tag.decl_ref.create(arena, array_decl_index), - .len = try Value.Tag.int_u64.create(arena, mod.test_functions.count()), - }); - const new_val = try Value.Tag.variable.create(arena, new_var); - - // Since we are replacing the Decl's value we must perform cleanup on the - // previous value. - decl.clearValues(mod); - decl.ty = new_ty; - decl.val = new_val; - decl.has_tv = true; - } - - try decl.finalizeNewArena(&new_decl_arena); + // Since we are replacing the Decl's value we must perform cleanup on the + // previous value. + decl.clearValues(mod); + decl.ty = new_ty; + decl.val = new_val; + decl.has_tv = true; } try mod.linkerUpdateDecl(decl_index); } @@ -6631,7 +6481,7 @@ pub fn linkerUpdateDecl(mod: *Module, decl_index: Decl.Index) !void { try mod.failed_decls.ensureUnusedCapacity(gpa, 1); mod.failed_decls.putAssumeCapacityNoClobber(decl_index, try ErrorMsg.create( gpa, - decl.srcLoc(), + decl.srcLoc(mod), "unable to codegen: {s}", .{@errorName(err)}, )); @@ -6673,64 +6523,49 @@ fn reportRetryableFileError( gop.value_ptr.* = err_msg; } -pub fn markReferencedDeclsAlive(mod: *Module, val: Value) void { - switch (val.tag()) { - .decl_ref_mut => return mod.markDeclIndexAlive(val.castTag(.decl_ref_mut).?.data.decl_index), - .extern_fn => return mod.markDeclIndexAlive(val.castTag(.extern_fn).?.data.owner_decl), - .function => return mod.markDeclIndexAlive(val.castTag(.function).?.data.owner_decl), - .variable => return mod.markDeclIndexAlive(val.castTag(.variable).?.data.owner_decl), - .decl_ref => return mod.markDeclIndexAlive(val.cast(Value.Payload.Decl).?.data), - - .repeated, - .eu_payload, - .opt_payload, - .empty_array_sentinel, - => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.SubValue).?.data), - - .eu_payload_ptr, - .opt_payload_ptr, - => return mod.markReferencedDeclsAlive(val.cast(Value.Payload.PayloadPtr).?.data.container_ptr), - - .slice => { - const slice = val.cast(Value.Payload.Slice).?.data; - mod.markReferencedDeclsAlive(slice.ptr); - mod.markReferencedDeclsAlive(slice.len); +pub fn markReferencedDeclsAlive(mod: *Module, val: Value) Allocator.Error!void { + switch (mod.intern_pool.indexToKey(val.toIntern())) { + .variable => |variable| try mod.markDeclIndexAlive(variable.decl), + .extern_func => |extern_func| try mod.markDeclIndexAlive(extern_func.decl), + .func => |func| try 
mod.markDeclIndexAlive(mod.funcPtr(func.index).owner_decl), + .error_union => |error_union| switch (error_union.val) { + .err_name => {}, + .payload => |payload| try mod.markReferencedDeclsAlive(payload.toValue()), }, - - .elem_ptr => { - const elem_ptr = val.cast(Value.Payload.ElemPtr).?.data; - return mod.markReferencedDeclsAlive(elem_ptr.array_ptr); - }, - .field_ptr => { - const field_ptr = val.cast(Value.Payload.FieldPtr).?.data; - return mod.markReferencedDeclsAlive(field_ptr.container_ptr); - }, - .aggregate => { - for (val.castTag(.aggregate).?.data) |field_val| { - mod.markReferencedDeclsAlive(field_val); + .ptr => |ptr| { + switch (ptr.addr) { + .decl => |decl| try mod.markDeclIndexAlive(decl), + .mut_decl => |mut_decl| try mod.markDeclIndexAlive(mut_decl.decl), + .int, .comptime_field => {}, + .eu_payload, .opt_payload => |parent| try mod.markReferencedDeclsAlive(parent.toValue()), + .elem, .field => |base_index| try mod.markReferencedDeclsAlive(base_index.base.toValue()), } + if (ptr.len != .none) try mod.markReferencedDeclsAlive(ptr.len.toValue()); }, - .@"union" => { - const data = val.cast(Value.Payload.Union).?.data; - mod.markReferencedDeclsAlive(data.tag); - mod.markReferencedDeclsAlive(data.val); + .opt => |opt| if (opt.val != .none) try mod.markReferencedDeclsAlive(opt.val.toValue()), + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| + try mod.markReferencedDeclsAlive(elem.toValue()), + .un => |un| { + try mod.markReferencedDeclsAlive(un.tag.toValue()); + try mod.markReferencedDeclsAlive(un.val.toValue()); }, - else => {}, } } -pub fn markDeclAlive(mod: *Module, decl: *Decl) void { +pub fn markDeclAlive(mod: *Module, decl: *Decl) Allocator.Error!void { if (decl.alive) return; decl.alive = true; + try decl.intern(mod); + // This is the first time we are marking this Decl alive. We must // therefore recurse into its value and mark any Decl it references // as also alive, so that any Decl referenced does not get garbage collected. - mod.markReferencedDeclsAlive(decl.val); + try mod.markReferencedDeclsAlive(decl.val); } -fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) void { +fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) Allocator.Error!void { return mod.markDeclAlive(mod.declPtr(decl_index)); } @@ -6779,3 +6614,522 @@ pub fn backendSupportsFeature(mod: Module, feature: Feature) bool { .field_reordering => mod.comp.bin_file.options.use_llvm, }; } + +/// Shortcut for calling `intern_pool.get`. +pub fn intern(mod: *Module, key: InternPool.Key) Allocator.Error!InternPool.Index { + return mod.intern_pool.get(mod.gpa, key); +} + +/// Shortcut for calling `intern_pool.getCoerced`. 
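+/// Both `val` and `new_ty` must already be interned.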
+pub fn getCoerced(mod: *Module, val: Value, new_ty: Type) Allocator.Error!Value {
+    return (try mod.intern_pool.getCoerced(mod.gpa, val.toIntern(), new_ty.toIntern())).toValue();
+}
+
+pub fn intType(mod: *Module, signedness: std.builtin.Signedness, bits: u16) Allocator.Error!Type {
+    return (try intern(mod, .{ .int_type = .{
+        .signedness = signedness,
+        .bits = bits,
+    } })).toType();
+}
+
+pub fn arrayType(mod: *Module, info: InternPool.Key.ArrayType) Allocator.Error!Type {
+    const i = try intern(mod, .{ .array_type = info });
+    return i.toType();
+}
+
+pub fn vectorType(mod: *Module, info: InternPool.Key.VectorType) Allocator.Error!Type {
+    const i = try intern(mod, .{ .vector_type = info });
+    return i.toType();
+}
+
+pub fn optionalType(mod: *Module, child_type: InternPool.Index) Allocator.Error!Type {
+    const i = try intern(mod, .{ .opt_type = child_type });
+    return i.toType();
+}
+
+pub fn ptrType(mod: *Module, info: InternPool.Key.PtrType) Allocator.Error!Type {
+    var canon_info = info;
+    const have_elem_layout = info.child.toType().layoutIsResolved(mod);
+
+    if (info.flags.size == .C) canon_info.flags.is_allowzero = true;
+
+    // Canonicalize non-zero alignment. If it matches the ABI alignment of the pointee
+    // type, we change it to 0 here. If this causes an assertion trip because the
+    // pointee type needs to be resolved more, that needs to be done before calling
+    // this `ptrType` function.
+    if (info.flags.alignment.toByteUnitsOptional()) |info_align| {
+        if (have_elem_layout and info_align == info.child.toType().abiAlignment(mod)) {
+            canon_info.flags.alignment = .none;
+        }
+    }
+
+    switch (info.flags.vector_index) {
+        // Canonicalize host_size. If it matches the bit size of the pointee type,
+        // we change it to 0 here. If this causes an assertion trip, the pointee type
+        // needs to be resolved before calling this `ptrType` function.
+        .none => if (have_elem_layout and info.packed_offset.host_size != 0) {
+            const elem_bit_size = info.child.toType().bitSize(mod);
+            assert(info.packed_offset.bit_offset + elem_bit_size <= info.packed_offset.host_size * 8);
+            if (info.packed_offset.host_size * 8 == elem_bit_size) {
+                canon_info.packed_offset.host_size = 0;
+            }
+        },
+        .runtime => {},
+        _ => assert(@enumToInt(info.flags.vector_index) < info.packed_offset.host_size),
+    }
+
+    return (try intern(mod, .{ .ptr_type = canon_info })).toType();
+}
+
+pub fn singleMutPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
+    return ptrType(mod, .{ .child = child_type.toIntern() });
+}
+
+pub fn singleConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
+    return ptrType(mod, .{
+        .child = child_type.toIntern(),
+        .flags = .{
+            .is_const = true,
+        },
+    });
+}
+
+pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
+    return ptrType(mod, .{
+        .child = child_type.toIntern(),
+        .flags = .{
+            .size = .Many,
+            .is_const = true,
+        },
+    });
+}
+
+pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
+    const info = Type.ptrInfoIp(&mod.intern_pool, ptr_ty.toIntern());
+    return mod.ptrType(.{
+        .child = new_child.toIntern(),
+        .sentinel = info.sentinel,
+        .flags = info.flags,
+        .packed_offset = info.packed_offset,
+    });
+}
+
+pub fn funcType(mod: *Module, info: InternPool.Key.FuncType) Allocator.Error!Type {
+    return (try intern(mod, .{ .func_type = info })).toType();
+}
+
+/// Use this for `anyframe->T` only.
+/// For `anyframe`, use the `InternPool.Index.anyframe` tag directly.
+pub fn anyframeType(mod: *Module, payload_ty: Type) Allocator.Error!Type { + return (try intern(mod, .{ .anyframe_type = payload_ty.toIntern() })).toType(); +} + +pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Allocator.Error!Type { + return (try intern(mod, .{ .error_union_type = .{ + .error_set_type = error_set_ty.toIntern(), + .payload_type = payload_ty.toIntern(), + } })).toType(); +} + +pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type { + const names: *const [1]InternPool.NullTerminatedString = &name; + return (try mod.intern_pool.get(mod.gpa, .{ .error_set_type = .{ .names = names } })).toType(); +} + +/// Sorts `names` in place. +pub fn errorSetFromUnsortedNames( + mod: *Module, + names: []InternPool.NullTerminatedString, +) Allocator.Error!Type { + std.mem.sort( + InternPool.NullTerminatedString, + names, + {}, + InternPool.NullTerminatedString.indexLessThan, + ); + const new_ty = try mod.intern(.{ .error_set_type = .{ .names = names } }); + return new_ty.toType(); +} + +/// Supports optionals in addition to pointers. +pub fn ptrIntValue(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { + return mod.getCoerced(try mod.intValue_u64(Type.usize, x), ty); +} + +/// Supports only pointers. See `ptrIntValue` for pointer-like optional support. +pub fn ptrIntValue_ptronly(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { + assert(ty.zigTypeTag(mod) == .Pointer); + const i = try intern(mod, .{ .ptr = .{ + .ty = ty.toIntern(), + .addr = .{ .int = try mod.intValue_u64(Type.usize, x) }, + } }); + return i.toValue(); +} + +/// Creates an enum tag value based on the integer tag value. +pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Error!Value { + if (std.debug.runtime_safety) { + const tag = ty.zigTypeTag(mod); + assert(tag == .Enum); + } + const i = try intern(mod, .{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = tag_int, + } }); + return i.toValue(); +} + +/// Creates an enum tag value based on the field index according to source code +/// declaration order. +pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value { + const ip = &mod.intern_pool; + const gpa = mod.gpa; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; + + if (enum_type.values.len == 0) { + // Auto-numbered fields. 
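+        // The tag's integer value is therefore the field index itself.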
+ return (try ip.get(gpa, .{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = try ip.get(gpa, .{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = field_index }, + } }), + } })).toValue(); + } + + return (try ip.get(gpa, .{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = enum_type.values[field_index], + } })).toValue(); +} + +pub fn intValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { + if (std.math.cast(u64, x)) |casted| return intValue_u64(mod, ty, casted); + if (std.math.cast(i64, x)) |casted| return intValue_i64(mod, ty, casted); + var limbs_buffer: [4]usize = undefined; + var big_int = BigIntMutable.init(&limbs_buffer, x); + return intValue_big(mod, ty, big_int.toConst()); +} + +pub fn intValue_big(mod: *Module, ty: Type, x: BigIntConst) Allocator.Error!Value { + const i = try intern(mod, .{ .int = .{ + .ty = ty.toIntern(), + .storage = .{ .big_int = x }, + } }); + return i.toValue(); +} + +pub fn intValue_u64(mod: *Module, ty: Type, x: u64) Allocator.Error!Value { + const i = try intern(mod, .{ .int = .{ + .ty = ty.toIntern(), + .storage = .{ .u64 = x }, + } }); + return i.toValue(); +} + +pub fn intValue_i64(mod: *Module, ty: Type, x: i64) Allocator.Error!Value { + const i = try intern(mod, .{ .int = .{ + .ty = ty.toIntern(), + .storage = .{ .i64 = x }, + } }); + return i.toValue(); +} + +pub fn unionValue(mod: *Module, union_ty: Type, tag: Value, val: Value) Allocator.Error!Value { + const i = try intern(mod, .{ .un = .{ + .ty = union_ty.toIntern(), + .tag = tag.toIntern(), + .val = val.toIntern(), + } }); + return i.toValue(); +} + +/// This function casts the float representation down to the representation of the type, potentially +/// losing data if the representation wasn't correct. +pub fn floatValue(mod: *Module, ty: Type, x: anytype) Allocator.Error!Value { + const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(mod.getTarget())) { + 16 => .{ .f16 = @floatCast(f16, x) }, + 32 => .{ .f32 = @floatCast(f32, x) }, + 64 => .{ .f64 = @floatCast(f64, x) }, + 80 => .{ .f80 = @floatCast(f80, x) }, + 128 => .{ .f128 = @floatCast(f128, x) }, + else => unreachable, + }; + const i = try intern(mod, .{ .float = .{ + .ty = ty.toIntern(), + .storage = storage, + } }); + return i.toValue(); +} + +pub fn nullValue(mod: *Module, opt_ty: Type) Allocator.Error!Value { + const ip = &mod.intern_pool; + assert(ip.isOptionalType(opt_ty.toIntern())); + const result = try ip.get(mod.gpa, .{ .opt = .{ + .ty = opt_ty.toIntern(), + .val = .none, + } }); + return result.toValue(); +} + +pub fn smallestUnsignedInt(mod: *Module, max: u64) Allocator.Error!Type { + return intType(mod, .unsigned, Type.smallestUnsignedBits(max)); +} + +/// Returns the smallest possible integer type containing both `min` and +/// `max`. Asserts that neither value is undef. +/// TODO: if #3806 is implemented, this becomes trivial +pub fn intFittingRange(mod: *Module, min: Value, max: Value) !Type { + assert(!min.isUndef(mod)); + assert(!max.isUndef(mod)); + + if (std.debug.runtime_safety) { + assert(Value.order(min, max, mod).compare(.lte)); + } + + const sign = min.orderAgainstZero(mod) == .lt; + + const min_val_bits = intBitsForValue(mod, min, sign); + const max_val_bits = intBitsForValue(mod, max, sign); + + return mod.intType( + if (sign) .signed else .unsigned, + @max(min_val_bits, max_val_bits), + ); +} + +/// Given a value representing an integer, returns the number of bits necessary to represent +/// this value in an integer. 
If `sign` is true, returns the number of bits necessary in a +/// twos-complement integer; otherwise in an unsigned integer. +/// Asserts that `val` is not undef. If `val` is negative, asserts that `sign` is true. +pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 { + assert(!val.isUndef(mod)); + + const key = mod.intern_pool.indexToKey(val.toIntern()); + switch (key.int.storage) { + .i64 => |x| { + if (std.math.cast(u64, x)) |casted| return Type.smallestUnsignedBits(casted) + @boolToInt(sign); + assert(sign); + // Protect against overflow in the following negation. + if (x == std.math.minInt(i64)) return 64; + return Type.smallestUnsignedBits(@intCast(u64, -x - 1)) + 1; + }, + .u64 => |x| { + return Type.smallestUnsignedBits(x) + @boolToInt(sign); + }, + .big_int => |big| { + if (big.positive) return @intCast(u16, big.bitCountAbs() + @boolToInt(sign)); + + // Zero is still a possibility, in which case unsigned is fine + if (big.eqZero()) return 0; + + return @intCast(u16, big.bitCountTwosComp()); + }, + .lazy_align => |lazy_ty| { + return Type.smallestUnsignedBits(lazy_ty.toType().abiAlignment(mod)) + @boolToInt(sign); + }, + .lazy_size => |lazy_ty| { + return Type.smallestUnsignedBits(lazy_ty.toType().abiSize(mod)) + @boolToInt(sign); + }, + } +} + +pub const AtomicPtrAlignmentError = error{ + FloatTooBig, + IntTooBig, + BadType, + OutOfMemory, +}; + +pub const AtomicPtrAlignmentDiagnostics = struct { + bits: u16 = undefined, + max_bits: u16 = undefined, +}; + +/// If ABI alignment of `ty` is OK for atomic operations, returns 0. +/// Otherwise returns the alignment required on a pointer for the target +/// to perform atomic operations. +// TODO this function does not take into account CPU features, which can affect +// this value. Audit this! 
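+// On error.FloatTooBig or error.IntTooBig, `diags` is populated with the offending
+// and maximum bit counts; error.BadType leaves it untouched.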
+pub fn atomicPtrAlignment( + mod: *Module, + ty: Type, + diags: *AtomicPtrAlignmentDiagnostics, +) AtomicPtrAlignmentError!u32 { + const target = mod.getTarget(); + const max_atomic_bits: u16 = switch (target.cpu.arch) { + .avr, + .msp430, + .spu_2, + => 16, + + .arc, + .arm, + .armeb, + .hexagon, + .m68k, + .le32, + .mips, + .mipsel, + .nvptx, + .powerpc, + .powerpcle, + .r600, + .riscv32, + .sparc, + .sparcel, + .tce, + .tcele, + .thumb, + .thumbeb, + .x86, + .xcore, + .amdil, + .hsail, + .spir, + .kalimba, + .lanai, + .shave, + .wasm32, + .renderscript32, + .csky, + .spirv32, + .dxil, + .loongarch32, + .xtensa, + => 32, + + .amdgcn, + .bpfel, + .bpfeb, + .le64, + .mips64, + .mips64el, + .nvptx64, + .powerpc64, + .powerpc64le, + .riscv64, + .sparc64, + .s390x, + .amdil64, + .hsail64, + .spir64, + .wasm64, + .renderscript64, + .ve, + .spirv64, + .loongarch64, + => 64, + + .aarch64, + .aarch64_be, + .aarch64_32, + => 128, + + .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64, + }; + + const int_ty = switch (ty.zigTypeTag(mod)) { + .Int => ty, + .Enum => ty.intTagType(mod), + .Float => { + const bit_count = ty.floatBits(target); + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.FloatTooBig; + } + return 0; + }, + .Bool => return 0, + else => { + if (ty.isPtrAtRuntime(mod)) return 0; + return error.BadType; + }, + }; + + const bit_count = int_ty.intInfo(mod).bits; + if (bit_count > max_atomic_bits) { + diags.* = .{ + .bits = bit_count, + .max_bits = max_atomic_bits, + }; + return error.IntTooBig; + } + + return 0; +} + +pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc { + return mod.declPtr(opaque_type.decl).srcLoc(mod); +} + +pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) !InternPool.NullTerminatedString { + return mod.declPtr(opaque_type.decl).getFullyQualifiedName(mod); +} + +pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File { + return mod.declPtr(decl_index).getFileScope(mod); +} + +pub fn namespaceDeclIndex(mod: *Module, namespace_index: Namespace.Index) Decl.Index { + return mod.namespacePtr(namespace_index).getDeclIndex(mod); +} + +/// Returns null in the following cases: +/// * `@TypeOf(.{})` +/// * A struct which has no fields (`struct {}`). +/// * Not a struct. 
+pub fn typeToStruct(mod: *Module, ty: Type) ?*Struct { + if (ty.ip_index == .none) return null; + const struct_index = mod.intern_pool.indexToStructType(ty.toIntern()).unwrap() orelse return null; + return mod.structPtr(struct_index); +} + +pub fn typeToUnion(mod: *Module, ty: Type) ?*Union { + if (ty.ip_index == .none) return null; + const union_index = mod.intern_pool.indexToUnionType(ty.toIntern()).unwrap() orelse return null; + return mod.unionPtr(union_index); +} + +pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType { + if (ty.ip_index == .none) return null; + return mod.intern_pool.indexToFuncType(ty.toIntern()); +} + +pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*Fn.InferredErrorSet { + const index = typeToInferredErrorSetIndex(mod, ty).unwrap() orelse return null; + return mod.inferredErrorSetPtr(index); +} + +pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) Fn.InferredErrorSet.OptionalIndex { + if (ty.ip_index == .none) return .none; + return mod.intern_pool.indexToInferredErrorSetType(ty.toIntern()); +} + +pub fn fieldSrcLoc(mod: *Module, owner_decl_index: Decl.Index, query: FieldSrcQuery) SrcLoc { + @setCold(true); + const owner_decl = mod.declPtr(owner_decl_index); + const file = owner_decl.getFileScope(mod); + const tree = file.getTree(mod.gpa) catch |err| { + // In this case we emit a warning + a less precise source location. + log.warn("unable to load {s}: {s}", .{ + file.sub_file_path, @errorName(err), + }); + return owner_decl.srcLoc(mod); + }; + const node = owner_decl.relativeToNodeIndex(0); + var buf: [2]Ast.Node.Index = undefined; + if (tree.fullContainerDecl(&buf, node)) |container_decl| { + return queryFieldSrc(tree.*, query, file, container_decl); + } else { + // This type was generated using @Type + return owner_decl.srcLoc(mod); + } +} + +pub fn toEnum(mod: *Module, comptime E: type, val: Value) E { + return mod.intern_pool.toEnum(E, val.toIntern()); +} diff --git a/src/RangeSet.zig b/src/RangeSet.zig index aa051ff424..f808322fc7 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -1,18 +1,18 @@ const std = @import("std"); +const assert = std.debug.assert; const Order = std.math.Order; -const RangeSet = @This(); +const InternPool = @import("InternPool.zig"); const Module = @import("Module.zig"); +const RangeSet = @This(); const SwitchProngSrc = @import("Module.zig").SwitchProngSrc; -const Type = @import("type.zig").Type; -const Value = @import("value.zig").Value; ranges: std.ArrayList(Range), module: *Module, pub const Range = struct { - first: Value, - last: Value, + first: InternPool.Index, + last: InternPool.Index, src: SwitchProngSrc, }; @@ -29,18 +29,27 @@ pub fn deinit(self: *RangeSet) void { pub fn add( self: *RangeSet, - first: Value, - last: Value, - ty: Type, + first: InternPool.Index, + last: InternPool.Index, src: SwitchProngSrc, ) !?SwitchProngSrc { + const mod = self.module; + const ip = &mod.intern_pool; + + const ty = ip.typeOf(first); + assert(ty == ip.typeOf(last)); + for (self.ranges.items) |range| { - if (last.compareAll(.gte, range.first, ty, self.module) and - first.compareAll(.lte, range.last, ty, self.module)) + assert(ty == ip.typeOf(range.first)); + assert(ty == ip.typeOf(range.last)); + + if (last.toValue().compareScalar(.gte, range.first.toValue(), ty.toType(), mod) and + first.toValue().compareScalar(.lte, range.last.toValue(), ty.toType(), mod)) { return range.src; // They overlap. 
} } + try self.ranges.append(.{ .first = first, .last = last, @@ -49,45 +58,43 @@ pub fn add( return null; } -const LessThanContext = struct { ty: Type, module: *Module }; - /// Assumes a and b do not overlap -fn lessThan(ctx: LessThanContext, a: Range, b: Range) bool { - return a.first.compareAll(.lt, b.first, ctx.ty, ctx.module); +fn lessThan(mod: *Module, a: Range, b: Range) bool { + const ty = mod.intern_pool.typeOf(a.first).toType(); + return a.first.toValue().compareScalar(.lt, b.first.toValue(), ty, mod); } -pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool { +pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool { + const mod = self.module; + const ip = &mod.intern_pool; + assert(ip.typeOf(first) == ip.typeOf(last)); + if (self.ranges.items.len == 0) return false; - std.mem.sort(Range, self.ranges.items, LessThanContext{ - .ty = ty, - .module = self.module, - }, lessThan); + std.mem.sort(Range, self.ranges.items, mod, lessThan); - if (!self.ranges.items[0].first.eql(first, ty, self.module) or - !self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, self.module)) + if (self.ranges.items[0].first != first or + self.ranges.items[self.ranges.items.len - 1].last != last) { return false; } - var space: Value.BigIntSpace = undefined; + var space: InternPool.Key.Int.Storage.BigIntSpace = undefined; var counter = try std.math.big.int.Managed.init(self.ranges.allocator); defer counter.deinit(); - const target = self.module.getTarget(); - // look for gaps for (self.ranges.items[1..], 0..) |cur, i| { // i starts counting from the second item. const prev = self.ranges.items[i]; // prev.last + 1 == cur.first - try counter.copy(prev.last.toBigInt(&space, target)); + try counter.copy(prev.last.toValue().toBigInt(&space, mod)); try counter.addScalar(&counter, 1); - const cur_start_int = cur.first.toBigInt(&space, target); + const cur_start_int = cur.first.toValue().toBigInt(&space, mod); if (!cur_start_int.eq(counter.toConst())) { return false; } diff --git a/src/Sema.zig b/src/Sema.zig index 9e21bfa83d..aa04c40fd0 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -11,13 +11,9 @@ gpa: Allocator, /// Points to the temporary arena allocator of the Sema. /// This arena will be cleared when the sema is destroyed. arena: Allocator, -/// Points to the arena allocator for the owner_decl. -/// This arena will persist until the decl is invalidated. -perm_arena: Allocator, code: Zir, air_instructions: std.MultiArrayList(Air.Inst) = .{}, air_extra: std.ArrayListUnmanaged(u32) = .{}, -air_values: std.ArrayListUnmanaged(Value) = .{}, /// Maps ZIR to AIR. inst_map: InstMap = .{}, /// When analyzing an inline function call, owner_decl is the Decl of the caller @@ -28,10 +24,12 @@ owner_decl_index: Decl.Index, /// For an inline or comptime function call, this will be the root parent function /// which contains the callsite. Corresponds to `owner_decl`. owner_func: ?*Module.Fn, +owner_func_index: Module.Fn.OptionalIndex, /// The function this ZIR code is the body of, according to the source code. /// This starts out the same as `owner_func` and then diverges in the case of /// an inline or comptime function call. func: ?*Module.Fn, +func_index: Module.Fn.OptionalIndex, /// Used to restore the error return trace when returning a non-error from a function. 
 error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
 /// When semantic analysis needs to know the return type of the function whose body
@@ -65,12 +63,15 @@ comptime_args_fn_inst: Zir.Inst.Index = 0,
 /// to use this instead of allocating a fresh one. This avoids an unnecessary
 /// extra hash table lookup in the `monomorphed_funcs` set.
 /// Sema will set this to null when it takes ownership.
-preallocated_new_func: ?*Module.Fn = null,
-/// The key is `constant` AIR instructions to types that must be fully resolved
-/// after the current function body analysis is done.
-/// TODO: after upgrading to use InternPool change the key here to be an
-/// InternPool value index.
-types_to_resolve: std.ArrayListUnmanaged(Air.Inst.Ref) = .{},
+preallocated_new_func: Module.Fn.OptionalIndex = .none,
+/// The keys are types that must be fully resolved prior to the machine code
+/// generation pass. Types are added to this set when resolving them
+/// immediately could cause a dependency loop, but they do need to be resolved
+/// before machine code generation passes process the AIR.
+/// It would work fine if this were an array list instead of an array hash map.
+/// I chose array hash map with the intention to save time by omitting
+/// duplicates.
+types_to_resolve: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
 /// These are lazily created runtime blocks from block_inline instructions.
 /// They are created when a break_inline passes through a runtime condition, because
 /// Sema must convert comptime control flow to runtime control flow, which means
@@ -84,12 +85,22 @@ is_generic_instantiation: bool = false,
 /// function types will emit generic poison instead of a partial type.
 no_partial_func_ty: bool = false,
 
-unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
+/// The temporary arena is used for the memory of the `InferredAlloc` values
+/// here so the values can be dropped without any cleanup.
+unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{},
+
+/// Indices of comptime-mutable decls created by this Sema. These decls' values
+/// should be interned after analysis completes, as they may refer to memory in
+/// the Sema arena.
+/// TODO: this is a workaround for memory bugs triggered by the removal of
+/// Decl.value_arena. A better solution needs to be found. Probably this will
+/// involve transitioning comptime-mutable memory away from using Decls at all.
+comptime_mutable_decls: *std.ArrayList(Decl.Index),
 
 const std = @import("std");
 const math = std.math;
 const mem = std.mem;
-const Allocator = std.mem.Allocator;
+const Allocator = mem.Allocator;
 const assert = std.debug.assert;
 
 const log = std.log.scoped(.sema);
@@ -114,6 +125,7 @@ const Package = @import("Package.zig");
 const crash_report = @import("crash_report.zig");
 const build_options = @import("build_options");
 const Compilation = @import("Compilation.zig");
+const InternPool = @import("InternPool.zig");
 
 pub const default_branch_quota = 1000;
 pub const default_reference_trace_len = 2;
@@ -226,7 +238,7 @@ pub const Block = struct {
     sema: *Sema,
     /// The namespace to use for lookups from this source block
     /// When analyzing fields, this is different from src_decl.src_namespace.
-    namespace: *Namespace,
+    namespace: Namespace.Index,
 
     /// The AIR instructions generated for this block.
     instructions: std.ArrayListUnmanaged(Air.Inst.Index),
     // `param` instructions are collected here to be used by the `func` instruction.
@@ -285,6 +297,7 @@ pub const Block = struct { fn explain(cr: ComptimeReason, sema: *Sema, msg: ?*Module.ErrorMsg) !void { const parent = msg orelse return; + const mod = sema.mod; const prefix = "expression is evaluated at comptime because "; switch (cr) { .c_import => |ci| { @@ -292,21 +305,21 @@ pub const Block = struct { }, .comptime_ret_ty => |rt| { const src_loc = if (try sema.funcDeclSrc(rt.func)) |fn_decl| blk: { - var src_loc = fn_decl.srcLoc(); + var src_loc = fn_decl.srcLoc(mod); src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 }; break :blk src_loc; } else blk: { - const src_decl = sema.mod.declPtr(rt.block.src_decl); - break :blk rt.func_src.toSrcLoc(src_decl); + const src_decl = mod.declPtr(rt.block.src_decl); + break :blk rt.func_src.toSrcLoc(src_decl, mod); }; - if (rt.return_ty.tag() == .generic_poison) { - return sema.mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{}); + if (rt.return_ty.isGenericPoison()) { + return mod.errNoteNonLazy(src_loc, parent, prefix ++ "the generic function was instantiated with a comptime-only return type", .{}); } - try sema.mod.errNoteNonLazy( + try mod.errNoteNonLazy( src_loc, parent, prefix ++ "the function returns a comptime-only type '{}'", - .{rt.return_ty.fmt(sema.mod)}, + .{rt.return_ty.fmt(mod)}, ); try sema.explainWhyTypeIsComptime(parent, src_loc, rt.return_ty); }, @@ -398,8 +411,8 @@ pub const Block = struct { }; } - pub fn getFileScope(block: *Block) *Module.File { - return block.namespace.file_scope; + pub fn getFileScope(block: *Block, mod: *Module) *Module.File { + return mod.namespacePtr(block.namespace).file_scope; } fn addTy( @@ -584,13 +597,18 @@ pub const Block = struct { } fn addCmpVector(block: *Block, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, cmp_op: std.math.CompareOperator) !Air.Inst.Ref { + const sema = block.sema; + const mod = sema.mod; return block.addInst(.{ .tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector, .data = .{ .ty_pl = .{ - .ty = try block.sema.addType( - try Type.vector(block.sema.arena, block.sema.typeOf(lhs).vectorLen(), Type.bool), + .ty = try sema.addType( + try mod.vectorType(.{ + .len = sema.typeOf(lhs).vectorLen(mod), + .child = .bool_type, + }), ), - .payload = try block.sema.addExtra(Air.VectorCmp{ + .payload = try sema.addExtra(Air.VectorCmp{ .lhs = lhs, .rhs = rhs, .op = Air.VectorCmp.encodeOp(cmp_op), @@ -684,29 +702,20 @@ pub const Block = struct { pub fn startAnonDecl(block: *Block) !WipAnonDecl { return WipAnonDecl{ .block = block, - .new_decl_arena = std.heap.ArenaAllocator.init(block.sema.gpa), .finished = false, }; } pub const WipAnonDecl = struct { block: *Block, - new_decl_arena: std.heap.ArenaAllocator, finished: bool, - pub fn arena(wad: *WipAnonDecl) Allocator { - return wad.new_decl_arena.allocator(); - } - pub fn deinit(wad: *WipAnonDecl) void { - if (!wad.finished) { - wad.new_decl_arena.deinit(); - } wad.* = undefined; } /// `alignment` value of 0 means to use ABI alignment. - pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value, alignment: u32) !Decl.Index { + pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value, alignment: u64) !Decl.Index { const sema = wad.block.sema; // Do this ahead of time because `createAnonymousDecl` depends on calling // `type.hasRuntimeBits()`. 
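For reference, the hunks in this area all follow the value-construction pattern that replaces the per-Decl arenas: build an `InternPool.Key`, intern it to obtain a stable `InternPool.Index`, then wrap that index as a `Type` or `Value`. A minimal sketch of the pattern, assuming only the `mod: *Module` pointer and the `intern`/`intValue` helpers added to Module.zig earlier in this diff:

    // Interning deduplicates: interning an equal key twice yields the same index.
    const ten = try mod.intValue(Type.usize, 10);
    // Composite keys embed previously interned indexes, e.g. a null `?usize`:
    const null_usize = try mod.intern(.{ .opt = .{
        .ty = try mod.intern(.{ .opt_type = .usize_type }),
        .val = .none,
    } });
    // An index converts to the legacy wrapper types without allocating.
    const as_value: Value = null_usize.toValue();
    _ = ten;
    _ = as_value;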
@@ -716,10 +725,11 @@ pub const Block = struct { .val = val, }); const new_decl = sema.mod.declPtr(new_decl_index); - new_decl.@"align" = alignment; + // TODO: migrate Decl alignment to use `InternPool.Alignment` + new_decl.@"align" = @intCast(u32, alignment); errdefer sema.mod.abortAnonDecl(new_decl_index); - try new_decl.finalizeNewArena(&wad.new_decl_arena); wad.finished = true; + try sema.mod.finalizeAnonDecl(new_decl_index); return new_decl_index; } }; @@ -736,11 +746,27 @@ const LabeledBlock = struct { } }; +/// The value stored in the inferred allocation. This will go into +/// peer type resolution. This is stored in a separate list so that +/// the items are contiguous in memory and thus can be passed to +/// `Module.resolvePeerTypes`. +const InferredAlloc = struct { + prongs: std.MultiArrayList(struct { + /// The dummy instruction used as a peer to resolve the type. + /// Although this has a redundant type with placeholder, this is + /// needed in addition because it may be a constant value, which + /// affects peer type resolution. + stored_inst: Air.Inst.Ref, + /// The bitcast instruction used as a placeholder when the + /// new result pointer type is not yet known. + placeholder: Air.Inst.Index, + }) = .{}, +}; + pub fn deinit(sema: *Sema) void { const gpa = sema.gpa; sema.air_instructions.deinit(gpa); sema.air_extra.deinit(gpa); - sema.air_values.deinit(gpa); sema.inst_map.deinit(gpa); sema.decl_val_table.deinit(gpa); sema.types_to_resolve.deinit(gpa); @@ -823,7 +849,7 @@ pub fn analyzeBodyBreak( else => |e| return e, }; if (block.instructions.items.len != 0 and - sema.typeOf(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1])).isNoReturn()) + sema.isNoReturn(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1]))) return null; const break_data = sema.code.instructions.items(.data)[break_inst].@"break"; const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data; @@ -858,18 +884,20 @@ fn analyzeBodyInner( try sema.inst_map.ensureSpaceForInstructions(sema.gpa, body); + // Most of the time, we don't need to construct a new capture scope for a + // block. However, successive iterations of comptime loops can capture + // different values for the same Zir.Inst.Index, so in those cases, we will + // have to create nested capture scopes; see the `.repeat` case below. 
const parent_capture_scope = block.wip_capture_scope; - - var wip_captures = WipCaptureScope{ - .finalized = true, + parent_capture_scope.incRef(); + var wip_captures: WipCaptureScope = .{ .scope = parent_capture_scope, - .perm_arena = sema.perm_arena, .gpa = sema.gpa, + .finalized = true, // don't finalize the parent scope }; - defer if (wip_captures.scope != parent_capture_scope) { - wip_captures.deinit(); - }; + defer wip_captures.deinit(); + const mod = sema.mod; const map = &sema.inst_map; const tags = sema.code.instructions.items(.tag); const datas = sema.code.instructions.items(.data); @@ -890,15 +918,15 @@ fn analyzeBodyInner( crash_info.setBodyIndex(i); const inst = body[i]; std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{ - sema.mod.declPtr(block.src_decl).src_namespace.file_scope.sub_file_path, inst, + mod.namespacePtr(mod.declPtr(block.src_decl).src_namespace).file_scope.sub_file_path, inst, }); const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .alloc => try sema.zirAlloc(block, inst), - .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), - .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), - .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_const)), - .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, Type.initTag(.inferred_alloc_mut)), + .alloc_inferred => try sema.zirAllocInferred(block, inst, true), + .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, false), + .alloc_inferred_comptime => try sema.zirAllocInferredComptime(inst, true), + .alloc_inferred_comptime_mut => try sema.zirAllocInferredComptime(inst, false), .alloc_mut => try sema.zirAllocMut(block, inst), .alloc_comptime_mut => try sema.zirAllocComptime(block, inst), .make_ptr_const => try sema.zirMakePtrConst(block, inst), @@ -962,7 +990,7 @@ fn analyzeBodyInner( .int_big => try sema.zirIntBig(block, inst), .float => try sema.zirFloat(block, inst), .float128 => try sema.zirFloat128(block, inst), - .int_type => try sema.zirIntType(block, inst), + .int_type => try sema.zirIntType(inst), .is_non_err => try sema.zirIsNonErr(block, inst), .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), .ret_is_non_err => try sema.zirRetIsNonErr(block, inst), @@ -1420,6 +1448,11 @@ fn analyzeBodyInner( const src = LazySrcLoc.nodeOffset(datas[inst].node); try sema.emitBackwardBranch(block, src); if (wip_captures.scope.captures.count() != orig_captures) { + // We need to construct new capture scopes for the next loop iteration so it + // can capture values without clobbering the earlier iteration's captures. + // At first, we reused the parent capture scope as an optimization, but for + // successive scopes we have to create new ones as children of the parent + // scope. try wip_captures.reset(parent_capture_scope); block.wip_capture_scope = wip_captures.scope; orig_captures = 0; @@ -1435,6 +1468,11 @@ fn analyzeBodyInner( const src = LazySrcLoc.nodeOffset(datas[inst].node); try sema.emitBackwardBranch(block, src); if (wip_captures.scope.captures.count() != orig_captures) { + // We need to construct new capture scopes for the next loop iteration so it + // can capture values without clobbering the earlier iteration's captures. + // At first, we reused the parent capture scope as an optimization, but for + // successive scopes we have to create new ones as children of the parent + // scope. 
try wip_captures.reset(parent_capture_scope); block.wip_capture_scope = wip_captures.scope; orig_captures = 0; @@ -1621,18 +1659,18 @@ fn analyzeBodyInner( const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const err_union = try sema.resolveInst(extra.data.operand); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); assert(is_non_err != .none); - const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { + const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_tv.val.toBool()) { + if (is_non_err_val.toBool()) { break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1654,11 +1692,11 @@ fn analyzeBodyInner( const err_union = try sema.analyzeLoad(block, src, operand, operand_src); const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union); assert(is_non_err != .none); - const is_non_err_tv = sema.resolveInstConst(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { + const is_non_err_val = sema.resolveConstValue(block, operand_src, is_non_err, "try operand inside comptime block must be comptime-known") catch |err| { if (err == error.AnalysisFail and block.comptime_reason != null) try block.comptime_reason.?.explain(sema, sema.err); return err; }; - if (is_non_err_tv.val.toBool()) { + if (is_non_err_val.toBool()) { break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false); } const break_data = (try sema.analyzeBodyBreak(block, inline_body)) orelse @@ -1684,7 +1722,7 @@ fn analyzeBodyInner( const extra = sema.code.extraData(Zir.Inst.DeferErrCode, inst_data.payload_index).data; const defer_body = sema.code.extra[extra.index..][0..extra.len]; const err_code = try sema.resolveInst(inst_data.err_code); - sema.inst_map.putAssumeCapacity(extra.remapped_err_code, err_code); + map.putAssumeCapacity(extra.remapped_err_code, err_code); const break_inst = sema.analyzeBodyInner(block, defer_body) catch |err| switch (err) { error.ComptimeBreak => sema.comptime_break_inst, else => |e| return e, @@ -1693,8 +1731,12 @@ fn analyzeBodyInner( break :blk Air.Inst.Ref.void_value; }, }; - if (sema.typeOf(air_inst).isNoReturn()) + if (sema.isNoReturn(air_inst)) { + // We're going to assume that the body itself is noreturn, so let's ensure that now + assert(block.instructions.items.len > 0); + assert(sema.isNoReturn(Air.indexToRef(block.instructions.items[block.instructions.items.len - 1]))); break always_noreturn; + } map.putAssumeCapacity(inst, air_inst); i += 1; }; @@ -1703,7 +1745,7 @@ fn analyzeBodyInner( const noreturn_inst = block.instructions.popOrNull(); while (dbg_block_begins > 0) { dbg_block_begins -= 1; - if (block.is_comptime or sema.mod.comp.bin_file.options.strip) continue; + if (block.is_comptime or 
mod.comp.bin_file.options.strip) continue; _ = try block.addInst(.{ .tag = .dbg_block_end, @@ -1713,6 +1755,8 @@ fn analyzeBodyInner( if (noreturn_inst) |some| try block.instructions.append(sema.gpa, some); if (!wip_captures.finalized) { + // We've updated the capture scope due to a `repeat` instruction where + // the body had a capture; finalize our child scope and reset try wip_captures.finalize(); block.wip_capture_scope = parent_capture_scope; } @@ -1720,20 +1764,23 @@ fn analyzeBodyInner( return result; } -pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { - var i: usize = @enumToInt(zir_ref); - - // First section of indexes correspond to a set number of constant values. - if (i < Zir.Inst.Ref.typed_value_map.len) { - // We intentionally map the same indexes to the same values between ZIR and AIR. - return zir_ref; +pub fn resolveInstAllowNone(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { + if (zir_ref == .none) { + return .none; + } else { + return resolveInst(sema, zir_ref); } - i -= Zir.Inst.Ref.typed_value_map.len; +} - // Finally, the last section of indexes refers to the map of ZIR=>AIR. - const inst = sema.inst_map.get(@intCast(u32, i)).?; - const ty = sema.typeOf(inst); - if (ty.tag() == .generic_poison) return error.GenericPoison; +pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) !Air.Inst.Ref { + assert(zir_ref != .none); + const i = @enumToInt(zir_ref); + // First section of indexes correspond to a set number of constant values. + // We intentionally map the same indexes to the same values between ZIR and AIR. + if (i < InternPool.static_len) return @intToEnum(Air.Inst.Ref, i); + // The last section of indexes refers to the map of ZIR => AIR. + const inst = sema.inst_map.get(i - InternPool.static_len).?; + if (inst == .generic_poison) return error.GenericPoison; return inst; } @@ -1759,18 +1806,31 @@ pub fn resolveConstString( reason: []const u8, ) ![]u8 { const air_inst = try sema.resolveInst(zir_ref); - const wanted_type = Type.initTag(.const_slice_u8); + const wanted_type = Type.slice_const_u8; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst, reason); return val.toAllocatedBytes(wanted_type, sema.arena, sema.mod); } -pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { - assert(zir_ref != .var_args_param); +pub fn resolveConstStringIntern( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + zir_ref: Zir.Inst.Ref, + reason: []const u8, +) !InternPool.NullTerminatedString { const air_inst = try sema.resolveInst(zir_ref); - assert(air_inst != .var_args_param); + const wanted_type = Type.slice_const_u8; + const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); + const val = try sema.resolveConstValue(block, src, coerced_inst, reason); + return val.toIpString(wanted_type, sema.mod); +} + +pub fn resolveType(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { + const air_inst = try sema.resolveInst(zir_ref); + assert(air_inst != .var_args_param_type); const ty = try sema.analyzeAsType(block, src, air_inst); - if (ty.tag() == .generic_poison) return error.GenericPoison; + if (ty.isGenericPoison()) return error.GenericPoison; return ty; } @@ -1780,45 +1840,48 @@ fn analyzeAsType( src: LazySrcLoc, air_inst: Air.Inst.Ref, ) !Type { - const wanted_type = Type.initTag(.type); + const wanted_type = Type.type; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); 
const val = try sema.resolveConstValue(block, src, coerced_inst, "types must be comptime-known"); - var buffer: Value.ToTypeBuffer = undefined; - const ty = val.toType(&buffer); - return ty.copy(sema.arena); + return val.toType(); } pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) !void { - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return; + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; + if (!mod.backendSupportsFeature(.error_return_trace)) return; assert(!block.is_comptime); var err_trace_block = block.makeSubBlock(); - defer err_trace_block.instructions.deinit(sema.gpa); + defer err_trace_block.instructions.deinit(gpa); const src: LazySrcLoc = .unneeded; // var addrs: [err_return_trace_addr_count]usize = undefined; const err_return_trace_addr_count = 32; - const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, sema.mod); - const addrs_ptr = try err_trace_block.addTy(.alloc, try Type.Tag.single_mut_pointer.create(sema.arena, addr_arr_ty)); + const addr_arr_ty = try Type.array(sema.arena, err_return_trace_addr_count, null, Type.usize, mod); + const addrs_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(addr_arr_ty)); // var st: StackTrace = undefined; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const st_ptr = try err_trace_block.addTy(.alloc, try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty)); + const st_ptr = try err_trace_block.addTy(.alloc, try mod.singleMutPtrType(stack_trace_ty)); // st.instruction_addresses = &addrs; - const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "instruction_addresses", src, true); + const instruction_addresses_field_name = try ip.getOrPutString(gpa, "instruction_addresses"); + const addr_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, instruction_addresses_field_name, src, true); try sema.storePtr2(&err_trace_block, src, addr_field_ptr, src, addrs_ptr, src, .store); // st.index = 0; - const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, "index", src, true); + const index_field_name = try ip.getOrPutString(gpa, "index"); + const index_field_ptr = try sema.fieldPtr(&err_trace_block, src, st_ptr, index_field_name, src, true); try sema.storePtr2(&err_trace_block, src, index_field_ptr, src, .zero_usize, src, .store); // @errorReturnTrace() = &st; _ = try err_trace_block.addUnOp(.set_err_return_trace, st_ptr); - try block.instructions.insertSlice(sema.gpa, last_arg_index, err_trace_block.instructions.items); + try block.instructions.insertSlice(gpa, last_arg_index, err_trace_block.instructions.items); } /// May return Value Tags: `variable`, `undef`. 
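Pieced together from its inline comments, the AIR that `setupErrorReturnTrace` builds above corresponds to roughly this Zig prologue (a reconstruction; `StackTrace` stands for the `std.builtin.StackTrace` type fetched via `getBuiltinType`):

    var addrs: [32]usize = undefined; // err_return_trace_addr_count == 32
    var st: StackTrace = undefined;
    st.instruction_addresses = &addrs;
    st.index = 0;
    // ...after which @errorReturnTrace() is pointed at &st.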
@@ -1832,7 +1895,7 @@ fn resolveValue( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| { - if (val.tag() == .generic_poison) return error.GenericPoison; + if (val.isGenericPoison()) return error.GenericPoison; return val; } return sema.failWithNeededComptime(block, src, reason); @@ -1848,10 +1911,12 @@ fn resolveConstMaybeUndefVal( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(inst)) |val| { - switch (val.tag()) { - .variable => return sema.failWithNeededComptime(block, src, reason), + switch (val.toIntern()) { .generic_poison => return error.GenericPoison, - else => return val, + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .variable => return sema.failWithNeededComptime(block, src, reason), + else => return val, + }, } } return sema.failWithNeededComptime(block, src, reason); @@ -1867,16 +1932,31 @@ fn resolveConstValue( reason: []const u8, ) CompileError!Value { if (try sema.resolveMaybeUndefValAllowVariables(air_ref)) |val| { - switch (val.tag()) { - .undef => return sema.failWithUseOfUndef(block, src), - .variable => return sema.failWithNeededComptime(block, src, reason), + switch (val.toIntern()) { .generic_poison => return error.GenericPoison, - else => return val, + .undef => return sema.failWithUseOfUndef(block, src), + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .undef => return sema.failWithUseOfUndef(block, src), + .variable => return sema.failWithNeededComptime(block, src, reason), + else => return val, + }, } } return sema.failWithNeededComptime(block, src, reason); } +/// Will not return Value Tags: `variable`, `undef`. Instead they will emit compile errors. +/// Lazy values are recursively resolved. +fn resolveConstLazyValue( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, + reason: []const u8, +) CompileError!Value { + return sema.resolveLazyValue(try sema.resolveConstValue(block, src, air_ref, reason)); +} + /// Value Tag `variable` causes this function to return `null`. /// Value Tag `undef` causes this function to return a compile error. fn resolveDefinedValue( @@ -1885,8 +1965,9 @@ fn resolveDefinedValue( src: LazySrcLoc, air_ref: Air.Inst.Ref, ) CompileError!?Value { + const mod = sema.mod; if (try sema.resolveMaybeUndefVal(air_ref)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { if (block.is_typeof) return null; return sema.failWithUseOfUndef(block, src); } @@ -1903,34 +1984,53 @@ fn resolveMaybeUndefVal( inst: Air.Inst.Ref, ) CompileError!?Value { const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null; - switch (val.tag()) { - .variable => return null, + switch (val.ip_index) { .generic_poison => return error.GenericPoison, - else => return val, + .none => return val, + else => switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .variable => return null, + else => return val, + }, } } +/// Value Tag `variable` causes this function to return `null`. +/// Value Tag `undef` causes this function to return the Value. +/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. +/// Lazy values are recursively resolved. +fn resolveMaybeUndefLazyVal( + sema: *Sema, + inst: Air.Inst.Ref, +) CompileError!?Value { + return try sema.resolveLazyValue((try sema.resolveMaybeUndefVal(inst)) orelse return null); +} + /// Value Tag `variable` results in `null`. /// Value Tag `undef` results in the Value. 
/// Value Tag `generic_poison` causes `error.GenericPoison` to be returned. /// Value Tag `decl_ref` and `decl_ref_mut` or any nested such value results in `null`. +/// Lazy values are recursively resolved. fn resolveMaybeUndefValIntable( sema: *Sema, inst: Air.Inst.Ref, ) CompileError!?Value { const val = (try sema.resolveMaybeUndefValAllowVariables(inst)) orelse return null; var check = val; - while (true) switch (check.tag()) { - .variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return null, - .field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr, - .elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr, - .eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr, + while (true) switch (check.ip_index) { .generic_poison => return error.GenericPoison, - else => { - try sema.resolveLazyValue(val); - return val; + .none => break, + else => switch (sema.mod.intern_pool.indexToKey(check.toIntern())) { + .variable => return null, + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl, .comptime_field => return null, + .int => break, + .eu_payload, .opt_payload => |base| check = base.toValue(), + .elem, .field => |base_index| check = base_index.base.toValue(), + }, + else => break, }, }; + return try sema.resolveLazyValue(val); } /// Returns all Value tags including `variable` and `undef`. @@ -1949,35 +2049,33 @@ fn resolveMaybeUndefValAllowVariablesMaybeRuntime( inst: Air.Inst.Ref, make_runtime: *bool, ) CompileError!?Value { + assert(inst != .none); // First section of indexes correspond to a set number of constant values. - var i: usize = @enumToInt(inst); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].val; + const int = @enumToInt(inst); + if (int < InternPool.static_len) { + return @intToEnum(InternPool.Index, int).toValue(); } - i -= Air.Inst.Ref.typed_value_map.len; + const i = int - InternPool.static_len; const air_tags = sema.air_instructions.items(.tag); if (try sema.typeHasOnePossibleValue(sema.typeOf(inst))) |opv| { - if (air_tags[i] == .constant) { - const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; - const val = sema.air_values.items[ty_pl.payload]; - if (val.tag() == .variable) return val; + if (air_tags[i] == .interned) { + const interned = sema.air_instructions.items(.data)[i].interned; + const val = interned.toValue(); + if (val.getVariable(sema.mod) != null) return val; } return opv; } - switch (air_tags[i]) { - .constant => { - const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; - const val = sema.air_values.items[ty_pl.payload]; - if (val.tag() == .runtime_value) make_runtime.* = true; - if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; - return val; - }, - .const_ty => { - return try sema.air_instructions.items(.data)[i].ty.toValue(sema.arena); - }, + const air_datas = sema.air_instructions.items(.data); + const val = switch (air_tags[i]) { + .inferred_alloc => unreachable, + .inferred_alloc_comptime => unreachable, + .interned => air_datas[i].interned.toValue(), else => return null, - } + }; + if (val.isRuntimeValue(sema.mod)) make_runtime.* = true; + if (val.isPtrToThreadLocal(sema.mod)) make_runtime.* = true; + return val; } fn failWithNeededComptime(sema: *Sema, block: *Block, src: LazySrcLoc, reason: []const u8) CompileError { @@ -2010,13 +2108,14 @@ fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, opt } fn failWithArrayInitNotSupported(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) 
CompileError { + const mod = sema.mod; const msg = msg: { const msg = try sema.errMsg(block, src, "type '{}' does not support array initialization syntax", .{ - ty.fmt(sema.mod), + ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); - if (ty.isSlice()) { - try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2().fmt(sema.mod)}); + if (ty.isSlice(mod)) { + try sema.errNote(block, src, msg, "inferred array length is specified with an underscore: '[_]{}'", .{ty.elemType2(mod).fmt(mod)}); } break :msg msg; }; @@ -2042,7 +2141,8 @@ fn failWithErrorSetCodeMissing( } fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: Type, val: Value, vector_index: usize) CompileError { - if (int_ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (int_ty.zigTypeTag(mod) == .Vector) { const msg = msg: { const msg = try sema.errMsg(block, src, "overflow of vector type '{}' with value '{}'", .{ int_ty.fmt(sema.mod), val.fmtValue(int_ty, sema.mod), @@ -2059,16 +2159,17 @@ fn failWithIntegerOverflow(sema: *Sema, block: *Block, src: LazySrcLoc, int_ty: } fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazySrcLoc, container_ty: Type, field_index: usize) CompileError { + const mod = sema.mod; const msg = msg: { const msg = try sema.errMsg(block, init_src, "value stored in comptime field does not match the default value of the field", .{}); errdefer msg.destroy(sema.gpa); - const struct_ty = container_ty.castTag(.@"struct") orelse break :msg msg; - const default_value_src = struct_ty.data.fieldSrcLoc(sema.mod, .{ + const struct_ty = mod.typeToStruct(container_ty) orelse break :msg msg; + const default_value_src = mod.fieldSrcLoc(struct_ty.owner_decl, .{ .index = field_index, .range = .value, }); - try sema.mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{}); + try mod.errNoteNonLazy(default_value_src, msg, "default value set here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -2083,13 +2184,19 @@ fn failWithUseOfAsync(sema: *Sema, block: *Block, src: LazySrcLoc) CompileError return sema.failWithOwnedErrorMsg(msg); } -fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, object_ty: Type, field_name: []const u8) CompileError { - const inner_ty = if (object_ty.isSinglePointer()) object_ty.childType() else object_ty; +fn failWithInvalidFieldAccess( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + object_ty: Type, + field_name: InternPool.NullTerminatedString, +) CompileError { + const mod = sema.mod; + const inner_ty = if (object_ty.isSinglePointer(mod)) object_ty.childType(mod) else object_ty; - if (inner_ty.zigTypeTag() == .Optional) opt: { - var buf: Type.Payload.ElemType = undefined; - const child_ty = inner_ty.optionalChild(&buf); - if (!typeSupportsFieldAccess(child_ty, field_name)) break :opt; + if (inner_ty.zigTypeTag(mod) == .Optional) opt: { + const child_ty = inner_ty.optionalChild(mod); + if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :opt; const msg = msg: { const msg = try sema.errMsg(block, src, "optional type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -2097,9 +2204,9 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (inner_ty.zigTypeTag() == .ErrorUnion) err: { - const child_ty = inner_ty.errorUnionPayload(); - if 
(!typeSupportsFieldAccess(child_ty, field_name)) break :err; + } else if (inner_ty.zigTypeTag(mod) == .ErrorUnion) err: { + const child_ty = inner_ty.errorUnionPayload(mod); + if (!typeSupportsFieldAccess(mod, child_ty, field_name)) break :err; const msg = msg: { const msg = try sema.errMsg(block, src, "error union type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -2111,15 +2218,16 @@ fn failWithInvalidFieldAccess(sema: *Sema, block: *Block, src: LazySrcLoc, objec return sema.fail(block, src, "type '{}' does not support field access", .{object_ty.fmt(sema.mod)}); } -fn typeSupportsFieldAccess(ty: Type, field_name: []const u8) bool { - switch (ty.zigTypeTag()) { - .Array => return mem.eql(u8, field_name, "len"), +fn typeSupportsFieldAccess(mod: *const Module, ty: Type, field_name: InternPool.NullTerminatedString) bool { + const ip = &mod.intern_pool; + switch (ty.zigTypeTag(mod)) { + .Array => return ip.stringEqlSlice(field_name, "len"), .Pointer => { - const ptr_info = ty.ptrInfo().data; + const ptr_info = ty.ptrInfo(mod); if (ptr_info.size == .Slice) { - return mem.eql(u8, field_name, "ptr") or mem.eql(u8, field_name, "len"); - } else if (ptr_info.pointee_type.zigTypeTag() == .Array) { - return mem.eql(u8, field_name, "len"); + return ip.stringEqlSlice(field_name, "ptr") or ip.stringEqlSlice(field_name, "len"); + } else if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { + return ip.stringEqlSlice(field_name, "len"); } else return false; }, .Type, .Struct, .Union => return true, @@ -2139,7 +2247,7 @@ fn errNote( ) error{OutOfMemory}!void { const mod = sema.mod; const src_decl = mod.declPtr(block.src_decl); - return mod.errNoteNonLazy(src.toSrcLoc(src_decl), parent, format, args); + return mod.errNoteNonLazy(src.toSrcLoc(src_decl, mod), parent, format, args); } fn addFieldErrNote( @@ -2152,19 +2260,19 @@ fn addFieldErrNote( ) !void { @setCold(true); const mod = sema.mod; - const decl_index = container_ty.getOwnerDecl(); + const decl_index = container_ty.getOwnerDecl(mod); const decl = mod.declPtr(decl_index); const field_src = blk: { - const tree = decl.getFileScope().getTree(sema.gpa) catch |err| { + const tree = decl.getFileScope(mod).getTree(sema.gpa) catch |err| { log.err("unable to load AST to report compile error: {s}", .{@errorName(err)}); - break :blk decl.srcLoc(); + break :blk decl.srcLoc(mod); }; const container_node = decl.relativeToNodeIndex(0); const node_tags = tree.nodes.items(.tag); var buf: [2]std.zig.Ast.Node.Index = undefined; - const container_decl = tree.fullContainerDecl(&buf, container_node) orelse break :blk decl.srcLoc(); + const container_decl = tree.fullContainerDecl(&buf, container_node) orelse break :blk decl.srcLoc(mod); var it_index: usize = 0; for (container_decl.ast.members) |member_node| { @@ -2174,7 +2282,7 @@ fn addFieldErrNote( .container_field, => { if (it_index == field_index) { - break :blk decl.nodeOffsetSrcLoc(decl.nodeIndexToRelative(member_node)); + break :blk decl.nodeOffsetSrcLoc(decl.nodeIndexToRelative(member_node), mod); } it_index += 1; }, @@ -2195,7 +2303,7 @@ fn errMsg( ) error{OutOfMemory}!*Module.ErrorMsg { const mod = sema.mod; const src_decl = mod.declPtr(block.src_decl); - return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(src_decl), format, args); + return Module.ErrorMsg.create(sema.gpa, src.toSrcLoc(src_decl, mod), format, args); } pub fn fail( @@ -2212,19 +2320,19 @@ pub fn fail( fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { 
@setCold(true); const gpa = sema.gpa; + const mod = sema.mod; - if (crash_report.is_enabled and sema.mod.comp.debug_compile_errors) { + if (crash_report.is_enabled and mod.comp.debug_compile_errors) { if (err_msg.src_loc.lazy == .unneeded) return error.NeededSourceLocation; var wip_errors: std.zig.ErrorBundle.Wip = undefined; wip_errors.init(gpa) catch unreachable; - Compilation.addModuleErrorMsg(&wip_errors, err_msg.*) catch unreachable; + Compilation.addModuleErrorMsg(mod, &wip_errors, err_msg.*) catch unreachable; std.debug.print("compile error during Sema:\n", .{}); var error_bundle = wip_errors.toOwnedBundle("") catch unreachable; error_bundle.renderToStdErr(.{ .ttyconf = .no_color }); crash_report.compilerPanic("unexpected compile error occurred", null, null); } - const mod = sema.mod; ref: { errdefer err_msg.destroy(gpa); if (err_msg.src_loc.lazy == .unneeded) { @@ -2234,9 +2342,9 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { try mod.failed_files.ensureUnusedCapacity(gpa, 1); const max_references = blk: { - if (sema.mod.comp.reference_trace) |num| break :blk num; + if (mod.comp.reference_trace) |num| break :blk num; // Do not add multiple traces without explicit request. - if (sema.mod.failed_decls.count() != 0) break :ref; + if (mod.failed_decls.count() != 0) break :ref; break :blk default_reference_trace_len; }; @@ -2245,7 +2353,7 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { defer reference_stack.deinit(); // Avoid infinite loops. - var seen = std.AutoHashMap(Module.Decl.Index, void).init(gpa); + var seen = std.AutoHashMap(Decl.Index, void).init(gpa); defer seen.deinit(); var cur_reference_trace: u32 = 0; @@ -2254,13 +2362,16 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError { if (gop.found_existing) break; if (cur_reference_trace < max_references) { const decl = sema.mod.declPtr(ref.referencer); - try reference_stack.append(.{ .decl = decl.name, .src_loc = ref.src.toSrcLoc(decl) }); + try reference_stack.append(.{ + .decl = decl.name.toOptional(), + .src_loc = ref.src.toSrcLoc(decl, mod), + }); } referenced_by = ref.referencer; } if (sema.mod.comp.reference_trace == null and cur_reference_trace > 0) { try reference_stack.append(.{ - .decl = null, + .decl = .none, .src_loc = undefined, .hidden = 0, }); @@ -2352,10 +2463,10 @@ fn analyzeAsInt( dest_ty: Type, reason: []const u8, ) !u64 { + const mod = sema.mod; const coerced = try sema.coerce(block, dest_ty, air_ref, src); const val = try sema.resolveConstValue(block, src, coerced, reason); - const target = sema.mod.getTarget(); - return (try val.getUnsignedIntAdvanced(target, sema)).?; + return (try val.getUnsignedIntAdvanced(mod, sema)).?; } // Returns a compile error if the value has tag `variable`. 
See `resolveInstValue` for @@ -2396,73 +2507,77 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const pointee_ty = try sema.resolveType(block, src, extra.lhs); const ptr = try sema.resolveInst(extra.rhs); - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const addr_space = target_util.defaultAddressSpace(target, .local); if (Air.refToIndex(ptr)) |ptr_inst| { - if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) { - const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - switch (ptr_val.tag()) { - .inferred_alloc => { - const inferred_alloc = &ptr_val.castTag(.inferred_alloc).?.data; - // Add the stored instruction to the set we will use to resolve peer types - // for the inferred allocation. - // This instruction will not make it to codegen; it is only to participate - // in the `stored_inst_list` of the `inferred_alloc`. - var trash_block = block.makeSubBlock(); - defer trash_block.instructions.deinit(sema.gpa); - const operand = try trash_block.addBitCast(pointee_ty, .void_value); + switch (sema.air_instructions.items(.tag)[ptr_inst]) { + .inferred_alloc => { + const ia1 = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc; + const ia2 = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?; + // Add the stored instruction to the set we will use to resolve peer types + // for the inferred allocation. + // This instruction will not make it to codegen; it is only to participate + // in the `stored_inst_list` of the `inferred_alloc`. + var trash_block = block.makeSubBlock(); + defer trash_block.instructions.deinit(sema.gpa); + const operand = try trash_block.addBitCast(pointee_ty, .void_value); - const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = pointee_ty, - .@"align" = inferred_alloc.alignment, - .@"addrspace" = addr_space, - }); - const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); + const ptr_ty = try mod.ptrType(.{ + .child = pointee_ty.toIntern(), + .flags = .{ + .alignment = ia1.alignment, + .address_space = addr_space, + }, + }); + const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr); - try inferred_alloc.prongs.append(sema.arena, .{ - .stored_inst = operand, - .placeholder = Air.refToIndex(bitcasted_ptr).?, - }); + try ia2.prongs.append(sema.arena, .{ + .stored_inst = operand, + .placeholder = Air.refToIndex(bitcasted_ptr).?, + }); - return bitcasted_ptr; - }, - .inferred_alloc_comptime => { - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; - // There will be only one coerce_result_ptr because we are running at comptime. - // The alloc will turn into a Decl. 
- var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - iac.data.decl_index = try anon_decl.finish( - try pointee_ty.copy(anon_decl.arena()), - Value.undef, - iac.data.alignment, - ); - if (iac.data.alignment != 0) { - try sema.resolveTypeLayout(pointee_ty); - } - const ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = pointee_ty, - .@"align" = iac.data.alignment, - .@"addrspace" = addr_space, - }); - try sema.maybeQueueFuncBodyAnalysis(iac.data.decl_index); - return sema.addConstant( - ptr_ty, - try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .decl_index = iac.data.decl_index, - .runtime_index = block.runtime_index, - }), - ); - }, - else => {}, - } + return bitcasted_ptr; + }, + .inferred_alloc_comptime => { + const alignment = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime.alignment; + // There will be only one coerce_result_ptr because we are running at comptime. + // The alloc will turn into a Decl. + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + const decl_index = try anon_decl.finish( + pointee_ty, + (try mod.intern(.{ .undef = pointee_ty.toIntern() })).toValue(), + alignment.toByteUnits(0), + ); + sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime.decl_index = decl_index; + if (alignment != .none) { + try sema.resolveTypeLayout(pointee_ty); + } + const ptr_ty = try mod.ptrType(.{ + .child = pointee_ty.toIntern(), + .flags = .{ + .alignment = alignment, + .address_space = addr_space, + }, + }); + try sema.maybeQueueFuncBodyAnalysis(decl_index); + try sema.comptime_mutable_decls.append(decl_index); + return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_ty.toIntern(), + .addr = .{ .mut_decl = .{ + .decl = decl_index, + .runtime_index = block.runtime_index, + } }, + } })).toValue()); + }, + else => {}, } } @@ -2487,6 +2602,7 @@ fn coerceResultPtr( dummy_operand: Air.Inst.Ref, trash_block: *Block, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const target = sema.mod.getTarget(); const addr_space = target_util.defaultAddressSpace(target, .local); const pointee_ty = sema.typeOf(dummy_operand); @@ -2530,7 +2646,7 @@ fn coerceResultPtr( return sema.addConstant(ptr_ty, ptr_val); } if (pointee_ty.eql(Type.null, sema.mod)) { - const opt_ty = sema.typeOf(new_ptr).childType(); + const opt_ty = sema.typeOf(new_ptr).childType(mod); const null_inst = try sema.addConstant(opt_ty, Value.null); _ = try block.addBinOp(.store, new_ptr, null_inst); return Air.Inst.Ref.void_value; @@ -2563,7 +2679,7 @@ fn coerceResultPtr( .@"addrspace" = addr_space, }); if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| { - new_ptr = try sema.addConstant(ptr_operand_ty, ptr_val); + new_ptr = try sema.addConstant(ptr_operand_ty, try mod.getCoerced(ptr_val, ptr_operand_ty)); } else { new_ptr = try sema.bitCast(block, ptr_operand_ty, new_ptr, src, null); } @@ -2600,8 +2716,10 @@ pub fn analyzeStructDecl( sema: *Sema, new_decl: *Decl, inst: Zir.Inst.Index, - struct_obj: *Module.Struct, + struct_index: Module.Struct.Index, ) SemaError!void { + const mod = sema.mod; + const struct_obj = mod.structPtr(struct_index); const extended = sema.code.instructions.items(.data)[inst].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -2630,7 +2748,7 @@ pub fn analyzeStructDecl( } } - _ = try sema.mod.scanNamespace(&struct_obj.namespace, extra_index, decls_len, new_decl); + _ = try mod.scanNamespace(struct_obj.namespace, 
extra_index, decls_len, new_decl); } fn zirStructDecl( @@ -2639,28 +2757,35 @@ fn zirStructDecl( extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); break :blk LazySrcLoc.nodeOffset(node_offset); } else sema.src; - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the struct type gains an + // InternPool index. - const mod = sema.mod; - const struct_obj = try new_decl_arena_allocator.create(Module.Struct); - const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj); - const struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty); const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = struct_val, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, small.name_strategy, "struct", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - struct_obj.* = .{ + + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); + + const struct_index = try mod.createStruct(.{ .owner_decl = new_decl_index, .fields = .{}, .zir_index = inst, @@ -2668,18 +2793,25 @@ fn zirStructDecl( .status = .none, .known_non_opv = undefined, .is_tuple = small.is_tuple, - .namespace = .{ - .parent = block.namespace, - .ty = struct_ty, - .file_scope = block.getFileScope(), - }, - }; - std.log.scoped(.module).debug("create struct {*} owned by {*} ({s})", .{ - &struct_obj.namespace, new_decl, new_decl.name, + .namespace = new_namespace_index, }); - try sema.analyzeStructDecl(new_decl, inst, struct_obj); - try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + errdefer mod.destroyStruct(struct_index); + + const struct_ty = try mod.intern_pool.get(gpa, .{ .struct_type = .{ + .index = struct_index.toOptional(), + .namespace = new_namespace_index.toOptional(), + } }); + // TODO: figure out InternPool removals for incremental compilation + //errdefer mod.intern_pool.remove(struct_ty); + + new_decl.ty = Type.type; + new_decl.val = struct_ty.toValue(); + new_namespace.ty = struct_ty.toType(); + + try sema.analyzeStructDecl(new_decl, inst, struct_index); + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn createAnonymousDeclTypeNamed( @@ -2692,6 +2824,7 @@ fn createAnonymousDeclTypeNamed( inst: ?Zir.Inst.Index, ) !Decl.Index { const mod = sema.mod; + const gpa = sema.gpa; const namespace = block.namespace; const src_scope = block.wip_capture_scope; const src_decl = mod.declPtr(block.src_decl); @@ -2707,16 +2840,15 @@ fn createAnonymousDeclTypeNamed( // semantically analyzed. // This name is also used as the key in the parent namespace so it cannot be // renamed. 
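Concretely, the `.anon` strategy handled below produces names of the shape `{parent}__{prefix}_{index}`, while the other strategies derive friendlier names. Sample outputs for an anonymous struct created inside a decl named `Foo` (decl index and argument value invented for illustration):

    .anon   => "Foo__struct_142" // parent name + anon_prefix + @enumToInt(new_decl_index)
    .parent => "Foo"             // reuses the parent decl's name
    .func   => "Foo(123)"        // parent name plus comptime argument values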
- const name = try std.fmt.allocPrintZ(sema.gpa, "{s}__{s}_{d}", .{ - src_decl.name, anon_prefix, @enumToInt(new_decl_index), - }); - errdefer sema.gpa.free(name); + + const name = mod.intern_pool.getOrPutStringFmt(gpa, "{}__{s}_{d}", .{ + src_decl.name.fmt(&mod.intern_pool), anon_prefix, @enumToInt(new_decl_index), + }) catch unreachable; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, .parent => { - const name = try sema.gpa.dupeZ(u8, mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); - errdefer sema.gpa.free(name); + const name = mod.declPtr(block.src_decl).name; try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, @@ -2724,10 +2856,11 @@ fn createAnonymousDeclTypeNamed( const fn_info = sema.code.getFnInfo(sema.func.?.zir_body_inst); const zir_tags = sema.code.instructions.items(.tag); - var buf = std.ArrayList(u8).init(sema.gpa); + var buf = std.ArrayList(u8).init(gpa); defer buf.deinit(); - try buf.appendSlice(mem.sliceTo(sema.mod.declPtr(block.src_decl).name, 0)); - try buf.appendSlice("("); + + const writer = buf.writer(); + try writer.print("{}(", .{mod.declPtr(block.src_decl).name.fmt(&mod.intern_pool)}); var arg_i: usize = 0; for (fn_info.param_body) |zir_inst| switch (zir_tags[zir_inst]) { @@ -2741,8 +2874,8 @@ fn createAnonymousDeclTypeNamed( const arg_val = sema.resolveConstMaybeUndefVal(block, .unneeded, arg, "") catch return sema.createAnonymousDeclTypeNamed(block, src, typed_value, .anon, anon_prefix, null); - if (arg_i != 0) try buf.appendSlice(","); - try buf.writer().print("{}", .{arg_val.fmtValue(sema.typeOf(arg), sema.mod)}); + if (arg_i != 0) try writer.writeByte(','); + try writer.print("{}", .{arg_val.fmtValue(sema.typeOf(arg), sema.mod)}); arg_i += 1; continue; @@ -2750,9 +2883,8 @@ fn createAnonymousDeclTypeNamed( else => continue, }; - try buf.appendSlice(")"); - const name = try buf.toOwnedSliceSentinel(0); - errdefer sema.gpa.free(name); + try writer.writeByte(')'); + const name = try mod.intern_pool.getOrPutString(gpa, buf.items); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; }, @@ -2765,10 +2897,9 @@ fn createAnonymousDeclTypeNamed( .dbg_var_ptr, .dbg_var_val => { if (zir_data[i].str_op.operand != ref) continue; - const name = try std.fmt.allocPrintZ(sema.gpa, "{s}.{s}", .{ - src_decl.name, zir_data[i].str_op.getStr(sema.code), + const name = try mod.intern_pool.getOrPutStringFmt(gpa, "{}.{s}", .{ + src_decl.name.fmt(&mod.intern_pool), zir_data[i].str_op.getStr(sema.code), }); - errdefer sema.gpa.free(name); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, namespace, typed_value, name); return new_decl_index; @@ -2825,53 +2956,28 @@ fn zirEnumDecl( break :blk decls_len; } else 0; + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the enum type gains an + // InternPool index. 
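The `undefined`-placeholder dance described above (and repeated for unions and opaques later in this patch) looks like this in miniature (pseudo-code; the helper names are invented). The decl, the namespace, and the interned type form a reference cycle, so each is created with placeholders that are patched once the InternPool index exists:

    const decl = try createDecl(.{ .ty = Type.noreturn });     // placeholder
    const ns = try createNamespace(.{ .ty = undefined });      // placeholder
    const ty = try intern(.{ .decl = decl, .namespace = ns }); // real index
    declPtr(decl).ty = Type.type;
    declPtr(decl).val = ty.toValue();
    namespacePtr(ns).ty = ty.toType();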
+ var done = false; - - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer if (!done) new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull); - const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull); - enum_ty_payload.* = .{ - .base = .{ .tag = if (small.nonexhaustive) .enum_nonexhaustive else .enum_full }, - .data = enum_obj, - }; - const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = enum_val, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, small.name_strategy, "enum", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer if (!done) mod.abortAnonDecl(new_decl_index); - enum_obj.* = .{ - .owner_decl = new_decl_index, - .tag_ty = Type.null, - .tag_ty_inferred = true, - .fields = .{}, - .values = .{}, - .namespace = .{ - .parent = block.namespace, - .ty = enum_ty, - .file_scope = block.getFileScope(), - }, - }; - std.log.scoped(.module).debug("create enum {*} owned by {*} ({s})", .{ - &enum_obj.namespace, new_decl, new_decl.name, + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer if (!done) mod.destroyNamespace(new_namespace_index); - try new_decl.finalizeNewArena(&new_decl_arena); - const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index); - done = true; - - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = new_decl.value_arena.?.acquire(gpa, &decl_arena); - defer new_decl.value_arena.?.release(&decl_arena); - - extra_index = try mod.scanNamespace(&enum_obj.namespace, extra_index, decls_len, new_decl); + extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body.len; @@ -2880,7 +2986,34 @@ fn zirEnumDecl( const body_end = extra_index; extra_index += bit_bags_count; - { + const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| { + if (bag != 0) break true; + } else false; + + const incomplete_enum = try mod.intern_pool.getIncompleteEnum(gpa, .{ + .decl = new_decl_index, + .namespace = new_namespace_index.toOptional(), + .fields_len = fields_len, + .has_values = any_values, + .tag_mode = if (small.nonexhaustive) + .nonexhaustive + else if (tag_type_ref == .none) + .auto + else + .explicit, + }); + // TODO: figure out InternPool removals for incremental compilation + //errdefer if (!done) mod.intern_pool.remove(incomplete_enum.index); + + new_decl.ty = Type.type; + new_decl.val = incomplete_enum.index.toValue(); + new_namespace.ty = incomplete_enum.index.toType(); + + const decl_val = try sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + done = true; + + const int_tag_ty = ty: { // We create a block for the field type instructions because they // may need to reference Decls from inside the enum namespace. 
// Within the field type, default value, and alignment expressions, the "owner decl" @@ -2896,21 +3029,27 @@ fn zirEnumDecl( } const prev_owner_func = sema.owner_func; + const prev_owner_func_index = sema.owner_func_index; sema.owner_func = null; + sema.owner_func_index = .none; defer sema.owner_func = prev_owner_func; + defer sema.owner_func_index = prev_owner_func_index; const prev_func = sema.func; + const prev_func_index = sema.func_index; sema.func = null; + sema.func_index = .none; defer sema.func = prev_func; + defer sema.func_index = prev_func_index; - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, new_decl.src_scope); defer wip_captures.deinit(); var enum_block: Block = .{ .parent = null, .sema = sema, .src_decl = new_decl_index, - .namespace = &enum_obj.namespace, + .namespace = new_namespace_index, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -2926,43 +3065,29 @@ fn zirEnumDecl( if (tag_type_ref != .none) { const ty = try sema.resolveType(block, tag_ty_src, tag_type_ref); - if (ty.zigTypeTag() != .Int and ty.zigTypeTag() != .ComptimeInt) { + if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) { return sema.fail(block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(sema.mod)}); } - enum_obj.tag_ty = try ty.copy(decl_arena_allocator); - enum_obj.tag_ty_inferred = false; + incomplete_enum.setTagType(&mod.intern_pool, ty.toIntern()); + break :ty ty; } else if (fields_len == 0) { - enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, 0); - enum_obj.tag_ty_inferred = true; + break :ty try mod.intType(.unsigned, 0); } else { const bits = std.math.log2_int_ceil(usize, fields_len); - enum_obj.tag_ty = try Type.Tag.int_unsigned.create(decl_arena_allocator, bits); - enum_obj.tag_ty_inferred = true; + break :ty try mod.intType(.unsigned, bits); } - } + }; - if (small.nonexhaustive and enum_obj.tag_ty.zigTypeTag() != .ComptimeInt) { - if (fields_len > 1 and std.math.log2_int(u64, fields_len) == enum_obj.tag_ty.bitSize(sema.mod.getTarget())) { + if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) { + if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(mod)) { return sema.fail(block, src, "non-exhaustive enum specifies every value", .{}); } } - try enum_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); - const any_values = for (sema.code.extra[body_end..][0..bit_bags_count]) |bag| { - if (bag != 0) break true; - } else false; - if (any_values) { - try enum_obj.values.ensureTotalCapacityContext(decl_arena_allocator, fields_len, .{ - .ty = enum_obj.tag_ty, - .mod = mod, - }); - } - var bit_bag_index: usize = body_end; var cur_bit_bag: u32 = undefined; var field_i: u32 = 0; var last_tag_val: ?Value = null; - var tag_val_buf: Value.Payload.U64 = undefined; while (field_i < fields_len) : (field_i += 1) { if (field_i % 32 == 0) { cur_bit_bag = sema.code.extra[bit_bag_index]; @@ -2977,15 +3102,12 @@ fn zirEnumDecl( // doc comment extra_index += 1; - // This string needs to outlive the ZIR code. 
- const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); - - const gop_field = enum_obj.fields.getOrPutAssumeCapacity(field_name); - if (gop_field.found_existing) { - const field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const other_field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = gop_field.index }).lazy; + const field_name = try mod.intern_pool.getOrPutString(gpa, field_name_zir); + if (try incomplete_enum.addFieldName(&mod.intern_pool, gpa, field_name)) |other_index| { + const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy; + const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { - const msg = try sema.errMsg(block, field_src, "duplicate enum field '{s}'", .{field_name}); + const msg = try sema.errMsg(block, field_src, "duplicate enum field '{s}'", .{field_name_zir}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other field here", .{}); break :msg msg; @@ -2993,13 +3115,13 @@ fn zirEnumDecl( return sema.failWithOwnedErrorMsg(msg); } - if (has_tag_value) { + const tag_overflow = if (has_tag_value) overflow: { const tag_val_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const tag_inst = try sema.resolveInst(tag_val_ref); - const tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) { + last_tag_val = sema.resolveConstValue(block, .unneeded, tag_inst, "") catch |err| switch (err) { error.NeededSourceLocation => { - const value_src = enum_obj.fieldSrcLoc(sema.mod, .{ + const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = .value, }).lazy; @@ -3008,63 +3130,56 @@ fn zirEnumDecl( }, else => |e| return e, }; - last_tag_val = tag_val; - const copied_tag_val = try tag_val.copy(decl_arena_allocator); - const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{ - .ty = enum_obj.tag_ty, - .mod = mod, - }); - if (gop_val.found_existing) { - const value_src = enum_obj.fieldSrcLoc(sema.mod, .{ + if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true; + last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty); + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| { + const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = .value, }).lazy; - const other_field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = gop_val.index }).lazy; + const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { - const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{tag_val.fmtValue(enum_obj.tag_ty, sema.mod)}); + const msg = try sema.errMsg(block, value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other occurrence here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - } else if (any_values) { - const tag_val = if (last_tag_val) |val| - try sema.intAdd(val, Value.one, enum_obj.tag_ty) + break :overflow false; + } else if (any_values) overflow: { + var overflow: ?usize = null; + last_tag_val = if (last_tag_val) |val| + try sema.intAdd(val, try mod.intValue(int_tag_ty, 1), int_tag_ty, &overflow) else - Value.zero; - last_tag_val = tag_val; - const copied_tag_val = try tag_val.copy(decl_arena_allocator); - const gop_val = 
enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{ - .ty = enum_obj.tag_ty, - .mod = mod, - }); - if (gop_val.found_existing) { - const field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const other_field_src = enum_obj.fieldSrcLoc(sema.mod, .{ .index = gop_val.index }).lazy; + try mod.intValue(int_tag_ty, 0); + if (overflow != null) break :overflow true; + if (try incomplete_enum.addFieldValue(&mod.intern_pool, gpa, last_tag_val.?.toIntern())) |other_index| { + const field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i }).lazy; + const other_field_src = mod.fieldSrcLoc(new_decl_index, .{ .index = other_index }).lazy; const msg = msg: { - const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{tag_val.fmtValue(enum_obj.tag_ty, sema.mod)}); + const msg = try sema.errMsg(block, field_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValue(int_tag_ty, sema.mod)}); errdefer msg.destroy(gpa); try sema.errNote(block, other_field_src, msg, "other occurrence here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - } else { - tag_val_buf = .{ - .base = .{ .tag = .int_u64 }, - .data = field_i, - }; - last_tag_val = Value.initPayload(&tag_val_buf.base); - } + break :overflow false; + } else overflow: { + last_tag_val = try mod.intValue(Type.comptime_int, field_i); + if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true; + last_tag_val = try mod.getCoerced(last_tag_val.?, int_tag_ty); + break :overflow false; + }; - if (!(try sema.intFitsInType(last_tag_val.?, enum_obj.tag_ty, null))) { - const value_src = enum_obj.fieldSrcLoc(sema.mod, .{ + if (tag_overflow) { + const value_src = mod.fieldSrcLoc(new_decl_index, .{ .index = field_i, .range = if (has_tag_value) .value else .name, }).lazy; const msg = try sema.errMsg(block, value_src, "enumeration value '{}' too large for type '{}'", .{ - last_tag_val.?.fmtValue(enum_obj.tag_ty, mod), enum_obj.tag_ty.fmt(mod), + last_tag_val.?.fmtValue(int_tag_ty, mod), int_tag_ty.fmt(mod), }); return sema.failWithOwnedErrorMsg(msg); } @@ -3081,6 +3196,8 @@ fn zirUnionDecl( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; + const gpa = sema.gpa; const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); var extra_index: usize = extended.operand; @@ -3100,55 +3217,60 @@ fn zirUnionDecl( break :blk decls_len; } else 0; - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); + // Because these three things each reference each other, `undefined` + // placeholders are used before being set after the union type gains an + // InternPool index. 
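For reference, the `runtime_tag` switch in the hunk below selects the union's tag mode as follows: an explicit or inferred enum tag always keeps the tag, non-auto layouts never get one, and a plain auto-layout union keeps the tag only for safety checks, so it depends on the optimize mode. Illustrative cases (field types invented):

    union(enum) { a: u32, b: f32 }  // .tagged in every build mode
    extern union { a: u32, b: f32 } // .none (layout != .Auto)
    union { a: u32, b: f32 }        // .safety in Debug/ReleaseSafe,
                                    // .none in ReleaseFast/ReleaseSmall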
- const union_obj = try new_decl_arena_allocator.create(Module.Union); - const type_tag = if (small.has_tag_type or small.auto_enum_tag) - Type.Tag.union_tagged - else if (small.layout != .Auto) - Type.Tag.@"union" - else switch (block.sema.mod.optimizeMode()) { - .Debug, .ReleaseSafe => Type.Tag.union_safety_tagged, - .ReleaseFast, .ReleaseSmall => Type.Tag.@"union", - }; - const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union); - union_payload.* = .{ - .base = .{ .tag = type_tag }, - .data = union_obj, - }; - const union_ty = Type.initPayload(&union_payload.base); - const union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty); - const mod = sema.mod; const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = union_val, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, small.name_strategy, "union", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - union_obj.* = .{ + + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), + }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); + + const union_index = try mod.createUnion(.{ .owner_decl = new_decl_index, - .tag_ty = Type.initTag(.null), + .tag_ty = Type.null, .fields = .{}, .zir_index = inst, .layout = small.layout, .status = .none, - .namespace = .{ - .parent = block.namespace, - .ty = union_ty, - .file_scope = block.getFileScope(), - }, - }; - std.log.scoped(.module).debug("create union {*} owned by {*} ({s})", .{ - &union_obj.namespace, new_decl, new_decl.name, + .namespace = new_namespace_index, }); + errdefer mod.destroyUnion(union_index); - _ = try mod.scanNamespace(&union_obj.namespace, extra_index, decls_len, new_decl); + const union_ty = try mod.intern_pool.get(gpa, .{ .union_type = .{ + .index = union_index, + .runtime_tag = if (small.has_tag_type or small.auto_enum_tag) + .tagged + else if (small.layout != .Auto) + .none + else switch (block.sema.mod.optimizeMode()) { + .Debug, .ReleaseSafe => .safety, + .ReleaseFast, .ReleaseSmall => .none, + }, + } }); + // TODO: figure out InternPool removals for incremental compilation + //errdefer mod.intern_pool.remove(union_ty); - try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + new_decl.ty = Type.type; + new_decl.val = union_ty.toValue(); + new_namespace.ty = union_ty.toType(); + + _ = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); + + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirOpaqueDecl( @@ -3161,7 +3283,6 @@ fn zirOpaqueDecl( defer tracy.end(); const mod = sema.mod; - const gpa = sema.gpa; const small = @bitCast(Zir.Inst.OpaqueDecl.Small, extended.small); var extra_index: usize = extended.operand; @@ -3177,42 +3298,42 @@ fn zirOpaqueDecl( break :blk decls_len; } else 0; - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); + // Because these three things each reference each other, `undefined` + // placeholders are used in two places before being set after the opaque + // type gains an InternPool index. 
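The construct analyzed here is an `opaque` declaration; its body may contain declarations of its own, which is why the type carries a namespace alongside its owner decl. A hypothetical example of the source form:

    const Window = opaque {
        pub extern fn window_show(w: *Window) void;
    };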
- const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque); - const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque); - opaque_ty_payload.* = .{ - .base = .{ .tag = .@"opaque" }, - .data = opaque_obj, - }; - const opaque_ty = Type.initPayload(&opaque_ty_payload.base); - const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty); const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = opaque_val, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, small.name_strategy, "opaque", inst); const new_decl = mod.declPtr(new_decl_index); new_decl.owns_tv = true; errdefer mod.abortAnonDecl(new_decl_index); - opaque_obj.* = .{ - .owner_decl = new_decl_index, - .namespace = .{ - .parent = block.namespace, - .ty = opaque_ty, - .file_scope = block.getFileScope(), - }, - }; - std.log.scoped(.module).debug("create opaque {*} owned by {*} ({s})", .{ - &opaque_obj.namespace, new_decl, new_decl.name, + const new_namespace_index = try mod.createNamespace(.{ + .parent = block.namespace.toOptional(), + .ty = undefined, + .file_scope = block.getFileScope(mod), }); + const new_namespace = mod.namespacePtr(new_namespace_index); + errdefer mod.destroyNamespace(new_namespace_index); - extra_index = try mod.scanNamespace(&opaque_obj.namespace, extra_index, decls_len, new_decl); + const opaque_ty = try mod.intern(.{ .opaque_type = .{ + .decl = new_decl_index, + .namespace = new_namespace_index, + } }); + // TODO: figure out InternPool removals for incremental compilation + //errdefer mod.intern_pool.remove(opaque_ty); - try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + new_decl.ty = Type.type; + new_decl.val = opaque_ty.toValue(); + new_namespace.ty = opaque_ty.toType(); + + extra_index = try mod.scanNamespace(new_namespace_index, extra_index, decls_len, new_decl); + + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirErrorSetDecl( @@ -3224,48 +3345,39 @@ fn zirErrorSetDecl( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index); - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const error_set = try new_decl_arena_allocator.create(Module.ErrorSet); - const error_set_ty = try Type.Tag.error_set.create(new_decl_arena_allocator, error_set); - const error_set_val = try Value.Tag.ty.create(new_decl_arena_allocator, error_set_ty); - const mod = sema.mod; - const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ - .ty = Type.type, - .val = error_set_val, - }, name_strategy, "error", inst); - const new_decl = mod.declPtr(new_decl_index); - new_decl.owns_tv = true; - errdefer mod.abortAnonDecl(new_decl_index); - - var names = Module.ErrorSet.NameMap{}; - try names.ensureUnusedCapacity(new_decl_arena_allocator, extra.data.fields_len); + var names: Module.Fn.InferredErrorSet.NameMap = .{}; + try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len); var extra_index = @intCast(u32, extra.end); const extra_index_end = extra_index + (extra.data.fields_len * 2); while (extra_index < extra_index_end) : (extra_index += 2) { // 
+2 to skip over doc_string const str_index = sema.code.extra[extra_index]; - const kv = try mod.getErrorValue(sema.code.nullTerminatedString(str_index)); - const result = names.getOrPutAssumeCapacity(kv.key); + const name = sema.code.nullTerminatedString(str_index); + const name_ip = try mod.intern_pool.getOrPutString(gpa, name); + _ = try mod.getErrorValue(name_ip); + const result = names.getOrPutAssumeCapacity(name_ip); assert(!result.found_existing); // verified in AstGen } - // names must be sorted. - Module.ErrorSet.sortNames(&names); + const error_set_ty = try mod.errorSetFromUnsortedNames(names.keys()); - error_set.* = .{ - .owner_decl = new_decl_index, - .names = names, - }; - try new_decl.finalizeNewArena(&new_decl_arena); - return sema.analyzeDeclVal(block, src, new_decl_index); + const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{ + .ty = Type.type, + .val = error_set_ty.toValue(), + }, name_strategy, "error", inst); + const new_decl = mod.declPtr(new_decl_index); + new_decl.owns_tv = true; + errdefer mod.abortAnonDecl(new_decl_index); + + const decl_val = sema.analyzeDeclVal(block, src, new_decl_index); + try mod.finalizeAnonDecl(new_decl_index); + return decl_val; } fn zirRetPtr(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { @@ -3319,7 +3431,8 @@ fn ensureResultUsed( ty: Type, src: LazySrcLoc, ) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Void, .NoReturn => return, .ErrorSet, .ErrorUnion => { const msg = msg: { @@ -3347,11 +3460,12 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); const src = inst_data.src(); const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ErrorSet, .ErrorUnion => { const msg = msg: { const msg = try sema.errMsg(block, src, "error is discarded", .{}); @@ -3369,16 +3483,17 @@ fn zirEnsureErrUnionPayloadVoid(sema: *Sema, block: *Block, inst: Zir.Inst.Index const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const err_union_ty = if (operand_ty.zigTypeTag() == .Pointer) - operand_ty.childType() + const err_union_ty = if (operand_ty.zigTypeTag(mod) == .Pointer) + operand_ty.childType(mod) else operand_ty; - if (err_union_ty.zigTypeTag() != .ErrorUnion) return; - const payload_ty = err_union_ty.errorUnionPayload().zigTypeTag(); + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) return; + const payload_ty = err_union_ty.errorUnionPayload(mod).zigTypeTag(mod); if (payload_ty != .Void and payload_ty != .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "error union payload is ignored", .{}); @@ -3407,11 +3522,13 @@ fn indexablePtrLen( src: LazySrcLoc, object: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const object_ty = sema.typeOf(object); - const is_pointer_to = object_ty.isSinglePointer(); - const indexable_ty = if (is_pointer_to) object_ty.childType() else object_ty; + const is_pointer_to = object_ty.isSinglePointer(mod); + const indexable_ty = if (is_pointer_to) object_ty.childType(mod) else object_ty; try 
checkIndexable(sema, block, src, indexable_ty); - return sema.fieldVal(block, src, object, "len", src); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len"); + return sema.fieldVal(block, src, object, field_name, src); } fn indexablePtrLenOrNone( @@ -3420,10 +3537,12 @@ fn indexablePtrLenOrNone( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); try checkMemOperand(sema, block, src, operand_ty); - if (operand_ty.ptrSize() == .Many) return .none; - return sema.fieldVal(block, src, operand, "len", src); + if (operand_ty.ptrSize(mod) == .Many) return .none; + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "len"); + return sema.fieldVal(block, src, operand, field_name, src); } fn zirAllocExtended( @@ -3431,6 +3550,7 @@ fn zirAllocExtended( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const gpa = sema.gpa; const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = extra.data.src_node }; const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = extra.data.src_node }; @@ -3451,22 +3571,19 @@ fn zirAllocExtended( break :blk alignment; } else 0; - const inferred_alloc_ty = if (small.is_const) - Type.initTag(.inferred_alloc_const) - else - Type.initTag(.inferred_alloc_mut); - if (block.is_comptime or small.is_comptime) { if (small.has_type) { return sema.analyzeComptimeAlloc(block, var_ty, alignment); } else { - return sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ + try sema.air_instructions.append(gpa, .{ + .tag = .inferred_alloc_comptime, + .data = .{ .inferred_alloc_comptime = .{ .decl_index = undefined, - .alignment = alignment, - }), - ); + .alignment = InternPool.Alignment.fromByteUnits(alignment), + .is_const = small.is_const, + } }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } } @@ -3484,17 +3601,15 @@ fn zirAllocExtended( return block.addTy(.alloc, ptr_type); } - // `Sema.addConstant` does not add the instruction to the block because it is - // not needed in the case of constant values. However here, we plan to "downgrade" - // to a normal instruction when we hit `resolve_inferred_alloc`. So we append - // to the block even though it is currently a `.constant`. 
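// Context, as a user-level sketch (assumed ~0.11 syntax; illustrative, not part
// of this patch): an `inferred_alloc` models a declaration whose type is
// deduced from the stores into it rather than written explicitly.
test "inferred allocation type" {
    const std = @import("std");
    var x = @as(u32, 1); // no explicit type; `u32` is inferred from the store
    x += 1;
    try std.testing.expect(x == 2);
}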
- const result = try sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = alignment }), - ); - try block.instructions.append(sema.gpa, Air.refToIndex(result).?); - try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {}); - return result; + const result_index = try block.addInstAsIndex(.{ + .tag = .inferred_alloc, + .data = .{ .inferred_alloc = .{ + .alignment = InternPool.Alignment.fromByteUnits(alignment), + .is_const = small.is_const, + } }, + }); + try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, .{}); + return Air.indexToRef(result_index); } fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3508,11 +3623,12 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr } fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const alloc = try sema.resolveInst(inst_data.operand); const alloc_ty = sema.typeOf(alloc); - var ptr_info = alloc_ty.ptrInfo().data; + var ptr_info = alloc_ty.ptrInfo(mod); const elem_ty = ptr_info.pointee_type; // Detect if all stores to an `.alloc` were comptime-known. @@ -3558,8 +3674,8 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try elem_ty.copy(anon_decl.arena()), - try store_val.copy(anon_decl.arena()), + elem_ty, + store_val, ptr_info.@"align", )); } @@ -3568,15 +3684,16 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Air.Inst.Ref { + const mod = sema.mod; const alloc_ty = sema.typeOf(alloc); - var ptr_info = alloc_ty.ptrInfo().data; + var ptr_info = alloc_ty.ptrInfo(mod); ptr_info.mutable = false; const const_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info); // Detect if a comptime value simply needs to have its type changed. 
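// Sketch of the user-visible rule `makePtrConst` enforces (illustrative only):
// the address of a constant is a `*const T`, so only the pointer type changes
// while a comptime-known pointee keeps its value.
test "pointer to const" {
    const std = @import("std");
    const x: u32 = 1;
    const p = &x;
    comptime std.debug.assert(@TypeOf(p) == *const u32);
    try std.testing.expect(p.* == 1);
}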
if (try sema.resolveMaybeUndefVal(alloc)) |val| { - return sema.addConstant(const_ptr_ty, val); + return sema.addConstant(const_ptr_ty, try mod.getCoerced(val, const_ptr_ty)); } return block.addBitCast(const_ptr_ty, alloc); @@ -3585,18 +3702,22 @@ fn makePtrConst(sema: *Sema, block: *Block, alloc: Air.Inst.Ref) CompileError!Ai fn zirAllocInferredComptime( sema: *Sema, inst: Zir.Inst.Index, - inferred_alloc_ty: Type, + is_const: bool, ) CompileError!Air.Inst.Ref { + const gpa = sema.gpa; const src_node = sema.code.instructions.items(.data)[inst].node; const src = LazySrcLoc.nodeOffset(src_node); sema.src = src; - return sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ + + try sema.air_instructions.append(gpa, .{ + .tag = .inferred_alloc_comptime, + .data = .{ .inferred_alloc_comptime = .{ .decl_index = undefined, - .alignment = 0, - }), - ); + .alignment = .none, + .is_const = is_const, + } }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3642,104 +3763,103 @@ fn zirAllocInferred( sema: *Sema, block: *Block, inst: Zir.Inst.Index, - inferred_alloc_ty: Type, + is_const: bool, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); + const gpa = sema.gpa; const src_node = sema.code.instructions.items(.data)[inst].node; const src = LazySrcLoc.nodeOffset(src_node); sema.src = src; if (block.is_comptime) { - return sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc_comptime.create(sema.arena, .{ + try sema.air_instructions.append(gpa, .{ + .tag = .inferred_alloc_comptime, + .data = .{ .inferred_alloc_comptime = .{ .decl_index = undefined, - .alignment = 0, - }), - ); + .alignment = .none, + .is_const = is_const, + } }, + }); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } - // `Sema.addConstant` does not add the instruction to the block because it is - // not needed in the case of constant values. However here, we plan to "downgrade" - // to a normal instruction when we hit `resolve_inferred_alloc`. So we append - // to the block even though it is currently a `.constant`. 
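// The comptime counterpart, sketched at the user level (illustrative): when
// the surrounding block is comptime, the inferred allocation never becomes a
// runtime `alloc` and ends up as an anonymous Decl instead.
test "comptime-known inferred allocation" {
    const std = @import("std");
    const y = comptime blk: {
        var x: u32 = 1; // `inferred_alloc_comptime`: lives entirely at comptime
        x += 2;
        break :blk x;
    };
    try std.testing.expect(y == 3);
}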
- const result = try sema.addConstant( - inferred_alloc_ty, - try Value.Tag.inferred_alloc.create(sema.arena, .{ .alignment = 0 }), - ); - try block.instructions.append(sema.gpa, Air.refToIndex(result).?); - try sema.unresolved_inferred_allocs.putNoClobber(sema.gpa, Air.refToIndex(result).?, {}); - return result; + const result_index = try block.addInstAsIndex(.{ + .tag = .inferred_alloc, + .data = .{ .inferred_alloc = .{ + .alignment = .none, + .is_const = is_const, + } }, + }); + try sema.unresolved_inferred_allocs.putNoClobber(gpa, result_index, .{}); + return Air.indexToRef(result_index); } fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const ptr = try sema.resolveInst(inst_data.operand); const ptr_inst = Air.refToIndex(ptr).?; - assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); - const value_index = sema.air_instructions.items(.data)[ptr_inst].ty_pl.payload; - const ptr_val = sema.air_values.items[value_index]; - const var_is_mut = switch (sema.typeOf(ptr).tag()) { - .inferred_alloc_const => false, - .inferred_alloc_mut => true, - else => unreachable, - }; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); - switch (ptr_val.tag()) { + switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; - const decl_index = iac.data.decl_index; - try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); + const iac = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime; + const decl_index = iac.decl_index; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); - const decl = sema.mod.declPtr(decl_index); - const final_elem_ty = try decl.ty.copy(sema.arena); - const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = final_elem_ty, - .mutable = true, - .@"align" = iac.data.alignment, - .@"addrspace" = target_util.defaultAddressSpace(target, .local), + const decl = mod.declPtr(decl_index); + if (iac.is_const) try decl.intern(mod); + const final_elem_ty = decl.ty; + const final_ptr_ty = try mod.ptrType(.{ + .child = final_elem_ty.toIntern(), + .flags = .{ + .is_const = false, + .alignment = iac.alignment, + .address_space = target_util.defaultAddressSpace(target, .local), + }, }); - const final_ptr_ty_inst = try sema.addType(final_ptr_ty); - sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst; try sema.maybeQueueFuncBodyAnalysis(decl_index); - if (var_is_mut) { - sema.air_values.items[value_index] = try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .decl_index = decl_index, - .runtime_index = block.runtime_index, - }); - } else { - sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, decl_index); - } + // Change it to an interned. 
+ sema.air_instructions.set(ptr_inst, .{ + .tag = .interned, + .data = .{ .interned = try mod.intern(.{ .ptr = .{ + .ty = final_ptr_ty.toIntern(), + .addr = if (!iac.is_const) .{ .mut_decl = .{ + .decl = decl_index, + .runtime_index = block.runtime_index, + } } else .{ .decl = decl_index }, + } }) }, + }); }, .inferred_alloc => { - assert(sema.unresolved_inferred_allocs.remove(ptr_inst)); - const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; - const peer_inst_list = inferred_alloc.data.prongs.items(.stored_inst); + const ia1 = sema.air_instructions.items(.data)[ptr_inst].inferred_alloc; + const ia2 = sema.unresolved_inferred_allocs.fetchRemove(ptr_inst).?.value; + const peer_inst_list = ia2.prongs.items(.stored_inst); const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none); - const final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = final_elem_ty, - .mutable = true, - .@"align" = inferred_alloc.data.alignment, - .@"addrspace" = target_util.defaultAddressSpace(target, .local), + const final_ptr_ty = try mod.ptrType(.{ + .child = final_elem_ty.toIntern(), + .flags = .{ + .alignment = ia1.alignment, + .address_space = target_util.defaultAddressSpace(target, .local), + }, }); - if (var_is_mut) { + if (!ia1.is_const) { try sema.validateVarType(block, ty_src, final_elem_ty, false); } else ct: { // Detect if the value is comptime-known. In such case, the // last 3 AIR instructions of the block will look like this: // - // %a = constant + // %a = inferred_alloc // %b = bitcast(%a) // %c = store(%b, %d) // @@ -3779,43 +3899,46 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com } }; - const const_inst = while (true) { + while (true) { if (search_index == 0) break :ct; search_index -= 1; const candidate = block.instructions.items[search_index]; + if (candidate == ptr_inst) break; switch (air_tags[candidate]) { .dbg_stmt, .dbg_block_begin, .dbg_block_end => continue, - .constant => break candidate, else => break :ct, } - }; + } const store_op = air_datas[store_inst].bin_op; const store_val = (try sema.resolveMaybeUndefVal(store_op.rhs)) orelse break :ct; if (store_op.lhs != Air.indexToRef(bitcast_inst)) break :ct; - if (air_datas[bitcast_inst].ty_op.operand != Air.indexToRef(const_inst)) break :ct; + if (air_datas[bitcast_inst].ty_op.operand != ptr) break :ct; const new_decl_index = d: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const new_decl_index = try anon_decl.finish( - try final_elem_ty.copy(anon_decl.arena()), - try store_val.copy(anon_decl.arena()), - inferred_alloc.data.alignment, + final_elem_ty, + store_val, + ia1.alignment.toByteUnits(0), ); break :d new_decl_index; }; - try sema.mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); // Even though we reuse the constant instruction, we still remove it from the // block so that codegen does not see it. block.instructions.shrinkRetainingCapacity(search_index); try sema.maybeQueueFuncBodyAnalysis(new_decl_index); - sema.air_values.items[value_index] = try Value.Tag.decl_ref.create(sema.arena, new_decl_index); - // if bitcast ty ref needs to be made const, make_ptr_const - // ZIR handles it later, so we can just use the ty ref here. 
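// The `resolvePeerTypes` call above is observable from user code (sketch,
// illustrative only): every store into the inferred slot contributes a peer.
test "inferred alloc peers" {
    const std = @import("std");
    var cond = true;
    var x = if (cond) @as(u8, 1) else @as(u16, 300);
    comptime std.debug.assert(@TypeOf(x) == u16); // peer type of u8 and u16
    x += 1;
    try std.testing.expect(x == 2);
}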
- air_datas[ptr_inst].ty_pl.ty = air_datas[bitcast_inst].ty_op.ty; + sema.air_instructions.set(ptr_inst, .{ + .tag = .interned, + .data = .{ .interned = try mod.intern(.{ .ptr = .{ + .ty = final_ptr_ty.toIntern(), + .addr = .{ .decl = new_decl_index }, + } }) }, + }); // Unless the block is comptime, `alloc_inferred` always produces // a runtime constant. The final inferred type needs to be @@ -3836,18 +3959,19 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com // Now we need to go back over all the coerce_result_ptr instructions, which // previously inserted a bitcast as a placeholder, and do the logic as if // the new result ptr type was available. - const placeholders = inferred_alloc.data.prongs.items(.placeholder); + const placeholders = ia2.prongs.items(.placeholder); const gpa = sema.gpa; var trash_block = block.makeSubBlock(); trash_block.is_comptime = false; defer trash_block.instructions.deinit(gpa); - const mut_final_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = final_elem_ty, - .mutable = true, - .@"align" = inferred_alloc.data.alignment, - .@"addrspace" = target_util.defaultAddressSpace(target, .local), + const mut_final_ptr_ty = try mod.ptrType(.{ + .child = final_elem_ty.toIntern(), + .flags = .{ + .alignment = ia1.alignment, + .address_space = target_util.defaultAddressSpace(target, .local), + }, }); const dummy_ptr = try trash_block.addTy(.alloc, mut_final_ptr_ty); const empty_trash_count = trash_block.instructions.items.len; @@ -3855,7 +3979,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com for (peer_inst_list, placeholders) |peer_inst, placeholder_inst| { const sub_ptr_ty = sema.typeOf(Air.indexToRef(placeholder_inst)); - if (mut_final_ptr_ty.eql(sub_ptr_ty, sema.mod)) { + if (mut_final_ptr_ty.eql(sub_ptr_ty, mod)) { // New result location type is the same as the old one; nothing // to do here. 
continue; @@ -3920,27 +4044,28 @@ fn zirArrayBasePtr( block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag()) { + while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; - const elem_ty = sema.typeOf(base_ptr).childType(); - switch (elem_ty.zigTypeTag()) { + const elem_ty = sema.typeOf(base_ptr).childType(mod); + switch (elem_ty.zigTypeTag(mod)) { .Array, .Vector => return base_ptr, - .Struct => if (elem_ty.isTuple()) { + .Struct => if (elem_ty.isTuple(mod)) { // TODO validate element count return base_ptr; }, else => {}, } - return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType()); + return sema.failWithArrayInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod)); } fn zirFieldBasePtr( @@ -3948,27 +4073,30 @@ fn zirFieldBasePtr( block: *Block, inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const start_ptr = try sema.resolveInst(inst_data.operand); var base_ptr = start_ptr; - while (true) switch (sema.typeOf(base_ptr).childType().zigTypeTag()) { + while (true) switch (sema.typeOf(base_ptr).childType(mod).zigTypeTag(mod)) { .ErrorUnion => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), .Optional => base_ptr = try sema.analyzeOptionalPayloadPtr(block, src, base_ptr, false, true), else => break, }; - const elem_ty = sema.typeOf(base_ptr).childType(); - switch (elem_ty.zigTypeTag()) { + const elem_ty = sema.typeOf(base_ptr).childType(mod); + switch (elem_ty.zigTypeTag(mod)) { .Struct, .Union => return base_ptr, else => {}, } - return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType()); + return sema.failWithStructInitNotSupported(block, src, sema.typeOf(start_ptr).childType(mod)); } fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.operands_len); @@ -3991,7 +4119,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const object_ty = sema.typeOf(object); // Each arg could be an indexable, or a range, in which case the length // is passed directly as an integer. - const is_int = switch (object_ty.zigTypeTag()) { + const is_int = switch (object_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => true, else => false, }; @@ -4000,7 +4128,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .input_index = i, } }; const arg_len_uncoerced = if (is_int) object else l: { - if (!object_ty.isIndexable()) { + if (!object_ty.isIndexable(mod)) { // Instead of using checkIndexable we customize this error. 
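// What `zirForLen` checks, as a user-level sketch (illustrative): every `for`
// input must be indexable or an integer range, and all lengths must agree.
test "multi-object for with a counter range" {
    const std = @import("std");
    const a = [_]u8{ 1, 2, 3 };
    var sum: usize = 0;
    for (a, 0..) |elem, i| sum += elem + i; // the range `0..` takes its length from `a`
    try std.testing.expect(sum == 9);
}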
const msg = msg: { const msg = try sema.errMsg(block, arg_src, "type '{}' is not indexable and not a range", .{object_ty.fmt(sema.mod)}); @@ -4010,9 +4138,9 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. }; return sema.failWithOwnedErrorMsg(msg); } - if (!object_ty.indexableHasLen()) continue; + if (!object_ty.indexableHasLen(mod)) continue; - break :l try sema.fieldVal(block, arg_src, object, "len", arg_src); + break :l try sema.fieldVal(block, arg_src, object, try ip.getOrPutString(gpa, "len"), arg_src); }; const arg_len = try sema.coerce(block, Type.usize, arg_len_uncoerced, arg_src); if (len == .none) { @@ -4061,7 +4189,7 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const object_ty = sema.typeOf(object); // Each arg could be an indexable, or a range, in which case the length // is passed directly as an integer. - switch (object_ty.zigTypeTag()) { + switch (object_ty.zigTypeTag(mod)) { .Int, .ComptimeInt => continue, else => {}, } @@ -4096,15 +4224,16 @@ fn validateArrayInitTy( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_init_ty = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.ArrayInit, inst_data.payload_index).data; const ty = try sema.resolveType(block, ty_src, extra.ty); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Array => { - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count != array_len) { return sema.fail(block, src, "expected {d} array elements; found {d}", .{ array_len, extra.init_count, @@ -4113,7 +4242,7 @@ fn validateArrayInitTy( return; }, .Vector => { - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count != array_len) { return sema.fail(block, src, "expected {d} vector elements; found {d}", .{ array_len, extra.init_count, @@ -4121,9 +4250,9 @@ fn validateArrayInitTy( } return; }, - .Struct => if (ty.isTuple()) { + .Struct => if (ty.isTuple(mod)) { _ = try sema.resolveTypeFields(ty); - const array_len = ty.arrayLen(); + const array_len = ty.arrayLen(mod); if (extra.init_count > array_len) { return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{ array_len, extra.init_count, @@ -4141,11 +4270,12 @@ fn validateStructInitTy( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct, .Union => return, else => {}, } @@ -4160,6 +4290,7 @@ fn zirValidateStructInit( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; const init_src = validate_inst.src(); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); @@ -4167,8 +4298,8 @@ fn zirValidateStructInit( const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const object_ptr = try sema.resolveInst(field_ptr_extra.lhs); - const agg_ty = sema.typeOf(object_ptr).childType(); - switch (agg_ty.zigTypeTag()) { + const agg_ty = 
sema.typeOf(object_ptr).childType(mod); + switch (agg_ty.zigTypeTag(mod)) { .Struct => return sema.validateStructInit( block, agg_ty, @@ -4194,6 +4325,9 @@ fn validateUnionInit( instrs: []const Zir.Inst.Index, union_ptr: Air.Inst.Ref, ) CompileError!void { + const mod = sema.mod; + const gpa = sema.gpa; + if (instrs.len != 1) { const msg = msg: { const msg = try sema.errMsg( @@ -4202,7 +4336,7 @@ fn validateUnionInit( "cannot initialize multiple union fields at once; unions can only have one active field", .{}, ); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); for (instrs[1..]) |inst| { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -4226,7 +4360,7 @@ fn validateUnionInit( const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node }; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_ptr_extra.field_name_start)); // Validate the field access but ignore the index since we want the tag enum field index. _ = try sema.unionFieldIndex(block, union_ty, field_name, field_src); const air_tags = sema.air_instructions.items(.tag); @@ -4291,21 +4425,25 @@ fn validateUnionInit( break; } - const tag_ty = union_ty.unionTagTypeHypothetical(); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); - const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const tag_ty = union_ty.unionTagTypeHypothetical(mod); + const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); if (init_val) |val| { // Our task is to delete all the `field_ptr` and `store` instructions, and insert // instead a single `store` to the result ptr with a comptime union value. block.instructions.shrinkRetainingCapacity(first_block_index); - var union_val = try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = val, - }); - if (make_runtime) union_val = try Value.Tag.runtime_value.create(sema.arena, union_val); - const union_init = try sema.addConstant(union_ty, union_val); + var union_val = try mod.intern(.{ .un = .{ + .ty = union_ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val.toIntern(), + } }); + if (make_runtime) union_val = try mod.intern(.{ .runtime_value = .{ + .ty = union_ty.toIntern(), + .val = union_val, + } }); + const union_init = try sema.addConstant(union_ty, union_val.toValue()); try sema.storePtr2(block, init_src, union_ptr, init_src, union_init, init_src, .store); return; } else if (try sema.typeRequiresComptime(union_ty)) { @@ -4323,10 +4461,12 @@ fn validateStructInit( init_src: LazySrcLoc, instrs: []const Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; // Maps field index to field_ptr index of where it was already initialized. 
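// The rule enforced by `validateUnionInit` above, sketched at the user level
// (illustrative): a union initializer sets exactly one active field, and the
// hypothetical tag tracks which one.
test "one active union field" {
    const std = @import("std");
    const U = union(enum) { a: u32, b: f32 };
    const u: U = .{ .a = 1 }; // `.{ .a = 1, .b = 2.0 }` would be a compile error
    try std.testing.expect(u.a == 1);
}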
- const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount()); + const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount(mod)); defer gpa.free(found_fields); @memset(found_fields, 0); @@ -4337,8 +4477,11 @@ fn validateStructInit( const field_src: LazySrcLoc = .{ .node_offset_initializer = field_ptr_data.src_node }; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; struct_ptr_zir_ref = field_ptr_extra.lhs; - const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); - const field_index = if (struct_ty.isTuple()) + const field_name = try ip.getOrPutString( + gpa, + sema.code.nullTerminatedString(field_ptr_extra.field_name_start), + ); + const field_index = if (struct_ty.isTuple(mod)) try sema.tupleFieldIndex(block, struct_ty, field_name, field_src) else try sema.structFieldIndex(block, struct_ty, field_name, field_src); @@ -4371,9 +4514,9 @@ fn validateStructInit( for (found_fields, 0..) |field_ptr, i| { if (field_ptr != 0) continue; - const default_val = struct_ty.structFieldDefaultValue(i); - if (default_val.tag() == .unreachable_value) { - if (struct_ty.isTuple()) { + const default_val = struct_ty.structFieldDefaultValue(i, mod); + if (default_val.toIntern() == .unreachable_value) { + if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4382,9 +4525,9 @@ fn validateStructInit( } continue; } - const field_name = struct_ty.structFieldName(i); - const template = "missing struct field: {s}"; - const args = .{field_name}; + const field_name = struct_ty.structFieldName(i, mod); + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -4394,25 +4537,23 @@ fn validateStructInit( } const field_src = init_src; // TODO better source location - const default_field_ptr = if (struct_ty.isTuple()) + const default_field_ptr = if (struct_ty.isTuple(mod)) try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); - const field_ty = sema.typeOf(default_field_ptr).childType(); + const field_ty = sema.typeOf(default_field_ptr).childType(mod); const init = try sema.addConstant(field_ty, default_val); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } if (root_msg) |msg| { - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const mod = sema.mod; - const fqn = try struct_obj.data.getFullyQualifiedName(mod); - defer gpa.free(fqn); + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const fqn = try struct_obj.getFullyQualifiedName(mod); try mod.errNoteNonLazy( - struct_obj.data.srcLoc(mod), + struct_obj.srcLoc(mod), msg, - "struct '{s}' declared here", - .{fqn}, + "struct '{}' declared here", + .{fqn.fmt(ip)}, ); } root_msg = null; @@ -4432,14 +4573,14 @@ fn validateStructInit( // We collect the comptime field values in case the struct initialization // ends up being comptime-known. - const field_values = try sema.arena.alloc(Value, struct_ty.structFieldCount()); + const field_values = try sema.arena.alloc(InternPool.Index, struct_ty.structFieldCount(mod)); field: for (found_fields, 0..) 
|field_ptr, i| { if (field_ptr != 0) { // Determine whether the value stored to this pointer is comptime-known. - const field_ty = struct_ty.structFieldType(i); + const field_ty = struct_ty.structFieldType(i, mod); if (try sema.typeHasOnePossibleValue(field_ty)) |opv| { - field_values[i] = opv; + field_values[i] = opv.toIntern(); continue; } @@ -4504,7 +4645,7 @@ fn validateStructInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - field_values[i] = val; + field_values[i] = val.toIntern(); } else if (require_comptime) { const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; return sema.failWithNeededComptime(block, field_ptr_data.src(), "initializer of comptime only struct must be comptime-known"); @@ -4517,9 +4658,9 @@ fn validateStructInit( continue :field; } - const default_val = struct_ty.structFieldDefaultValue(i); - if (default_val.tag() == .unreachable_value) { - if (struct_ty.isTuple()) { + const default_val = struct_ty.structFieldDefaultValue(i, mod); + if (default_val.toIntern() == .unreachable_value) { + if (struct_ty.isTuple(mod)) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4528,9 +4669,9 @@ fn validateStructInit( } continue; } - const field_name = struct_ty.structFieldName(i); - const template = "missing struct field: {s}"; - const args = .{field_name}; + const field_name = struct_ty.structFieldName(i, mod); + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, args); } else { @@ -4538,18 +4679,17 @@ fn validateStructInit( } continue; } - field_values[i] = default_val; + field_values[i] = default_val.toIntern(); } if (root_msg) |msg| { - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); - try sema.mod.errNoteNonLazy( - struct_obj.data.srcLoc(sema.mod), + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const fqn = try struct_obj.getFullyQualifiedName(mod); + try mod.errNoteNonLazy( + struct_obj.srcLoc(mod), msg, - "struct '{s}' declared here", - .{fqn}, + "struct '{}' declared here", + .{fqn.fmt(ip)}, ); } root_msg = null; @@ -4561,9 +4701,15 @@ fn validateStructInit( // instead a single `store` to the struct_ptr with a comptime struct value. 
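// User-level sketch of the default-value path in `validateStructInit` above
// (illustrative): omitted fields fall back to their declared defaults; fields
// without defaults must be initialized or the "missing struct field" error fires.
test "struct init with defaults" {
    const std = @import("std");
    const S = struct { a: u32, b: u32 = 7 };
    const s: S = .{ .a = 1 }; // `.b` comes from its default; omitting `.a` would fail
    try std.testing.expect(s.a + s.b == 8);
}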
block.instructions.shrinkRetainingCapacity(first_block_index); - var struct_val = try Value.Tag.aggregate.create(sema.arena, field_values); - if (make_runtime) struct_val = try Value.Tag.runtime_value.create(sema.arena, struct_val); - const struct_init = try sema.addConstant(struct_ty, struct_val); + var struct_val = try mod.intern(.{ .aggregate = .{ + .ty = struct_ty.toIntern(), + .storage = .{ .elems = field_values }, + } }); + if (make_runtime) struct_val = try mod.intern(.{ .runtime_value = .{ + .ty = struct_ty.toIntern(), + .val = struct_val, + } }); + const struct_init = try sema.addConstant(struct_ty, struct_val.toValue()); try sema.storePtr2(block, init_src, struct_ptr, init_src, struct_init, init_src, .store); return; } @@ -4574,12 +4720,12 @@ fn validateStructInit( if (field_ptr != 0) continue; const field_src = init_src; // TODO better source location - const default_field_ptr = if (struct_ty.isTuple()) + const default_field_ptr = if (struct_ty.isTuple(mod)) try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(u32, i), true) else try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(u32, i), field_src, struct_ty, true); - const field_ty = sema.typeOf(default_field_ptr).childType(); - const init = try sema.addConstant(field_ty, field_values[i]); + const field_ty = sema.typeOf(default_field_ptr).childType(mod); + const init = try sema.addConstant(field_ty, field_values[i].toValue()); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } } @@ -4589,6 +4735,7 @@ fn zirValidateArrayInit( block: *Block, inst: Zir.Inst.Index, ) CompileError!void { + const mod = sema.mod; const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; const init_src = validate_inst.src(); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); @@ -4596,18 +4743,18 @@ fn zirValidateArrayInit( const first_elem_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const elem_ptr_extra = sema.code.extraData(Zir.Inst.ElemPtrImm, first_elem_ptr_data.payload_index).data; const array_ptr = try sema.resolveInst(elem_ptr_extra.ptr); - const array_ty = sema.typeOf(array_ptr).childType(); - const array_len = array_ty.arrayLen(); + const array_ty = sema.typeOf(array_ptr).childType(mod); + const array_len = array_ty.arrayLen(mod); - if (instrs.len != array_len) switch (array_ty.zigTypeTag()) { + if (instrs.len != array_len) switch (array_ty.zigTypeTag(mod)) { .Struct => { var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); var i = instrs.len; while (i < array_len) : (i += 1) { - const default_val = array_ty.structFieldDefaultValue(i); - if (default_val.tag() == .unreachable_value) { + const default_val = array_ty.structFieldDefaultValue(i, mod); + if (default_val.toIntern() == .unreachable_value) { const template = "missing tuple field with index {d}"; if (root_msg) |msg| { try sema.errNote(block, init_src, msg, template, .{i}); @@ -4642,39 +4789,41 @@ fn zirValidateArrayInit( // at comptime so we have almost nothing to do here. However, in case of a // sentinel-terminated array, the sentinel will not have been populated by // any ZIR instructions at comptime; we need to do that here. 
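// The sentinel population described in the comment above, seen from user code
// (illustrative sketch): the sentinel slot sits one past the last element.
test "sentinel-terminated array" {
    const std = @import("std");
    const a: [3:0]u8 = .{ 1, 2, 3 };
    try std.testing.expect(a[3] == 0); // the sentinel is readable at index `len`
}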
- if (array_ty.sentinel()) |sentinel_val| { + if (array_ty.sentinel(mod)) |sentinel_val| { const array_len_ref = try sema.addIntUnsigned(Type.usize, array_len); const sentinel_ptr = try sema.elemPtrArray(block, init_src, init_src, array_ptr, init_src, array_len_ref, true, true); - const sentinel = try sema.addConstant(array_ty.childType(), sentinel_val); + const sentinel = try sema.addConstant(array_ty.childType(mod), sentinel_val); try sema.storePtr2(block, init_src, sentinel_ptr, init_src, sentinel, init_src, .store); } return; } + // If the array has one possible value, the value is always comptime-known. + if (try sema.typeHasOnePossibleValue(array_ty)) |array_opv| { + const array_init = try sema.addConstant(array_ty, array_opv); + try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store); + return; + } + var array_is_comptime = true; var first_block_index = block.instructions.items.len; var make_runtime = false; // Collect the comptime element values in case the array literal ends up // being comptime-known. - const array_len_s = try sema.usizeCast(block, init_src, array_ty.arrayLenIncludingSentinel()); - const element_vals = try sema.arena.alloc(Value, array_len_s); - const opt_opv = try sema.typeHasOnePossibleValue(array_ty); + const element_vals = try sema.arena.alloc( + InternPool.Index, + try sema.usizeCast(block, init_src, array_len), + ); const air_tags = sema.air_instructions.items(.tag); const air_datas = sema.air_instructions.items(.data); outer: for (instrs, 0..) |elem_ptr, i| { // Determine whether the value stored to this pointer is comptime-known. - if (array_ty.isTuple()) { - if (array_ty.structFieldValueComptime(i)) |opv| { - element_vals[i] = opv; - continue; - } - } else { - // Array has one possible value, so value is always comptime-known - if (opt_opv) |opv| { - element_vals[i] = opv; + if (array_ty.isTuple(mod)) { + if (try array_ty.structFieldValueComptime(mod, i)) |opv| { + element_vals[i] = opv.toIntern(); continue; } } @@ -4735,7 +4884,7 @@ fn zirValidateArrayInit( first_block_index = @min(first_block_index, block_index); } if (try sema.resolveMaybeUndefValAllowVariablesMaybeRuntime(bin_op.rhs, &make_runtime)) |val| { - element_vals[i] = val; + element_vals[i] = val.toIntern(); } else { array_is_comptime = false; } @@ -4747,50 +4896,55 @@ fn zirValidateArrayInit( if (array_is_comptime) { if (try sema.resolveDefinedValue(block, init_src, array_ptr)) |ptr_val| { - if (ptr_val.tag() == .comptime_field_ptr) { - // This store was validated by the individual elem ptrs. - return; + switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .comptime_field => return, // This store was validated by the individual elem ptrs. + else => {}, + }, + else => {}, } } // Our task is to delete all the `elem_ptr` and `store` instructions, and insert // instead a single `store` to the array_ptr with a comptime struct value. - // Also to populate the sentinel value, if any. 
- if (array_ty.sentinel()) |sentinel_val| { - element_vals[instrs.len] = sentinel_val; - } - block.instructions.shrinkRetainingCapacity(first_block_index); - var array_val = try Value.Tag.aggregate.create(sema.arena, element_vals); - if (make_runtime) array_val = try Value.Tag.runtime_value.create(sema.arena, array_val); - const array_init = try sema.addConstant(array_ty, array_val); + var array_val = try mod.intern(.{ .aggregate = .{ + .ty = array_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } }); + if (make_runtime) array_val = try mod.intern(.{ .runtime_value = .{ + .ty = array_ty.toIntern(), + .val = array_val, + } }); + const array_init = try sema.addConstant(array_ty, array_val.toValue()); try sema.storePtr2(block, init_src, array_ptr, init_src, array_init, init_src, .store); } } fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - if (operand_ty.zigTypeTag() != .Pointer) { - return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(sema.mod)}); - } else switch (operand_ty.ptrSize()) { + if (operand_ty.zigTypeTag(mod) != .Pointer) { + return sema.fail(block, src, "cannot dereference non-pointer type '{}'", .{operand_ty.fmt(mod)}); + } else switch (operand_ty.ptrSize(mod)) { .One, .C => {}, - .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(sema.mod)}), - .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(sema.mod)}), + .Many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{}'", .{operand_ty.fmt(mod)}), + .Slice => return sema.fail(block, src, "index syntax required for slice type '{}'", .{operand_ty.fmt(mod)}), } - if ((try sema.typeHasOnePossibleValue(operand_ty.childType())) != null) { + if ((try sema.typeHasOnePossibleValue(operand_ty.childType(mod))) != null) { // No need to validate the actual pointer value, we don't need it! 
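// The dereference rules `zirValidateDeref` checks, as a standalone sketch
// (illustrative): `.*` applies to single-item (and C) pointers; many-pointers
// and slices require index syntax instead.
test "deref vs index syntax" {
    const std = @import("std");
    var x: u32 = 5;
    const p = &x;
    p.* += 1; // single-item pointer: `.*` is valid
    var arr = [_]u32{ 1, 2 };
    const s: []u32 = &arr;
    try std.testing.expect(p.* + s[1] == 8); // slice: index syntax only
}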
return; } - const elem_ty = operand_ty.elemType2(); + const elem_ty = operand_ty.elemType2(mod); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { return sema.fail(block, src, "cannot dereference undefined value", .{}); } } else if (!(try sema.validateRunTimeType(elem_ty, false))) { @@ -4799,12 +4953,12 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr block, src, "values of type '{}' must be comptime-known, but operand value is runtime-known", - .{elem_ty.fmt(sema.mod)}, + .{elem_ty.fmt(mod)}, ); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl), elem_ty); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), elem_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -4816,23 +4970,24 @@ fn failWithBadMemberAccess( block: *Block, agg_ty: Type, field_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, ) CompileError { - const kw_name = switch (agg_ty.zigTypeTag()) { + const mod = sema.mod; + const kw_name = switch (agg_ty.zigTypeTag(mod)) { .Union => "union", .Struct => "struct", .Opaque => "opaque", .Enum => "enum", else => unreachable, }; - if (agg_ty.getOwnerDeclOrNull()) |some| if (sema.mod.declIsRoot(some)) { - return sema.fail(block, field_src, "root struct of file '{}' has no member named '{s}'", .{ - agg_ty.fmt(sema.mod), field_name, + if (agg_ty.getOwnerDeclOrNull(mod)) |some| if (mod.declIsRoot(some)) { + return sema.fail(block, field_src, "root struct of file '{}' has no member named '{}'", .{ + agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool), }); }; const msg = msg: { - const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{s}'", .{ - kw_name, agg_ty.fmt(sema.mod), field_name, + const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{}'", .{ + kw_name, agg_ty.fmt(mod), field_name.fmt(&mod.intern_pool), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, agg_ty); @@ -4846,22 +5001,22 @@ fn failWithBadStructFieldAccess( block: *Block, struct_obj: *Module.Struct, field_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, ) CompileError { + const mod = sema.mod; const gpa = sema.gpa; - const fqn = try struct_obj.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); + const fqn = try struct_obj.getFullyQualifiedName(mod); const msg = msg: { const msg = try sema.errMsg( block, field_src, - "no field named '{s}' in struct '{s}'", - .{ field_name, fqn }, + "no field named '{}' in struct '{}'", + .{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) }, ); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(struct_obj.srcLoc(sema.mod), msg, "struct declared here", .{}); + try mod.errNoteNonLazy(struct_obj.srcLoc(mod), msg, "struct declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -4872,30 +5027,31 @@ fn failWithBadUnionFieldAccess( block: *Block, union_obj: *Module.Union, field_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, ) CompileError { + const mod = sema.mod; const gpa = sema.gpa; - const fqn = try union_obj.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); + const fqn = try union_obj.getFullyQualifiedName(mod); const msg = msg: { const msg = try sema.errMsg( block, field_src, - "no field 
named '{s}' in union '{s}'", - .{ field_name, fqn }, + "no field named '{}' in union '{}'", + .{ field_name.fmt(&mod.intern_pool), fqn.fmt(&mod.intern_pool) }, ); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(union_obj.srcLoc(sema.mod), msg, "union declared here", .{}); + try mod.errNoteNonLazy(union_obj.srcLoc(mod), msg, "union declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !void { - const src_loc = decl_ty.declSrcLocOrNull(sema.mod) orelse return; - const category = switch (decl_ty.zigTypeTag()) { + const mod = sema.mod; + const src_loc = decl_ty.declSrcLocOrNull(mod) orelse return; + const category = switch (decl_ty.zigTypeTag(mod)) { .Union => "union", .Struct => "struct", .Enum => "enum", @@ -4903,7 +5059,7 @@ fn addDeclaredHereNote(sema: *Sema, parent: *Module.ErrorMsg, decl_ty: Type) !vo .ErrorSet => "error set", else => unreachable, }; - try sema.mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category}); + try mod.errNoteNonLazy(src_loc, parent, "{s} declared here", .{category}); } fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { @@ -4919,17 +5075,14 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const src: LazySrcLoc = sema.src; blk: { const ptr_inst = Air.refToIndex(ptr) orelse break :blk; - if (sema.air_instructions.items(.tag)[ptr_inst] != .constant) break :blk; - const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - switch (ptr_val.tag()) { + switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; + const iac = &sema.air_instructions.items(.data)[ptr_inst].inferred_alloc_comptime; return sema.storeToInferredAllocComptime(block, src, operand, iac); }, .inferred_alloc => { - const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; - return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc); + const ia = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?; + return sema.storeToInferredAlloc(block, ptr, operand, ia); }, else => break :blk, } @@ -4947,18 +5100,16 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi const ptr = try sema.resolveInst(bin_inst.lhs); const operand = try sema.resolveInst(bin_inst.rhs); const ptr_inst = Air.refToIndex(ptr).?; - assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const air_datas = sema.air_instructions.items(.data); - const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; - switch (ptr_val.tag()) { + switch (sema.air_instructions.items(.tag)[ptr_inst]) { .inferred_alloc_comptime => { - const iac = ptr_val.castTag(.inferred_alloc_comptime).?; + const iac = &air_datas[ptr_inst].inferred_alloc_comptime; return sema.storeToInferredAllocComptime(block, src, operand, iac); }, .inferred_alloc => { - const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; - return sema.storeToInferredAlloc(block, ptr, operand, inferred_alloc); + const ia = sema.unresolved_inferred_allocs.getPtr(ptr_inst).?; + return sema.storeToInferredAlloc(block, ptr, operand, ia); }, else => unreachable, } @@ -4969,14 +5120,14 @@ fn storeToInferredAlloc( block: *Block, ptr: Air.Inst.Ref, operand: Air.Inst.Ref, - inferred_alloc: *Value.Payload.InferredAlloc, + inferred_alloc: *InferredAlloc, ) CompileError!void { // Create a 
store instruction as a placeholder. This will be replaced by a // proper store sequence once we know the stored type. const dummy_store = try block.addBinOp(.store, ptr, operand); // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. - try inferred_alloc.data.prongs.append(sema.arena, .{ + try inferred_alloc.prongs.append(sema.arena, .{ .stored_inst = operand, .placeholder = Air.refToIndex(dummy_store).?, }); @@ -4987,20 +5138,21 @@ fn storeToInferredAllocComptime( block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, - iac: *Value.Payload.InferredAllocComptime, + iac: *Air.Inst.Data.InferredAllocComptime, ) CompileError!void { const operand_ty = sema.typeOf(operand); // There will be only one store_to_inferred_ptr because we are running at comptime. // The alloc will turn into a Decl. if (try sema.resolveMaybeUndefValAllowVariables(operand)) |operand_val| store: { - if (operand_val.tag() == .variable) break :store; + if (operand_val.getVariable(sema.mod) != null) break :store; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - iac.data.decl_index = try anon_decl.finish( - try operand_ty.copy(anon_decl.arena()), - try operand_val.copy(anon_decl.arena()), - iac.data.alignment, + iac.decl_index = try anon_decl.finish( + operand_ty, + operand_val, + iac.alignment.toByteUnits(0), ); + try sema.comptime_mutable_decls.append(iac.decl_index); return; } @@ -5028,6 +5180,7 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const zir_tags = sema.code.instructions.items(.tag); const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].pl_node; @@ -5046,9 +5199,9 @@ fn zirStoreNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!v // %b = store(%a, %c) // Where %c is an error union or error set. In such case we need to add // to the current function's inferred error set, if any. - if (is_ret and (sema.typeOf(operand).zigTypeTag() == .ErrorUnion or - sema.typeOf(operand).zigTypeTag() == .ErrorSet) and - sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) + if (is_ret and (sema.typeOf(operand).zigTypeTag(mod) == .ErrorUnion or + sema.typeOf(operand).zigTypeTag(mod) == .ErrorSet) and + sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(operand); } @@ -5072,47 +5225,30 @@ fn zirStr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins return sema.addStrLit(block, bytes); } -fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air.Inst.Ref { - // `zir_bytes` references memory inside the ZIR module, which can get deallocated - // after semantic analysis is complete, for example in the case of the initialization - // expression of a variable declaration. 
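// User-level view of the string-literal decls produced by `addStrLit` below
// (illustrative sketch): a literal is a pointer to a null-terminated array constant.
test "string literal type" {
    const std = @import("std");
    const msg = "hi";
    comptime std.debug.assert(@TypeOf(msg) == *const [2:0]u8);
    try std.testing.expect(msg[msg.len] == 0); // the 0 sentinel follows the bytes
}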
+fn addStrLit(sema: *Sema, block: *Block, bytes: []const u8) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; - const string_bytes = &mod.string_literal_bytes; - const StringLiteralAdapter = Module.StringLiteralAdapter; - const StringLiteralContext = Module.StringLiteralContext; - try string_bytes.ensureUnusedCapacity(gpa, zir_bytes.len); - const gop = try mod.string_literal_table.getOrPutContextAdapted(gpa, zir_bytes, StringLiteralAdapter{ - .bytes = string_bytes, - }, StringLiteralContext{ - .bytes = string_bytes, + // TODO: write something like getCoercedInts to avoid needing to dupe + const duped_bytes = try sema.arena.dupe(u8, bytes); + const ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + .sentinel = .zero_u8, }); + const val = try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = duped_bytes }, + } }); + const gop = try mod.memoized_decls.getOrPut(gpa, val); if (!gop.found_existing) { - gop.key_ptr.* = .{ - .index = @intCast(u32, string_bytes.items.len), - .len = @intCast(u32, zir_bytes.len), - }; - string_bytes.appendSliceAssumeCapacity(zir_bytes); - gop.value_ptr.* = .none; + const new_decl_index = try mod.createAnonymousDecl(block, .{ + .ty = ty, + .val = val.toValue(), + }); + gop.value_ptr.* = new_decl_index; + try mod.finalizeAnonDecl(new_decl_index); } - const decl_index = gop.value_ptr.unwrap() orelse di: { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - - const decl_index = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), gop.key_ptr.len), - try Value.Tag.str_lit.create(anon_decl.arena(), gop.key_ptr.*), - 0, // default alignment - ); - - // Needed so that `Decl.clearValues` will additionally set the corresponding - // string literal table value back to `Decl.OptionalIndex.none`. - mod.declPtr(decl_index).owns_tv = true; - - gop.value_ptr.* = decl_index.toOptional(); - break :di decl_index; - }; - return sema.analyzeDeclRef(decl_index); + return sema.analyzeDeclRef(gop.value_ptr.*); } fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -5121,7 +5257,7 @@ fn zirInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins defer tracy.end(); const int = sema.code.instructions.items(.data)[inst].int; - return sema.addIntUnsigned(Type.initTag(.comptime_int), int); + return sema.addIntUnsigned(Type.comptime_int, int); } fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -5129,38 +5265,43 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const tracy = trace(@src()); defer tracy.end(); - const arena = sema.arena; + const mod = sema.mod; const int = sema.code.instructions.items(.data)[inst].str; const byte_count = int.len * @sizeOf(std.math.big.Limb); const limb_bytes = sema.code.string_bytes[int.start..][0..byte_count]; - const limbs = try arena.alloc(std.math.big.Limb, int.len); + + // TODO: this allocation and copy is only needed because the limbs may be unaligned. + // If ZIR is adjusted so that big int limbs are guaranteed to be aligned, these + // two lines can be removed. 
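// Why `zirIntBig` deals in multiple limbs (illustrative sketch): `comptime_int`
// is arbitrary-precision, so a literal can exceed any single machine word.
test "big comptime_int" {
    const std = @import("std");
    const big = 0xffff_ffff_ffff_ffff_ffff_ffff; // 96 bits: more than one 64-bit limb
    comptime std.debug.assert(big + 1 == 0x1_0000_0000_0000_0000_0000_0000);
}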
+ const limbs = try sema.arena.alloc(std.math.big.Limb, int.len); @memcpy(mem.sliceAsBytes(limbs), limb_bytes); return sema.addConstant( - Type.initTag(.comptime_int), - try Value.Tag.int_big_positive.create(arena, limbs), + Type.comptime_int, + try mod.intValue_big(Type.comptime_int, .{ + .limbs = limbs, + .positive = true, + }), ); } fn zirFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; - const arena = sema.arena; const number = sema.code.instructions.items(.data)[inst].float; return sema.addConstant( - Type.initTag(.comptime_float), - try Value.Tag.float_64.create(arena, number), + Type.comptime_float, + try sema.mod.floatValue(Type.comptime_float, number), ); } fn zirFloat128(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; - const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; const number = extra.get(); return sema.addConstant( - Type.initTag(.comptime_float), - try Value.Tag.float_128.create(arena, number), + Type.comptime_float, + try sema.mod.floatValue(Type.comptime_float, number), ); } @@ -5179,7 +5320,9 @@ fn zirCompileLog( sema: *Sema, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - var managed = sema.mod.compile_log_text.toManaged(sema.gpa); + const mod = sema.mod; + + var managed = mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -5192,19 +5335,18 @@ fn zirCompileLog( const arg = try sema.resolveInst(arg_ref); const arg_ty = sema.typeOf(arg); - if (try sema.resolveMaybeUndefVal(arg)) |val| { - try sema.resolveLazyValue(val); + if (try sema.resolveMaybeUndefLazyVal(arg)) |val| { try writer.print("@as({}, {})", .{ - arg_ty.fmt(sema.mod), val.fmtValue(arg_ty, sema.mod), + arg_ty.fmt(mod), val.fmtValue(arg_ty, mod), }); } else { - try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(sema.mod)}); + try writer.print("@as({}, [runtime value])", .{arg_ty.fmt(mod)}); } } try writer.print("\n", .{}); const decl_index = if (sema.func) |some| some.owner_decl else sema.owner_decl_index; - const gop = try sema.mod.compile_log_decls.getOrPut(sema.gpa, decl_index); + const gop = try mod.compile_log_decls.getOrPut(sema.gpa, decl_index); if (!gop.found_existing) { gop.value_ptr.* = src_node; } @@ -5235,6 +5377,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); @@ -5284,7 +5427,7 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError try sema.analyzeBody(&loop_block, body); const loop_block_len = loop_block.instructions.items.len; - if (loop_block_len > 0 and sema.typeOf(Air.indexToRef(loop_block.instructions.items[loop_block_len - 1])).isNoReturn()) { + if (loop_block_len > 0 and sema.typeOf(Air.indexToRef(loop_block.instructions.items[loop_block_len - 1])).isNoReturn(mod)) { // If the loop ended with a noreturn terminator, then there is no way for it to loop, // so we can just use the block instead. 
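// The "cannot loop" case in the comment above, from the user side
// (illustrative sketch): a body whose terminator is noreturn never jumps back.
test "loop that cannot repeat" {
    const std = @import("std");
    var x: u32 = 0;
    while (true) {
        x = 1;
        break; // noreturn terminator: the loop body runs exactly once
    }
    try std.testing.expect(x == 1);
}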
try child_block.instructions.appendSlice(gpa, loop_block.instructions.items); @@ -5311,7 +5454,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr // we check this here to avoid undefined symbols if (!@import("build_options").have_llvm) - return sema.fail(parent_block, src, "cannot do C import on Zig compiler not built with LLVM-extension", .{}); + return sema.fail(parent_block, src, "C import unavailable; Zig compiler built without LLVM extensions", .{}); var c_import_buf = std.ArrayList(u8).init(sema.gpa); defer c_import_buf.deinit(); @@ -5354,7 +5497,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr if (!mod.comp.bin_file.options.link_libc) try sema.errNote(&child_block, src, msg, "libc headers not available; compilation does not link against libc", .{}); - const gop = try sema.mod.cimport_errors.getOrPut(sema.gpa, sema.owner_decl_index); + const gop = try mod.cimport_errors.getOrPut(sema.gpa, sema.owner_decl_index); if (!gop.found_existing) { var errs = try std.ArrayListUnmanaged(Module.CImportError).initCapacity(sema.gpa, c_import_res.errors.len); errdefer { @@ -5537,7 +5680,7 @@ fn analyzeBlockBody( // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); - assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); + assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn(mod)); if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions @@ -5578,7 +5721,7 @@ fn analyzeBlockBody( try sema.errNote(child_block, runtime_src, msg, "runtime control flow here", .{}); const child_src_decl = mod.declPtr(child_block.src_decl); - try sema.explainWhyTypeIsComptime(msg, type_src.toSrcLoc(child_src_decl), resolved_ty); + try sema.explainWhyTypeIsComptime(msg, type_src.toSrcLoc(child_src_decl, mod), resolved_ty); break :msg msg; }; @@ -5649,15 +5792,16 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const decl_name = sema.code.nullTerminatedString(extra.decl_name); + const decl_name = try mod.intern_pool.getOrPutString(mod.gpa, sema.code.nullTerminatedString(extra.decl_name)); const decl_index = if (extra.namespace != .none) index_blk: { const container_ty = try sema.resolveType(block, operand_src, extra.namespace); - const container_namespace = container_ty.getNamespace().?; + const container_namespace = container_ty.getNamespaceIndex(mod).unwrap().?; const maybe_index = try sema.lookupInNamespace(block, operand_src, container_namespace, decl_name, false); break :index_blk maybe_index orelse @@ -5671,10 +5815,10 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void else => |e| return e, }; { - try sema.mod.ensureDeclAnalyzed(decl_index); - const exported_decl = sema.mod.declPtr(decl_index); - if (exported_decl.val.castTag(.function)) |some| { - return sema.analyzeExport(block, src, options, some.data.owner_decl); + try 
mod.ensureDeclAnalyzed(decl_index); + const exported_decl = mod.declPtr(decl_index); + if (exported_decl.val.getFunction(mod)) |function| { + return sema.analyzeExport(block, src, options, function.owner_decl); } } try sema.analyzeExport(block, src, options, decl_index); @@ -5697,17 +5841,14 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError }, else => |e| return e, }; - const decl_index = switch (operand.val.tag()) { - .function => operand.val.castTag(.function).?.data.owner_decl, - else => blk: { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - break :blk try anon_decl.finish( - try operand.ty.copy(anon_decl.arena()), - try operand.val.copy(anon_decl.arena()), - 0, - ); - }, + const decl_index = if (operand.val.getFunction(sema.mod)) |function| function.owner_decl else blk: { + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + break :blk try anon_decl.finish( + operand.ty, + operand.val, + 0, + ); }; try sema.analyzeExport(block, src, options, decl_index); } @@ -5716,13 +5857,13 @@ pub fn analyzeExport( sema: *Sema, block: *Block, src: LazySrcLoc, - borrowed_options: std.builtin.ExportOptions, + options: Module.Export.Options, exported_decl_index: Decl.Index, ) !void { const Export = Module.Export; const mod = sema.mod; - if (borrowed_options.linkage == .Internal) { + if (options.linkage == .Internal) { return; } @@ -5731,11 +5872,11 @@ pub fn analyzeExport( if (!try sema.validateExternType(exported_decl.ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(block, src, "unable to export type '{}'", .{exported_decl.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, src, "unable to export type '{}'", .{exported_decl.ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), exported_decl.ty, .other); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), exported_decl.ty, .other); try sema.addDeclaredHereNote(msg, exported_decl.ty); break :msg msg; @@ -5744,15 +5885,15 @@ pub fn analyzeExport( } // TODO: some backends might support re-exporting extern decls - if (exported_decl.isExtern()) { + if (exported_decl.isExtern(mod)) { return sema.fail(block, src, "export target cannot be extern", .{}); } // This decl is alive no matter what, since it's being exported - mod.markDeclAlive(exported_decl); + try mod.markDeclAlive(exported_decl); try sema.maybeQueueFuncBodyAnalysis(exported_decl_index); - const gpa = mod.gpa; + const gpa = sema.gpa; try mod.decl_exports.ensureUnusedCapacity(gpa, 1); try mod.export_owners.ensureUnusedCapacity(gpa, 1); @@ -5760,19 +5901,8 @@ pub fn analyzeExport( const new_export = try gpa.create(Export); errdefer gpa.destroy(new_export); - const symbol_name = try gpa.dupe(u8, borrowed_options.name); - errdefer gpa.free(symbol_name); - - const section: ?[]const u8 = if (borrowed_options.section) |s| try gpa.dupe(u8, s) else null; - errdefer if (section) |s| gpa.free(s); - new_export.* = .{ - .options = .{ - .name = symbol_name, - .linkage = borrowed_options.linkage, - .section = section, - .visibility = borrowed_options.visibility, - }, + .opts = options, .src = src, .owner_decl = sema.owner_decl_index, .src_decl = block.src_decl, @@ -5798,6 +5928,7 @@ pub fn analyzeExport( } fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { + const mod = sema.mod; 
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const src = LazySrcLoc.nodeOffset(extra.node); @@ -5807,11 +5938,12 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst alignment, }); } - const func = sema.func orelse + const func_index = sema.func_index.unwrap() orelse return sema.fail(block, src, "@setAlignStack outside function body", .{}); + const func = mod.funcPtr(func_index); - const fn_owner_decl = sema.mod.declPtr(func.owner_decl); - switch (fn_owner_decl.ty.fnCallingConvention()) { + const fn_owner_decl = mod.declPtr(func.owner_decl); + switch (fn_owner_decl.ty.fnCallingConvention(mod)) { .Naked => return sema.fail(block, src, "@setAlignStack in naked function", .{}), .Inline => return sema.fail(block, src, "@setAlignStack in inline function", .{}), else => if (block.inlining != null) { @@ -5819,7 +5951,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst }, } - const gop = try sema.mod.align_stack_fns.getOrPut(sema.mod.gpa, func); + const gop = try mod.align_stack_fns.getOrPut(sema.gpa, func_index); if (gop.found_existing) { const msg = msg: { const msg = try sema.errMsg(block, src, "multiple @setAlignStack in the same function body", .{}); @@ -5971,10 +6103,11 @@ fn addDbgVar( air_tag: Air.Inst.Tag, name: []const u8, ) CompileError!void { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); switch (air_tag) { .dbg_var_ptr => { - if (!(try sema.typeHasRuntimeBits(operand_ty.childType()))) return; + if (!(try sema.typeHasRuntimeBits(operand_ty.childType(mod)))) return; }, .dbg_var_val => { if (!(try sema.typeHasRuntimeBits(operand_ty))) return; @@ -6003,29 +6136,32 @@ fn addDbgVar( } fn zirDeclRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); - const decl_name = inst_data.get(sema.code); + const decl_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); const decl_index = try sema.lookupIdentifier(block, src, decl_name); try sema.addReferencedBy(block, src, decl_index); return sema.analyzeDeclRef(decl_index); } fn zirDeclVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); - const decl_name = inst_data.get(sema.code); + const decl_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); const decl = try sema.lookupIdentifier(block, src, decl_name); return sema.analyzeDeclVal(block, src, decl); } -fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: []const u8) !Decl.Index { +fn lookupIdentifier(sema: *Sema, block: *Block, src: LazySrcLoc, name: InternPool.NullTerminatedString) !Decl.Index { + const mod = sema.mod; var namespace = block.namespace; while (true) { if (try sema.lookupInNamespace(block, src, namespace, name, false)) |decl_index| { return decl_index; } - namespace = namespace.parent orelse break; + namespace = mod.namespacePtr(namespace).parent.unwrap() orelse break; } unreachable; // AstGen detects use of undeclared identifier errors. 
} @@ -6036,21 +6172,22 @@ fn lookupInNamespace( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, - ident_name: []const u8, + namespace_index: Namespace.Index, + ident_name: InternPool.NullTerminatedString, observe_usingnamespace: bool, ) CompileError!?Decl.Index { const mod = sema.mod; - const namespace_decl_index = namespace.getDeclIndex(); - const namespace_decl = sema.mod.declPtr(namespace_decl_index); + const namespace = mod.namespacePtr(namespace_index); + const namespace_decl_index = namespace.getDeclIndex(mod); + const namespace_decl = mod.declPtr(namespace_decl_index); if (namespace_decl.analysis == .file_failure) { try mod.declareDeclDependency(sema.owner_decl_index, namespace_decl_index); return error.AnalysisFail; } if (observe_usingnamespace and namespace.usingnamespace_set.count() != 0) { - const src_file = block.namespace.file_scope; + const src_file = mod.namespacePtr(block.namespace).file_scope; const gpa = sema.gpa; var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, bool) = .{}; @@ -6069,7 +6206,7 @@ fn lookupInNamespace( // Skip decls which are not marked pub, which are in a different // file than the `a.b`/`@hasDecl` syntax. const decl = mod.declPtr(decl_index); - if (decl.is_pub or (src_file == decl.getFileScope() and checked_namespaces.values()[check_i])) { + if (decl.is_pub or (src_file == decl.getFileScope(mod) and checked_namespaces.values()[check_i])) { try candidates.append(gpa, decl_index); } } @@ -6080,15 +6217,15 @@ fn lookupInNamespace( if (sub_usingnamespace_decl_index == sema.owner_decl_index) continue; const sub_usingnamespace_decl = mod.declPtr(sub_usingnamespace_decl_index); const sub_is_pub = entry.value_ptr.*; - if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope()) { + if (!sub_is_pub and src_file != sub_usingnamespace_decl.getFileScope(mod)) { // Skip usingnamespace decls which are not marked pub, which are in // a different file than the `a.b`/`@hasDecl` syntax. continue; } try sema.ensureDeclAnalyzed(sub_usingnamespace_decl_index); - const ns_ty = sub_usingnamespace_decl.val.castTag(.ty).?.data; - const sub_ns = ns_ty.getNamespace().?; - try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope()); + const ns_ty = sub_usingnamespace_decl.val.toType(); + const sub_ns = ns_ty.getNamespace(mod).?; + try checked_namespaces.put(gpa, sub_ns, src_file == sub_usingnamespace_decl.getFileScope(mod)); } } @@ -6116,7 +6253,7 @@ fn lookupInNamespace( errdefer msg.destroy(gpa); for (candidates.items) |candidate_index| { const candidate = mod.declPtr(candidate_index); - const src_loc = candidate.srcLoc(); + const src_loc = candidate.srcLoc(mod); try mod.errNoteNonLazy(src_loc, msg, "declared here", .{}); } break :msg msg; @@ -6129,9 +6266,6 @@ fn lookupInNamespace( return decl_index; } - log.debug("{*} ({s}) depends on non-existence of '{s}' in {*} ({s})", .{ - sema.owner_decl, sema.owner_decl.name, ident_name, namespace_decl, namespace_decl.name, - }); // TODO This dependency is too strong. Really, it should only be a dependency // on the non-existence of `ident_name` in the namespace. We can lessen the number of // outdated declarations by making this dependency more sophisticated. 
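Throughout this hunk, `*Namespace` pointers give way to `Namespace.Index` values resolved through the `Module`, which is what the extra `mod.namespacePtr(...)` hops are. The shape of the outward-walking lookup, reduced to a self-contained sketch (all types hypothetical):

```zig
const std = @import("std");

const Namespace = struct {
    parent: ?u32, // parent namespace index; `null` plays the role of `.none`
    names: []const []const u8,
};

// Walk outward through enclosing scopes by index, resolving every index
// through a central table on each step: the index-based analogue of chasing
// `parent` pointers, mirroring `mod.namespacePtr(namespace).parent.unwrap()`.
fn lookup(table: []const Namespace, start: u32, name: []const u8) ?u32 {
    var i = start;
    while (true) {
        const ns = &table[i]; // the `namespacePtr` step: index to data
        for (ns.names) |n| {
            if (std.mem.eql(u8, n, name)) return i;
        }
        i = ns.parent orelse return null;
    }
}
```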
@@ -6140,22 +6274,28 @@ fn lookupInNamespace( } fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl { + const mod = sema.mod; const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null; - if (func_val.isUndef()) return null; - const owner_decl_index = switch (func_val.tag()) { - .extern_fn => func_val.castTag(.extern_fn).?.data.owner_decl, - .function => func_val.castTag(.function).?.data.owner_decl, - .decl_ref => sema.mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl, + if (func_val.isUndef(mod)) return null; + const owner_decl_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + .extern_func => |extern_func| extern_func.decl, + .func => |func| mod.funcPtr(func.index).owner_decl, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| mod.declPtr(decl).val.getFunction(mod).?.owner_decl, + else => return null, + }, else => return null, }; - return sema.mod.declPtr(owner_decl_index); + return mod.declPtr(owner_decl_index); } pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; const src = sema.src; - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return .none; - if (!sema.mod.comp.bin_file.options.error_return_tracing) return .none; + if (!mod.backendSupportsFeature(.error_return_trace)) return .none; + if (!mod.comp.bin_file.options.error_return_tracing) return .none; if (block.is_comptime) return .none; @@ -6168,7 +6308,8 @@ pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, else => |e| return e, }; - const field_index = sema.structFieldIndex(block, stack_trace_ty, "index", src) catch |err| switch (err) { + const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); + const field_index = sema.structFieldIndex(block, stack_trace_ty, field_name, src) catch |err| switch (err) { error.NeededSourceLocation, error.GenericPoison, error.ComptimeReturn, error.ComptimeBreak => unreachable, else => |e| return e, }; @@ -6191,6 +6332,8 @@ fn popErrorReturnTrace( operand: Air.Inst.Ref, saved_error_trace_index: Air.Inst.Ref, ) CompileError!void { + const mod = sema.mod; + const gpa = sema.gpa; var is_non_error: ?bool = null; var is_non_error_inst: Air.Inst.Ref = undefined; if (operand != .none) { @@ -6205,15 +6348,16 @@ fn popErrorReturnTrace( const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, "index", src, stack_trace_ty, true); + const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); + const field_ptr = try sema.structFieldPtr(block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(block, src, field_ptr, src, saved_error_trace_index, src, .store); } else if (is_non_error == null) { // The result might be an error. If it is, we leave the error trace alone. If it isn't, we need // to pop any error trace that may have been propagated from our arguments. 
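The block/cond_br machinery built below encodes something simple at runtime. Roughly, in illustrative Zig (this is the semantics of the emitted AIR, not the AIR itself):

```zig
const std = @import("std");

// Restore the saved trace index on the non-error path; leave the trace
// alone on the error path so it propagates with the error.
fn popTraceIfNonError(
    trace: *std.builtin.StackTrace,
    saved_index: usize,
    result: anyerror!void,
) void {
    if (result) |_| {
        // Non-error: entries appended while evaluating the arguments and the
        // call are no longer interesting, so roll the index back.
        trace.index = saved_index;
    } else |_| {
        // Error: keep the accumulated trace intact.
    }
}
```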
- try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Block).Struct.fields.len); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len); const cond_block_inst = try block.addInstAsIndex(.{ .tag = .block, .data = .{ @@ -6225,28 +6369,29 @@ fn popErrorReturnTrace( }); var then_block = block.makeSubBlock(); - defer then_block.instructions.deinit(sema.gpa); + defer then_block.instructions.deinit(gpa); // If non-error, then pop the error return trace by restoring the index. const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try then_block.addTy(.err_return_trace, ptr_stack_trace_ty); - const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, "index", src, stack_trace_ty, true); + const field_name = try mod.intern_pool.getOrPutString(gpa, "index"); + const field_ptr = try sema.structFieldPtr(&then_block, src, err_return_trace, field_name, src, stack_trace_ty, true); try sema.storePtr2(&then_block, src, field_ptr, src, saved_error_trace_index, src, .store); _ = try then_block.addBr(cond_block_inst, Air.Inst.Ref.void_value); // Otherwise, do nothing var else_block = block.makeSubBlock(); - defer else_block.instructions.deinit(sema.gpa); + defer else_block.instructions.deinit(gpa); _ = try else_block.addBr(cond_block_inst, Air.Inst.Ref.void_value); - try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.CondBr).Struct.fields.len + + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + then_block.instructions.items.len + else_block.instructions.items.len + @typeInfo(Air.Block).Struct.fields.len + 1); // +1 for the sole .cond_br instruction in the .block const cond_br_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); - try sema.air_instructions.append(sema.gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{ + try sema.air_instructions.append(gpa, .{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = is_non_error_inst, .payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(u32, then_block.instructions.items.len), @@ -6270,6 +6415,7 @@ fn zirCall( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const callee_src: LazySrcLoc = .{ .node_offset_call_func = inst_data.src_node }; const call_src = inst_data.src(); @@ -6288,7 +6434,7 @@ fn zirCall( .direct => .{ .direct = try sema.resolveInst(extra.data.callee) }, .field => blk: { const object_ptr = try sema.resolveInst(extra.data.obj_ptr); - const field_name = sema.code.nullTerminatedString(extra.data.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.data.field_name_start)); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; break :blk try sema.fieldCallBind(block, callee_src, object_ptr, field_name, field_name_src); }, @@ -6320,8 +6466,7 @@ fn zirCall( var input_is_error = false; const block_index = @intCast(Air.Inst.Index, block.instructions.items.len); - const func_ty_info = func_ty.fnInfo(); - const fn_params_len = func_ty_info.param_types.len; + const fn_params_len = mod.typeToFunc(func_ty).?.param_types.len; const parent_comptime = 
block.is_comptime; // `extra_index` and `arg_index` are separate since the bound function is passed as the first argument. var extra_index: usize = 0; @@ -6330,32 +6475,33 @@ fn zirCall( extra_index += 1; arg_index += 1; }) { + const func_ty_info = mod.typeToFunc(func_ty).?; const arg_end = sema.code.extra[extra.end + extra_index]; defer arg_start = arg_end; // Generate args to comptime params in comptime block. defer block.is_comptime = parent_comptime; - if (arg_index < fn_params_len and func_ty_info.comptime_params[arg_index]) { + if (arg_index < @min(fn_params_len, 32) and func_ty_info.paramIsComptime(@intCast(u5, arg_index))) { block.is_comptime = true; // TODO set comptime_reason } sema.inst_map.putAssumeCapacity(inst, inst: { if (arg_index >= fn_params_len) - break :inst Air.Inst.Ref.var_args_param; + break :inst Air.Inst.Ref.var_args_param_type; - if (func_ty_info.param_types[arg_index].tag() == .generic_poison) + if (func_ty_info.param_types[arg_index] == .generic_poison_type) break :inst Air.Inst.Ref.generic_poison_type; - break :inst try sema.addType(func_ty_info.param_types[arg_index]); + break :inst try sema.addType(func_ty_info.param_types[arg_index].toType()); }); const resolved = try sema.resolveBody(block, args_body[arg_start..arg_end], inst); const resolved_ty = sema.typeOf(resolved); - if (resolved_ty.zigTypeTag() == .NoReturn) { + if (resolved_ty.zigTypeTag(mod) == .NoReturn) { return resolved; } - if (resolved_ty.isError()) { + if (resolved_ty.isError(mod)) { input_is_error = true; } resolved_args[arg_index] = resolved; @@ -6367,7 +6513,7 @@ fn zirCall( // AstGen ensures that a call instruction is always preceded by a dbg_stmt instruction. const call_dbg_node = inst - 1; - if (sema.mod.backendSupportsFeature(.error_return_trace) and sema.mod.comp.bin_file.options.error_return_tracing and + if (mod.backendSupportsFeature(.error_return_trace) and mod.comp.bin_file.options.error_return_tracing and !block.is_comptime and !block.is_typeof and (input_is_error or pop_error_return_trace)) { const call_inst: Air.Inst.Ref = if (modifier == .always_tail) undefined else b: { @@ -6375,15 +6521,16 @@ fn zirCall( }; const return_ty = sema.typeOf(call_inst); - if (modifier != .always_tail and return_ty.isNoReturn()) + if (modifier != .always_tail and return_ty.isNoReturn(mod)) return call_inst; // call to "fn(...) noreturn", don't pop // If any input is an error-type, we might need to pop any trace it generated. Otherwise, we only // need to clean-up our own trace if we were passed to a non-error-handling expression. 
- if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError())) { + if (input_is_error or (pop_error_return_trace and modifier != .always_tail and return_ty.isError(mod))) { const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const field_index = try sema.structFieldIndex(block, stack_trace_ty, "index", call_src); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, "index"); + const field_index = try sema.structFieldIndex(block, stack_trace_ty, field_name, call_src); // Insert a save instruction before the arg resolution + call instructions we just generated const save_inst = try block.insertInst(block_index, .{ @@ -6417,24 +6564,24 @@ fn checkCallArgumentCount( total_args: usize, member_fn: bool, ) !Type { + const mod = sema.mod; const func_ty = func_ty: { - switch (callee_ty.zigTypeTag()) { + switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { - const ptr_info = callee_ty.ptrInfo().data; - if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) { + const ptr_info = callee_ty.ptrInfo(mod); + if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const opt_child = callee_ty.optionalChild(&buf); - if (opt_child.zigTypeTag() == .Fn or (opt_child.isSinglePointer() and - opt_child.childType().zigTypeTag() == .Fn)) + const opt_child = callee_ty.optionalChild(mod); + if (opt_child.zigTypeTag(mod) == .Fn or (opt_child.isSinglePointer(mod) and + opt_child.childType(mod).zigTypeTag(mod) == .Fn)) { const msg = msg: { const msg = try sema.errMsg(block, func_src, "cannot call optional type '{}'", .{ - callee_ty.fmt(sema.mod), + callee_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, func_src, msg, "consider using '.?', 'orelse' or 'if'", .{}); @@ -6445,10 +6592,10 @@ fn checkCallArgumentCount( }, else => {}, } - return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(sema.mod)}); + return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty.fmt(mod)}); }; - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const args_len = total_args - @boolToInt(member_fn); if (func_ty_info.is_var_args) { @@ -6475,7 +6622,7 @@ fn checkCallArgumentCount( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(), msg, "function declared here", .{}); + if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -6488,22 +6635,23 @@ fn callBuiltin( modifier: std.builtin.CallModifier, args: []const Air.Inst.Ref, ) !void { + const mod = sema.mod; const callee_ty = sema.typeOf(builtin_fn); const func_ty = func_ty: { - switch (callee_ty.zigTypeTag()) { + switch (callee_ty.zigTypeTag(mod)) { .Fn => break :func_ty callee_ty, .Pointer => { - const ptr_info = callee_ty.ptrInfo().data; - if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) { + const ptr_info = callee_ty.ptrInfo(mod); + if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag(mod) == .Fn) { break :func_ty ptr_info.pointee_type; } }, else => {}, } - std.debug.panic("type '{}' is not a function calling builtin 
fn", .{callee_ty.fmt(sema.mod)}); + std.debug.panic("type '{}' is not a function calling builtin fn", .{callee_ty.fmt(mod)}); }; - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; if (args.len != fn_params_len or (func_ty_info.is_var_args and args.len < fn_params_len)) { std.debug.panic("parameter count mismatch calling builtin fn, expected {d}, found {d}", .{ fn_params_len, args.len }); @@ -6511,76 +6659,6 @@ fn callBuiltin( _ = try sema.analyzeCall(block, builtin_fn, func_ty, sema.src, sema.src, modifier, false, args, null, null); } -const GenericCallAdapter = struct { - generic_fn: *Module.Fn, - precomputed_hash: u64, - func_ty_info: Type.Payload.Function.Data, - args: []const Arg, - module: *Module, - - const Arg = struct { - ty: Type, - val: Value, - is_anytype: bool, - }; - - pub fn eql(ctx: @This(), adapted_key: void, other_key: *Module.Fn) bool { - _ = adapted_key; - // Checking for equality may happen on an item that has been inserted - // into the map but is not yet fully initialized. In such case, the - // two initialized fields are `hash` and `generic_owner_decl`. - if (ctx.generic_fn.owner_decl != other_key.generic_owner_decl.unwrap().?) return false; - - const other_comptime_args = other_key.comptime_args.?; - for (other_comptime_args[0..ctx.func_ty_info.param_types.len], 0..) |other_arg, i| { - const this_arg = ctx.args[i]; - const this_is_comptime = this_arg.val.tag() != .generic_poison; - const other_is_comptime = other_arg.val.tag() != .generic_poison; - const this_is_anytype = this_arg.is_anytype; - const other_is_anytype = other_key.isAnytypeParam(ctx.module, @intCast(u32, i)); - - if (other_is_anytype != this_is_anytype) return false; - if (other_is_comptime != this_is_comptime) return false; - - if (this_is_anytype) { - // Both are anytype parameters. - if (!this_arg.ty.eql(other_arg.ty, ctx.module)) { - return false; - } - if (this_is_comptime) { - // Both are comptime and anytype parameters with matching types. - if (!this_arg.val.eql(other_arg.val, other_arg.ty, ctx.module)) { - return false; - } - } - } else if (this_is_comptime) { - // Both are comptime parameters but not anytype parameters. - // We assert no error is possible here because any lazy values must be resolved - // before inserting into the generic function hash map. - const is_eql = Value.eqlAdvanced( - this_arg.val, - this_arg.ty, - other_arg.val, - other_arg.ty, - ctx.module, - null, - ) catch unreachable; - if (!is_eql) { - return false; - } - } - } - return true; - } - - /// The implementation of the hash is in semantic analysis of function calls, so - /// that any errors when computing the hash can be properly reported. 
- pub fn hash(ctx: @This(), adapted_key: void) u64 { - _ = adapted_key; - return ctx.precomputed_hash; - } -}; - fn analyzeCall( sema: *Sema, block: *Block, @@ -6597,7 +6675,7 @@ fn analyzeCall( const mod = sema.mod; const callee_ty = sema.typeOf(func); - const func_ty_info = func_ty.fnInfo(); + const func_ty_info = mod.typeToFunc(func_ty).?; const fn_params_len = func_ty_info.param_types.len; const cc = func_ty_info.cc; if (cc == .Naked) { @@ -6611,7 +6689,7 @@ fn analyzeCall( ); errdefer msg.destroy(sema.gpa); - if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(), msg, "function declared here", .{}); + if (maybe_decl) |fn_decl| try mod.errNoteNonLazy(fn_decl.srcLoc(mod), msg, "function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -6645,7 +6723,7 @@ fn analyzeCall( var comptime_reason_buf: Block.ComptimeReason = undefined; var comptime_reason: ?*const Block.ComptimeReason = null; if (!is_comptime_call) { - if (sema.typeRequiresComptime(func_ty_info.return_type)) |ct| { + if (sema.typeRequiresComptime(func_ty_info.return_type.toType())) |ct| { is_comptime_call = ct; if (ct) { // stage1 can't handle doing this directly @@ -6653,7 +6731,7 @@ fn analyzeCall( .block = block, .func = func, .func_src = func_src, - .return_ty = func_ty_info.return_type, + .return_ty = func_ty_info.return_type.toType(), } }; comptime_reason = &comptime_reason_buf; } @@ -6671,7 +6749,7 @@ fn analyzeCall( func, func_src, call_src, - func_ty_info, + func_ty, ensure_result_used, uncasted_args, call_tag, @@ -6691,7 +6769,7 @@ fn analyzeCall( .block = block, .func = func, .func_src = func_src, - .return_ty = func_ty_info.return_type, + .return_ty = func_ty_info.return_type.toType(), } }; comptime_reason = &comptime_reason_buf; }, @@ -6708,18 +6786,21 @@ fn analyzeCall( if (err == error.AnalysisFail and comptime_reason != null) try comptime_reason.?.explain(sema, sema.err); return err; }; - const module_fn = switch (func_val.tag()) { - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data, - .function => func_val.castTag(.function).?.data, - .extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{ + const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + .extern_func => return sema.fail(block, call_src, "{s} call of extern function", .{ @as([]const u8, if (is_comptime_call) "comptime" else "inline"), }), - else => { - assert(callee_ty.isPtrAtRuntime()); - return sema.fail(block, call_src, "{s} call of function pointer", .{ - @as([]const u8, if (is_comptime_call) "comptime" else "inline"), - }); + .func => |function| function.index, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| mod.declPtr(decl).val.getFunctionIndex(mod).unwrap().?, + else => { + assert(callee_ty.isPtrAtRuntime(mod)); + return sema.fail(block, call_src, "{s} call of function pointer", .{ + @as([]const u8, if (is_comptime_call) "comptime" else "inline"), + }); + }, }, + else => unreachable, }; if (func_ty_info.is_var_args) { return sema.fail(block, call_src, "{s} call of variadic function", .{ @@ -6752,8 +6833,9 @@ fn analyzeCall( // In order to save a bit of stack space, directly modify Sema rather // than create a child one. 
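What follows is the swap-and-restore idiom applied field by field, to `sema.code`, `sema.func`, and the new `sema.func_index`. In isolation, under hypothetical types:

```zig
const Ctx = struct { code: []const u8, depth: u32 = 0 };

// Stash the parent's state, install the callee's, and let `defer` put the
// parent state back on every exit path, including errors: the same pattern
// as the `parent_zir`/`parent_func`/`parent_func_index` pairs below.
fn analyzeInline(ctx: *Ctx, callee_code: []const u8) void {
    const parent_code = ctx.code;
    ctx.code = callee_code;
    defer ctx.code = parent_code;

    ctx.depth += 1;
    defer ctx.depth -= 1;

    // ... analysis of the callee body happens here, seeing callee_code ...
}
```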
const parent_zir = sema.code; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); - sema.code = fn_owner_decl.getFileScope().zir; + sema.code = fn_owner_decl.getFileScope(mod).zir; defer sema.code = parent_zir; try mod.declareDeclDependencyType(sema.owner_decl_index, module_fn.owner_decl, .function_body); @@ -6767,14 +6849,17 @@ fn analyzeCall( } const parent_func = sema.func; + const parent_func_index = sema.func_index; sema.func = module_fn; + sema.func_index = module_fn_index.toOptional(); defer sema.func = parent_func; + defer sema.func_index = parent_func_index; const parent_err_ret_index = sema.error_return_trace_index_on_fn_entry; sema.error_return_trace_index_on_fn_entry = block.error_return_trace_index; defer sema.error_return_trace_index_on_fn_entry = parent_err_ret_index; - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, fn_owner_decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, fn_owner_decl.src_scope); defer wip_captures.deinit(); var child_block: Block = .{ @@ -6797,28 +6882,18 @@ fn analyzeCall( defer child_block.instructions.deinit(gpa); defer merges.deinit(gpa); - // If it's a comptime function call, we need to memoize it as long as no external - // comptime memory is mutated. - var memoized_call_key: Module.MemoizedCall.Key = undefined; - var delete_memoized_call_key = false; - defer if (delete_memoized_call_key) gpa.free(memoized_call_key.args); - if (is_comptime_call) { - memoized_call_key = .{ - .func = module_fn, - .args = try gpa.alloc(TypedValue, func_ty_info.param_types.len), - }; - delete_memoized_call_key = true; - } - try sema.emitBackwardBranch(block, call_src); - // Whether this call should be memoized, set to false if the call can mutate - // comptime state. + // Whether this call should be memoized, set to false if the call can mutate comptime state. var should_memoize = true; - var new_fn_info = fn_owner_decl.ty.fnInfo(); - new_fn_info.param_types = try sema.arena.alloc(Type, new_fn_info.param_types.len); - new_fn_info.comptime_params = (try sema.arena.alloc(bool, new_fn_info.param_types.len)).ptr; + // If it's a comptime function call, we need to memoize it as long as no external + // comptime memory is mutated. + const memoized_arg_values = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len); + + var new_fn_info = mod.typeToFunc(fn_owner_decl.ty).?; + new_fn_info.param_types = try sema.arena.alloc(InternPool.Index, new_fn_info.param_types.len); + new_fn_info.comptime_bits = 0; // This will have return instructions analyzed as break instructions to // the block_inst above. 
Here we are performing "comptime/inline semantic analysis" @@ -6837,31 +6912,31 @@ fn analyzeCall( &child_block, .unneeded, inst, - new_fn_info, + &new_fn_info, &arg_i, uncasted_args, is_comptime_call, &should_memoize, - memoized_call_key, - func_ty_info.param_types, + memoized_arg_values, + mod.typeToFunc(func_ty).?.param_types, func, &has_comptime_args, ) catch |err| switch (err) { error.NeededSourceLocation => { _ = sema.inst_map.remove(inst); - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); try sema.analyzeInlineCallArg( block, &child_block, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, arg_i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src), inst, - new_fn_info, + &new_fn_info, &arg_i, uncasted_args, is_comptime_call, &should_memoize, - memoized_call_key, - func_ty_info.param_types, + memoized_arg_values, + mod.typeToFunc(func_ty).?.param_types, func, &has_comptime_args, ); @@ -6897,21 +6972,15 @@ fn analyzeCall( // Create a fresh inferred error set type for inline/comptime calls. const fn_ret_ty = blk: { if (module_fn.hasInferredErrorSet(mod)) { - const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); - node.data = .{ .func = module_fn }; - if (parent_func) |some| { - some.inferred_error_sets.prepend(node); - } - - const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data); - break :blk try Type.Tag.error_union.create(sema.arena, .{ - .error_set = error_set_ty, - .payload = bare_return_type, + const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ + .func = module_fn_index, }); + const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); + break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); } break :blk bare_return_type; }; - new_fn_info.return_type = fn_ret_ty; + new_fn_info.return_type = fn_ret_ty.toIntern(); const parent_fn_ret_ty = sema.fn_ret_ty; sema.fn_ret_ty = fn_ret_ty; defer sema.fn_ret_ty = parent_fn_ret_ty; @@ -6920,23 +6989,22 @@ fn analyzeCall( // bug generating invalid LLVM IR. 
const res2: Air.Inst.Ref = res2: { if (should_memoize and is_comptime_call) { - if (mod.memoized_calls.getContext(memoized_call_key, .{ .module = mod })) |result| { - const ty_inst = try sema.addType(fn_ret_ty); - try sema.air_values.append(gpa, result.val); - sema.air_instructions.set(block_inst, .{ - .tag = .constant, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), - } }, - }); - break :res2 Air.indexToRef(block_inst); + if (mod.intern_pool.getIfExists(.{ .memoized_call = .{ + .func = module_fn_index, + .arg_values = memoized_arg_values, + .result = .none, + } })) |memoized_call_index| { + const memoized_call = mod.intern_pool.indexToKey(memoized_call_index).memoized_call; + break :res2 try sema.addConstant( + mod.intern_pool.typeOf(memoized_call.result).toType(), + memoized_call.result.toValue(), + ); } } - const new_func_resolved_ty = try Type.Tag.function.create(sema.arena, new_fn_info); + const new_func_resolved_ty = try mod.funcType(new_fn_info); if (!is_comptime_call and !block.is_typeof) { - try sema.emitDbgInline(block, parent_func.?, module_fn, new_func_resolved_ty, .dbg_inline_begin); + try sema.emitDbgInline(block, parent_func_index.unwrap().?, module_fn_index, new_func_resolved_ty, .dbg_inline_begin); const zir_tags = sema.code.instructions.items(.tag); for (fn_info.param_body) |param| switch (zir_tags[param]) { @@ -6968,7 +7036,7 @@ fn analyzeCall( error.ComptimeReturn => break :result inlining.comptime_result, error.AnalysisFail => { const err_msg = sema.err orelse return err; - if (std.mem.eql(u8, err_msg.msg, recursive_msg)) return err; + if (mem.eql(u8, err_msg.msg, recursive_msg)) return err; try sema.errNote(block, call_src, err_msg, "called from here", .{}); err_msg.clearTrace(sema.gpa); return err; @@ -6978,11 +7046,11 @@ fn analyzeCall( break :result try sema.analyzeBlockBody(block, call_src, &child_block, merges); }; - if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag() != .NoReturn) { + if (!is_comptime_call and !block.is_typeof and sema.typeOf(result).zigTypeTag(mod) != .NoReturn) { try sema.emitDbgInline( block, - module_fn, - parent_func.?, + module_fn_index, + parent_func_index.unwrap().?, mod.declPtr(parent_func.?.owner_decl).ty, .dbg_inline_end, ); @@ -6993,23 +7061,11 @@ fn analyzeCall( // TODO: check whether any external comptime memory was mutated by the // comptime function call. If so, then do not memoize the call here. - // TODO: re-evaluate whether memoized_calls needs its own arena. I think - // it should be fine to use the Decl arena for the function. 
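The arena question answers itself once results live in the intern pool: the deleted copy-into-arena block below is replaced by a single `mod.intern(.{ .memoized_call = ... })`, with lookups keyed structurally by function and argument values. Reduced to a hash-map model (hypothetical types and values, fixed arity for brevity):

```zig
const std = @import("std");

const CallKey = struct {
    func: u32, // function index
    args: [2]u32, // interned argument values
};

fn memoizedEval(gpa: std.mem.Allocator) !u32 {
    var memoized = std.AutoHashMap(CallKey, u32).init(gpa);
    defer memoized.deinit();

    const key: CallKey = .{ .func = 1, .args = .{ 10, 20 } };
    if (memoized.get(key)) |cached| return cached; // hit: no re-evaluation

    const result: u32 = 42; // miss: evaluate the call at comptime...
    try memoized.put(key, result); // ...and record the interned result
    return result;
}
```

Because keys and results are interned values rather than arena copies, structurally equal calls deduplicate for free and nothing needs a private allocation lifetime.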
- { - var arena_allocator = std.heap.ArenaAllocator.init(gpa); - errdefer arena_allocator.deinit(); - const arena = arena_allocator.allocator(); - - for (memoized_call_key.args) |*arg| { - arg.* = try arg.*.copy(arena); - } - - try mod.memoized_calls.putContext(gpa, memoized_call_key, .{ - .val = try result_val.copy(arena), - .arena = arena_allocator.state, - }, .{ .module = mod }); - delete_memoized_call_key = false; - } + _ = try mod.intern(.{ .memoized_call = .{ + .func = module_fn_index, + .arg_values = memoized_arg_values, + .result = try result_val.intern(fn_ret_ty, mod), + } }); } break :res2 result; @@ -7028,7 +7084,7 @@ fn analyzeCall( .func_inst = func, .param_i = @intCast(u32, i), } }; - const param_ty = func_ty.fnParamType(i); + const param_ty = mod.typeToFunc(func_ty).?.param_types[i].toType(); args[i] = sema.analyzeCallArg( block, .unneeded, @@ -7037,10 +7093,10 @@ fn analyzeCall( opts, ) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); _ = try sema.analyzeCallArg( block, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src), param_ty, uncasted_arg, opts, @@ -7052,11 +7108,11 @@ fn analyzeCall( } else { args[i] = sema.coerceVarArgParam(block, uncasted_arg, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); _ = try sema.coerceVarArgParam( block, uncasted_arg, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, i, bound_arg_src), ); unreachable; }, @@ -7067,14 +7123,14 @@ fn analyzeCall( if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - try sema.queueFullTypeResolution(func_ty_info.return_type); - if (sema.owner_func != null and func_ty_info.return_type.isError()) { + try sema.queueFullTypeResolution(func_ty_info.return_type.toType()); + if (sema.owner_func != null and func_ty_info.return_type.toType().isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } if (try sema.resolveMaybeUndefVal(func)) |func_val| { - if (func_val.castTag(.function)) |func_obj| { - try sema.mod.ensureFuncBodyAnalysisQueued(func_obj.data); + if (mod.intern_pool.indexToFunc(func_val.toIntern()).unwrap()) |func_index| { + try mod.ensureFuncBodyAnalysisQueued(func_index); } } @@ -7096,23 +7152,24 @@ fn analyzeCall( try sema.ensureResultUsed(block, sema.typeOf(func_inst), call_src); } return sema.handleTailCall(block, call_src, func_ty, func_inst); - } else if (block.wantSafety() and func_ty_info.return_type.isNoReturn()) { + } + if (block.wantSafety() and func_ty_info.return_type == .noreturn_type) skip_safety: { // Function pointers and extern functions aren't guaranteed to // actually be noreturn so we add a safety check for them. 
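The rewritten check below trusts only what Sema can see through: a direct `.func` value, or a pointer to a non-extern decl. A source-level picture of why the distrust is warranted (standalone illustration, hypothetical names):

```zig
fn trusted() noreturn {
    @panic("a function body Sema has analyzed really never returns");
}

pub fn main() void {
    // A direct call to `trusted` needs no guard: its body was analyzed.
    // Through a pointer (or an extern prototype), `noreturn` is only a claim
    // the type system cannot verify, so safety-checked builds guard the call
    // and trap if it ever returns.
    const ptr: *const fn () noreturn = &trusted;
    ptr();
}
```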
- check: { - var func_val = (try sema.resolveMaybeUndefVal(func)) orelse break :check; - switch (func_val.tag()) { - .function, .decl_ref => { - _ = try block.addNoOp(.unreach); - return Air.Inst.Ref.unreachable_value; + if (try sema.resolveMaybeUndefVal(func)) |func_val| { + switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + .func => break :skip_safety, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| if (!mod.declPtr(decl).isExtern(mod)) break :skip_safety, + else => {}, }, - else => break :check, + else => {}, } } - try sema.safetyPanic(block, .noreturn_returned); return Air.Inst.Ref.unreachable_value; - } else if (func_ty_info.return_type.isNoReturn()) { + } + if (func_ty_info.return_type == .noreturn_type) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -7126,17 +7183,18 @@ fn analyzeCall( } fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref { - const target = sema.mod.getTarget(); - const backend = sema.mod.comp.getZigBackend(); + const mod = sema.mod; + const target = mod.getTarget(); + const backend = mod.comp.getZigBackend(); if (!target_util.supportsTailCall(target, backend)) { return sema.fail(block, call_src, "unable to perform tail call: compiler backend '{s}' does not support tail calls on target architecture '{s}' with the selected CPU feature flags", .{ @tagName(backend), @tagName(target.cpu.arch), }); } - const func_decl = sema.mod.declPtr(sema.owner_func.?.owner_decl); - if (!func_ty.eql(func_decl.ty, sema.mod)) { + const func_decl = mod.declPtr(sema.owner_func.?.owner_decl); + if (!func_ty.eql(func_decl.ty, mod)) { return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{ - func_ty.fmt(sema.mod), func_decl.ty.fmt(sema.mod), + func_ty.fmt(mod), func_decl.ty.fmt(mod), }); } _ = try block.addUnOp(.ret, result); @@ -7149,16 +7207,17 @@ fn analyzeInlineCallArg( param_block: *Block, arg_src: LazySrcLoc, inst: Zir.Inst.Index, - new_fn_info: Type.Payload.Function.Data, + new_fn_info: *InternPool.Key.FuncType, arg_i: *usize, uncasted_args: []const Air.Inst.Ref, is_comptime_call: bool, should_memoize: *bool, - memoized_call_key: Module.MemoizedCall.Key, - raw_param_types: []const Type, + memoized_arg_values: []InternPool.Index, + raw_param_types: []const InternPool.Index, func_inst: Air.Inst.Ref, has_comptime_args: *bool, ) !void { + const mod = sema.mod; const zir_tags = sema.code.instructions.items(.tag); switch (zir_tags[inst]) { .param_comptime, .param_anytype_comptime => has_comptime_args.* = true, @@ -7174,13 +7233,14 @@ fn analyzeInlineCallArg( const param_body = sema.code.extra[extra.end..][0..extra.data.body_len]; const param_ty = param_ty: { const raw_param_ty = raw_param_types[arg_i.*]; - if (raw_param_ty.tag() != .generic_poison) break :param_ty raw_param_ty; + if (raw_param_ty != .generic_poison_type) break :param_ty raw_param_ty; const param_ty_inst = try sema.resolveBody(param_block, param_body, inst); - break :param_ty try sema.analyzeAsType(param_block, param_src, param_ty_inst); + const param_ty = try sema.analyzeAsType(param_block, param_src, param_ty_inst); + break :param_ty param_ty.toIntern(); }; new_fn_info.param_types[arg_i.*] = param_ty; const uncasted_arg = uncasted_args[arg_i.*]; - if (try sema.typeRequiresComptime(param_ty)) { + if (try sema.typeRequiresComptime(param_ty.toType())) { _ = sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, 
"argument to parameter with comptime-only type must be comptime-known") catch |err| { if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; @@ -7188,7 +7248,7 @@ fn analyzeInlineCallArg( } else if (!is_comptime_call and zir_tags[inst] == .param_comptime) { _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); } - const casted_arg = sema.coerceExtra(arg_block, param_ty, uncasted_arg, arg_src, .{ .param_src = .{ + const casted_arg = sema.coerceExtra(arg_block, param_ty.toType(), uncasted_arg, arg_src, .{ .param_src = .{ .func_inst = func_inst, .param_i = @intCast(u32, arg_i.*), } }) catch |err| switch (err) { @@ -7202,24 +7262,20 @@ fn analyzeInlineCallArg( if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; - switch (arg_val.tag()) { + switch (arg_val.toIntern()) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet unresolvable // parameter or return type. return error.GenericPoison; }, - else => { - // Needed so that lazy values do not trigger - // assertion due to type not being resolved - // when the hash function is called. - try sema.resolveLazyValue(arg_val); - }, + else => {}, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); - memoized_call_key.args[arg_i.*] = .{ - .ty = param_ty, - .val = arg_val, - }; + // Needed so that lazy values do not trigger + // assertion due to type not being resolved + // when the hash function is called. + const resolved_arg_val = try sema.resolveLazyValue(arg_val); + should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod); + memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(param_ty.toType(), mod); } else { sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg); } @@ -7233,7 +7289,7 @@ fn analyzeInlineCallArg( .param_anytype, .param_anytype_comptime => { // No coercion needed. const uncasted_arg = uncasted_args[arg_i.*]; - new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg); + new_fn_info.param_types[arg_i.*] = sema.typeOf(uncasted_arg).toIntern(); if (is_comptime_call) { sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg); @@ -7241,24 +7297,20 @@ fn analyzeInlineCallArg( if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); return err; }; - switch (arg_val.tag()) { + switch (arg_val.toIntern()) { .generic_poison, .generic_poison_type => { // This function is currently evaluated as part of an as-of-yet unresolvable // parameter or return type. return error.GenericPoison; }, - else => { - // Needed so that lazy values do not trigger - // assertion due to type not being resolved - // when the hash function is called. - try sema.resolveLazyValue(arg_val); - }, + else => {}, } - should_memoize.* = should_memoize.* and !arg_val.canMutateComptimeVarState(); - memoized_call_key.args[arg_i.*] = .{ - .ty = sema.typeOf(uncasted_arg), - .val = arg_val, - }; + // Needed so that lazy values do not trigger + // assertion due to type not being resolved + // when the hash function is called. 
+ const resolved_arg_val = try sema.resolveLazyValue(arg_val); + should_memoize.* = should_memoize.* and !resolved_arg_val.canMutateComptimeVarState(mod); + memoized_arg_values[arg_i.*] = try resolved_arg_val.intern(sema.typeOf(uncasted_arg), mod); } else { if (zir_tags[inst] == .param_anytype_comptime) { _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime"); @@ -7298,14 +7350,15 @@ fn analyzeGenericCallArg( uncasted_arg: Air.Inst.Ref, comptime_arg: TypedValue, runtime_args: []Air.Inst.Ref, - new_fn_info: Type.Payload.Function.Data, + new_fn_info: InternPool.Key.FuncType, runtime_i: *u32, ) !void { - const is_runtime = comptime_arg.val.tag() == .generic_poison and - comptime_arg.ty.hasRuntimeBits() and + const mod = sema.mod; + const is_runtime = comptime_arg.val.isGenericPoison() and + comptime_arg.ty.hasRuntimeBits(mod) and !(try sema.typeRequiresComptime(comptime_arg.ty)); if (is_runtime) { - const param_ty = new_fn_info.param_types[runtime_i.*]; + const param_ty = new_fn_info.param_types[runtime_i.*].toType(); const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src); try sema.queueFullTypeResolution(param_ty); runtime_args[runtime_i.*] = casted_arg; @@ -7315,10 +7368,16 @@ fn analyzeGenericCallArg( } } -fn analyzeGenericCallArgVal(sema: *Sema, block: *Block, arg_src: LazySrcLoc, uncasted_arg: Air.Inst.Ref) !Value { - const arg_val = try sema.resolveValue(block, arg_src, uncasted_arg, "parameter is comptime"); - try sema.resolveLazyValue(arg_val); - return arg_val; +fn analyzeGenericCallArgVal( + sema: *Sema, + block: *Block, + arg_src: LazySrcLoc, + arg_ty: Type, + uncasted_arg: Air.Inst.Ref, + reason: []const u8, +) !Value { + const casted_arg = try sema.coerce(block, arg_ty, uncasted_arg, arg_src); + return sema.resolveLazyValue(try sema.resolveValue(block, arg_src, casted_arg, reason)); } fn instantiateGenericCall( @@ -7327,7 +7386,7 @@ fn instantiateGenericCall( func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, - func_ty_info: Type.Payload.Function.Data, + generic_func_ty: Type, ensure_result_used: bool, uncasted_args: []const Air.Inst.Ref, call_tag: Air.Inst.Tag, @@ -7338,46 +7397,41 @@ fn instantiateGenericCall( const gpa = sema.gpa; const func_val = try sema.resolveConstValue(block, func_src, func, "generic function being called must be comptime-known"); - const module_fn = switch (func_val.tag()) { - .function => func_val.castTag(.function).?.data, - .decl_ref => mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data, + const module_fn_index = switch (mod.intern_pool.indexToKey(func_val.toIntern())) { + .func => |function| function.index, + .ptr => |ptr| mod.declPtr(ptr.addr.decl).val.getFunctionIndex(mod).unwrap().?, else => unreachable, }; + const module_fn = mod.funcPtr(module_fn_index); // Check the Module's generic function map with an adapted context, so that we // can match against `uncasted_args` rather than doing the work below to create a // generic Scope only to junk it if it matches an existing instantiation. const fn_owner_decl = mod.declPtr(module_fn.owner_decl); - const namespace = fn_owner_decl.src_namespace; + const namespace_index = fn_owner_decl.src_namespace; + const namespace = mod.namespacePtr(namespace_index); const fn_zir = namespace.file_scope.zir; const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); const zir_tags = fn_zir.instructions.items(.tag); - // This hash must match `Module.MonomorphedFuncsContext.hash`. 
- // For parameters explicitly marked comptime and simple parameter type expressions, - // we know whether a parameter is elided from a monomorphed function, and can - // use it in the hash here. However, for parameter type expressions that are not - // explicitly marked comptime and rely on previous parameter comptime values, we - // don't find out until after generating a monomorphed function whether the parameter - // type ended up being a "must-be-comptime-known" type. - var hasher = std.hash.Wyhash.init(0); - std.hash.autoHash(&hasher, module_fn.owner_decl); - - const generic_args = try sema.arena.alloc(GenericCallAdapter.Arg, func_ty_info.param_types.len); - { - var i: usize = 0; + const monomorphed_args = try sema.arena.alloc(InternPool.Index, mod.typeToFunc(generic_func_ty).?.param_types.len); + const callee_index = callee: { + var arg_i: usize = 0; + var monomorphed_arg_i: u32 = 0; + var known_unique = false; for (fn_info.param_body) |inst| { + const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?; var is_comptime = false; var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(i); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(i); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7386,87 +7440,90 @@ fn instantiateGenericCall( else => continue, } - const arg_ty = sema.typeOf(uncasted_args[i]); + defer arg_i += 1; + const param_ty = generic_func_ty_info.param_types[arg_i]; + const is_generic = !is_anytype and param_ty == .generic_poison_type; + + if (known_unique) { + if (is_comptime or is_anytype or is_generic) { + monomorphed_arg_i += 1; + } + continue; + } + + const uncasted_arg = uncasted_args[arg_i]; + const arg_ty = if (is_generic) mod.monomorphed_funcs.getAdapted( + Module.MonomorphedFuncAdaptedKey{ + .func = module_fn_index, + .args = monomorphed_args[0..monomorphed_arg_i], + }, + Module.MonomorphedFuncsAdaptedContext{ .mod = mod }, + ) orelse { + known_unique = true; + monomorphed_arg_i += 1; + continue; + } else if (is_anytype) sema.typeOf(uncasted_arg).toIntern() else param_ty; + const was_comptime = is_comptime; + if (!is_comptime and try sema.typeRequiresComptime(arg_ty.toType())) is_comptime = true; if (is_comptime or is_anytype) { // Tuple default values are a part of the type and need to be // resolved to hash the type. 
- try sema.resolveTupleLazyValues(block, call_src, arg_ty); + try sema.resolveTupleLazyValues(block, call_src, arg_ty.toType()); } if (is_comptime) { - const arg_val = sema.analyzeGenericCallArgVal(block, .unneeded, uncasted_args[i]) catch |err| switch (err) { + const casted_arg = sema.analyzeGenericCallArgVal(block, .unneeded, arg_ty.toType(), uncasted_arg, "") catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const arg_src = Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src); - _ = try sema.analyzeGenericCallArgVal(block, arg_src, uncasted_args[i]); + const decl = mod.declPtr(block.src_decl); + const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); + _ = try sema.analyzeGenericCallArgVal( + block, + arg_src, + arg_ty.toType(), + uncasted_arg, + if (was_comptime) + "parameter is comptime" + else + "argument to parameter with comptime-only type must be comptime-known", + ); unreachable; }, else => |e| return e, }; - arg_val.hashUncoerced(arg_ty, &hasher, mod); - if (is_anytype) { - arg_ty.hashWithHasher(&hasher, mod); - generic_args[i] = .{ - .ty = arg_ty, - .val = arg_val, - .is_anytype = true, - }; - } else { - generic_args[i] = .{ - .ty = arg_ty, - .val = arg_val, - .is_anytype = false, - }; - } - } else if (is_anytype) { - arg_ty.hashWithHasher(&hasher, mod); - generic_args[i] = .{ - .ty = arg_ty, - .val = Value.initTag(.generic_poison), - .is_anytype = true, - }; - } else { - generic_args[i] = .{ - .ty = arg_ty, - .val = Value.initTag(.generic_poison), - .is_anytype = false, - }; + monomorphed_args[monomorphed_arg_i] = casted_arg.toIntern(); + monomorphed_arg_i += 1; + } else if (is_anytype or is_generic) { + monomorphed_args[monomorphed_arg_i] = try mod.intern(.{ .undef = arg_ty }); + monomorphed_arg_i += 1; } - - i += 1; } - } - const precomputed_hash = hasher.final(); + if (!known_unique) { + if (mod.monomorphed_funcs.getAdapted( + Module.MonomorphedFuncAdaptedKey{ + .func = module_fn_index, + .args = monomorphed_args[0..monomorphed_arg_i], + }, + Module.MonomorphedFuncsAdaptedContext{ .mod = mod }, + )) |callee_func| break :callee mod.intern_pool.indexToKey(callee_func).func.index; + } - const adapter: GenericCallAdapter = .{ - .generic_fn = module_fn, - .precomputed_hash = precomputed_hash, - .func_ty_info = func_ty_info, - .args = generic_args, - .module = mod, - }; - const gop = try mod.monomorphed_funcs.getOrPutAdapted(gpa, {}, adapter); - const callee = if (!gop.found_existing) callee: { - const new_module_func = try gpa.create(Module.Fn); + const new_module_func_index = try mod.createFunc(undefined); + const new_module_func = mod.funcPtr(new_module_func_index); - // This ensures that we can operate on the hash map before the Module.Fn - // struct is fully initialized. - new_module_func.hash = precomputed_hash; new_module_func.generic_owner_decl = module_fn.owner_decl.toOptional(); new_module_func.comptime_args = null; - gop.key_ptr.* = new_module_func; try namespace.anon_decls.ensureUnusedCapacity(gpa, 1); // Create a Decl for the new function. 
- const src_decl_index = namespace.getDeclIndex(); + const src_decl_index = namespace.getDeclIndex(mod); const src_decl = mod.declPtr(src_decl_index); - const new_decl_index = try mod.allocateNewDecl(namespace, fn_owner_decl.src_node, src_decl.src_scope); + const new_decl_index = try mod.allocateNewDecl(namespace_index, fn_owner_decl.src_node, src_decl.src_scope); const new_decl = mod.declPtr(new_decl_index); // TODO better names for generic function instantiations - const decl_name = try std.fmt.allocPrintZ(gpa, "{s}__anon_{d}", .{ - fn_owner_decl.name, @enumToInt(new_decl_index), + const decl_name = try mod.intern_pool.getOrPutStringFmt(gpa, "{}__anon_{d}", .{ + fn_owner_decl.name.fmt(&mod.intern_pool), @enumToInt(new_decl_index), }); new_decl.name = decl_name; new_decl.src_line = fn_owner_decl.src_line; @@ -7488,25 +7545,21 @@ fn instantiateGenericCall( assert(new_decl.dependencies.keys().len == 0); try mod.declareDeclDependencyType(new_decl_index, module_fn.owner_decl, .function_body); - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - const new_decl_arena_allocator = new_decl_arena.allocator(); - const new_func = sema.resolveGenericInstantiationType( block, - new_decl_arena_allocator, fn_zir, new_decl, new_decl_index, uncasted_args, - module_fn, - new_module_func, - namespace, - func_ty_info, + monomorphed_arg_i, + module_fn_index, + new_module_func_index, + namespace_index, + generic_func_ty, call_src, bound_arg_src, ) catch |err| switch (err) { error.GenericPoison, error.ComptimeReturn => { - new_decl_arena.deinit(); // Resolving the new function type below will possibly declare more decl dependencies // and so we remove them all here in case of error. for (new_decl.dependencies.keys()) |dep_index| { @@ -7515,16 +7568,10 @@ fn instantiateGenericCall( } assert(namespace.anon_decls.orderedRemove(new_decl_index)); mod.destroyDecl(new_decl_index); - assert(mod.monomorphed_funcs.remove(new_module_func)); - gpa.destroy(new_module_func); + mod.destroyFunc(new_module_func_index); return err; }, else => { - assert(mod.monomorphed_funcs.remove(new_module_func)); - { - errdefer new_decl_arena.deinit(); - try new_decl.finalizeNewArena(&new_decl_arena); - } // TODO look up the compile error that happened here and attach a note to it // pointing here, at the generic instantiation callsite. if (sema.owner_func) |owner_func| { @@ -7535,12 +7582,10 @@ fn instantiateGenericCall( return err; }, }; - errdefer new_decl_arena.deinit(); - try new_decl.finalizeNewArena(&new_decl_arena); break :callee new_func; - } else gop.key_ptr.*; - + }; + const callee = mod.funcPtr(callee_index); callee.branch_quota = @max(callee.branch_quota, sema.branch_quota); const callee_inst = try sema.analyzeDeclVal(block, func_src, callee.owner_decl); @@ -7548,8 +7593,7 @@ fn instantiateGenericCall( // Make a runtime call to the new function, making sure to omit the comptime args. 
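On "omit the comptime args" above: the comptime arguments are baked into the freshly created instantiation `Decl` (named `<owner>__anon_<decl index>` in this hunk), so the emitted call carries only the runtime arguments. A hypothetical user-level example of the split:

const std = @import("std");

fn eql(comptime T: type, a: T, b: T) bool {
    return a == b;
}

pub fn main() void {
    // `T = u32` selects (or creates) the monomorphized instantiation; the
    // emitted call passes only the two runtime arguments `a` and `b`.
    std.debug.print("{}\n", .{eql(u32, 1, 2)});
}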
const comptime_args = callee.comptime_args.?; const func_ty = mod.declPtr(callee.owner_decl).ty; - const new_fn_info = func_ty.fnInfo(); - const runtime_args_len = @intCast(u32, new_fn_info.param_types.len); + const runtime_args_len = @intCast(u32, mod.typeToFunc(func_ty).?.param_types.len); const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len); { var runtime_i: u32 = 0; @@ -7565,18 +7609,18 @@ fn instantiateGenericCall( uncasted_args[total_i], comptime_args[total_i], runtime_args, - new_fn_info, + mod.typeToFunc(func_ty).?, &runtime_i, ) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); _ = try sema.analyzeGenericCallArg( block, - Module.argSrc(call_src.node_offset.x, sema.gpa, decl, total_i, bound_arg_src), + mod.argSrc(call_src.node_offset.x, decl, total_i, bound_arg_src), uncasted_args[total_i], comptime_args[total_i], runtime_args, - new_fn_info, + mod.typeToFunc(func_ty).?, &runtime_i, ); unreachable; @@ -7586,16 +7630,16 @@ fn instantiateGenericCall( total_i += 1; } - try sema.queueFullTypeResolution(new_fn_info.return_type); + try sema.queueFullTypeResolution(mod.typeToFunc(func_ty).?.return_type.toType()); } if (call_dbg_node) |some| try sema.zirDbgStmt(block, some); - if (sema.owner_func != null and new_fn_info.return_type.isError()) { + if (sema.owner_func != null and mod.typeToFunc(func_ty).?.return_type.toType().isError(mod)) { sema.owner_func.?.calls_or_awaits_errorable_fn = true; } - try sema.mod.ensureFuncBodyAnalysisQueued(callee); + try mod.ensureFuncBodyAnalysisQueued(callee_index); try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len + runtime_args_len); @@ -7616,7 +7660,7 @@ fn instantiateGenericCall( if (call_tag == .call_always_tail) { return sema.handleTailCall(block, call_src, func_ty, result); } - if (new_fn_info.return_type.isNoReturn()) { + if (func_ty.fnReturnType(mod).isNoReturn(mod)) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -7626,22 +7670,23 @@ fn instantiateGenericCall( fn resolveGenericInstantiationType( sema: *Sema, block: *Block, - new_decl_arena_allocator: Allocator, fn_zir: Zir, new_decl: *Decl, new_decl_index: Decl.Index, uncasted_args: []const Air.Inst.Ref, - module_fn: *Module.Fn, - new_module_func: *Module.Fn, - namespace: *Namespace, - func_ty_info: Type.Payload.Function.Data, + monomorphed_args_len: u32, + module_fn_index: Module.Fn.Index, + new_module_func: Module.Fn.Index, + namespace: Namespace.Index, + generic_func_ty: Type, call_src: LazySrcLoc, bound_arg_src: ?LazySrcLoc, -) !*Module.Fn { +) !Module.Fn.Index { const mod = sema.mod; const gpa = sema.gpa; const zir_tags = fn_zir.instructions.items(.tag); + const module_fn = mod.funcPtr(module_fn_index); const fn_info = fn_zir.getFnInfo(module_fn.zir_body_inst); // Re-run the block that creates the function, with the comptime parameters @@ -7652,23 +7697,26 @@ fn resolveGenericInstantiationType( .mod = mod, .gpa = gpa, .arena = sema.arena, - .perm_arena = new_decl_arena_allocator, .code = fn_zir, .owner_decl = new_decl, .owner_decl_index = new_decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, - .comptime_args = try new_decl_arena_allocator.alloc(TypedValue, uncasted_args.len), + .owner_func_index = .none, + // TODO: fully migrate functions into InternPool + .comptime_args = try mod.tmp_hack_arena.allocator().alloc(TypedValue, uncasted_args.len), 
.comptime_args_fn_inst = module_fn.zir_body_inst, - .preallocated_new_func = new_module_func, + .preallocated_new_func = new_module_func.toOptional(), .is_generic_instantiation = true, .branch_quota = sema.branch_quota, .branch_count = sema.branch_count, + .comptime_mutable_decls = sema.comptime_mutable_decls, }; defer child_sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, new_decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, new_decl.src_scope); defer wip_captures.deinit(); var child_block: Block = .{ @@ -7690,18 +7738,19 @@ fn resolveGenericInstantiationType( var arg_i: usize = 0; for (fn_info.param_body) |inst| { + const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?; var is_comptime = false; var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { is_anytype = true; - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { is_anytype = true; @@ -7719,8 +7768,8 @@ fn resolveGenericInstantiationType( if (try sema.typeRequiresComptime(arg_ty)) { const arg_val = sema.resolveConstValue(block, .unneeded, arg, "") catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const arg_src = Module.argSrc(call_src.node_offset.x, sema.gpa, decl, arg_i, bound_arg_src); + const decl = mod.declPtr(block.src_decl); + const arg_src = mod.argSrc(call_src.node_offset.x, decl, arg_i, bound_arg_src); _ = try sema.resolveConstValue(block, arg_src, arg, "argument to parameter with comptime-only type must be comptime-known"); unreachable; }, @@ -7752,50 +7801,61 @@ fn resolveGenericInstantiationType( const new_func_inst = try child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst); const new_func_val = child_sema.resolveConstValue(&child_block, .unneeded, new_func_inst, undefined) catch unreachable; - const new_func = new_func_val.castTag(.function).?.data; - errdefer new_func.deinit(gpa); + const new_func = new_func_val.getFunctionIndex(mod).unwrap().?; assert(new_func == new_module_func); + const monomorphed_args_index = @intCast(u32, mod.monomorphed_func_keys.items.len); + const monomorphed_args = try mod.monomorphed_func_keys.addManyAsSlice(gpa, monomorphed_args_len); + var monomorphed_arg_i: u32 = 0; + try mod.monomorphed_funcs.ensureUnusedCapacityContext(gpa, monomorphed_args_len + 1, .{ .mod = mod }); + arg_i = 0; for (fn_info.param_body) |inst| { + const generic_func_ty_info = mod.typeToFunc(generic_func_ty).?; var is_comptime = false; + var is_anytype = false; switch (zir_tags[inst]) { .param => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_comptime => { is_comptime = true; }, .param_anytype => { - is_comptime = func_ty_info.paramIsComptime(arg_i); + is_anytype = true; + is_comptime = generic_func_ty_info.paramIsComptime(@intCast(u5, arg_i)); }, .param_anytype_comptime => { + is_anytype = true; is_comptime = true; }, else => continue, } - // We populate the Type here regardless because it is needed by - // `GenericCallAdapter.eql` as well as function body analysis. - // Whether it is anytype is communicated by `isAnytypeParam`. 
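Both parameter walks in this function treat three cases as part of the instantiation key: explicit `comptime` parameters, `anytype` parameters (whose resolved type joins the key), and parameters whose type `typeRequiresComptime` reports as comptime-only. A hypothetical user-level illustration of those cases:

const std = @import("std");

fn size(comptime T: type) usize {
    return @sizeOf(T);
}

fn first(slice: anytype) @TypeOf(slice[0]) {
    return slice[0];
}

pub fn main() void {
    // `T` is explicitly comptime; passing a runtime value here is an error.
    std.debug.print("{d}\n", .{size(u64)});
    // `slice` is anytype: its *type* is part of the instantiation key, but
    // the value itself stays runtime-known.
    var xs = [_]u8{ 3, 1, 4 };
    std.debug.print("{d}\n", .{first(xs[0..])});
}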
- const arg = child_sema.inst_map.get(inst).?; - const copied_arg_ty = try child_sema.typeOf(arg).copy(new_decl_arena_allocator); + const param_ty = generic_func_ty_info.param_types[arg_i]; + const is_generic = !is_anytype and param_ty == .generic_poison_type; - if (try sema.typeRequiresComptime(copied_arg_ty)) { - is_comptime = true; - } + const arg = child_sema.inst_map.get(inst).?; + const arg_ty = child_sema.typeOf(arg); + + if (is_generic) if (mod.monomorphed_funcs.fetchPutAssumeCapacityContext(.{ + .func = module_fn_index, + .args_index = monomorphed_args_index, + .args_len = monomorphed_arg_i, + }, arg_ty.toIntern(), .{ .mod = mod })) |kv| assert(kv.value == arg_ty.toIntern()); + if (!is_comptime and try sema.typeRequiresComptime(arg_ty)) is_comptime = true; if (is_comptime) { const arg_val = (child_sema.resolveMaybeUndefValAllowVariables(arg) catch unreachable).?; - child_sema.comptime_args[arg_i] = .{ - .ty = copied_arg_ty, - .val = try arg_val.copy(new_decl_arena_allocator), - }; + monomorphed_args[monomorphed_arg_i] = arg_val.toIntern(); + monomorphed_arg_i += 1; + child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, .val = arg_val }; } else { - child_sema.comptime_args[arg_i] = .{ - .ty = copied_arg_ty, - .val = Value.initTag(.generic_poison), - }; + if (is_anytype or is_generic) { + monomorphed_args[monomorphed_arg_i] = try mod.intern(.{ .undef = arg_ty.toIntern() }); + monomorphed_arg_i += 1; + } + child_sema.comptime_args[arg_i] = .{ .ty = arg_ty, .val = Value.generic_poison }; } arg_i += 1; @@ -7804,11 +7864,11 @@ fn resolveGenericInstantiationType( try wip_captures.finalize(); // Populate the Decl ty/val with the function and its type. - new_decl.ty = try child_sema.typeOf(new_func_inst).copy(new_decl_arena_allocator); + new_decl.ty = child_sema.typeOf(new_func_inst); // If the call evaluated to a return type that requires comptime, never mind // our generic instantiation. Instead we need to perform a comptime call. - const new_fn_info = new_decl.ty.fnInfo(); - if (try sema.typeRequiresComptime(new_fn_info.return_type)) { + const new_fn_info = mod.typeToFunc(new_decl.ty).?; + if (try sema.typeRequiresComptime(new_fn_info.return_type.toType())) { return error.ComptimeReturn; } // Similarly, if the call evaluated to a generic type we need to instead @@ -7817,15 +7877,20 @@ fn resolveGenericInstantiationType( return error.GenericPoison; } - new_decl.val = try Value.Tag.function.create(new_decl_arena_allocator, new_func); + new_decl.val = (try mod.intern(.{ .func = .{ + .ty = new_decl.ty.toIntern(), + .index = new_func, + } })).toValue(); new_decl.@"align" = 0; new_decl.has_tv = true; new_decl.owns_tv = true; new_decl.analysis = .complete; - log.debug("generic function '{s}' instantiated with type {}", .{ - new_decl.name, new_decl.ty.fmtDebug(), - }); + mod.monomorphed_funcs.putAssumeCapacityNoClobberContext(.{ + .func = module_fn_index, + .args_index = monomorphed_args_index, + .args_len = monomorphed_arg_i, + }, new_decl.val.toIntern(), .{ .mod = mod }); // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field // will be populated, ensuring it will have `analyzeBody` called with the ZIR @@ -7835,46 +7900,46 @@ fn resolveGenericInstantiationType( } fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - if (!ty.isSimpleTupleOrAnonStruct()) return; - const tuple = ty.tupleFields(); - for (tuple.values, 0..) 
|field_val, i| { - try sema.resolveTupleLazyValues(block, src, tuple.types[i]); - if (field_val.tag() == .unreachable_value) continue; - try sema.resolveLazyValue(field_val); + const mod = sema.mod; + const tuple = switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .anon_struct_type => |tuple| tuple, + else => return, + }; + for (tuple.types, tuple.values) |field_ty, field_val| { + try sema.resolveTupleLazyValues(block, src, field_ty.toType()); + if (field_val == .none) continue; + // TODO: mutate in intern pool + _ = try sema.resolveLazyValue(field_val.toValue()); } } fn emitDbgInline( sema: *Sema, block: *Block, - old_func: *Module.Fn, - new_func: *Module.Fn, + old_func: Module.Fn.Index, + new_func: Module.Fn.Index, new_func_ty: Type, tag: Air.Inst.Tag, ) CompileError!void { - if (sema.mod.comp.bin_file.options.strip) return; + const mod = sema.mod; + if (mod.comp.bin_file.options.strip) return; // Recursive inline call; no dbg_inline needed. if (old_func == new_func) return; - try sema.air_values.append(sema.gpa, try Value.Tag.function.create(sema.arena, new_func)); _ = try block.addInst(.{ .tag = tag, - .data = .{ .ty_pl = .{ + .data = .{ .ty_fn = .{ .ty = try sema.addType(new_func_ty), - .payload = @intCast(u32, sema.air_values.items.len - 1), + .func = new_func, } }, }); } -fn zirIntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - _ = block; - const tracy = trace(@src()); - defer tracy.end(); - +fn zirIntType(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const int_type = sema.code.instructions.items(.data)[inst].int_type; - const ty = try Module.makeIntType(sema.arena, int_type.signedness, int_type.bit_count); - + const ty = try mod.intType(int_type.signedness, int_type.bit_count); return sema.addType(ty); } @@ -7882,43 +7947,46 @@ fn zirOptionalType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node }; const child_type = try sema.resolveType(block, operand_src, inst_data.operand); - if (child_type.zigTypeTag() == .Opaque) { - return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); - } else if (child_type.zigTypeTag() == .Null) { - return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(sema.mod)}); + if (child_type.zigTypeTag(mod) == .Opaque) { + return sema.fail(block, operand_src, "opaque type '{}' cannot be optional", .{child_type.fmt(mod)}); + } else if (child_type.zigTypeTag(mod) == .Null) { + return sema.fail(block, operand_src, "type '{}' cannot be optional", .{child_type.fmt(mod)}); } - const opt_type = try Type.optional(sema.arena, child_type); + const opt_type = try Type.optional(sema.arena, child_type, mod); return sema.addType(opt_type); } fn zirElemTypeIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const bin = sema.code.instructions.items(.data)[inst].bin; const indexable_ty = try sema.resolveType(block, .unneeded, bin.lhs); - assert(indexable_ty.isIndexable()); // validated by a previous instruction - if (indexable_ty.zigTypeTag() == .Struct) { - const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs)); + assert(indexable_ty.isIndexable(mod)); // validated by a previous instruction + if (indexable_ty.zigTypeTag(mod) 
== .Struct) { + const elem_type = indexable_ty.structFieldType(@enumToInt(bin.rhs), mod); return sema.addType(elem_type); } else { - const elem_type = indexable_ty.elemType2(); + const elem_type = indexable_ty.elemType2(mod); return sema.addType(elem_type); } } fn zirVectorType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const len = try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known"); + const len = @intCast(u32, try sema.resolveInt(block, len_src, extra.lhs, Type.u32, "vector length must be comptime-known")); const elem_type = try sema.resolveType(block, elem_type_src, extra.rhs); try sema.checkVectorElemType(block, elem_type_src, elem_type); - const vector_type = try Type.Tag.vector.create(sema.arena, .{ - .len = @intCast(u32, len), - .elem_type = elem_type, + const vector_type = try mod.vectorType(.{ + .len = len, + .child = elem_type.toIntern(), }); return sema.addType(vector_type); } @@ -7960,9 +8028,10 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil } fn validateArrayElemType(sema: *Sema, block: *Block, elem_type: Type, elem_src: LazySrcLoc) !void { - if (elem_type.zigTypeTag() == .Opaque) { - return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(sema.mod)}); - } else if (elem_type.zigTypeTag() == .NoReturn) { + const mod = sema.mod; + if (elem_type.zigTypeTag(mod) == .Opaque) { + return sema.fail(block, elem_src, "array of opaque type '{}' not allowed", .{elem_type.fmt(mod)}); + } else if (elem_type.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, elem_src, "array of 'noreturn' not allowed", .{}); } } @@ -7975,9 +8044,10 @@ fn zirAnyframeType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro if (true) { return sema.failWithUseOfAsync(block, inst_data.src()); } + const mod = sema.mod; const operand_src: LazySrcLoc = .{ .node_offset_anyframe_type = inst_data.src_node }; const return_type = try sema.resolveType(block, operand_src, inst_data.operand); - const anyframe_type = try Type.Tag.anyframe_T.create(sema.arena, return_type); + const anyframe_type = try mod.anyframeType(return_type); return sema.addType(anyframe_type); } @@ -7986,6 +8056,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -7993,50 +8064,48 @@ fn zirErrorUnionType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const error_set = try sema.resolveType(block, lhs_src, extra.lhs); const payload = try sema.resolveType(block, rhs_src, extra.rhs); - if (error_set.zigTypeTag() != .ErrorSet) { + if (error_set.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{ - error_set.fmt(sema.mod), + error_set.fmt(mod), }); } try sema.validateErrorUnionPayloadType(block, payload, rhs_src); - const err_union_ty = try 
Type.errorUnion(sema.arena, error_set, payload, sema.mod); + const err_union_ty = try mod.errorUnionType(error_set, payload); return sema.addType(err_union_ty); } fn validateErrorUnionPayloadType(sema: *Sema, block: *Block, payload_ty: Type, payload_src: LazySrcLoc) !void { - if (payload_ty.zigTypeTag() == .Opaque) { + const mod = sema.mod; + if (payload_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, payload_src, "error union with payload of opaque type '{}' not allowed", .{ - payload_ty.fmt(sema.mod), + payload_ty.fmt(mod), }); - } else if (payload_ty.zigTypeTag() == .ErrorSet) { + } else if (payload_ty.zigTypeTag(mod) == .ErrorSet) { return sema.fail(block, payload_src, "error union with payload of error set type '{}' not allowed", .{ - payload_ty.fmt(sema.mod), + payload_ty.fmt(mod), }); } } fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; - const tracy = trace(@src()); - defer tracy.end(); - + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - - // Create an anonymous error set type with only this error value, and return the value. - const kv = try sema.mod.getErrorValue(inst_data.get(sema.code)); - const result_type = try Type.Tag.error_set_single.create(sema.arena, kv.key); - return sema.addConstant( - result_type, - try Value.Tag.@"error".create(sema.arena, .{ - .name = kv.key, - }), - ); + const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); + _ = try mod.getErrorValue(name); + // Create an error set type with only this error value, and return the value. + const error_set_type = try mod.singleErrorSetType(name); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.toIntern(), + .name = name, + } })).toValue()); } fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; @@ -8044,34 +8113,26 @@ fn zirErrorToInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const operand = try sema.coerce(block, Type.anyerror, uncasted_operand, operand_src); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) { + if (val.isUndef(mod)) { return sema.addConstUndef(Type.err_int); } - switch (val.tag()) { - .@"error" => { - const payload = try sema.arena.create(Value.Payload.U64); - payload.* = .{ - .base = .{ .tag = .int_u64 }, - .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, - }; - return sema.addConstant(Type.err_int, Value.initPayload(&payload.base)); - }, - - // This is not a valid combination with the type `anyerror`. - .the_only_possible_value => unreachable, - - // Assume it's already encoded as an integer. 
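`zirErrorToInt` constant-folds by looking the error name up in `mod.global_error_set`; index 0 is reserved to mean "no error", which is why both conversion directions reject zero. At the language level (builtin names as spelled in this era of Zig):

const std = @import("std");

pub fn main() void {
    const e: anyerror = error.OutOfMemory;
    // Every distinct error name gets a nonzero index in a program-wide table.
    std.debug.print("{s} = {d}\n", .{ @errorName(e), @errorToInt(e) });
}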
- else => return sema.addConstant(Type.err_int, val), - } + const err_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; + return sema.addConstant(Type.err_int, try mod.intValue( + Type.err_int, + try mod.getErrorValue(err_name), + )); } const op_ty = sema.typeOf(uncasted_operand); try sema.resolveInferredErrorSetTy(block, src, op_ty); - if (!op_ty.isAnyError()) { - const names = op_ty.errorSetNames(); + if (!op_ty.isAnyError(mod)) { + const names = op_ty.errorSetNames(mod); switch (names.len) { - 0 => return sema.addConstant(Type.err_int, Value.zero), - 1 => return sema.addIntUnsigned(Type.err_int, sema.mod.global_error_set.get(names[0]).?), + 0 => return sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)), + 1 => { + const int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(names[0]).?); + return sema.addIntUnsigned(Type.err_int, int); + }, else => {}, } } @@ -8084,28 +8145,26 @@ fn zirIntToError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const uncasted_operand = try sema.resolveInst(extra.operand); const operand = try sema.coerce(block, Type.err_int, uncasted_operand, operand_src); - const target = sema.mod.getTarget(); if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| { - const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(target)); - if (int > sema.mod.global_error_set.count() or int == 0) + const int = try sema.usizeCast(block, operand_src, value.toUnsignedInt(mod)); + if (int > mod.global_error_set.count() or int == 0) return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int}); - const payload = try sema.arena.create(Value.Payload.Error); - payload.* = .{ - .base = .{ .tag = .@"error" }, - .data = .{ .name = sema.mod.error_name_list.items[int] }, - }; - return sema.addConstant(Type.anyerror, Value.initPayload(&payload.base)); + return sema.addConstant(Type.anyerror, (try mod.intern(.{ .err = .{ + .ty = .anyerror_type, + .name = mod.global_error_set.keys()[int], + } })).toValue()); } try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety()) { const is_lt_len = try block.addUnOp(.cmp_lt_errors_len, operand); - const zero_val = try sema.addConstant(Type.err_int, Value.zero); + const zero_val = try sema.addConstant(Type.err_int, try mod.intValue(Type.err_int, 0)); const is_non_zero = try block.addBinOp(.cmp_neq, operand, zero_val); const ok = try block.addBinOp(.bit_and, is_lt_len, is_non_zero); try sema.addSafetyCheck(block, ok, .invalid_error_code); @@ -8123,6 +8182,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; @@ -8130,7 +8190,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); - if (sema.typeOf(lhs).zigTypeTag() == .Bool and 
sema.typeOf(rhs).zigTypeTag() == .Bool) { + if (sema.typeOf(lhs).zigTypeTag(mod) == .Bool and sema.typeOf(rhs).zigTypeTag(mod) == .Bool) { const msg = msg: { const msg = try sema.errMsg(block, lhs_src, "expected error set type, found 'bool'", .{}); errdefer msg.destroy(sema.gpa); @@ -8141,32 +8201,32 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr } const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); - if (lhs_ty.zigTypeTag() != .ErrorSet) - return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(sema.mod)}); - if (rhs_ty.zigTypeTag() != .ErrorSet) - return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(sema.mod)}); + if (lhs_ty.zigTypeTag(mod) != .ErrorSet) + return sema.fail(block, lhs_src, "expected error set type, found '{}'", .{lhs_ty.fmt(mod)}); + if (rhs_ty.zigTypeTag(mod) != .ErrorSet) + return sema.fail(block, rhs_src, "expected error set type, found '{}'", .{rhs_ty.fmt(mod)}); // Anything merged with anyerror is anyerror. - if (lhs_ty.tag() == .anyerror or rhs_ty.tag() == .anyerror) { + if (lhs_ty.toIntern() == .anyerror_type or rhs_ty.toIntern() == .anyerror_type) { return Air.Inst.Ref.anyerror_type; } - if (lhs_ty.castTag(.error_set_inferred)) |payload| { - try sema.resolveInferredErrorSet(block, src, payload.data); + if (mod.typeToInferredErrorSetIndex(lhs_ty).unwrap()) |ies_index| { + try sema.resolveInferredErrorSet(block, src, ies_index); // isAnyError might have changed from a false negative to a true positive after resolution. - if (lhs_ty.isAnyError()) { + if (lhs_ty.isAnyError(mod)) { return Air.Inst.Ref.anyerror_type; } } - if (rhs_ty.castTag(.error_set_inferred)) |payload| { - try sema.resolveInferredErrorSet(block, src, payload.data); + if (mod.typeToInferredErrorSetIndex(rhs_ty).unwrap()) |ies_index| { + try sema.resolveInferredErrorSet(block, src, ies_index); // isAnyError might have changed from a false negative to a true positive after resolution. 
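`zirMergeErrorSets` implements the `||` operator: the early return above is the anyerror fast path, and inferred error sets are resolved first so that their membership is known before merging. A small example with hypothetical sets:

const std = @import("std");

const FileError = error{ NotFound, AccessDenied };
const NetError = error{ Timeout, AccessDenied };

pub fn main() void {
    // Duplicate names merge; the result is a fresh superset.
    const Both = FileError || NetError;
    std.debug.print("{s}\n", .{@typeName(Both)});
    // Anything merged with anyerror collapses to anyerror.
    std.debug.print("{s}\n", .{@typeName(anyerror || FileError)});
}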
- if (rhs_ty.isAnyError()) { + if (rhs_ty.isAnyError(mod)) { return Air.Inst.Ref.anyerror_type; } } - const err_set_ty = try lhs_ty.errorSetMerge(sema.arena, rhs_ty); + const err_set_ty = try sema.errorSetMerge(lhs_ty, rhs_ty); return sema.addType(err_set_ty); } @@ -8175,27 +8235,27 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code)); - return sema.addConstant( - Type.initTag(.enum_literal), - try Value.Tag.enum_literal.create(sema.arena, duped_name), - ); + const name = inst_data.get(sema.code); + return sema.addConstant(.{ .ip_index = .enum_literal_type }, (try mod.intern(.{ + .enum_literal = try mod.intern_pool.getOrPutString(sema.gpa, name), + })).toValue()); } fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const arena = sema.arena; + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) { + const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag(mod)) { .Enum => operand, .Union => blk: { const union_ty = try sema.resolveTypeFields(operand_ty); - const tag_ty = union_ty.unionTagType() orelse { + const tag_ty = union_ty.unionTagType(mod) orelse { return sema.fail( block, operand_src, @@ -8207,22 +8267,20 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => { return sema.fail(block, operand_src, "expected enum or tagged union, found '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); }, }; const enum_tag_ty = sema.typeOf(enum_tag); - var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = try enum_tag_ty.intTagType(&int_tag_type_buffer).copy(arena); + const int_tag_ty = enum_tag_ty.intTagType(mod); if (try sema.typeHasOnePossibleValue(enum_tag_ty)) |opv| { - return sema.addConstant(int_tag_ty, opv); + return sema.addConstant(int_tag_ty, try mod.getCoerced(opv, int_tag_ty)); } if (try sema.resolveMaybeUndefVal(enum_tag)) |enum_tag_val| { - var buffer: Value.Payload.U64 = undefined; - const val = enum_tag_val.enumToInt(enum_tag_ty, &buffer); + const val = try enum_tag_val.enumToInt(enum_tag_ty, mod); return sema.addConstant(int_tag_ty, try val.copy(sema.arena)); } @@ -8231,6 +8289,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A } fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -8239,24 +8298,23 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); - if (dest_ty.zigTypeTag() != .Enum) { - return sema.fail(block, dest_ty_src, "expected enum, found '{}'", .{dest_ty.fmt(sema.mod)}); + if (dest_ty.zigTypeTag(mod) != .Enum) { + return sema.fail(block, dest_ty_src, "expected enum, found 
'{}'", .{dest_ty.fmt(mod)}); } _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); if (try sema.resolveMaybeUndefVal(operand)) |int_val| { - if (dest_ty.isNonexhaustiveEnum()) { - var buffer: Type.Payload.Bits = undefined; - const int_tag_ty = dest_ty.intTagType(&buffer); + if (dest_ty.isNonexhaustiveEnum(mod)) { + const int_tag_ty = dest_ty.intTagType(mod); if (try sema.intFitsInType(int_val, int_tag_ty, null)) { - return sema.addConstant(dest_ty, int_val); + return sema.addConstant(dest_ty, try mod.getCoerced(int_val, dest_ty)); } const msg = msg: { const msg = try sema.errMsg( block, src, "int value '{}' out of range of non-exhaustive enum '{}'", - .{ int_val.fmtValue(sema.typeOf(operand), sema.mod), dest_ty.fmt(sema.mod) }, + .{ int_val.fmtValue(sema.typeOf(operand), mod), dest_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -8264,7 +8322,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; return sema.failWithOwnedErrorMsg(msg); } - if (int_val.isUndef()) { + if (int_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, operand_src); } if (!(try sema.enumHasInt(dest_ty, int_val))) { @@ -8273,7 +8331,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A block, src, "enum '{}' has no tag with value '{}'", - .{ dest_ty.fmt(sema.mod), int_val.fmtValue(sema.typeOf(operand), sema.mod) }, + .{ dest_ty.fmt(mod), int_val.fmtValue(sema.typeOf(operand), mod) }, ); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, dest_ty); @@ -8281,7 +8339,7 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }; return sema.failWithOwnedErrorMsg(msg); } - return sema.addConstant(dest_ty, int_val); + return sema.addConstant(dest_ty, try mod.getCoerced(int_val, dest_ty)); } if (try sema.typeHasOnePossibleValue(dest_ty)) |opv| { @@ -8295,8 +8353,8 @@ fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A try sema.requireRuntimeBlock(block, src, operand_src); const result = try block.addTyOp(.intcast, dest_ty, operand); - if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum() and - sema.mod.backendSupportsFeature(.is_named_enum_value)) + if (block.wantSafety() and !dest_ty.isNonexhaustiveEnum(mod) and + mod.backendSupportsFeature(.is_named_enum_value)) { const ok = try block.addUnOp(.is_named_enum_value, result); try sema.addSafetyCheck(block, ok, .invalid_enum_value); @@ -8329,49 +8387,44 @@ fn analyzeOptionalPayloadPtr( safety_check: bool, initializing: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const optional_ptr_ty = sema.typeOf(optional_ptr); - assert(optional_ptr_ty.zigTypeTag() == .Pointer); + assert(optional_ptr_ty.zigTypeTag(mod) == .Pointer); - const opt_type = optional_ptr_ty.elemType(); - if (opt_type.zigTypeTag() != .Optional) { - return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(sema.mod)}); + const opt_type = optional_ptr_ty.childType(mod); + if (opt_type.zigTypeTag(mod) != .Optional) { + return sema.fail(block, src, "expected optional type, found '{}'", .{opt_type.fmt(mod)}); } - const child_type = try opt_type.optionalChildAlloc(sema.arena); - const child_pointer = try Type.ptr(sema.arena, sema.mod, .{ + const child_type = opt_type.optionalChild(mod); + const child_pointer = try Type.ptr(sema.arena, mod, .{ .pointee_type = child_type, - .mutable = !optional_ptr_ty.isConstPtr(), - .@"addrspace" = optional_ptr_ty.ptrAddressSpace(), + .mutable 
= !optional_ptr_ty.isConstPtr(mod), + .@"addrspace" = optional_ptr_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, src, optional_ptr)) |ptr_val| { if (initializing) { - if (!ptr_val.isComptimeMutablePtr()) { + if (!ptr_val.isComptimeMutablePtr(mod)) { // If the pointer resulting from this function was stored at comptime, // the optional non-null bit would be set that way. But in this case, // we need to emit a runtime instruction to do it. _ = try block.addTyOp(.optional_payload_ptr_set, child_pointer, optional_ptr); } - return sema.addConstant( - child_pointer, - try Value.Tag.opt_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(), - }), - ); + return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ + .ty = child_pointer.toIntern(), + .addr = .{ .opt_payload = ptr_val.toIntern() }, + } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, optional_ptr_ty)) |val| { - if (val.isNull()) { + if (val.isNull(mod)) { return sema.fail(block, src, "unable to unwrap null", .{}); } // The same Value represents the pointer to the optional and the payload. - return sema.addConstant( - child_pointer, - try Value.Tag.opt_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = optional_ptr_ty.childType(), - }), - ); + return sema.addConstant(child_pointer, (try mod.intern(.{ .ptr = .{ + .ty = child_pointer.toIntern(), + .addr = .{ .opt_payload = ptr_val.toIntern() }, + } })).toValue()); } } @@ -8397,21 +8450,22 @@ fn zirOptionalPayload( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - const result_ty = switch (operand_ty.zigTypeTag()) { - .Optional => try operand_ty.optionalChildAlloc(sema.arena), + const result_ty = switch (operand_ty.zigTypeTag(mod)) { + .Optional => operand_ty.optionalChild(mod), .Pointer => t: { - if (operand_ty.ptrSize() != .C) { + if (operand_ty.ptrSize(mod) != .C) { return sema.failWithExpectedOptionalType(block, src, operand_ty); } // TODO https://github.com/ziglang/zig/issues/6597 if (true) break :t operand_ty; - const ptr_info = operand_ty.ptrInfo().data; - break :t try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = try ptr_info.pointee_type.copy(sema.arena), + const ptr_info = operand_ty.ptrInfo(mod); + break :t try Type.ptr(sema.arena, mod, .{ + .pointee_type = ptr_info.pointee_type, .@"align" = ptr_info.@"align", .@"addrspace" = ptr_info.@"addrspace", .mutable = ptr_info.mutable, @@ -8424,13 +8478,10 @@ fn zirOptionalPayload( }; if (try sema.resolveDefinedValue(block, src, operand)) |val| { - if (val.isNull()) { - return sema.fail(block, src, "unable to unwrap null", .{}); - } - if (val.castTag(.opt_payload)) |payload| { - return sema.addConstant(result_ty, payload.data); - } - return sema.addConstant(result_ty, val); + return if (val.optionalValue(mod)) |payload| + sema.addConstant(result_ty, payload) + else + sema.fail(block, src, "unable to unwrap null", .{}); } try sema.requireRuntimeBlock(block, src, null); @@ -8450,14 +8501,15 @@ fn zirErrUnionPayload( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_src = src; const err_union_ty = 
sema.typeOf(operand); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } return sema.analyzeErrUnionPayload(block, src, err_union_ty, operand, operand_src, false); @@ -8468,24 +8520,27 @@ fn analyzeErrUnionPayload( block: *Block, src: LazySrcLoc, err_union_ty: Type, - operand: Zir.Inst.Ref, + operand: Air.Inst.Ref, operand_src: LazySrcLoc, safety_check: bool, ) CompileError!Air.Inst.Ref { - const payload_ty = err_union_ty.errorUnionPayload(); + const mod = sema.mod; + const payload_ty = err_union_ty.errorUnionPayload(mod); if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - if (val.getError()) |name| { - return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); + if (val.getErrorName(mod).unwrap()) |name| { + return sema.fail(block, src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)}); } - const data = val.castTag(.eu_payload).?.data; - return sema.addConstant(payload_ty, data); + return sema.addConstant( + payload_ty, + mod.intern_pool.indexToKey(val.toIntern()).error_union.val.payload.toValue(), + ); } try sema.requireRuntimeBlock(block, src, null); // If the error set has no fields then no safety check is needed. if (safety_check and block.wantSafety() and - !err_union_ty.errorUnionSet().errorSetIsEmpty()) + !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { try sema.panicUnwrapError(block, operand, .unwrap_errunion_err, .is_non_err); } @@ -8517,52 +8572,46 @@ fn analyzeErrUnionPayloadPtr( safety_check: bool, initializing: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - assert(operand_ty.zigTypeTag() == .Pointer); + assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) { + if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.elemType().fmt(sema.mod), + operand_ty.childType(mod).fmt(mod), }); } - const err_union_ty = operand_ty.elemType(); - const payload_ty = err_union_ty.errorUnionPayload(); - const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{ + const err_union_ty = operand_ty.childType(mod); + const payload_ty = err_union_ty.errorUnionPayload(mod); + const operand_pointer_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = payload_ty, - .mutable = !operand_ty.isConstPtr(), - .@"addrspace" = operand_ty.ptrAddressSpace(), + .mutable = !operand_ty.isConstPtr(mod), + .@"addrspace" = operand_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, src, operand)) |ptr_val| { if (initializing) { - if (!ptr_val.isComptimeMutablePtr()) { + if (!ptr_val.isComptimeMutablePtr(mod)) { // If the pointer resulting from this function was stored at comptime, // the error union error code would be set that way. But in this case, // we need to emit a runtime instruction to do it. 
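These payload and code analyses back the language's unwrapping forms: a comptime-known error either folds to its payload or fails with "caught unexpected error", and the runtime safety check is skipped entirely when the error set is empty. The user-level shape, with a hypothetical function:

const std = @import("std");

fn parseDigit(c: u8) error{Invalid}!u4 {
    if (c < '0' or c > '9') return error.Invalid;
    return @intCast(u4, c - '0');
}

pub fn main() void {
    // `if ... else |err|` reaches analyzeErrUnionPayload and
    // analyzeErrUnionCode respectively.
    if (parseDigit('7')) |d| {
        std.debug.print("digit {d}\n", .{d});
    } else |err| {
        std.debug.print("failed: {s}\n", .{@errorName(err)});
    }
}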
try sema.requireRuntimeBlock(block, src, null); _ = try block.addTyOp(.errunion_payload_ptr_set, operand_pointer_ty, operand); } - return sema.addConstant( - operand_pointer_ty, - try Value.Tag.eu_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = operand_ty.elemType(), - }), - ); + return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ + .ty = operand_pointer_ty.toIntern(), + .addr = .{ .eu_payload = ptr_val.toIntern() }, + } })).toValue()); } if (try sema.pointerDeref(block, src, ptr_val, operand_ty)) |val| { - if (val.getError()) |name| { - return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); + if (val.getErrorName(mod).unwrap()) |name| { + return sema.fail(block, src, "caught unexpected error '{}'", .{name.fmt(&mod.intern_pool)}); } - - return sema.addConstant( - operand_pointer_ty, - try Value.Tag.eu_payload_ptr.create(sema.arena, .{ - .container_ptr = ptr_val, - .container_ty = operand_ty.elemType(), - }), - ); + return sema.addConstant(operand_pointer_ty, (try mod.intern(.{ .ptr = .{ + .ty = operand_pointer_ty.toIntern(), + .addr = .{ .eu_payload = ptr_val.toIntern() }, + } })).toValue()); } } @@ -8570,7 +8619,7 @@ fn analyzeErrUnionPayloadPtr( // If the error set has no fields then no safety check is needed. if (safety_check and block.wantSafety() and - !err_union_ty.errorUnionSet().errorSetIsEmpty()) + !err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { try sema.panicUnwrapError(block, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr); } @@ -8594,18 +8643,21 @@ fn zirErrUnionCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro } fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - if (operand_ty.zigTypeTag() != .ErrorUnion) { + if (operand_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); } - const result_ty = operand_ty.errorUnionSet(); + const result_ty = operand_ty.errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - assert(val.getError() != null); - return sema.addConstant(result_ty, val); + return sema.addConstant(result_ty, (try mod.intern(.{ .err = .{ + .ty = result_ty.toIntern(), + .name = mod.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -8617,23 +8669,24 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - assert(operand_ty.zigTypeTag() == .Pointer); + assert(operand_ty.zigTypeTag(mod) == .Pointer); - if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) { + if (operand_ty.childType(mod).zigTypeTag(mod) != .ErrorUnion) { return sema.fail(block, src, "expected error union type, found '{}'", .{ - operand_ty.elemType().fmt(sema.mod), + operand_ty.childType(mod).fmt(mod), }); } - const result_ty = operand_ty.elemType().errorUnionSet(); + const result_ty = operand_ty.childType(mod).errorUnionSet(mod); if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try sema.pointerDeref(block, src, pointer_val, 
operand_ty)) |val| { - assert(val.getError() != null); + assert(val.getErrorName(mod) != .none); return sema.addConstant(result_ty, val); } } @@ -8667,7 +8720,7 @@ fn zirFunc( break :blk ret_ty; } else |err| switch (err) { error.GenericPoison => { - break :blk Type.initTag(.generic_poison); + break :blk Type.generic_poison; }, else => |e| return e, } @@ -8677,8 +8730,7 @@ fn zirFunc( extra_index += ret_ty_body.len; const ret_ty_val = try sema.resolveGenericBody(block, ret_ty_src, ret_ty_body, inst, Type.type, "return type must be comptime-known"); - var buffer: Value.ToTypeBuffer = undefined; - break :blk try ret_ty_val.toType(&buffer).copy(sema.arena); + break :blk ret_ty_val.toType(); }, }; @@ -8745,10 +8797,10 @@ fn resolveGenericBody( }; switch (err) { error.GenericPoison => { - if (dest_ty.tag() == .type) { - return Value.initTag(.generic_poison_type); + if (dest_ty.toIntern() == .type_type) { + return Value.generic_poison_type; } else { - return Value.initTag(.generic_poison); + return Value.generic_poison; } }, else => |e| return e, @@ -8822,7 +8874,7 @@ fn handleExternLibName( const FuncLinkSection = union(enum) { generic, default, - explicit: []const u8, + explicit: InternPool.NullTerminatedString, }; fn funcCommon( @@ -8849,11 +8901,13 @@ fn funcCommon( noalias_bits: u32, is_noinline: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const cc_src: LazySrcLoc = .{ .node_offset_fn_type_cc = src_node_offset }; const func_src = LazySrcLoc.nodeOffset(src_node_offset); - var is_generic = bare_return_type.tag() == .generic_poison or + var is_generic = bare_return_type.isGenericPoison() or alignment == null or address_space == null or section == .generic or @@ -8869,70 +8923,42 @@ fn funcCommon( } var destroy_fn_on_error = false; - const new_func: *Module.Fn = new_func: { + const new_func_index = new_func: { if (!has_body) break :new_func undefined; if (sema.comptime_args_fn_inst == func_inst) { - const new_func = sema.preallocated_new_func.?; - sema.preallocated_new_func = null; // take ownership - break :new_func new_func; + const new_func_index = sema.preallocated_new_func.unwrap().?; + sema.preallocated_new_func = .none; // take ownership + break :new_func new_func_index; } destroy_fn_on_error = true; - const new_func = try sema.gpa.create(Module.Fn); + var new_func: Module.Fn = undefined; // Set this here so that the inferred return type can be printed correctly if it appears in an error. new_func.owner_decl = sema.owner_decl_index; - break :new_func new_func; + const new_func_index = try mod.createFunc(new_func); + break :new_func new_func_index; }; - errdefer if (destroy_fn_on_error) sema.gpa.destroy(new_func); + errdefer if (destroy_fn_on_error) mod.destroyFunc(new_func_index); - var maybe_inferred_error_set_node: ?*Module.Fn.InferredErrorSetListNode = null; - errdefer if (maybe_inferred_error_set_node) |node| sema.gpa.destroy(node); - // Note: no need to errdefer since this will still be in its default state at the end of the function. - - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const fn_ty: Type = fn_ty: { - // Hot path for some common function types. - // TODO can we eliminate some of these Type tag values? seems unnecessarily complicated. - if (!is_generic and block.params.items.len == 0 and !var_args and !inferred_error_set and - alignment.? == 0 and - address_space.? 
== target_util.defaultAddressSpace(target, .function) and - section == .default and - !is_noinline) - { - if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Unspecified) { - break :fn_ty Type.initTag(.fn_noreturn_no_args); - } - - if (bare_return_type.zigTypeTag() == .Void and cc.? == .Unspecified) { - break :fn_ty Type.initTag(.fn_void_no_args); - } - - if (bare_return_type.zigTypeTag() == .NoReturn and cc.? == .Naked) { - break :fn_ty Type.initTag(.fn_naked_noreturn_no_args); - } - - if (bare_return_type.zigTypeTag() == .Void and cc.? == .C) { - break :fn_ty Type.initTag(.fn_ccc_void_no_args); - } - } - // In the case of generic calling convention, or generic alignment, we use // default values which are only meaningful for the generic function, *not* // the instantiation, which can depend on comptime parameters. // Related proposal: https://github.com/ziglang/zig/issues/11834 const cc_resolved = cc orelse .Unspecified; - const param_types = try sema.arena.alloc(Type, block.params.items.len); - const comptime_params = try sema.arena.alloc(bool, block.params.items.len); - for (block.params.items, 0..) |param, i| { + const param_types = try sema.arena.alloc(InternPool.Index, block.params.items.len); + var comptime_bits: u32 = 0; + for (param_types, block.params.items, 0..) |*dest_param_ty, param, i| { const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; break :blk @truncate(u1, noalias_bits >> index) != 0; }; - param_types[i] = param.ty; + dest_param_ty.* = param.ty.toIntern(); sema.analyzeParameter( block, .unneeded, param, - comptime_params, + &comptime_bits, i, &is_generic, cc_resolved, @@ -8940,12 +8966,12 @@ fn funcCommon( is_noalias, ) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); + const decl = mod.declPtr(block.src_decl); try sema.analyzeParameter( block, - Module.paramSrc(src_node_offset, sema.gpa, decl, i), + Module.paramSrc(src_node_offset, mod, decl, i), param, - comptime_params, + &comptime_bits, i, &is_generic, cc_resolved, @@ -8961,7 +8987,7 @@ fn funcCommon( var ret_ty_requires_comptime = false; const ret_poison = if (sema.typeRequiresComptime(bare_return_type)) |ret_comptime| rp: { ret_ty_requires_comptime = ret_comptime; - break :rp bare_return_type.tag() == .generic_poison; + break :rp bare_return_type.isGenericPoison(); } else |err| switch (err) { error.GenericPoison => rp: { is_generic = true; @@ -8970,43 +8996,41 @@ fn funcCommon( else => |e| return e, }; - const return_type = if (!inferred_error_set or ret_poison) + const return_type: Type = if (!inferred_error_set or ret_poison) bare_return_type else blk: { try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src); - const node = try sema.gpa.create(Module.Fn.InferredErrorSetListNode); - node.data = .{ .func = new_func }; - maybe_inferred_error_set_node = node; - - const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, &node.data); - break :blk try Type.Tag.error_union.create(sema.arena, .{ - .error_set = error_set_ty, - .payload = bare_return_type, + const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{ + .func = new_func_index, }); + const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index }); + break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type); }; - if (!return_type.isValidReturnType()) { - const opaque_str = if (return_type.zigTypeTag() == .Opaque) "opaque " else ""; + if (!return_type.isValidReturnType(mod)) { + 
const opaque_str = if (return_type.zigTypeTag(mod) == .Opaque) "opaque " else ""; const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "{s}return type '{}' not allowed", .{ - opaque_str, return_type.fmt(sema.mod), + opaque_str, return_type.fmt(mod), }); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); try sema.addDeclaredHereNote(msg, return_type); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - if (!ret_poison and !Type.fnCallingConventionAllowsZigTypes(target, cc_resolved) and !try sema.validateExternType(return_type, .ret_ty)) { + if (!ret_poison and !target_util.fnCallConvAllowsZigTypes(target, cc_resolved) and + !try sema.validateExternType(return_type, .ret_ty)) + { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "return type '{}' not allowed in function with calling convention '{s}'", .{ - return_type.fmt(sema.mod), @tagName(cc_resolved), + return_type.fmt(mod), @tagName(cc_resolved), }); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl), return_type, .ret_ty); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsNotExtern(msg, ret_ty_src.toSrcLoc(src_decl, mod), return_type, .ret_ty); try sema.addDeclaredHereNote(msg, return_type); break :msg msg; @@ -9024,9 +9048,9 @@ fn funcCommon( block, ret_ty_src, "function with comptime-only return type '{}' requires all parameters to be comptime", - .{return_type.fmt(sema.mod)}, + .{return_type.fmt(mod)}, ); - try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl), return_type); + try sema.explainWhyTypeIsComptime(msg, ret_ty_src.toSrcLoc(sema.owner_decl, mod), return_type); const tags = sema.code.instructions.items(.tag); const data = sema.code.instructions.items(.data); @@ -9049,7 +9073,7 @@ fn funcCommon( return sema.failWithOwnedErrorMsg(msg); } - const arch = sema.mod.getTarget().cpu.arch; + const arch = mod.getTarget().cpu.arch; if (switch (cc_resolved) { .Unspecified, .C, .Naked, .Async, .Inline => null, .Interrupt => switch (arch) { @@ -9092,8 +9116,7 @@ fn funcCommon( return sema.fail(block, cc_src, "'noinline' function cannot have callconv 'Inline'", .{}); } if (is_generic and sema.no_partial_func_ty) return error.GenericPoison; - for (comptime_params) |ct| is_generic = is_generic or ct; - is_generic = is_generic or ret_ty_requires_comptime; + is_generic = is_generic or comptime_bits != 0 or ret_ty_requires_comptime; if (!is_generic and sema.wantErrorReturnTracing(return_type)) { // Make sure that StackTrace's fields are resolved so that the backend can @@ -9102,68 +9125,58 @@ fn funcCommon( _ = try sema.resolveTypeFields(unresolved_stack_trace_ty); } - break :fn_ty try Type.Tag.function.create(sema.arena, .{ + break :fn_ty try mod.funcType(.{ .param_types = param_types, - .comptime_params = comptime_params.ptr, - .return_type = return_type, + .noalias_bits = noalias_bits, + .comptime_bits = comptime_bits, + .return_type = return_type.toIntern(), .cc = cc_resolved, .cc_is_generic = cc == null, - .alignment = alignment orelse 0, + .alignment = if (alignment) |a| InternPool.Alignment.fromByteUnits(a) else .none, .align_is_generic = alignment == null, .section_is_generic = section == .generic, .addrspace_is_generic = address_space == null, .is_var_args = var_args, .is_generic = is_generic, .is_noinline = is_noinline, - .noalias_bits = noalias_bits, }); }; sema.owner_decl.@"linksection" = switch 
(section) { - .generic => undefined, - .default => null, - .explicit => |section_name| try sema.perm_arena.dupeZ(u8, section_name), + .generic => .none, + .default => .none, + .explicit => |section_name| section_name.toOptional(), }; sema.owner_decl.@"align" = alignment orelse 0; sema.owner_decl.@"addrspace" = address_space orelse .generic; if (is_extern) { - const new_extern_fn = try sema.gpa.create(Module.ExternFn); - errdefer sema.gpa.destroy(new_extern_fn); - - new_extern_fn.* = Module.ExternFn{ - .owner_decl = sema.owner_decl_index, - .lib_name = null, - }; - - if (opt_lib_name) |lib_name| { - new_extern_fn.lib_name = try sema.handleExternLibName(block, .{ - .node_offset_lib_name = src_node_offset, - }, lib_name); - } - - const extern_fn_payload = try sema.arena.create(Value.Payload.ExternFn); - extern_fn_payload.* = .{ - .base = .{ .tag = .extern_fn }, - .data = new_extern_fn, - }; - return sema.addConstant(fn_ty, Value.initPayload(&extern_fn_payload.base)); + return sema.addConstant(fn_ty, (try mod.intern(.{ .extern_func = .{ + .ty = fn_ty.toIntern(), + .decl = sema.owner_decl_index, + .lib_name = if (opt_lib_name) |lib_name| (try mod.intern_pool.getOrPutString( + gpa, + try sema.handleExternLibName(block, .{ + .node_offset_lib_name = src_node_offset, + }, lib_name), + )).toOptional() else .none, + } })).toValue()); } if (!has_body) { return sema.addType(fn_ty); } - const is_inline = fn_ty.fnCallingConvention() == .Inline; + const is_inline = fn_ty.fnCallingConvention(mod) == .Inline; const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .none; const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == func_inst) blk: { break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr; } else null; + const new_func = mod.funcPtr(new_func_index); const hash = new_func.hash; const generic_owner_decl = if (comptime_args == null) .none else new_func.generic_owner_decl; - const fn_payload = try sema.arena.create(Value.Payload.Function); new_func.* = .{ .state = anal_state, .zir_body_inst = func_inst, @@ -9178,15 +9191,10 @@ fn funcCommon( .branch_quota = default_branch_quota, .is_noinline = is_noinline, }; - if (maybe_inferred_error_set_node) |node| { - new_func.inferred_error_sets.prepend(node); - } - maybe_inferred_error_set_node = null; - fn_payload.* = .{ - .base = .{ .tag = .function }, - .data = new_func, - }; - return sema.addConstant(fn_ty, Value.initPayload(&fn_payload.base)); + return sema.addConstant(fn_ty, (try mod.intern(.{ .func = .{ + .ty = fn_ty.toIntern(), + .index = new_func_index, + } })).toValue()); } fn analyzeParameter( @@ -9194,29 +9202,32 @@ fn analyzeParameter( block: *Block, param_src: LazySrcLoc, param: Block.Param, - comptime_params: []bool, + comptime_bits: *u32, i: usize, is_generic: *bool, cc: std.builtin.CallingConvention, has_body: bool, is_noalias: bool, ) !void { + const mod = sema.mod; const requires_comptime = try sema.typeRequiresComptime(param.ty); - comptime_params[i] = param.is_comptime or requires_comptime; - const this_generic = param.ty.tag() == .generic_poison; + if (param.is_comptime or requires_comptime) { + comptime_bits.* |= @as(u32, 1) << @intCast(u5, i); // TODO: handle cast error + } + const this_generic = param.ty.isGenericPoison(); is_generic.* = is_generic.* or this_generic; - const target = sema.mod.getTarget(); - if (param.is_comptime and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { + const target = mod.getTarget(); + if (param.is_comptime and 
!target_util.fnCallConvAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "comptime parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } - if (this_generic and !sema.no_partial_func_ty and !Type.fnCallingConventionAllowsZigTypes(target, cc)) { + if (this_generic and !sema.no_partial_func_ty and !target_util.fnCallConvAllowsZigTypes(target, cc)) { return sema.fail(block, param_src, "generic parameters not allowed in function with calling convention '{s}'", .{@tagName(cc)}); } - if (!param.ty.isValidParamType()) { - const opaque_str = if (param.ty.zigTypeTag() == .Opaque) "opaque " else ""; + if (!param.ty.isValidParamType(mod)) { + const opaque_str = if (param.ty.zigTypeTag(mod) == .Opaque) "opaque " else ""; const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of {s}type '{}' not allowed", .{ - opaque_str, param.ty.fmt(sema.mod), + opaque_str, param.ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); @@ -9225,15 +9236,15 @@ fn analyzeParameter( }; return sema.failWithOwnedErrorMsg(msg); } - if (!this_generic and !Type.fnCallingConventionAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) { + if (!this_generic and !target_util.fnCallConvAllowsZigTypes(target, cc) and !try sema.validateExternType(param.ty, .param_ty)) { const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of type '{}' not allowed in function with calling convention '{s}'", .{ - param.ty.fmt(sema.mod), @tagName(cc), + param.ty.fmt(mod), @tagName(cc), }); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl), param.ty, .param_ty); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsNotExtern(msg, param_src.toSrcLoc(src_decl, mod), param.ty, .param_ty); try sema.addDeclaredHereNote(msg, param.ty); break :msg msg; @@ -9243,12 +9254,12 @@ fn analyzeParameter( if (!sema.is_generic_instantiation and requires_comptime and !param.is_comptime and has_body) { const msg = msg: { const msg = try sema.errMsg(block, param_src, "parameter of type '{}' must be declared comptime", .{ - param.ty.fmt(sema.mod), + param.ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl), param.ty); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsComptime(msg, param_src.toSrcLoc(src_decl, mod), param.ty); try sema.addDeclaredHereNote(msg, param.ty); break :msg msg; @@ -9256,7 +9267,7 @@ fn analyzeParameter( return sema.failWithOwnedErrorMsg(msg); } if (!sema.is_generic_instantiation and !this_generic and is_noalias and - !(param.ty.zigTypeTag() == .Pointer or param.ty.isPtrLikeOptional())) + !(param.ty.zigTypeTag(mod) == .Pointer or param.ty.isPtrLikeOptional(mod))) { return sema.fail(block, param_src, "non-pointer parameter declared noalias", .{}); } @@ -9283,7 +9294,7 @@ fn zirParam( const prev_preallocated_new_func = sema.preallocated_new_func; const prev_no_partial_func_type = sema.no_partial_func_ty; block.params = .{}; - sema.preallocated_new_func = null; + sema.preallocated_new_func = .none; sema.no_partial_func_ty = true; defer { block.params.deinit(sema.gpa); @@ -9309,7 +9320,7 @@ fn zirParam( // We result the param instruction with a poison value and // insert an anytype parameter. 
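
Note on the `comptime_bits` change above: the old `comptime_params: []bool` arena allocation is replaced by a single `u32` bitmask, so the per-function "is any parameter comptime?" check collapses to `comptime_bits != 0`. A standalone sketch of the representation (helper names are illustrative, not the compiler's API):

    const std = @import("std");

    // One u32 bitmask replaces a heap-allocated []bool of per-parameter
    // comptime flags. As in the patch, the u5 cast asserts i < 32; the
    // patch leaves widening this as a TODO.
    fn setComptimeBit(bits: *u32, i: usize) void {
        bits.* |= @as(u32, 1) << @intCast(u5, i);
    }

    fn isComptimeParam(bits: u32, i: usize) bool {
        return ((bits >> @intCast(u5, i)) & 1) != 0;
    }

    test "comptime_bits bitmask" {
        var bits: u32 = 0;
        setComptimeBit(&bits, 0);
        setComptimeBit(&bits, 3);
        try std.testing.expect(isComptimeParam(bits, 0));
        try std.testing.expect(!isComptimeParam(bits, 1));
        try std.testing.expect(bits != 0); // whole-function genericity test
    }
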
try block.params.append(sema.gpa, .{ - .ty = Type.initTag(.generic_poison), + .ty = Type.generic_poison, .is_comptime = comptime_syntax, .name = param_name, }); @@ -9330,7 +9341,7 @@ fn zirParam( // We result the param instruction with a poison value and // insert an anytype parameter. try block.params.append(sema.gpa, .{ - .ty = Type.initTag(.generic_poison), + .ty = Type.generic_poison, .is_comptime = comptime_syntax, .name = param_name, }); @@ -9340,7 +9351,7 @@ fn zirParam( else => |e| return e, } or comptime_syntax; if (sema.inst_map.get(inst)) |arg| { - if (is_comptime and sema.preallocated_new_func != null) { + if (is_comptime and sema.preallocated_new_func != .none) { // We have a comptime value for this parameter so it should be elided from the // function type of the function instruction in this block. const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) { @@ -9363,7 +9374,7 @@ fn zirParam( assert(sema.inst_map.remove(inst)); } - if (sema.preallocated_new_func != null) { + if (sema.preallocated_new_func != .none) { if (try sema.typeHasOnePossibleValue(param_ty)) |opv| { // In this case we are instantiating a generic function call with a non-comptime // non-anytype parameter that ended up being a one-possible-type. @@ -9383,7 +9394,7 @@ fn zirParam( if (is_comptime) { // If this is a comptime parameter we can add a constant generic_poison // since this is also a generic parameter. - const result = try sema.addConstant(param_ty, Value.initTag(.generic_poison)); + const result = try sema.addConstant(Type.generic_poison, Value.generic_poison); sema.inst_map.putAssumeCapacityNoClobber(inst, result); } else { // Otherwise we need a dummy runtime instruction. @@ -9428,7 +9439,7 @@ fn zirParamAnytype( // We are evaluating a generic function without any comptime args provided. 
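
The `Type.initTag(.generic_poison)` to `Type.generic_poison` and `preallocated_new_func = null` to `= .none` rewrites above are instances of the same sentinel-index pattern: well-known types and values occupy fixed interned indices, so poison and absence checks become integer comparisons instead of optional unwraps or per-use tag allocations. A minimal sketch of the idea (the layout is illustrative, not the real InternPool):

    const std = @import("std");

    // Well-known values get reserved enum tags; everything else lives in
    // the non-exhaustive `_` space. Checks are plain integer compares.
    const Index = enum(u32) {
        none,
        generic_poison,
        _,

        fn isGenericPoison(i: Index) bool {
            return i == .generic_poison;
        }
    };

    test "sentinel index compares" {
        var preallocated: Index = .none;
        try std.testing.expect(preallocated == .none); // was `== null`
        preallocated = .generic_poison;
        try std.testing.expect(preallocated.isGenericPoison());
    }
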
try block.params.append(sema.gpa, .{ - .ty = Type.initTag(.generic_poison), + .ty = Type.generic_poison, .is_comptime = comptime_syntax, .name = param_name, }); @@ -9472,13 +9483,14 @@ fn analyzeAs( zir_operand: Zir.Inst.Ref, no_cast_to_comptime_int: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand = try sema.resolveInst(zir_operand); - if (zir_dest_type == .var_args_param) return operand; + if (zir_dest_type == .var_args_param_type) return operand; const dest_ty = sema.resolveType(block, src, zir_dest_type) catch |err| switch (err) { error.GenericPoison => return operand, else => |e| return e, }; - if (dest_ty.zigTypeTag() == .NoReturn) { + if (dest_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, src, "cannot cast to noreturn", .{}); } const is_ret = if (Zir.refToIndex(zir_dest_type)) |ptr_index| @@ -9495,15 +9507,19 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ptr = try sema.resolveInst(inst_data.operand); const ptr_ty = sema.typeOf(ptr); - if (!ptr_ty.isPtrAtRuntime()) { - return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}); + if (!ptr_ty.isPtrAtRuntime(mod)) { + return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(mod)}); } if (try sema.resolveMaybeUndefValIntable(ptr)) |ptr_val| { - return sema.addConstant(Type.usize, ptr_val); + return sema.addConstant( + Type.usize, + try mod.intValue(Type.usize, (try ptr_val.getUnsignedIntAdvanced(mod, sema)).?), + ); } try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src); return block.addUnOp(.ptrtoint, ptr); @@ -9513,11 +9529,12 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(extra.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.field_name_start)); const object = try sema.resolveInst(extra.lhs); return sema.fieldVal(block, src, object, field_name, field_name_src); } @@ -9526,11 +9543,12 @@ fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index, initializing: b const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(extra.field_name_start); + const field_name = try mod.intern_pool.getOrPutString(sema.gpa, sema.code.nullTerminatedString(extra.field_name_start)); const object_ptr = try sema.resolveInst(extra.lhs); return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, initializing); } @@ -9544,7 +9562,7 @@ fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const field_name_src: LazySrcLoc = .{ 
.node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object = try sema.resolveInst(extra.lhs); - const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, "field name must be comptime-known"); return sema.fieldVal(block, src, object, field_name, field_name_src); } @@ -9557,7 +9575,7 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; const object_ptr = try sema.resolveInst(extra.lhs); - const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, "field name must be comptime-known"); return sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, false); } @@ -9586,31 +9604,31 @@ fn intCast( operand_src: LazySrcLoc, runtime_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); const dest_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, dest_ty, dest_ty_src); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); if (try sema.isComptimeKnown(operand)) { return sema.coerce(block, dest_ty, operand, operand_src); - } else if (dest_scalar_ty.zigTypeTag() == .ComptimeInt) { + } else if (dest_scalar_ty.zigTypeTag(mod) == .ComptimeInt) { return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_int'", .{}); } try sema.checkVectorizableBinaryOperands(block, operand_src, dest_ty, operand_ty, dest_ty_src, operand_src); - const is_vector = dest_ty.zigTypeTag() == .Vector; + const is_vector = dest_ty.zigTypeTag(mod) == .Vector; if ((try sema.typeHasOnePossibleValue(dest_ty))) |opv| { // requirement: intCast(u0, input) iff input == 0 if (runtime_safety and block.wantSafety()) { try sema.requireRuntimeBlock(block, src, operand_src); - const target = sema.mod.getTarget(); - const wanted_info = dest_scalar_ty.intInfo(target); + const wanted_info = dest_scalar_ty.intInfo(mod); const wanted_bits = wanted_info.bits; if (wanted_bits == 0) { const ok = if (is_vector) ok: { - const zeros = try Value.Tag.repeated.create(sema.arena, Value.zero); - const zero_inst = try sema.addConstant(sema.typeOf(operand), zeros); + const zeros = try sema.splat(operand_ty, try mod.intValue(operand_scalar_ty, 0)); + const zero_inst = try sema.addConstant(operand_ty, zeros); const is_in_range = try block.addCmpVector(operand, zero_inst, .eq); const all_in_range = try block.addInst(.{ .tag = .reduce, @@ -9618,7 +9636,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(sema.typeOf(operand), Value.zero); + const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_lte, operand, zero_inst); break :ok is_in_range; }; @@ -9631,9 +9649,8 @@ fn intCast( try sema.requireRuntimeBlock(block, src, operand_src); if (runtime_safety and block.wantSafety()) { - const target = sema.mod.getTarget(); - const actual_info = operand_scalar_ty.intInfo(target); - const wanted_info = 
dest_scalar_ty.intInfo(target); + const actual_info = operand_scalar_ty.intInfo(mod); + const wanted_info = dest_scalar_ty.intInfo(mod); const actual_bits = actual_info.bits; const wanted_bits = wanted_info.bits; const actual_value_bits = actual_bits - @boolToInt(actual_info.signedness == .signed); @@ -9642,26 +9659,24 @@ fn intCast( // range shrinkage // requirement: int value fits into target type if (wanted_value_bits < actual_value_bits) { - const dest_max_val_scalar = try dest_scalar_ty.maxInt(sema.arena, target); - const dest_max_val = if (is_vector) - try Value.Tag.repeated.create(sema.arena, dest_max_val_scalar) - else - dest_max_val_scalar; + const dest_max_val_scalar = try dest_scalar_ty.maxIntScalar(mod, operand_scalar_ty); + const dest_max_val = try sema.splat(operand_ty, dest_max_val_scalar); const dest_max = try sema.addConstant(operand_ty, dest_max_val); const diff = try block.addBinOp(.subwrap, dest_max, operand); if (actual_info.signedness == .signed) { // Reinterpret the sign-bit as part of the value. This will make // negative differences (`operand` > `dest_max`) appear too big. - const unsigned_operand_ty = try Type.Tag.int_unsigned.create(sema.arena, actual_bits); + const unsigned_operand_ty = try mod.intType(.unsigned, actual_bits); const diff_unsigned = try block.addBitCast(unsigned_operand_ty, diff); // If the destination type is signed, then we need to double its // range to account for negative values. const dest_range_val = if (wanted_info.signedness == .signed) range_val: { - const range_minus_one = try dest_max_val.shl(Value.one, unsigned_operand_ty, sema.arena, sema.mod); - break :range_val try sema.intAdd(range_minus_one, Value.one, unsigned_operand_ty); - } else dest_max_val; + const one = try mod.intValue(unsigned_operand_ty, 1); + const range_minus_one = try dest_max_val.shl(one, unsigned_operand_ty, sema.arena, mod); + break :range_val try sema.intAdd(range_minus_one, one, unsigned_operand_ty, undefined); + } else try mod.getCoerced(dest_max_val, unsigned_operand_ty); const dest_range = try sema.addConstant(unsigned_operand_ty, dest_range_val); const ok = if (is_vector) ok: { @@ -9701,7 +9716,8 @@ fn intCast( // no shrinkage, yes sign loss // requirement: signed to unsigned >= 0 const ok = if (is_vector) ok: { - const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero); + const scalar_zero = try mod.intValue(operand_scalar_ty, 0); + const zero_val = try sema.splat(operand_ty, scalar_zero); const zero_inst = try sema.addConstant(operand_ty, zero_val); const is_in_range = try block.addCmpVector(operand, zero_inst, .gte); const all_in_range = try block.addInst(.{ @@ -9713,7 +9729,7 @@ fn intCast( }); break :ok all_in_range; } else ok: { - const zero_inst = try sema.addConstant(operand_ty, Value.zero); + const zero_inst = try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)); const is_in_range = try block.addBinOp(.cmp_gte, operand, zero_inst); break :ok is_in_range; }; @@ -9727,6 +9743,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -9735,7 +9752,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const dest_ty = try sema.resolveType(block, 
dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - switch (dest_ty.zigTypeTag()) { + switch (dest_ty.zigTypeTag(mod)) { .AnyFrame, .ComptimeFloat, .ComptimeInt, @@ -9751,14 +9768,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Type, .Undefined, .Void, - => return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}), + => return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}), .Enum => { const msg = msg: { - const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - switch (operand_ty.zigTypeTag()) { - .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToEnum to cast from '{}'", .{operand_ty.fmt(sema.mod)}), + switch (operand_ty.zigTypeTag(mod)) { + .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToEnum to cast from '{}'", .{operand_ty.fmt(mod)}), else => {}, } @@ -9769,11 +9786,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Pointer => { const msg = msg: { - const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, dest_ty_src, "cannot @bitCast to '{}'", .{dest_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - switch (operand_ty.zigTypeTag()) { - .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToPtr to cast from '{}'", .{operand_ty.fmt(sema.mod)}), - .Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(sema.mod)}), + switch (operand_ty.zigTypeTag(mod)) { + .Int, .ComptimeInt => try sema.errNote(block, dest_ty_src, msg, "use @intToPtr to cast from '{}'", .{operand_ty.fmt(mod)}), + .Pointer => try sema.errNote(block, dest_ty_src, msg, "use @ptrCast to cast from '{}'", .{operand_ty.fmt(mod)}), else => {}, } @@ -9781,14 +9798,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); }, - .Struct, .Union => if (dest_ty.containerLayout() == .Auto) { - const container = switch (dest_ty.zigTypeTag()) { + .Struct, .Union => if (dest_ty.containerLayout(mod) == .Auto) { + const container = switch (dest_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", else => unreachable, }; return sema.fail(block, dest_ty_src, "cannot @bitCast to '{}'; {s} does not have a guaranteed in-memory layout", .{ - dest_ty.fmt(sema.mod), container, + dest_ty.fmt(mod), container, }); }, @@ -9799,7 +9816,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Vector, => {}, } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .AnyFrame, .ComptimeFloat, .ComptimeInt, @@ -9815,14 +9832,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air .Type, .Undefined, .Void, - => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)}), + => return sema.fail(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}), .Enum => { const msg = msg: { - const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", 
.{operand_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - switch (dest_ty.zigTypeTag()) { - .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @enumToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}), + switch (dest_ty.zigTypeTag(mod)) { + .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @enumToInt to cast to '{}'", .{dest_ty.fmt(mod)}), else => {}, } @@ -9832,11 +9849,11 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }, .Pointer => { const msg = msg: { - const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, operand_src, "cannot @bitCast from '{}'", .{operand_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - switch (dest_ty.zigTypeTag()) { - .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @ptrToInt to cast to '{}'", .{dest_ty.fmt(sema.mod)}), - .Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(sema.mod)}), + switch (dest_ty.zigTypeTag(mod)) { + .Int, .ComptimeInt => try sema.errNote(block, operand_src, msg, "use @ptrToInt to cast to '{}'", .{dest_ty.fmt(mod)}), + .Pointer => try sema.errNote(block, operand_src, msg, "use @ptrCast to cast to '{}'", .{dest_ty.fmt(mod)}), else => {}, } @@ -9844,14 +9861,14 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air }; return sema.failWithOwnedErrorMsg(msg); }, - .Struct, .Union => if (operand_ty.containerLayout() == .Auto) { - const container = switch (operand_ty.zigTypeTag()) { + .Struct, .Union => if (operand_ty.containerLayout(mod) == .Auto) { + const container = switch (operand_ty.zigTypeTag(mod)) { .Struct => "struct", .Union => "union", else => unreachable, }; return sema.fail(block, operand_src, "cannot @bitCast from '{}'; {s} does not have a guaranteed in-memory layout", .{ - operand_ty.fmt(sema.mod), container, + operand_ty.fmt(mod), container, }); }, @@ -9869,6 +9886,7 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -9877,31 +9895,31 @@ fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = try sema.resolveInst(extra.rhs); - const target = sema.mod.getTarget(); - const dest_is_comptime_float = switch (dest_ty.zigTypeTag()) { + const target = mod.getTarget(); + const dest_is_comptime_float = switch (dest_ty.zigTypeTag(mod)) { .ComptimeFloat => true, .Float => false, else => return sema.fail( block, dest_ty_src, "expected float type, found '{}'", - .{dest_ty.fmt(sema.mod)}, + .{dest_ty.fmt(mod)}, ), }; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .ComptimeInt => {}, else => return sema.fail( block, operand_src, "expected float type, found '{}'", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(mod)}, ), } if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - return sema.addConstant(dest_ty, try operand_val.floatCast(sema.arena, dest_ty, target)); + return sema.addConstant(dest_ty, try operand_val.floatCast(dest_ty, 
mod)); } if (dest_is_comptime_float) { return sema.fail(block, operand_src, "unable to cast runtime value to 'comptime_float'", .{}); @@ -9944,20 +9962,21 @@ fn zirElemPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array_ptr = try sema.resolveInst(extra.lhs); const elem_index = try sema.resolveInst(extra.rhs); const indexable_ty = sema.typeOf(array_ptr); - if (indexable_ty.zigTypeTag() != .Pointer) { + if (indexable_ty.zigTypeTag(mod) != .Pointer) { const capture_src: LazySrcLoc = .{ .for_capture_from_input = inst_data.src_node }; const msg = msg: { const msg = try sema.errMsg(block, capture_src, "pointer capture of non pointer type '{}'", .{ - indexable_ty.fmt(sema.mod), + indexable_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); - if (indexable_ty.zigTypeTag() == .Array) { + if (indexable_ty.zigTypeTag(mod) == .Array) { try sema.errNote(block, src, msg, "consider using '&' here", .{}); } break :msg msg; @@ -10054,7 +10073,7 @@ fn zirSliceLength(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const array_ptr = try sema.resolveInst(extra.lhs); const start = try sema.resolveInst(extra.start); const len = try sema.resolveInst(extra.len); - const sentinel = try sema.resolveInst(extra.sentinel); + const sentinel = if (extra.sentinel == .none) .none else try sema.resolveInst(extra.sentinel); const ptr_src: LazySrcLoc = .{ .node_offset_slice_ptr = inst_data.src_node }; const start_src: LazySrcLoc = .{ .node_offset_slice_start = extra.start_src_node_offset }; const end_src: LazySrcLoc = .{ .node_offset_slice_end = inst_data.src_node }; @@ -10076,6 +10095,8 @@ fn zirSwitchCapture( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; + const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); const capture_info = zir_datas[inst].switch_capture; const switch_info = zir_datas[capture_info.switch_inst].pl_node; @@ -10087,47 +10108,49 @@ fn zirSwitchCapture( const operand_is_ref = cond_tag == .switch_cond_ref; const operand_ptr = try sema.resolveInst(cond_info.operand); const operand_ptr_ty = sema.typeOf(operand_ptr); - const operand_ty = if (operand_is_ref) operand_ptr_ty.childType() else operand_ptr_ty; + const operand_ty = if (operand_is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty; if (block.inline_case_capture != .none) { const item_val = sema.resolveConstValue(block, .unneeded, block.inline_case_capture, undefined) catch unreachable; - if (operand_ty.zigTypeTag() == .Union) { - const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(item_val, sema.mod).?); - const union_obj = operand_ty.cast(Type.Payload.Union).?.data; + const resolved_item_val = try sema.resolveLazyValue(item_val); + if (operand_ty.zigTypeTag(mod) == .Union) { + const field_index = @intCast(u32, operand_ty.unionTagFieldIndex(resolved_item_val, mod).?); + const union_obj = mod.typeToUnion(operand_ty).?; const field_ty = union_obj.fields.values()[field_index].ty; if (try sema.resolveDefinedValue(block, sema.src, operand_ptr)) |union_val| { if (is_ref) { - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = field_ty, - .mutable = operand_ptr_ty.ptrIsMutable(), - .@"volatile" = operand_ptr_ty.isVolatilePtr(), - 
.@"addrspace" = operand_ptr_ty.ptrAddressSpace(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), + .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), + .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = union_val, - .container_ty = operand_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .field = .{ + .base = union_val.toIntern(), + .index = field_index, + } }, + } })).toValue()); } - const tag_and_val = union_val.castTag(.@"union").?.data; - return sema.addConstant(field_ty, tag_and_val.val); + return sema.addConstant( + field_ty, + mod.intern_pool.indexToKey(union_val.toIntern()).un.val.toValue(), + ); } if (is_ref) { - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = field_ty, - .mutable = operand_ptr_ty.ptrIsMutable(), - .@"volatile" = operand_ptr_ty.isVolatilePtr(), - .@"addrspace" = operand_ptr_ty.ptrAddressSpace(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), + .@"volatile" = operand_ptr_ty.isVolatilePtr(mod), + .@"addrspace" = operand_ptr_ty.ptrAddressSpace(mod), }); return block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty); } else { return block.addStructFieldVal(operand_ptr, field_index, field_ty); } } else if (is_ref) { - return sema.addConstantMaybeRef(block, operand_ty, item_val, true); + return sema.addConstantMaybeRef(block, operand_ty, resolved_item_val, true); } else { return block.inline_case_capture; } @@ -10144,7 +10167,7 @@ fn zirSwitchCapture( return operand_ptr; } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ErrorSet => if (block.switch_else_err_ty) |some| { return sema.bitCast(block, some, operand, operand_src, null); } else { @@ -10162,14 +10185,14 @@ fn zirSwitchCapture( switch_extra.data.getScalarProng(sema.code, switch_extra.end, capture_info.prong_index).item, }; - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Union => { - const union_obj = operand_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(operand_ty).?; const first_item = try sema.resolveInst(items[0]); // Previous switch validation ensured this will succeed const first_item_val = sema.resolveConstValue(block, .unneeded, first_item, "") catch unreachable; - const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, sema.mod).?); + const first_field_index = @intCast(u32, operand_ty.unionTagFieldIndex(first_item_val, mod).?); const first_field = union_obj.fields.values()[first_field_index]; for (items[1..], 0..) 
|item, i| { @@ -10177,22 +10200,22 @@ fn zirSwitchCapture( // Previous switch validation ensured this will succeed const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; - const field_index = operand_ty.unionTagFieldIndex(item_val, sema.mod).?; + const field_index = operand_ty.unionTagFieldIndex(item_val, mod).?; const field = union_obj.fields.values()[field_index]; - if (!field.ty.eql(first_field.ty, sema.mod)) { + if (!field.ty.eql(first_field.ty, mod)) { const msg = msg: { const raw_capture_src = Module.SwitchProngSrc{ .multi_capture = capture_info.prong_index }; - const capture_src = raw_capture_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); + const capture_src = raw_capture_src.resolve(mod, mod.declPtr(block.src_decl), switch_info.src_node, .first); const msg = try sema.errMsg(block, capture_src, "capture group with incompatible types", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); const raw_first_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 0 } }; - const first_item_src = raw_first_item_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); + const first_item_src = raw_first_item_src.resolve(mod, mod.declPtr(block.src_decl), switch_info.src_node, .first); const raw_item_src = Module.SwitchProngSrc{ .multi = .{ .prong = capture_info.prong_index, .item = 1 + @intCast(u32, i) } }; - const item_src = raw_item_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_info.src_node, .first); - try sema.errNote(block, first_item_src, msg, "type '{}' here", .{first_field.ty.fmt(sema.mod)}); - try sema.errNote(block, item_src, msg, "type '{}' here", .{field.ty.fmt(sema.mod)}); + const item_src = raw_item_src.resolve(mod, mod.declPtr(block.src_decl), switch_info.src_node, .first); + try sema.errNote(block, first_item_src, msg, "type '{}' here", .{first_field.ty.fmt(mod)}); + try sema.errNote(block, item_src, msg, "type '{}' here", .{field.ty.fmt(mod)}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -10200,21 +10223,20 @@ fn zirSwitchCapture( } if (is_ref) { - const field_ty_ptr = try Type.ptr(sema.arena, sema.mod, .{ + const field_ty_ptr = try Type.ptr(sema.arena, mod, .{ .pointee_type = first_field.ty, .@"addrspace" = .generic, - .mutable = operand_ptr_ty.ptrIsMutable(), + .mutable = operand_ptr_ty.ptrIsMutable(mod), }); if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| { - return sema.addConstant( - field_ty_ptr, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = op_ptr_val, - .container_ty = operand_ty, - .field_index = first_field_index, - }), - ); + return sema.addConstant(field_ty_ptr, (try mod.intern(.{ .ptr = .{ + .ty = field_ty_ptr.toIntern(), + .addr = .{ .field = .{ + .base = op_ptr_val.toIntern(), + .index = first_field_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, operand_src, null); return block.addStructFieldPtr(operand_ptr, first_field_index, field_ty_ptr); @@ -10223,7 +10245,7 @@ fn zirSwitchCapture( if (try sema.resolveDefinedValue(block, operand_src, operand)) |operand_val| { return sema.addConstant( first_field.ty, - operand_val.castTag(.@"union").?.data.val, + mod.intern_pool.indexToKey(operand_val.toIntern()).un.val.toValue(), ); } try sema.requireRuntimeBlock(block, operand_src, null); @@ -10231,28 +10253,23 @@ fn zirSwitchCapture( }, .ErrorSet => { if (is_multi) { - var names: Module.ErrorSet.NameMap = .{}; 
+ var names: Module.Fn.InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, items.len); for (items) |item| { const item_ref = try sema.resolveInst(item); // Previous switch validation ensured this will succeed - const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; - names.putAssumeCapacityNoClobber( - item_val.getError().?, - {}, - ); + const item_val = sema.resolveConstLazyValue(block, .unneeded, item_ref, "") catch unreachable; + names.putAssumeCapacityNoClobber(item_val.getErrorName(mod).unwrap().?, {}); } - // names must be sorted - Module.ErrorSet.sortNames(&names); - const else_error_ty = try Type.Tag.error_set_merged.create(sema.arena, names); + const else_error_ty = try mod.errorSetFromUnsortedNames(names.keys()); return sema.bitCast(block, else_error_ty, operand, operand_src, null); } else { const item_ref = try sema.resolveInst(items[0]); // Previous switch validation ensured this will succeed - const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; + const item_val = sema.resolveConstLazyValue(block, .unneeded, item_ref, "") catch unreachable; - const item_ty = try Type.Tag.error_set_single.create(sema.arena, item_val.getError().?); + const item_ty = try mod.singleErrorSetType(item_val.getErrorName(mod).unwrap().?); return sema.bitCast(block, item_ty, operand, operand_src, null); } }, @@ -10269,6 +10286,7 @@ fn zirSwitchCapture( } fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_tok; const src = inst_data.src(); @@ -10278,12 +10296,12 @@ fn zirSwitchCaptureTag(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compile const cond_data = zir_datas[Zir.refToIndex(inst_data.operand).?].un_node; const operand_ptr = try sema.resolveInst(cond_data.operand); const operand_ptr_ty = sema.typeOf(operand_ptr); - const operand_ty = if (is_ref) operand_ptr_ty.childType() else operand_ptr_ty; + const operand_ty = if (is_ref) operand_ptr_ty.childType(mod) else operand_ptr_ty; - if (operand_ty.zigTypeTag() != .Union) { + if (operand_ty.zigTypeTag(mod) != .Union) { const msg = msg: { const msg = try sema.errMsg(block, src, "cannot capture tag of non-union type '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, operand_ty); @@ -10301,6 +10319,7 @@ fn zirSwitchCond( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; @@ -10311,7 +10330,7 @@ fn zirSwitchCond( operand_ptr; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Type, .Void, .Bool, @@ -10325,8 +10344,8 @@ fn zirSwitchCond( .ErrorSet, .Enum, => { - if (operand_ty.isSlice()) { - return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(sema.mod)}); + if (operand_ty.isSlice(mod)) { + return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(mod)}); } if ((try sema.typeHasOnePossibleValue(operand_ty))) |opv| { return sema.addConstant(operand_ty, opv); @@ -10336,12 +10355,12 @@ fn zirSwitchCond( .Union => { const union_ty = try sema.resolveTypeFields(operand_ty); - const enum_ty = 
union_ty.unionTagType() orelse { + const enum_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(block, src, "switch on union with no attached enum", .{}); errdefer msg.destroy(sema.gpa); - if (union_ty.declSrcLocOrNull(sema.mod)) |union_src| { - try sema.mod.errNoteNonLazy(union_src, msg, "consider 'union(enum)' here", .{}); + if (union_ty.declSrcLocOrNull(mod)) |union_src| { + try mod.errNoteNonLazy(union_src, msg, "consider 'union(enum)' here", .{}); } break :msg msg; }; @@ -10361,17 +10380,19 @@ fn zirSwitchCond( .Vector, .Frame, .AnyFrame, - => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(sema.mod)}), + => return sema.fail(block, src, "switch on type '{}'", .{operand_ty.fmt(mod)}), } } -const SwitchErrorSet = std.StringHashMap(Module.SwitchProngSrc); +const SwitchErrorSet = std.AutoHashMap(InternPool.NullTerminatedString, Module.SwitchProngSrc); fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const src_node_offset = inst_data.src_node; @@ -10413,14 +10434,14 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const cond_index = Zir.refToIndex(extra.data.operand).?; const raw_operand = sema.resolveInst(zir_data[cond_index].un_node.operand) catch unreachable; const target_ty = sema.typeOf(raw_operand); - break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.elemType() else target_ty; + break :blk if (zir_tags[cond_index] == .switch_cond_ref) target_ty.childType(mod) else target_ty; }; - const union_originally = maybe_union_ty.zigTypeTag() == .Union; + const union_originally = maybe_union_ty.zigTypeTag(mod) == .Union; // Duplicate checking variables later also used for `inline else`. var seen_enum_fields: []?Module.SwitchProngSrc = &.{}; var seen_errors = SwitchErrorSet.init(gpa); - var range_set = RangeSet.init(gpa, sema.mod); + var range_set = RangeSet.init(gpa, mod); var true_count: u8 = 0; var false_count: u8 = 0; @@ -10433,12 +10454,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var empty_enum = false; const operand_ty = sema.typeOf(operand); - const err_set = operand_ty.zigTypeTag() == .ErrorSet; + const err_set = operand_ty.zigTypeTag(mod) == .ErrorSet; var else_error_ty: ?Type = null; // Validate usage of '_' prongs. - if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum() or union_originally)) { + if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) { const msg = msg: { const msg = try sema.errMsg( block, @@ -10459,14 +10480,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError return sema.failWithOwnedErrorMsg(msg); } - const target = sema.mod.getTarget(); - // Validate for duplicate items, missing else prong, and invalid range. 
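
On the `SwitchErrorSet` change above (from `std.StringHashMap` to `std.AutoHashMap` keyed by `InternPool.NullTerminatedString`): once error names are interned, the map key is a 32-bit handle, so hashing and equality are trivial integer operations rather than byte-wise string work. A self-contained sketch, with the hypothetical `NameIndex` standing in for the interned string handle:

    const std = @import("std");

    // An interned name is just an integer-backed handle; AutoHashMap can
    // hash it directly, with no string bytes involved.
    const NameIndex = enum(u32) { _ };

    test "interned names as hash map keys" {
        var map = std.AutoHashMap(NameIndex, u32).init(std.testing.allocator);
        defer map.deinit();

        const name = @intToEnum(NameIndex, 7); // hypothetical pool index
        try map.put(name, 1);
        try std.testing.expect(map.contains(name));
    }
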
- switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Union => unreachable, // handled in zirSwitchCond .Enum => { - seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount()); - empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(); + seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount(mod)); + empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(mod); @memset(seen_enum_fields, null); // `range_set` is used for non-exhaustive enum values that do not correspond to any tags. @@ -10521,7 +10540,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } else true; if (special_prong == .@"else") { - if (all_tags_handled and !operand_ty.isNonexhaustiveEnum()) return sema.fail( + if (all_tags_handled and !operand_ty.isNonexhaustiveEnum(mod)) return sema.fail( block, special_prong_src, "unreachable else prong; all cases already handled", @@ -10539,25 +10558,25 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError for (seen_enum_fields, 0..) |seen_src, i| { if (seen_src != null) continue; - const field_name = operand_ty.enumFieldName(i); + const field_name = operand_ty.enumFieldName(i, mod); try sema.addFieldErrNote( operand_ty, i, msg, - "unhandled enumeration value: '{s}'", - .{field_name}, + "unhandled enumeration value: '{}'", + .{field_name.fmt(&mod.intern_pool)}, ); } - try sema.mod.errNoteNonLazy( - operand_ty.declSrcLoc(sema.mod), + try mod.errNoteNonLazy( + operand_ty.declSrcLoc(mod), msg, "enum '{}' declared here", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(mod)}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum() and !union_originally) { + } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum(mod) and !union_originally) { return sema.fail( block, src, @@ -10614,7 +10633,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError try sema.resolveInferredErrorSetTy(block, src, operand_ty); - if (operand_ty.isAnyError()) { + if (operand_ty.isAnyError(mod)) { if (special_prong != .@"else") { return sema.fail( block, @@ -10628,7 +10647,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError var maybe_msg: ?*Module.ErrorMsg = null; errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa); - for (operand_ty.errorSetNames()) |error_name| { + for (operand_ty.errorSetNames(mod)) |error_name| { if (!seen_errors.contains(error_name) and special_prong != .@"else") { const msg = maybe_msg orelse blk: { maybe_msg = try sema.errMsg( @@ -10644,8 +10663,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, src, msg, - "unhandled error value: 'error.{s}'", - .{error_name}, + "unhandled error value: 'error.{}'", + .{error_name.fmt(ip)}, ); } } @@ -10656,7 +10675,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError return sema.failWithOwnedErrorMsg(msg); } - if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames().len) { + if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames(mod).len) { // In order to enable common patterns for generic code allow simple else bodies // else => unreachable, // else => return, @@ -10693,18 +10712,17 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError ); } - const error_names = 
operand_ty.errorSetNames(); - var names: Module.ErrorSet.NameMap = .{}; + const error_names = operand_ty.errorSetNames(mod); + var names: Module.Fn.InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, error_names.len); for (error_names) |error_name| { if (seen_errors.contains(error_name)) continue; names.putAssumeCapacityNoClobber(error_name, {}); } - - // names must be sorted - Module.ErrorSet.sortNames(&names); - else_error_ty = try Type.Tag.error_set_merged.create(sema.arena, names); + // No need to keep the hash map metadata correct; here we + // extract the (sorted) keys only. + else_error_ty = try mod.errorSetFromUnsortedNames(names.keys()); } }, .Int, .ComptimeInt => { @@ -10722,7 +10740,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, &range_set, item_ref, - operand_ty, src_node_offset, .{ .scalar = scalar_i }, ); @@ -10745,7 +10762,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, &range_set, item_ref, - operand_ty, src_node_offset, .{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } }, ); @@ -10763,7 +10779,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError &range_set, item_first, item_last, - operand_ty, src_node_offset, .{ .range = .{ .prong = multi_i, .item = range_i } }, ); @@ -10774,13 +10789,10 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError } check_range: { - if (operand_ty.zigTypeTag() == .Int) { - var arena = std.heap.ArenaAllocator.init(gpa); - defer arena.deinit(); - - const min_int = try operand_ty.minInt(arena.allocator(), target); - const max_int = try operand_ty.maxInt(arena.allocator(), target); - if (try range_set.spans(min_int, max_int, operand_ty)) { + if (operand_ty.zigTypeTag(mod) == .Int) { + const min_int = try operand_ty.minInt(mod, operand_ty); + const max_int = try operand_ty.maxInt(mod, operand_ty); + if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) { if (special_prong == .@"else") { return sema.fail( block, @@ -10878,15 +10890,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError block, src, "else prong required when switching on type '{}'", - .{operand_ty.fmt(sema.mod)}, + .{operand_ty.fmt(mod)}, ); } - var seen_values = ValueSrcMap.initContext(gpa, .{ - .ty = operand_ty, - .mod = sema.mod, - }); - defer seen_values.deinit(); + var seen_values = ValueSrcMap{}; + defer seen_values.deinit(gpa); var extra_index: usize = special.end; { @@ -10948,7 +10957,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError .ComptimeFloat, .Float, => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{ - operand_ty.fmt(sema.mod), + operand_ty.fmt(mod), }), } @@ -10991,6 +11000,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError defer merges.deinit(gpa); if (try sema.resolveDefinedValue(&child_block, src, operand)) |operand_val| { + const resolved_operand_val = try sema.resolveLazyValue(operand_val); var extra_index: usize = special.end; { var scalar_i: usize = 0; @@ -11005,8 +11015,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const item = try sema.resolveInst(item_ref); // Validation above ensured these will succeed. 
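
The `mod.errorSetFromUnsortedNames(names.keys())` call above replaces the manual `Module.ErrorSet.sortNames` plus `error_set_merged` pair: callers now hand over names in any order, and canonicalization happens once at construction, which is what lets equal error sets intern to the same index. A sketch of that canonicalization step (a hand-rolled insertion sort to stay self-contained; the real ordering and storage are the module's concern):

    const std = @import("std");

    // Canonicalize an unsorted list of error names by sorting them, so
    // that sets built from different orderings compare (and intern) equal.
    fn canonicalize(names: [][]const u8) void {
        var i: usize = 1;
        while (i < names.len) : (i += 1) {
            const key = names[i];
            var j = i;
            while (j > 0 and std.mem.lessThan(u8, key, names[j - 1])) : (j -= 1) {
                names[j] = names[j - 1];
            }
            names[j] = key;
        }
    }

    test "name order no longer matters after canonicalization" {
        var a = [_][]const u8{ "Overflow", "AccessDenied" };
        var b = [_][]const u8{ "AccessDenied", "Overflow" };
        canonicalize(&a);
        canonicalize(&b);
        for (a, b) |x, y| try std.testing.expectEqualStrings(x, y);
    }
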
- const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable; - if (operand_val.eql(item_val, operand_ty, sema.mod)) { + const item_val = sema.resolveConstLazyValue(&child_block, .unneeded, item, "") catch unreachable; + if (resolved_operand_val.eql(item_val, operand_ty, mod)) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11031,8 +11041,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError for (items) |item_ref| { const item = try sema.resolveInst(item_ref); // Validation above ensured these will succeed. - const item_val = sema.resolveConstValue(&child_block, .unneeded, item, "") catch unreachable; - if (operand_val.eql(item_val, operand_ty, sema.mod)) { + const item_val = sema.resolveConstLazyValue(&child_block, .unneeded, item, "") catch unreachable; + if (resolved_operand_val.eql(item_val, operand_ty, mod)) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11050,8 +11060,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError // Validation above ensured these will succeed. const first_tv = sema.resolveInstConst(&child_block, .unneeded, item_first, "") catch unreachable; const last_tv = sema.resolveInstConst(&child_block, .unneeded, item_last, "") catch unreachable; - if ((try sema.compareAll(operand_val, .gte, first_tv.val, operand_ty)) and - (try sema.compareAll(operand_val, .lte, last_tv.val, operand_ty))) + if ((try sema.compareAll(resolved_operand_val, .gte, first_tv.val, operand_ty)) and + (try sema.compareAll(resolved_operand_val, .lte, last_tv.val, operand_ty))) { if (is_inline) child_block.inline_case_capture = operand; if (err_set) try sema.maybeErrorUnwrapComptime(&child_block, body, operand); @@ -11080,8 +11090,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand)) { return Air.Inst.Ref.unreachable_value; } - if (sema.mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag() == .Enum and - (!operand_ty.isNonexhaustiveEnum() or union_originally)) + if (mod.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(mod) == .Enum and + (!operand_ty.isNonexhaustiveEnum(mod) or union_originally)) { try sema.zirDbgStmt(block, cond_dbg_node_index); const ok = try block.addUnOp(.is_named_enum_value, operand); @@ -11121,7 +11131,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; - var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope); + var wip_captures = try WipCaptureScope.init(gpa, child_block.wip_capture_scope); defer wip_captures.deinit(); case_block.instructions.shrinkRetainingCapacity(0); @@ -11133,9 +11143,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError // `item` is already guaranteed to be constant known. 
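
The inline range expansion just below walks `item` from a range's first value to its last by adding one per step (see the `intAddScalar` call with its `error.Overflow` branch in this hunk). A standalone sketch of such an inclusive walk over a machine integer, using the same tuple-returning `@addWithOverflow` form the iterator code in this patch relies on, and breaking on overflow so a range ending at the type's maximum is still visited:

    const std = @import("std");

    // Count how many values lie in the inclusive range [first, last].
    // The overflow branch covers last == maxInt(T), where computing
    // `last + 1` in T is impossible.
    fn countRange(comptime T: type, first: T, last: T) usize {
        var n: usize = 0;
        var cur = first;
        while (cur <= last) {
            n += 1;
            const sum = @addWithOverflow(cur, 1);
            if (sum[1] != 0) break; // `last` was the type's max value
            cur = sum[0];
        }
        return n;
    }

    test "inclusive range visits both endpoints" {
        try std.testing.expectEqual(@as(usize, 3), countRange(u8, 5, 7));
        try std.testing.expectEqual(@as(usize, 1), countRange(u8, 255, 255));
    }

Visiting both endpoints without ever needing `last + 1` to be representable is the subtle part; the overflow check is what makes the maximum-value endpoint safe.
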
             const analyze_body = if (union_originally) blk: {
-                const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
-                const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-                break :blk field_ty.zigTypeTag() != .NoReturn;
+                const item_val = sema.resolveConstLazyValue(block, .unneeded, item, "") catch unreachable;
+                const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
+                break :blk field_ty.zigTypeTag(mod) != .NoReturn;
             } else true;

             if (err_set and try sema.maybeErrorUnwrap(&case_block, body, operand)) {
@@ -11197,9 +11207,12 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     const item_last_ref = try sema.resolveInst(last_ref);
                     const item_last = sema.resolveConstValue(block, .unneeded, item_last_ref, undefined) catch unreachable;

-                    while (item.compareAll(.lte, item_last, operand_ty, sema.mod)) : ({
+                    while (item.compareScalar(.lte, item_last, operand_ty, mod)) : ({
                         // Previous validation has resolved any possible lazy values.
-                        item = try sema.intAddScalar(item, Value.one);
+                        item = sema.intAddScalar(item, try mod.intValue(operand_ty, 1), operand_ty) catch |err| switch (err) {
+                            error.Overflow => unreachable,
+                            else => |e| return e,
+                        };
                     }) {
                         cases_len += 1;
@@ -11212,8 +11225,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                         if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
                             error.NeededSourceLocation => {
                                 const case_src = Module.SwitchProngSrc{ .range = .{ .prong = multi_i, .item = range_i } };
-                                const decl = sema.mod.declPtr(case_block.src_decl);
-                                try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none));
+                                const decl = mod.declPtr(case_block.src_decl);
+                                try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none));
                                 unreachable;
                             },
                             else => return err,
@@ -11241,15 +11254,15 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     const analyze_body = if (union_originally) blk: {
                         const item_val = sema.resolveConstValue(block, .unneeded, item, undefined) catch unreachable;
-                        const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-                        break :blk field_ty.zigTypeTag() != .NoReturn;
+                        const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
+                        break :blk field_ty.zigTypeTag(mod) != .NoReturn;
                     } else true;

                     if (emit_bb) sema.emitBackwardBranch(block, .unneeded) catch |err| switch (err) {
                         error.NeededSourceLocation => {
                             const case_src = Module.SwitchProngSrc{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } };
-                            const decl = sema.mod.declPtr(case_block.src_decl);
-                            try sema.emitBackwardBranch(block, case_src.resolve(sema.gpa, decl, src_node_offset, .none));
+                            const decl = mod.declPtr(case_block.src_decl);
+                            try sema.emitBackwardBranch(block, case_src.resolve(mod, decl, src_node_offset, .none));
                             unreachable;
                         },
                         else => return err,
@@ -11285,8 +11298,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 for (items) |item_ref| {
                     const item = try sema.resolveInst(item_ref);
                     const item_val = sema.resolveConstValue(block, .unneeded, item, "") catch unreachable;
-                    const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-                    if (field_ty.zigTypeTag() != .NoReturn) break true;
+                    const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
+                    if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
                 } else false
             else
                 true;
@@ -11366,7 +11379,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
             var cond_body = try case_block.instructions.toOwnedSlice(gpa);
             defer gpa.free(cond_body);

-            var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
+            var wip_captures = try WipCaptureScope.init(gpa, child_block.wip_capture_scope);
             defer wip_captures.deinit();

             case_block.instructions.shrinkRetainingCapacity(0);
@@ -11409,18 +11422,18 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     var final_else_body: []const Air.Inst.Index = &.{};
     if (special.body.len != 0 or !is_first or case_block.wantSafety()) {
         var emit_bb = false;
-        if (special.is_inline) switch (operand_ty.zigTypeTag()) {
+        if (special.is_inline) switch (operand_ty.zigTypeTag(mod)) {
            .Enum => {
-                if (operand_ty.isNonexhaustiveEnum() and !union_originally) {
+                if (operand_ty.isNonexhaustiveEnum(mod) and !union_originally) {
                     return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
-                        operand_ty.fmt(sema.mod),
+                        operand_ty.fmt(mod),
                     });
                 }
                 for (seen_enum_fields, 0..) |f, i| {
                     if (f != null) continue;
                     cases_len += 1;

-                    const item_val = try Value.Tag.enum_field_index.create(sema.arena, @intCast(u32, i));
+                    const item_val = try mod.enumValueFieldIndex(operand_ty, @intCast(u32, i));
                     const item_ref = try sema.addConstant(operand_ty, item_val);
                     case_block.inline_case_capture = item_ref;
@@ -11428,8 +11441,8 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     case_block.wip_capture_scope = child_block.wip_capture_scope;

                     const analyze_body = if (union_originally) blk: {
-                        const field_ty = maybe_union_ty.unionFieldType(item_val, sema.mod);
-                        break :blk field_ty.zigTypeTag() != .NoReturn;
+                        const field_ty = maybe_union_ty.unionFieldType(item_val, mod);
+                        break :blk field_ty.zigTypeTag(mod) != .NoReturn;
                     } else true;

                     if (emit_bb) try sema.emitBackwardBranch(block, special_prong_src);
@@ -11449,17 +11462,21 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 }
             },
             .ErrorSet => {
-                if (operand_ty.isAnyError()) {
+                if (operand_ty.isAnyError(mod)) {
                     return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
-                        operand_ty.fmt(sema.mod),
+                        operand_ty.fmt(mod),
                     });
                 }
-                for (operand_ty.errorSetNames()) |error_name| {
+                for (0..operand_ty.errorSetNames(mod).len) |i| {
+                    const error_name = operand_ty.errorSetNames(mod)[i];
                     if (seen_errors.contains(error_name)) continue;
                     cases_len += 1;

-                    const item_val = try Value.Tag.@"error".create(sema.arena, .{ .name = error_name });
-                    const item_ref = try sema.addConstant(operand_ty, item_val);
+                    const item_val = try mod.intern(.{ .err = .{
+                        .ty = operand_ty.toIntern(),
+                        .name = error_name,
+                    } });
+                    const item_ref = try sema.addConstant(operand_ty, item_val.toValue());
                     case_block.inline_case_capture = item_ref;

                     case_block.instructions.shrinkRetainingCapacity(0);
@@ -11482,7 +11499,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 while (try it.next()) |cur| {
                     cases_len += 1;

-                    const item_ref = try sema.addConstant(operand_ty, cur);
+                    const item_ref = try sema.addConstant(operand_ty, cur.toValue());
                     case_block.inline_case_capture = item_ref;

                     case_block.instructions.shrinkRetainingCapacity(0);
@@ -11539,19 +11556,19 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 }
             },
             else => return sema.fail(block, special_prong_src, "cannot enumerate values of type '{}' for 'inline else'", .{
-                operand_ty.fmt(sema.mod),
+                operand_ty.fmt(mod),
             }),
         };

-        var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
+        var wip_captures = try WipCaptureScope.init(gpa, child_block.wip_capture_scope);
         defer wip_captures.deinit();

         case_block.instructions.shrinkRetainingCapacity(0);
         case_block.wip_capture_scope = wip_captures.scope;
         case_block.inline_case_capture = .none;

-        if (sema.mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and
-            operand_ty.zigTypeTag() == .Enum and (!operand_ty.isNonexhaustiveEnum() or union_originally))
+        if (mod.backendSupportsFeature(.is_named_enum_value) and special.body.len != 0 and block.wantSafety() and
+            operand_ty.zigTypeTag(mod) == .Enum and (!operand_ty.isNonexhaustiveEnum(mod) or union_originally))
         {
             try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
             const ok = try case_block.addUnOp(.is_named_enum_value, operand);
@@ -11561,9 +11578,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
         const analyze_body = if (union_originally and !special.is_inline)
             for (seen_enum_fields, 0..) |seen_field, index| {
                 if (seen_field != null) continue;
-                const union_obj = maybe_union_ty.cast(Type.Payload.Union).?.data;
+                const union_obj = mod.typeToUnion(maybe_union_ty).?;
                 const field_ty = union_obj.fields.values()[index].ty;
-                if (field_ty.zigTypeTag() != .NoReturn) break true;
+                if (field_ty.zigTypeTag(mod) != .NoReturn) break true;
             } else false
         else
             true;
@@ -11620,47 +11637,70 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
 }

 const RangeSetUnhandledIterator = struct {
-    sema: *Sema,
-    ty: Type,
-    cur: Value,
-    max: Value,
+    mod: *Module,
+    cur: ?InternPool.Index,
+    max: InternPool.Index,
+    range_i: usize,
     ranges: []const RangeSet.Range,
-    range_i: usize = 0,
-    first: bool = true,
+    limbs: []math.big.Limb,
+
+    const preallocated_limbs = math.big.int.calcTwosCompLimbCount(128);

     fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
-        const target = sema.mod.getTarget();
-        const min = try ty.minInt(sema.arena, target);
-        const max = try ty.maxInt(sema.arena, target);
-
-        return RangeSetUnhandledIterator{
-            .sema = sema,
-            .ty = ty,
-            .cur = min,
-            .max = max,
+        const mod = sema.mod;
+        const int_type = mod.intern_pool.indexToKey(ty.toIntern()).int_type;
+        const needed_limbs = math.big.int.calcTwosCompLimbCount(int_type.bits);
+        return .{
+            .mod = mod,
+            .cur = (try ty.minInt(mod, ty)).toIntern(),
+            .max = (try ty.maxInt(mod, ty)).toIntern(),
+            .range_i = 0,
             .ranges = range_set.ranges.items,
+            .limbs = if (needed_limbs > preallocated_limbs)
+                try sema.arena.alloc(math.big.Limb, needed_limbs)
+            else
+                &.{},
         };
     }

-    fn next(it: *RangeSetUnhandledIterator) !?Value {
-        while (it.range_i < it.ranges.len) : (it.range_i += 1) {
-            if (!it.first) {
-                it.cur = try it.sema.intAdd(it.cur, Value.one, it.ty);
-            }
-            it.first = false;
-            if (it.cur.compareAll(.lt, it.ranges[it.range_i].first, it.ty, it.sema.mod)) {
-                return it.cur;
-            }
-            it.cur = it.ranges[it.range_i].last;
+    fn addOne(it: *const RangeSetUnhandledIterator, val: InternPool.Index) !?InternPool.Index {
+        if (val == it.max) return null;
+        const int = it.mod.intern_pool.indexToKey(val).int;
+
+        switch (int.storage) {
+            inline .u64, .i64 => |val_int| {
+                const next_int = @addWithOverflow(val_int, 1);
+                if (next_int[1] == 0)
+                    return (try it.mod.intValue(int.ty.toType(), next_int[0])).toIntern();
+            },
+            .big_int => {},
+            .lazy_align, .lazy_size => unreachable,
         }
-        if (!it.first) {
-            it.cur = try it.sema.intAdd(it.cur, Value.one, it.ty);
+
+        var val_space: InternPool.Key.Int.Storage.BigIntSpace = undefined;
+        const val_bigint = int.storage.toBigInt(&val_space);
+
+        var result_limbs: [preallocated_limbs]math.big.Limb = undefined;
+        var result_bigint = math.big.int.Mutable.init(
+            if (it.limbs.len > 0) it.limbs else &result_limbs,
+            0,
+        );
+
+        result_bigint.addScalar(val_bigint, 1);
+        return (try it.mod.intValue_big(int.ty.toType(), result_bigint.toConst())).toIntern();
+    }
+
+    fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index {
+        var cur = it.cur orelse return null;
+        while (it.range_i < it.ranges.len and cur == it.ranges[it.range_i].first) {
+            defer it.range_i += 1;
+            cur = (try it.addOne(it.ranges[it.range_i].last)) orelse {
+                it.cur = null;
+                return null;
+            };
         }
-        it.first = false;
-        if (it.cur.compareAll(.lte, it.max, it.ty, it.sema.mod)) {
-            return it.cur;
-        }
-        return null;
+        it.cur = try it.addOne(cur);
+        return cur;
     }
 };

@@ -11671,18 +11711,17 @@ fn resolveSwitchItemVal(
     switch_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
     range_expand: Module.SwitchProngSrc.RangeExpand,
-) CompileError!TypedValue {
+) CompileError!InternPool.Index {
+    const mod = sema.mod;
     const item = try sema.resolveInst(item_ref);
-    const item_ty = sema.typeOf(item);
     // Constructing a LazySrcLoc is costly because we only have the switch AST node.
     // Only if we know for sure we need to report a compile error do we resolve the
     // full source locations.
-    if (sema.resolveConstValue(block, .unneeded, item, "")) |val| {
-        try sema.resolveLazyValue(val);
-        return TypedValue{ .ty = item_ty, .val = val };
+    if (sema.resolveConstLazyValue(block, .unneeded, item, "")) |val| {
+        return val.toIntern();
     } else |err| switch (err) {
         error.NeededSourceLocation => {
-            const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), switch_node_offset, range_expand);
+            const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), switch_node_offset, range_expand);
             _ = try sema.resolveConstValue(block, src, item, "switch prong values must be comptime-known");
             unreachable;
         },
@@ -11696,17 +11735,17 @@ fn validateSwitchRange(
     range_set: *RangeSet,
     first_ref: Zir.Inst.Ref,
     last_ref: Zir.Inst.Ref,
-    operand_ty: Type,
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val;
-    const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val;
-    if (first_val.compareAll(.gt, last_val, operand_ty, sema.mod)) {
-        const src = switch_prong_src.resolve(sema.gpa, sema.mod.declPtr(block.src_decl), src_node_offset, .first);
+    const mod = sema.mod;
+    const first = try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first);
+    const last = try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last);
+    if (first.toValue().compareScalar(.gt, last.toValue(), mod.intern_pool.typeOf(first).toType(), mod)) {
+        const src = switch_prong_src.resolve(mod, mod.declPtr(block.src_decl), src_node_offset, .first);
         return sema.fail(block, src, "range start value is greater than the end value", .{});
     }
-    const maybe_prev_src = try range_set.add(first_val, last_val, operand_ty, switch_prong_src);
+    const maybe_prev_src = try range_set.add(first, last, switch_prong_src);
     return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
 }

@@ -11715,12 +11754,11 @@ fn validateSwitchItem(
     block: *Block,
     range_set: *RangeSet,
     item_ref: Zir.Inst.Ref,
-    operand_ty: Type,
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
-    const maybe_prev_src = try range_set.add(item_val, item_val, operand_ty, switch_prong_src);
+    const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    const maybe_prev_src = try range_set.add(item, item, switch_prong_src);
     return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
 }

@@ -11733,9 +11771,11 @@ fn validateSwitchItemEnum(
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
-    const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val, sema.mod) orelse {
-        const maybe_prev_src = try range_set.add(item_tv.val, item_tv.val, item_tv.ty, switch_prong_src);
+    const ip = &sema.mod.intern_pool;
+    const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    const int = ip.indexToKey(item).enum_tag.int;
+    const field_index = ip.indexToKey(ip.typeOf(item)).enum_type.tagValueIndex(ip, int) orelse {
+        const maybe_prev_src = try range_set.add(int, int, switch_prong_src);
         return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
     };
     const maybe_prev_src = seen_fields[field_index];
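
// Editor's note: the RangeSetUnhandledIterator rewrite above walks every value
// of the switch operand's integer type that no prong covers, skipping over each
// handled [first, last] range. A minimal standalone sketch of that skipping
// logic over plain u8 values (`Range` and `nextUnhandled` are hypothetical
// names; the real iterator works on InternPool indices and big integers):

const std = @import("std");

const Range = struct { first: u8, last: u8 };

/// Given sorted, disjoint `ranges`, returns the next uncovered value at or
/// after `start`, or null once the end of the type's range is reached.
fn nextUnhandled(start: u8, ranges: []const Range, range_i: *usize) ?u8 {
    var cur = start;
    while (range_i.* < ranges.len and cur == ranges[range_i.*].first) {
        const last = ranges[range_i.*].last;
        range_i.* += 1;
        if (last == std.math.maxInt(u8)) return null; // adding 1 would overflow
        cur = last + 1;
    }
    return cur;
}

test "values not covered by sorted disjoint ranges" {
    const ranges = [_]Range{ .{ .first = 0, .last = 1 }, .{ .first = 3, .last = 254 } };
    var i: usize = 0;
    try std.testing.expectEqual(@as(?u8, 2), nextUnhandled(0, &ranges, &i));
    try std.testing.expectEqual(@as(?u8, 255), nextUnhandled(3, &ranges, &i));
}
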
@@ -11751,9 +11791,10 @@ fn validateSwitchItemError(
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    const ip = &sema.mod.intern_pool;
+    const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
     // TODO: Do I need to typecheck here?
-    const error_name = item_tv.val.castTag(.@"error").?.data.name;
+    const error_name = ip.indexToKey(item).err.name;
     const maybe_prev_src = if (try seen_errors.fetchPut(error_name, switch_prong_src)) |prev|
         prev.value
     else
@@ -11769,10 +11810,10 @@ fn validateSwitchDupe(
     src_node_offset: i32,
 ) CompileError!void {
     const prev_prong_src = maybe_prev_src orelse return;
-    const gpa = sema.gpa;
-    const block_src_decl = sema.mod.declPtr(block.src_decl);
-    const src = switch_prong_src.resolve(gpa, block_src_decl, src_node_offset, .none);
-    const prev_src = prev_prong_src.resolve(gpa, block_src_decl, src_node_offset, .none);
+    const mod = sema.mod;
+    const block_src_decl = mod.declPtr(block.src_decl);
+    const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none);
+    const prev_src = prev_prong_src.resolve(mod, block_src_decl, src_node_offset, .none);
     const msg = msg: {
         const msg = try sema.errMsg(
             block,
@@ -11802,20 +11843,21 @@ fn validateSwitchItemBool(
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
-    if (item_val.toBool()) {
+    const mod = sema.mod;
+    const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    if (item.toValue().toBool()) {
         true_count.* += 1;
     } else {
         false_count.* += 1;
     }
     if (true_count.* + false_count.* > 2) {
-        const block_src_decl = sema.mod.declPtr(block.src_decl);
-        const src = switch_prong_src.resolve(sema.gpa, block_src_decl, src_node_offset, .none);
+        const block_src_decl = mod.declPtr(block.src_decl);
+        const src = switch_prong_src.resolve(mod, block_src_decl, src_node_offset, .none);
         return sema.fail(block, src, "duplicate switch value", .{});
     }
 }

-const ValueSrcMap = std.HashMap(Value, Module.SwitchProngSrc, Value.HashContext, std.hash_map.default_max_load_percentage);
+const ValueSrcMap = std.AutoHashMapUnmanaged(InternPool.Index, Module.SwitchProngSrc);

 fn validateSwitchItemSparse(
     sema: *Sema,
@@ -11825,8 +11867,8 @@ fn validateSwitchItemSparse(
     src_node_offset: i32,
     switch_prong_src: Module.SwitchProngSrc,
 ) CompileError!void {
-    const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
-    const kv = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return;
+    const item = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none);
+    const kv = (try seen_values.fetchPut(sema.gpa, item, switch_prong_src)) orelse return;
     return sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset);
 }

@@ -11864,7 +11906,8 @@ fn validateSwitchNoRange(
 }

 fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, operand: Air.Inst.Ref) !bool {
-    if (!sema.mod.backendSupportsFeature(.panic_unwrap_error)) return false;
+    const mod = sema.mod;
+    if (!mod.backendSupportsFeature(.panic_unwrap_error)) return false;

     const tags = sema.code.instructions.items(.tag);
     for (body) |inst| {
@@ -11900,7 +11943,7 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op
             .as_node => try sema.zirAsNode(block, inst),
             .field_val => try sema.zirFieldVal(block, inst),
             .@"unreachable" => {
-                if (!sema.mod.comp.formatted_panics) {
+                if (!mod.comp.formatted_panics) {
                     try sema.safetyPanic(block, .unwrap_error);
                     return true;
                 }
@@ -11923,7 +11966,7 @@ fn maybeErrorUnwrap(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, op
             },
             else => unreachable,
         };
-        if (sema.typeOf(air_inst).isNoReturn())
+        if (sema.typeOf(air_inst).isNoReturn(mod))
             return true;
         sema.inst_map.putAssumeCapacity(inst, air_inst);
     }
@@ -11931,19 +11974,20 @@
 }

 fn maybeErrorUnwrapCondbr(sema: *Sema, block: *Block, body: []const Zir.Inst.Index, cond: Zir.Inst.Ref, cond_src: LazySrcLoc) !void {
+    const mod = sema.mod;
     const index = Zir.refToIndex(cond) orelse return;
     if (sema.code.instructions.items(.tag)[index] != .is_non_err) return;

     const err_inst_data = sema.code.instructions.items(.data)[index].un_node;
     const err_operand = try sema.resolveInst(err_inst_data.operand);
     const operand_ty = sema.typeOf(err_operand);
-    if (operand_ty.zigTypeTag() == .ErrorSet) {
+    if (operand_ty.zigTypeTag(mod) == .ErrorSet) {
         try sema.maybeErrorUnwrapComptime(block, body, err_operand);
         return;
     }
     if (try sema.resolveDefinedValue(block, cond_src, err_operand)) |val| {
-        if (!operand_ty.isError()) return;
-        if (val.getError() == null) return;
+        if (!operand_ty.isError(mod)) return;
+        if (val.getErrorName(mod) == .none) return;
         try sema.maybeErrorUnwrapComptime(block, body, err_operand);
     }
 }
@@ -11965,45 +12009,60 @@ fn maybeErrorUnwrapComptime(sema: *Sema, block: *Block, body: []const Zir.Inst.I
     const src = inst_data.src();

     if (try sema.resolveDefinedValue(block, src, operand)) |val| {
-        if (val.getError()) |name| {
-            return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
+        if (val.getErrorName(sema.mod).unwrap()) |name| {
+            return sema.fail(block, src, "caught unexpected error '{}'", .{name.fmt(&sema.mod.intern_pool)});
         }
     }
 }

 fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
     const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs);
-    const field_name = try sema.resolveConstString(block, name_src, extra.rhs, "field name must be comptime-known");
+    const field_name = try sema.resolveConstStringIntern(block, name_src, extra.rhs, "field name must be comptime-known");
     const ty = try sema.resolveTypeFields(unresolved_ty);
+    const ip = &mod.intern_pool;

     const has_field = hf: {
-        if (ty.isSlice()) {
-            if (mem.eql(u8, field_name, "ptr")) break :hf true;
-            if (mem.eql(u8, field_name, "len")) break :hf true;
-            break :hf false;
+        switch (ip.indexToKey(ty.toIntern())) {
+            .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
+                .Slice => {
+                    if (ip.stringEqlSlice(field_name, "ptr")) break :hf true;
+                    if (ip.stringEqlSlice(field_name, "len")) break :hf true;
+                    break :hf false;
+                },
+                else => {},
+            },
+            .anon_struct_type => |anon_struct| {
+                if (anon_struct.names.len != 0) {
+                    break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names, field_name) != null;
+                } else {
+                    const field_index = field_name.toUnsigned(ip) orelse break :hf false;
+                    break :hf field_index < ty.structFieldCount(mod);
+                }
+            },
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :hf false;
+                assert(struct_obj.haveFieldTypes());
+                break :hf struct_obj.fields.contains(field_name);
+            },
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
+                assert(union_obj.haveFieldTypes());
+                break :hf union_obj.fields.contains(field_name);
+            },
+            .enum_type => |enum_type| {
+                break :hf enum_type.nameIndex(ip, field_name) != null;
+            },
+            .array_type => break :hf ip.stringEqlSlice(field_name, "len"),
+            else => {},
         }
-        if (ty.castTag(.anon_struct)) |pl| {
-            break :hf for (pl.data.names) |name| {
-                if (mem.eql(u8, name, field_name)) break true;
-            } else false;
-        }
-        if (ty.isTuple()) {
-            const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch break :hf false;
-            break :hf field_index < ty.structFieldCount();
-        }
-        break :hf switch (ty.zigTypeTag()) {
-            .Struct => ty.structFields().contains(field_name),
-            .Union => ty.unionFields().contains(field_name),
-            .Enum => ty.enumFields().contains(field_name),
-            .Array => mem.eql(u8, field_name, "len"),
-            else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
-                ty.fmt(sema.mod),
-            }),
-        };
+        return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
+            ty.fmt(mod),
+        });
     };
     if (has_field) {
         return Air.Inst.Ref.bool_true;
@@ -12013,20 +12072,22 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }

 fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = inst_data.src();
     const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
     const container_type = try sema.resolveType(block, lhs_src, extra.lhs);
-    const decl_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "decl name must be comptime-known");
+    const decl_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, "decl name must be comptime-known");
     try sema.checkNamespaceType(block, lhs_src, container_type);

-    const namespace = container_type.getNamespace() orelse return Air.Inst.Ref.bool_false;
+    const namespace = container_type.getNamespaceIndex(mod).unwrap() orelse
+        return Air.Inst.Ref.bool_false;
     if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| {
-        const decl = sema.mod.declPtr(decl_index);
-        if (decl.is_pub or decl.getFileScope() == block.getFileScope()) {
+        const decl = mod.declPtr(decl_index);
+        if (decl.is_pub or decl.getFileScope(mod) == block.getFileScope(mod)) {
             return Air.Inst.Ref.bool_true;
         }
     }
@@ -12042,12 +12103,12 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const operand_src = inst_data.src();
     const operand = inst_data.get(sema.code);

-    const result = mod.importFile(block.getFileScope(), operand) catch |err| switch (err) {
+    const result = mod.importFile(block.getFileScope(mod), operand) catch |err| switch (err) {
         error.ImportOutsidePkgPath => {
             return sema.fail(block, operand_src, "import of file outside package path: '{s}'", .{operand});
         },
         error.PackageNotFound => {
-            const name = try block.getFileScope().pkg.getName(sema.gpa, mod.*);
+            const name = try block.getFileScope(mod).pkg.getName(sema.gpa, mod.*);
             defer sema.gpa.free(name);
             return sema.fail(block, operand_src, "no package named '{s}' available within package '{s}'", .{ operand, name });
         },
@@ -12073,7 +12134,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const name = try sema.resolveConstString(block, operand_src, inst_data.operand, "file path name must be comptime-known");

-    const embed_file = mod.embedFile(block.getFileScope(), name) catch |err| switch (err) {
+    const embed_file = mod.embedFile(block.getFileScope(mod), name) catch |err| switch (err) {
         error.ImportOutsidePkgPath => {
             return sema.fail(block, operand_src, "embed of file outside package path: '{s}'", .{name});
         },
@@ -12087,17 +12148,23 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     var anon_decl = try block.startAnonDecl();
     defer anon_decl.deinit();

-    const bytes_including_null = embed_file.bytes[0 .. embed_file.bytes.len + 1];
-
-    // TODO instead of using `Value.Tag.bytes`, create a new value tag for pointing at
+    // TODO instead of using `.bytes`, create a new value tag for pointing at
     // a `*Module.EmbedFile`. The purpose of this would be:
     // - If only the length is read and the bytes are not inspected by comptime code,
     //   there can be an optimization where the codegen backend does a copy_file_range
     //   into the final binary, and never loads the data into memory.
     // - When a Decl is destroyed, it can free the `*Module.EmbedFile`.
+    const ty = try mod.arrayType(.{
+        .len = embed_file.bytes.len,
+        .child = .u8_type,
+        .sentinel = .zero_u8,
+    });
     embed_file.owner_decl = try anon_decl.finish(
-        try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), embed_file.bytes.len),
-        try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null),
+        ty,
+        (try mod.intern(.{ .aggregate = .{
+            .ty = ty.toIntern(),
+            .storage = .{ .bytes = embed_file.bytes },
+        } })).toValue(),
         0, // default alignment
     );
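
// Editor's note: the zirRetErrValueCode hunk below interns the error named in
// a `return error.Foo;` statement as a constant of the single-item error set
// type. The typing rule it implements is observable from user code; a small
// illustrative test:

const std = @import("std");

test "an error literal carries its error name" {
    const e = error.FileNotFound; // has a single-item error set type
    try std.testing.expectEqualStrings("FileNotFound", @errorName(e));
}
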
@@ -12105,16 +12172,15 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
 }

 fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
-    const err_name = inst_data.get(sema.code);
-
-    // Return the error code from the function.
-    const kv = try sema.mod.getErrorValue(err_name);
-    const result_inst = try sema.addConstant(
-        try Type.Tag.error_set_single.create(sema.arena, kv.key),
-        try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }),
-    );
-    return result_inst;
+    const name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code));
+    _ = try mod.getErrorValue(name);
+    const error_set_type = try mod.singleErrorSetType(name);
+    return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{
+        .ty = error_set_type.toIntern(),
+        .name = name,
+    } })).toValue());
 }

 fn zirShl(
@@ -12126,6 +12192,7 @@ fn zirShl(
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     sema.src = src;
@@ -12136,11 +12203,10 @@ fn zirShl(
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const target = sema.mod.getTarget();
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);

-    const scalar_ty = lhs_ty.scalarType();
-    const scalar_rhs_ty = rhs_ty.scalarType();
+    const scalar_ty = lhs_ty.scalarType(mod);
+    const scalar_rhs_ty = rhs_ty.scalarType(mod);

     // TODO coerce rhs if air_tag is not shl_sat
     const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);
@@ -12149,62 +12215,56 @@ fn zirShl(
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);

     if (maybe_rhs_val) |rhs_val| {
-        if (rhs_val.isUndef()) {
+        if (rhs_val.isUndef(mod)) {
             return sema.addConstUndef(sema.typeOf(lhs));
         }
         // If rhs is 0, return lhs without doing any calculations.
         if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
             return lhs;
         }
-        if (scalar_ty.zigTypeTag() != .ComptimeInt and air_tag != .shl_sat) {
-            var bits_payload = Value.Payload.U64{
-                .base = .{ .tag = .int_u64 },
-                .data = scalar_ty.intInfo(target).bits,
-            };
-            const bit_value = Value.initPayload(&bits_payload.base);
-            if (rhs_ty.zigTypeTag() == .Vector) {
+        if (scalar_ty.zigTypeTag(mod) != .ComptimeInt and air_tag != .shl_sat) {
+            const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
+            if (rhs_ty.zigTypeTag(mod) == .Vector) {
                 var i: usize = 0;
-                while (i < rhs_ty.vectorLen()) : (i += 1) {
-                    var elem_value_buf: Value.ElemValueBuffer = undefined;
-                    const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
-                    if (rhs_elem.compareHetero(.gte, bit_value, target)) {
+                while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
+                    const rhs_elem = try rhs_val.elemValue(mod, i);
+                    if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
                         return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
-                            rhs_elem.fmtValue(scalar_ty, sema.mod),
+                            rhs_elem.fmtValue(scalar_ty, mod),
                             i,
-                            scalar_ty.fmt(sema.mod),
+                            scalar_ty.fmt(mod),
                         });
                     }
                 }
-            } else if (rhs_val.compareHetero(.gte, bit_value, target)) {
+            } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
                 return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
-                    rhs_val.fmtValue(scalar_ty, sema.mod),
-                    scalar_ty.fmt(sema.mod),
+                    rhs_val.fmtValue(scalar_ty, mod),
+                    scalar_ty.fmt(mod),
                 });
             }
         }
-        if (rhs_ty.zigTypeTag() == .Vector) {
+        if (rhs_ty.zigTypeTag(mod) == .Vector) {
             var i: usize = 0;
-            while (i < rhs_ty.vectorLen()) : (i += 1) {
-                var elem_value_buf: Value.ElemValueBuffer = undefined;
-                const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
-                if (rhs_elem.compareHetero(.lt, Value.zero, target)) {
+            while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
+                const rhs_elem = try rhs_val.elemValue(mod, i);
+                if (rhs_elem.compareHetero(.lt, try mod.intValue(scalar_rhs_ty, 0), mod)) {
                     return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
-                        rhs_elem.fmtValue(scalar_ty, sema.mod),
+                        rhs_elem.fmtValue(scalar_ty, mod),
                         i,
                     });
                 }
             }
-        } else if (rhs_val.compareHetero(.lt, Value.zero, target)) {
+        } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
             return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
-                rhs_val.fmtValue(scalar_ty, sema.mod),
+                rhs_val.fmtValue(scalar_ty, mod),
             });
         }
     }

     const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
-        if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty);
+        if (lhs_val.isUndef(mod)) return sema.addConstUndef(lhs_ty);
         const rhs_val = maybe_rhs_val orelse {
-            if (scalar_ty.zigTypeTag() == .ComptimeInt) {
+            if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
                 return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
             }
             break :rs rhs_src;
@@ -12212,25 +12272,25 @@ fn zirShl(
         const val = switch (air_tag) {
             .shl_exact => val: {
-                const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, sema.mod);
-                if (scalar_ty.zigTypeTag() == .ComptimeInt) {
+                const shifted = try lhs_val.shlWithOverflow(rhs_val, lhs_ty, sema.arena, mod);
+                if (scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
                     break :val shifted.wrapped_result;
                 }
-                if (shifted.overflow_bit.compareAllWithZero(.eq, sema.mod)) {
+                if (shifted.overflow_bit.compareAllWithZero(.eq, mod)) {
                     break :val shifted.wrapped_result;
                 }
                 return sema.fail(block, src, "operation caused overflow", .{});
             },

-            .shl_sat => if (scalar_ty.zigTypeTag() == .ComptimeInt)
-                try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
+            .shl_sat => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
+                try lhs_val.shl(rhs_val, lhs_ty, sema.arena, mod)
             else
-                try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, sema.mod),
+                try lhs_val.shlSat(rhs_val, lhs_ty, sema.arena, mod),

-            .shl => if (scalar_ty.zigTypeTag() == .ComptimeInt)
-                try lhs_val.shl(rhs_val, lhs_ty, sema.arena, sema.mod)
+            .shl => if (scalar_ty.zigTypeTag(mod) == .ComptimeInt)
+                try lhs_val.shl(rhs_val, lhs_ty, sema.arena, mod)
             else
-                try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, sema.mod),
+                try lhs_val.shlTrunc(rhs_val, lhs_ty, sema.arena, mod),

             else => unreachable,
         };
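
// Editor's note: the comptime branches above reject a shift amount that is
// negative or not less than the operand's bit width. The language-level rule
// being enforced is visible in user code, where a u8 shift amount must fit in
// a u3:

const std = @import("std");

test "shift amount is limited by the operand's bit width" {
    const x: u8 = 1;
    const amt: u3 = 7; // u3 cannot express 8, so `x << amt` is always in range
    try std.testing.expectEqual(@as(u8, 128), x << amt);
}
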
@@ -12241,11 +12301,11 @@ fn zirShl(
     const new_rhs = if (air_tag == .shl_sat) rhs: {
         // Limit the RHS type for saturating shl to be an integer as small as the LHS.
         if (rhs_is_comptime_int or
-            scalar_rhs_ty.intInfo(target).bits > scalar_ty.intInfo(target).bits)
+            scalar_rhs_ty.intInfo(mod).bits > scalar_ty.intInfo(mod).bits)
         {
             const max_int = try sema.addConstant(
                 lhs_ty,
-                try lhs_ty.maxInt(sema.arena, target),
+                try lhs_ty.maxInt(mod, lhs_ty),
             );
             const rhs_limited = try sema.analyzeMinMax(block, rhs_src, .min, &.{ rhs, max_int }, &.{ rhs_src, rhs_src });
             break :rhs try sema.intCast(block, src, lhs_ty, rhs_src, rhs_limited, rhs_src, false);
@@ -12256,12 +12316,11 @@ fn zirShl(
     try sema.requireRuntimeBlock(block, src, runtime_src);
     if (block.wantSafety()) {
-        const bit_count = scalar_ty.intInfo(target).bits;
+        const bit_count = scalar_ty.intInfo(mod).bits;
         if (!std.math.isPowerOfTwo(bit_count)) {
-            const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);
-
-            const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
-                const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
+            const bit_count_val = try mod.intValue(scalar_rhs_ty, bit_count);
+            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
+                const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val));
                 const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
                 break :ok try block.addInst(.{
                     .tag = .reduce,
@@ -12290,7 +12349,7 @@ fn zirShl(
             } },
         });
         const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
-        const any_ov_bit = if (lhs_ty.zigTypeTag() == .Vector)
+        const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector)
             try block.addInst(.{
                 .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
                 .data = .{ .reduce = .{
@@ -12300,7 +12359,7 @@ fn zirShl(
             })
         else
             ov_bit;
-        const zero_ov = try sema.addConstant(Type.u1, Value.zero);
+        const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0));
         const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov);

         try sema.addSafetyCheck(block, no_ov, .shl_overflow);
@@ -12319,6 +12378,7 @@ fn zirShr(
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     sema.src = src;
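
// Editor's note: the shl_with_overflow safety check above reduces the
// overflow-bit half of the result tuple and traps when it is nonzero. The
// same tuple shape is exposed to user code through @shlWithOverflow:

const std = @import("std");

test "@shlWithOverflow reports shifted-out bits" {
    const r = @shlWithOverflow(@as(u8, 0b1100_0000), 1);
    try std.testing.expectEqual(@as(u8, 0b1000_0000), r[0]); // wrapped result
    try std.testing.expectEqual(@as(u1, 1), r[1]); // a 1 bit was shifted out
}
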
@@ -12330,94 +12390,87 @@ fn zirShr(
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
-    const target = sema.mod.getTarget();
-    const scalar_ty = lhs_ty.scalarType();
+    const scalar_ty = lhs_ty.scalarType(mod);

     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);

     const runtime_src = if (maybe_rhs_val) |rhs_val| rs: {
-        if (rhs_val.isUndef()) {
+        if (rhs_val.isUndef(mod)) {
             return sema.addConstUndef(lhs_ty);
         }
         // If rhs is 0, return lhs without doing any calculations.
         if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
             return lhs;
         }
-        if (scalar_ty.zigTypeTag() != .ComptimeInt) {
-            var bits_payload = Value.Payload.U64{
-                .base = .{ .tag = .int_u64 },
-                .data = scalar_ty.intInfo(target).bits,
-            };
-            const bit_value = Value.initPayload(&bits_payload.base);
-            if (rhs_ty.zigTypeTag() == .Vector) {
+        if (scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
+            const bit_value = try mod.intValue(Type.comptime_int, scalar_ty.intInfo(mod).bits);
+            if (rhs_ty.zigTypeTag(mod) == .Vector) {
                 var i: usize = 0;
-                while (i < rhs_ty.vectorLen()) : (i += 1) {
-                    var elem_value_buf: Value.ElemValueBuffer = undefined;
-                    const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
-                    if (rhs_elem.compareHetero(.gte, bit_value, target)) {
+                while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
+                    const rhs_elem = try rhs_val.elemValue(mod, i);
+                    if (rhs_elem.compareHetero(.gte, bit_value, mod)) {
                         return sema.fail(block, rhs_src, "shift amount '{}' at index '{d}' is too large for operand type '{}'", .{
-                            rhs_elem.fmtValue(scalar_ty, sema.mod),
+                            rhs_elem.fmtValue(scalar_ty, mod),
                             i,
-                            scalar_ty.fmt(sema.mod),
+                            scalar_ty.fmt(mod),
                         });
                     }
                 }
-            } else if (rhs_val.compareHetero(.gte, bit_value, target)) {
+            } else if (rhs_val.compareHetero(.gte, bit_value, mod)) {
                 return sema.fail(block, rhs_src, "shift amount '{}' is too large for operand type '{}'", .{
-                    rhs_val.fmtValue(scalar_ty, sema.mod),
-                    scalar_ty.fmt(sema.mod),
+                    rhs_val.fmtValue(scalar_ty, mod),
+                    scalar_ty.fmt(mod),
                 });
             }
         }
-        if (rhs_ty.zigTypeTag() == .Vector) {
+        if (rhs_ty.zigTypeTag(mod) == .Vector) {
             var i: usize = 0;
-            while (i < rhs_ty.vectorLen()) : (i += 1) {
-                var elem_value_buf: Value.ElemValueBuffer = undefined;
-                const rhs_elem = rhs_val.elemValueBuffer(sema.mod, i, &elem_value_buf);
-                if (rhs_elem.compareHetero(.lt, Value.zero, target)) {
+            while (i < rhs_ty.vectorLen(mod)) : (i += 1) {
+                const rhs_elem = try rhs_val.elemValue(mod, i);
+                if (rhs_elem.compareHetero(.lt, try mod.intValue(rhs_ty.childType(mod), 0), mod)) {
                     return sema.fail(block, rhs_src, "shift by negative amount '{}' at index '{d}'", .{
-                        rhs_elem.fmtValue(scalar_ty, sema.mod),
+                        rhs_elem.fmtValue(scalar_ty, mod),
                         i,
                     });
                 }
             }
-        } else if (rhs_val.compareHetero(.lt, Value.zero, target)) {
+        } else if (rhs_val.compareHetero(.lt, try mod.intValue(rhs_ty, 0), mod)) {
             return sema.fail(block, rhs_src, "shift by negative amount '{}'", .{
-                rhs_val.fmtValue(scalar_ty, sema.mod),
+                rhs_val.fmtValue(scalar_ty, mod),
             });
         }
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef()) {
+            if (lhs_val.isUndef(mod)) {
                 return sema.addConstUndef(lhs_ty);
             }
             if (air_tag == .shr_exact) {
                 // Detect if any ones would be shifted out.
-                const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, sema.mod);
+                const truncated = try lhs_val.intTruncBitsAsValue(lhs_ty, sema.arena, .unsigned, rhs_val, mod);
                 if (!(try truncated.compareAllWithZeroAdvanced(.eq, sema))) {
                     return sema.fail(block, src, "exact shift shifted out 1 bits", .{});
                 }
             }
-            const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, sema.mod);
+            const val = try lhs_val.shr(rhs_val, lhs_ty, sema.arena, mod);
             return sema.addConstant(lhs_ty, val);
         } else {
             break :rs lhs_src;
         }
     } else rhs_src;

-    if (maybe_rhs_val == null and scalar_ty.zigTypeTag() == .ComptimeInt) {
+    if (maybe_rhs_val == null and scalar_ty.zigTypeTag(mod) == .ComptimeInt) {
         return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known", .{});
     }

     try sema.requireRuntimeBlock(block, src, runtime_src);
     const result = try block.addBinOp(air_tag, lhs, rhs);
     if (block.wantSafety()) {
-        const bit_count = scalar_ty.intInfo(target).bits;
+        const bit_count = scalar_ty.intInfo(mod).bits;
         if (!std.math.isPowerOfTwo(bit_count)) {
-            const bit_count_val = try Value.Tag.int_u64.create(sema.arena, bit_count);
+            const bit_count_val = try mod.intValue(rhs_ty.scalarType(mod), bit_count);

-            const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
-                const bit_count_inst = try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, bit_count_val));
+            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
+                const bit_count_inst = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, bit_count_val));
                 const lt = try block.addCmpVector(rhs, bit_count_inst, .lt);
                 break :ok try block.addInst(.{
                     .tag = .reduce,
@@ -12436,7 +12489,7 @@ fn zirShr(
         if (air_tag == .shr_exact) {
             const back = try block.addBinOp(.shl, result, rhs);
-            const ok = if (rhs_ty.zigTypeTag() == .Vector) ok: {
+            const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
                 const eql = try block.addCmpVector(lhs, back, .eq);
                 break :ok try block.addInst(.{
                     .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
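
// Editor's note: the "exact shift shifted out 1 bits" check above is the
// comptime half of @shrExact's guarantee that no set bits are discarded; the
// runtime half is the shl-and-compare safety check that follows it. For
// example:

const std = @import("std");

test "@shrExact requires the shifted-out bits to be zero" {
    try std.testing.expectEqual(@as(u8, 3), @shrExact(@as(u8, 12), 2)); // 0b1100 >> 2
    // @shrExact(@as(u8, 13), 2) would be rejected: 0b1101 has a 1 in its low bits.
}
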
@@ -12461,6 +12514,7 @@ fn zirBitwise(
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -12475,8 +12529,8 @@ fn zirBitwise(
     const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } });
-    const scalar_type = resolved_type.scalarType();
-    const scalar_tag = scalar_type.zigTypeTag();
+    const scalar_type = resolved_type.scalarType(mod);
+    const scalar_tag = scalar_type.zigTypeTag(mod);

     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -12484,7 +12538,7 @@ fn zirBitwise(
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;

     if (!is_int) {
-        return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) });
+        return sema.fail(block, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag(mod)), @tagName(rhs_ty.zigTypeTag(mod)) });
     }

     const runtime_src = runtime: {
@@ -12493,9 +12547,9 @@ fn zirBitwise(
         if (try sema.resolveMaybeUndefValIntable(casted_lhs)) |lhs_val| {
             if (try sema.resolveMaybeUndefValIntable(casted_rhs)) |rhs_val| {
                 const result_val = switch (air_tag) {
-                    .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, sema.mod),
-                    .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, sema.mod),
-                    .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, sema.mod),
+                    .bit_and => try lhs_val.bitwiseAnd(rhs_val, resolved_type, sema.arena, mod),
+                    .bit_or => try lhs_val.bitwiseOr(rhs_val, resolved_type, sema.arena, mod),
+                    .xor => try lhs_val.bitwiseXor(rhs_val, resolved_type, sema.arena, mod),
                     else => unreachable,
                 };
                 return sema.addConstant(resolved_type, result_val);
@@ -12515,37 +12569,37 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node };

     const operand = try sema.resolveInst(inst_data.operand);
     const operand_type = sema.typeOf(operand);
-    const scalar_type = operand_type.scalarType();
+    const scalar_type = operand_type.scalarType(mod);

-    if (scalar_type.zigTypeTag() != .Int) {
+    if (scalar_type.zigTypeTag(mod) != .Int) {
         return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{
-            operand_type.fmt(sema.mod),
+            operand_type.fmt(mod),
         });
     }

     if (try sema.resolveMaybeUndefVal(operand)) |val| {
-        if (val.isUndef()) {
+        if (val.isUndef(mod)) {
             return sema.addConstUndef(operand_type);
-        } else if (operand_type.zigTypeTag() == .Vector) {
-            const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen());
-            var elem_val_buf: Value.ElemValueBuffer = undefined;
-            const elems = try sema.arena.alloc(Value, vec_len);
+        } else if (operand_type.zigTypeTag(mod) == .Vector) {
+            const vec_len = try sema.usizeCast(block, operand_src, operand_type.vectorLen(mod));
+            const elems = try sema.arena.alloc(InternPool.Index, vec_len);
             for (elems, 0..) |*elem, i| {
-                const elem_val = val.elemValueBuffer(sema.mod, i, &elem_val_buf);
-                elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, sema.mod);
+                const elem_val = try val.elemValue(mod, i);
+                elem.* = try (try elem_val.bitwiseNot(scalar_type, sema.arena, mod)).intern(scalar_type, mod);
             }
-            return sema.addConstant(
-                operand_type,
-                try Value.Tag.aggregate.create(sema.arena, elems),
-            );
+            return sema.addConstant(operand_type, (try mod.intern(.{ .aggregate = .{
+                .ty = operand_type.toIntern(),
+                .storage = .{ .elems = elems },
+            } })).toValue());
         } else {
-            const result_val = try val.bitwiseNot(operand_type, sema.arena, sema.mod);
+            const result_val = try val.bitwiseNot(operand_type, sema.arena, mod);
             return sema.addConstant(operand_type, result_val);
         }
     }
@@ -12561,18 +12615,19 @@ fn analyzeTupleCat(
     lhs: Air.Inst.Ref,
     rhs: Air.Inst.Ref,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
     const src = LazySrcLoc.nodeOffset(src_node);
     const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node };

-    const lhs_len = lhs_ty.structFieldCount();
-    const rhs_len = rhs_ty.structFieldCount();
+    const lhs_len = lhs_ty.structFieldCount(mod);
+    const rhs_len = rhs_ty.structFieldCount(mod);
     const dest_fields = lhs_len + rhs_len;

     if (dest_fields == 0) {
-        return sema.addConstant(Type.initTag(.empty_struct_literal), Value.initTag(.empty_struct_value));
+        return sema.addConstant(Type.empty_struct_literal, Value.empty_struct);
     }
     if (lhs_len == 0) {
         return rhs;
@@ -12582,42 +12637,48 @@ fn analyzeTupleCat(
     }
     const final_len = try sema.usizeCast(block, rhs_src, dest_fields);

-    const types = try sema.arena.alloc(Type, final_len);
-    const values = try sema.arena.alloc(Value, final_len);
+    const types = try sema.arena.alloc(InternPool.Index, final_len);
+    const values = try sema.arena.alloc(InternPool.Index, final_len);

     const opt_runtime_src = rs: {
         var runtime_src: ?LazySrcLoc = null;
         var i: u32 = 0;
         while (i < lhs_len) : (i += 1) {
-            types[i] = lhs_ty.structFieldType(i);
-            const default_val = lhs_ty.structFieldDefaultValue(i);
-            values[i] = default_val;
+            types[i] = lhs_ty.structFieldType(i, mod).toIntern();
+            const default_val = lhs_ty.structFieldDefaultValue(i, mod);
+            values[i] = default_val.toIntern();
             const operand_src = lhs_src; // TODO better source location
-            if (default_val.tag() == .unreachable_value) {
+            if (default_val.toIntern() == .unreachable_value) {
                 runtime_src = operand_src;
+                values[i] = .none;
             }
         }
         i = 0;
         while (i < rhs_len) : (i += 1) {
-            types[i + lhs_len] = rhs_ty.structFieldType(i);
-            const default_val = rhs_ty.structFieldDefaultValue(i);
-            values[i + lhs_len] = default_val;
+            types[i + lhs_len] = rhs_ty.structFieldType(i, mod).toIntern();
+            const default_val = rhs_ty.structFieldDefaultValue(i, mod);
+            values[i + lhs_len] = default_val.toIntern();
             const operand_src = rhs_src; // TODO better source location
-            if (default_val.tag() == .unreachable_value) {
+            if (default_val.toIntern() == .unreachable_value) {
                 runtime_src = operand_src;
+                values[i + lhs_len] = .none;
             }
         }
         break :rs runtime_src;
     };

-    const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
+    const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
         .types = types,
         .values = values,
-    });
+        .names = &.{},
+    } });

     const runtime_src = opt_runtime_src orelse {
-        const tuple_val = try Value.Tag.aggregate.create(sema.arena, values);
-        return sema.addConstant(tuple_ty, tuple_val);
+        const tuple_val = try mod.intern(.{ .aggregate = .{
+            .ty = tuple_ty,
+            .storage = .{ .elems = values },
+        } });
+        return sema.addConstant(tuple_ty.toType(), tuple_val.toValue());
     };

     try sema.requireRuntimeBlock(block, src, runtime_src);
@@ -12635,13 +12696,14 @@ fn analyzeTupleCat(
         try sema.tupleFieldValByIndex(block, operand_src, rhs, i, rhs_ty);
     }

-    return block.addAggregateInit(tuple_ty, element_refs);
+    return block.addAggregateInit(tuple_ty.toType(), element_refs);
 }

 fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();

+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const lhs = try sema.resolveInst(extra.lhs);
@@ -12650,8 +12712,8 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs_ty = sema.typeOf(rhs);
     const src = inst_data.src();

-    const lhs_is_tuple = lhs_ty.isTuple();
-    const rhs_is_tuple = rhs_ty.isTuple();
+    const lhs_is_tuple = lhs_ty.isTuple(mod);
+    const rhs_is_tuple = rhs_ty.isTuple(mod);
     if (lhs_is_tuple and rhs_is_tuple) {
         return sema.analyzeTupleCat(block, inst_data.src_node, lhs, rhs);
     }
@@ -12661,11 +12723,11 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, rhs_ty) orelse lhs_info: {
         if (lhs_is_tuple) break :lhs_info @as(Type.ArrayInfo, undefined);
-        return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)});
+        return sema.fail(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)});
     };
     const rhs_info = try sema.getArrayCatInfo(block, rhs_src, rhs, lhs_ty) orelse {
         assert(!rhs_is_tuple);
-        return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(sema.mod)});
+        return sema.fail(block, rhs_src, "expected indexable; found '{}'", .{rhs_ty.fmt(mod)});
     };

     const resolved_elem_ty = t: {
@@ -12727,73 +12789,71 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         ),
     };

-    const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, sema.mod);
+    const result_ty = try Type.array(sema.arena, result_len, res_sent_val, resolved_elem_ty, mod);

     const ptr_addrspace = p: {
-        if (lhs_ty.zigTypeTag() == .Pointer) break :p lhs_ty.ptrAddressSpace();
-        if (rhs_ty.zigTypeTag() == .Pointer) break :p rhs_ty.ptrAddressSpace();
+        if (lhs_ty.zigTypeTag(mod) == .Pointer) break :p lhs_ty.ptrAddressSpace(mod);
+        if (rhs_ty.zigTypeTag(mod) == .Pointer) break :p rhs_ty.ptrAddressSpace(mod);
         break :p null;
     };

-    const runtime_src = if (switch (lhs_ty.zigTypeTag()) {
+    const runtime_src = if (switch (lhs_ty.zigTypeTag(mod)) {
         .Array, .Struct => try sema.resolveMaybeUndefVal(lhs),
         .Pointer => try sema.resolveDefinedValue(block, lhs_src, lhs),
         else => unreachable,
     }) |lhs_val| rs: {
-        if (switch (rhs_ty.zigTypeTag()) {
+        if (switch (rhs_ty.zigTypeTag(mod)) {
             .Array, .Struct => try sema.resolveMaybeUndefVal(rhs),
             .Pointer => try sema.resolveDefinedValue(block, rhs_src, rhs),
             else => unreachable,
         }) |rhs_val| {
-            const lhs_sub_val = if (lhs_ty.isSinglePointer())
+            const lhs_sub_val = if (lhs_ty.isSinglePointer(mod))
                 (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
             else
                 lhs_val;

-            const rhs_sub_val = if (rhs_ty.isSinglePointer())
+            const rhs_sub_val = if (rhs_ty.isSinglePointer(mod))
                 (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).?
             else
                 rhs_val;

-            const final_len_including_sent = result_len + @boolToInt(res_sent_val != null);
-            const element_vals = try sema.arena.alloc(Value, final_len_including_sent);
+            const element_vals = try sema.arena.alloc(InternPool.Index, result_len);
             var elem_i: usize = 0;
             while (elem_i < lhs_len) : (elem_i += 1) {
                 const lhs_elem_i = elem_i;
-                const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i) else lhs_info.elem_type;
-                const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i) else Value.initTag(.unreachable_value);
-                const elem_val = if (elem_default_val.tag() == .unreachable_value) try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_elem_i) else elem_default_val;
+                const elem_ty = if (lhs_is_tuple) lhs_ty.structFieldType(lhs_elem_i, mod) else lhs_info.elem_type;
+                const elem_default_val = if (lhs_is_tuple) lhs_ty.structFieldDefaultValue(lhs_elem_i, mod) else Value.@"unreachable";
+                const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try lhs_sub_val.elemValue(mod, lhs_elem_i) else elem_default_val;
                 const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
                 const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
                 const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
-                element_vals[elem_i] = coerced_elem_val;
+                element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod);
             }
             while (elem_i < result_len) : (elem_i += 1) {
                 const rhs_elem_i = elem_i - lhs_len;
-                const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i) else rhs_info.elem_type;
-                const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i) else Value.initTag(.unreachable_value);
-                const elem_val = if (elem_default_val.tag() == .unreachable_value) try rhs_sub_val.elemValue(sema.mod, sema.arena, rhs_elem_i) else elem_default_val;
+                const elem_ty = if (rhs_is_tuple) rhs_ty.structFieldType(rhs_elem_i, mod) else rhs_info.elem_type;
+                const elem_default_val = if (rhs_is_tuple) rhs_ty.structFieldDefaultValue(rhs_elem_i, mod) else Value.@"unreachable";
+                const elem_val = if (elem_default_val.toIntern() == .unreachable_value) try rhs_sub_val.elemValue(mod, rhs_elem_i) else elem_default_val;
                 const elem_val_inst = try sema.addConstant(elem_ty, elem_val);
                 const coerced_elem_val_inst = try sema.coerce(block, resolved_elem_ty, elem_val_inst, .unneeded);
                 const coerced_elem_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, coerced_elem_val_inst, "");
-                element_vals[elem_i] = coerced_elem_val;
+                element_vals[elem_i] = try coerced_elem_val.intern(resolved_elem_ty, mod);
             }
-            if (res_sent_val) |sent_val| {
-                element_vals[result_len] = sent_val;
-            }
-            const val = try Value.Tag.aggregate.create(sema.arena, element_vals);
-            return sema.addConstantMaybeRef(block, result_ty, val, ptr_addrspace != null);
+            return sema.addConstantMaybeRef(block, result_ty, (try mod.intern(.{ .aggregate = .{
+                .ty = result_ty.toIntern(),
+                .storage = .{ .elems = element_vals },
+            } })).toValue(), ptr_addrspace != null);
         } else break :rs rhs_src;
     } else lhs_src;

     try sema.requireRuntimeBlock(block, src, runtime_src);

     if (ptr_addrspace) |ptr_as| {
-        const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{
+        const alloc_ty = try Type.ptr(sema.arena, mod, .{
             .pointee_type = result_ty,
.@"addrspace" = ptr_as, }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = resolved_elem_ty, .@"addrspace" = ptr_as, }); @@ -12815,7 +12875,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai if (res_sent_val) |sent_val| { const elem_index = try sema.addIntUnsigned(Type.usize, result_len); const elem_ptr = try block.addPtrElemPtr(alloc, elem_index, elem_ptr_ty); - const init = try sema.addConstant(lhs_info.elem_type, sent_val); + const init = try sema.addConstant(lhs_info.elem_type, try mod.getCoerced(sent_val, lhs_info.elem_type)); try sema.storePtr2(block, src, elem_ptr, src, init, lhs_src, .store); } @@ -12841,11 +12901,12 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Inst.Ref, peer_ty: Type) !?Type.ArrayInfo { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { - .Array => return operand_ty.arrayInfo(), + switch (operand_ty.zigTypeTag(mod)) { + .Array => return operand_ty.arrayInfo(mod), .Pointer => { - const ptr_info = operand_ty.ptrInfo().data; + const ptr_info = operand_ty.ptrInfo(mod); switch (ptr_info.size) { // TODO: in the Many case here this should only work if the type // has a sentinel, and this code should compute the length based @@ -12855,24 +12916,24 @@ fn getArrayCatInfo(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air.Ins return Type.ArrayInfo{ .elem_type = ptr_info.pointee_type, .sentinel = ptr_info.sentinel, - .len = val.sliceLen(sema.mod), + .len = val.sliceLen(mod), }; }, .One => { - if (ptr_info.pointee_type.zigTypeTag() == .Array) { - return ptr_info.pointee_type.arrayInfo(); + if (ptr_info.pointee_type.zigTypeTag(mod) == .Array) { + return ptr_info.pointee_type.arrayInfo(mod); } }, .C => {}, } }, .Struct => { - if (operand_ty.isTuple() and peer_ty.isIndexable()) { - assert(!peer_ty.isTuple()); + if (operand_ty.isTuple(mod) and peer_ty.isIndexable(mod)) { + assert(!peer_ty.isTuple(mod)); return .{ - .elem_type = peer_ty.elemType2(), + .elem_type = peer_ty.elemType2(mod), .sentinel = null, - .len = operand_ty.arrayLen(), + .len = operand_ty.arrayLen(mod), }; } }, @@ -12886,52 +12947,54 @@ fn analyzeTupleMul( block: *Block, src_node: i32, operand: Air.Inst.Ref, - factor: u64, + factor: usize, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = sema.typeOf(operand); const src = LazySrcLoc.nodeOffset(src_node); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = src_node }; - const tuple_len = operand_ty.structFieldCount(); - const final_len_u64 = std.math.mul(u64, tuple_len, factor) catch + const tuple_len = operand_ty.structFieldCount(mod); + const final_len = std.math.mul(usize, tuple_len, factor) catch return sema.fail(block, rhs_src, "operation results in overflow", .{}); - if (final_len_u64 == 0) { - return sema.addConstant(Type.initTag(.empty_struct_literal), Value.initTag(.empty_struct_value)); + if (final_len == 0) { + return sema.addConstant(Type.empty_struct_literal, Value.empty_struct); } - const final_len = try sema.usizeCast(block, rhs_src, final_len_u64); - - const types = try sema.arena.alloc(Type, final_len); - const values = try sema.arena.alloc(Value, final_len); + const types = try sema.arena.alloc(InternPool.Index, 
final_len); + const values = try sema.arena.alloc(InternPool.Index, final_len); const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; - var i: u32 = 0; - while (i < tuple_len) : (i += 1) { - types[i] = operand_ty.structFieldType(i); - values[i] = operand_ty.structFieldDefaultValue(i); + for (0..tuple_len) |i| { + types[i] = operand_ty.structFieldType(i, mod).toIntern(); + values[i] = operand_ty.structFieldDefaultValue(i, mod).toIntern(); const operand_src = lhs_src; // TODO better source location - if (values[i].tag() == .unreachable_value) { + if (values[i] == .unreachable_value) { runtime_src = operand_src; + values[i] = .none; // TODO don't treat unreachable_value as special } } - i = 0; - while (i < factor) : (i += 1) { - mem.copyForwards(Type, types[tuple_len * i ..], types[0..tuple_len]); - mem.copyForwards(Value, values[tuple_len * i ..], values[0..tuple_len]); + for (0..factor) |i| { + mem.copyForwards(InternPool.Index, types[tuple_len * i ..], types[0..tuple_len]); + mem.copyForwards(InternPool.Index, values[tuple_len * i ..], values[0..tuple_len]); } break :rs runtime_src; }; - const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{ + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ .types = types, .values = values, - }); + .names = &.{}, + } }); const runtime_src = opt_runtime_src orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstant(tuple_ty, tuple_val); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .storage = .{ .elems = values }, + } }); + return sema.addConstant(tuple_ty.toType(), tuple_val.toValue()); }; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -12947,13 +13010,14 @@ fn analyzeTupleMul( @memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = try sema.resolveInst(extra.lhs); @@ -12963,18 +13027,19 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const operator_src: LazySrcLoc = .{ .node_offset_main_token = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - if (lhs_ty.isTuple()) { + if (lhs_ty.isTuple(mod)) { // In `**` rhs must be comptime-known, but lhs can be runtime-known const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize, "array multiplication factor must be comptime-known"); - return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor); + const factor_casted = try sema.usizeCast(block, rhs_src, factor); + return sema.analyzeTupleMul(block, inst_data.src_node, lhs, factor_casted); } // Analyze the lhs first, to catch the case that someone tried to do exponentiation const lhs_info = try sema.getArrayCatInfo(block, lhs_src, lhs, lhs_ty) orelse { const msg = msg: { - const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, lhs_src, "expected indexable; found '{}'", .{lhs_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Int, 
.Float, .ComptimeFloat, .ComptimeInt, .Vector => { try sema.errNote(block, operator_src, msg, "this operator multiplies arrays; use std.math.pow for exponentiation", .{}); }, @@ -12992,15 +13057,13 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.fail(block, rhs_src, "operation results in overflow", .{}); const result_len = try sema.usizeCast(block, src, result_len_u64); - const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, sema.mod); + const result_ty = try Type.array(sema.arena, result_len, lhs_info.sentinel, lhs_info.elem_type, mod); - const ptr_addrspace = if (lhs_ty.zigTypeTag() == .Pointer) lhs_ty.ptrAddressSpace() else null; + const ptr_addrspace = if (lhs_ty.zigTypeTag(mod) == .Pointer) lhs_ty.ptrAddressSpace(mod) else null; const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len); if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| { - const final_len_including_sent = result_len + @boolToInt(lhs_info.sentinel != null); - - const lhs_sub_val = if (lhs_ty.isSinglePointer()) + const lhs_sub_val = if (lhs_ty.isSinglePointer(mod)) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val; @@ -13008,38 +13071,41 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const val = v: { // Optimization for the common pattern of a single element repeated N times, such // as zero-filling a byte array. - if (lhs_len == 1) { - const elem_val = try lhs_sub_val.elemValue(sema.mod, sema.arena, 0); - break :v try Value.Tag.repeated.create(sema.arena, elem_val); + if (lhs_len == 1 and lhs_info.sentinel == null) { + const elem_val = try lhs_sub_val.elemValue(mod, 0); + break :v try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .repeated_elem = elem_val.toIntern() }, + } }); } - const element_vals = try sema.arena.alloc(Value, final_len_including_sent); + const element_vals = try sema.arena.alloc(InternPool.Index, result_len); var elem_i: usize = 0; while (elem_i < result_len) { var lhs_i: usize = 0; while (lhs_i < lhs_len) : (lhs_i += 1) { - const elem_val = try lhs_sub_val.elemValue(sema.mod, sema.arena, lhs_i); - element_vals[elem_i] = elem_val; + const elem_val = try lhs_sub_val.elemValue(mod, lhs_i); + element_vals[elem_i] = elem_val.toIntern(); elem_i += 1; } } - if (lhs_info.sentinel) |sent_val| { - element_vals[result_len] = sent_val; - } - break :v try Value.Tag.aggregate.create(sema.arena, element_vals); + break :v try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } }); }; - return sema.addConstantMaybeRef(block, result_ty, val, ptr_addrspace != null); + return sema.addConstantMaybeRef(block, result_ty, val.toValue(), ptr_addrspace != null); } try sema.requireRuntimeBlock(block, src, lhs_src); if (ptr_addrspace) |ptr_as| { - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = result_ty, .@"addrspace" = ptr_as, }); const alloc = try block.addTy(.alloc, alloc_ty); - const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = lhs_info.elem_type, .@"addrspace" = ptr_as, }); @@ -13082,6 +13148,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = 
@@ -13082,6 +13148,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }
 
 fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const lhs_src = src;
@@ -13089,34 +13156,31 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const rhs = try sema.resolveInst(inst_data.operand);
     const rhs_ty = sema.typeOf(rhs);
-    const rhs_scalar_ty = rhs_ty.scalarType();
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
 
-    if (rhs_scalar_ty.isUnsignedInt() or switch (rhs_scalar_ty.zigTypeTag()) {
+    if (rhs_scalar_ty.isUnsignedInt(mod) or switch (rhs_scalar_ty.zigTypeTag(mod)) {
         .Int, .ComptimeInt, .Float, .ComptimeFloat => false,
         else => true,
     }) {
-        return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)});
+        return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)});
    }
 
     if (rhs_scalar_ty.isAnyFloat()) {
         // We handle float negation here to ensure negative zero is represented in the bits.
         if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
-            if (rhs_val.isUndef()) return sema.addConstUndef(rhs_ty);
-            return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, sema.mod));
+            if (rhs_val.isUndef(mod)) return sema.addConstUndef(rhs_ty);
+            return sema.addConstant(rhs_ty, try rhs_val.floatNeg(rhs_ty, sema.arena, mod));
         }
         try sema.requireRuntimeBlock(block, src, null);
         return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs);
     }
 
-    const lhs = if (rhs_ty.zigTypeTag() == .Vector)
-        try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero))
-    else
-        try sema.resolveInst(.zero);
-
+    const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0)));
     return sema.analyzeArithmetic(block, .sub, lhs, rhs, src, lhs_src, rhs_src, true);
 }
 
 fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const lhs_src = src;
@@ -13124,18 +13188,14 @@ fn zirNegateWrap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     const rhs = try sema.resolveInst(inst_data.operand);
     const rhs_ty = sema.typeOf(rhs);
-    const rhs_scalar_ty = rhs_ty.scalarType();
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
 
-    switch (rhs_scalar_ty.zigTypeTag()) {
+    switch (rhs_scalar_ty.zigTypeTag(mod)) {
         .Int, .ComptimeInt, .Float, .ComptimeFloat => {},
-        else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(sema.mod)}),
+        else => return sema.fail(block, src, "negation of type '{}'", .{rhs_ty.fmt(mod)}),
     }
 
-    const lhs = if (rhs_ty.zigTypeTag() == .Vector)
-        try sema.addConstant(rhs_ty, try Value.Tag.repeated.create(sema.arena, Value.zero))
-    else
-        try sema.resolveInst(.zero);
-
+    const lhs = try sema.addConstant(rhs_ty, try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0)));
     return sema.analyzeArithmetic(block, .subwrap, lhs, rhs, src, lhs_src, rhs_src, true);
 }
 
@@ -13161,6 +13221,7 @@ fn zirArithmetic(
 }
 
 fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13171,8 +13232,8 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
 
@@ -13181,25 +13242,22 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag() == .Vector;
-
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const rhs_scalar_ty = rhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
-    if ((lhs_ty.zigTypeTag() == .ComptimeFloat and rhs_ty.zigTypeTag() == .ComptimeInt) or
-        (lhs_ty.zigTypeTag() == .ComptimeInt and rhs_ty.zigTypeTag() == .ComptimeFloat))
+    if ((lhs_ty.zigTypeTag(mod) == .ComptimeFloat and rhs_ty.zigTypeTag(mod) == .ComptimeInt) or
+        (lhs_ty.zigTypeTag(mod) == .ComptimeInt and rhs_ty.zigTypeTag(mod) == .ComptimeFloat))
     {
         // If it makes a difference whether we coerce to ints or floats before doing the division, error.
         // If lhs % rhs is 0, it doesn't matter.
@@ -13207,9 +13265,12 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         const rhs_val = maybe_rhs_val orelse unreachable;
         const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable;
         if (!rem.compareAllWithZero(.eq, mod)) {
-            return sema.fail(block, src, "ambiguous coercion of division operands '{s}' and '{s}'; non-zero remainder '{}'", .{
-                @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()), rem.fmtValue(resolved_type, sema.mod),
-            });
+            return sema.fail(
+                block,
+                src,
+                "ambiguous coercion of division operands '{}' and '{}'; non-zero remainder '{}'",
+                .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod), rem.fmtValue(resolved_type, mod) },
+            );
         }
     }
 
@@ -13243,17 +13304,20 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     switch (scalar_tag) {
         .Int, .ComptimeInt, .ComptimeFloat => {
             if (maybe_lhs_val) |lhs_val| {
-                if (!lhs_val.isUndef()) {
+                if (!lhs_val.isUndef(mod)) {
                     if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
-                        const zero_val = if (is_vector) b: {
-                            break :b try Value.Tag.repeated.create(sema.arena, Value.zero);
-                        } else Value.zero;
+                        const scalar_zero = switch (scalar_tag) {
+                            .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
+                            .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+                            else => unreachable,
+                        };
+                        const zero_val = try sema.splat(resolved_type, scalar_zero);
                         return sema.addConstant(resolved_type, zero_val);
                     }
                 }
             }
             if (maybe_rhs_val) |rhs_val| {
-                if (rhs_val.isUndef()) {
+                if (rhs_val.isUndef(mod)) {
                     return sema.failWithUseOfUndef(block, rhs_src);
                 }
                 if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -13267,10 +13331,10 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const runtime_src = rs: {
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef()) {
-                if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
+            if (lhs_val.isUndef(mod)) {
+                if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                     if (maybe_rhs_val) |rhs_val| {
-                        if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) {
+                        if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
                             return sema.addConstUndef(resolved_type);
                         }
                     }
@@ -13281,10 +13345,10 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
 
             if (maybe_rhs_val) |rhs_val| {
                 if (is_int) {
-                    const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
-                    var vector_index: usize = undefined;
-                    if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) {
-                        return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index);
+                    var overflow_idx: ?usize = null;
+                    const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
+                    if (overflow_idx) |vec_idx| {
+                        return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
                     }
                     return sema.addConstant(resolved_type, res);
                 } else {
@@ -13309,8 +13373,13 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     }
 
     const air_tag = if (is_int) blk: {
-        if (lhs_ty.isSignedInt() or rhs_ty.isSignedInt()) {
-            return sema.fail(block, src, "division with '{s}' and '{s}': signed integers must use @divTrunc, @divFloor, or @divExact", .{ @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()) });
+        if (lhs_ty.isSignedInt(mod) or rhs_ty.isSignedInt(mod)) {
+            return sema.fail(
+                block,
+                src,
+                "division with '{}' and '{}': signed integers must use @divTrunc, @divFloor, or @divExact",
+                .{ lhs_ty.fmt(mod), rhs_ty.fmt(mod) },
+            );
         }
         break :blk Air.Inst.Tag.div_trunc;
     } else switch (block.float_mode) {
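The error path just above enforces the language rule that `/` on runtime-known signed integers is ambiguous about rounding, so a direction must be spelled out. A short sketch of that rule, illustrative only, using just the documented division builtins:

    const std = @import("std");

    test "signed division must pick an explicit rounding direction" {
        var a: i32 = -7; // runtime-known and signed, so `a / 2` would not compile
        try std.testing.expectEqual(@as(i32, -3), @divTrunc(a, 2)); // rounds toward zero
        try std.testing.expectEqual(@as(i32, -4), @divFloor(a, 2)); // rounds toward -inf
    }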
@@ -13321,6 +13390,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
 }
 
 fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13331,8 +13401,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
 
@@ -13341,19 +13411,16 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag() == .Vector;
-
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_exact);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
@@ -13375,19 +13442,22 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     // If the lhs is undefined, compile error because there is a possible
     // value for which the division would result in a remainder.
     if (maybe_lhs_val) |lhs_val| {
-        if (lhs_val.isUndef()) {
+        if (lhs_val.isUndef(mod)) {
             return sema.failWithUseOfUndef(block, rhs_src);
         } else {
             if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
-                const zero_val = if (is_vector) b: {
-                    break :b try Value.Tag.repeated.create(sema.arena, Value.zero);
-                } else Value.zero;
+                const scalar_zero = switch (scalar_tag) {
+                    .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
+                    .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+                    else => unreachable,
+                };
+                const zero_val = try sema.splat(resolved_type, scalar_zero);
                 return sema.addConstant(resolved_type, zero_val);
            }
        }
    }
     if (maybe_rhs_val) |rhs_val| {
-        if (rhs_val.isUndef()) {
+        if (rhs_val.isUndef(mod)) {
             return sema.failWithUseOfUndef(block, rhs_src);
         }
         if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -13402,10 +13472,10 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             if (!(modulus_val.compareAllWithZero(.eq, mod))) {
                 return sema.fail(block, src, "exact division produced remainder", .{});
             }
-            const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
-            var vector_index: usize = undefined;
-            if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) {
-                return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index);
+            var overflow_idx: ?usize = null;
+            const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
+            if (overflow_idx) |vec_idx| {
+                return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
             }
             return sema.addConstant(resolved_type, res);
         } else {
@@ -13437,7 +13507,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
     const ok = if (!is_int) ok: {
         const floored = try block.addUnOp(.floor, result);
 
-        if (resolved_type.zigTypeTag() == .Vector) {
+        if (resolved_type.zigTypeTag(mod) == .Vector) {
             const eql = try block.addCmpVector(result, floored, .eq);
             break :ok try block.addInst(.{
                 .tag = switch (block.float_mode) {
@@ -13459,8 +13529,13 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     } else ok: {
         const remainder = try block.addBinOp(.rem, casted_lhs, casted_rhs);
 
-        if (resolved_type.zigTypeTag() == .Vector) {
-            const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
+        const scalar_zero = switch (scalar_tag) {
+            .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
+            .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+            else => unreachable,
+        };
+        if (resolved_type.zigTypeTag(mod) == .Vector) {
+            const zero_val = try sema.splat(resolved_type, scalar_zero);
             const zero = try sema.addConstant(resolved_type, zero_val);
             const eql = try block.addCmpVector(remainder, zero, .eq);
             break :ok try block.addInst(.{
@@ -13471,7 +13546,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 } },
             });
         } else {
-            const zero = try sema.addConstant(resolved_type, Value.zero);
+            const zero = try sema.addConstant(resolved_type, scalar_zero);
             const is_in_range = try block.addBinOp(.cmp_eq, remainder, zero);
             break :ok is_in_range;
         }
@@ -13484,6 +13559,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }
 
 fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13494,8 +13570,8 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
 
@@ -13504,20 +13580,17 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag() == .Vector;
-
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const rhs_scalar_ty = rhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_floor);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
@@ -13542,17 +13615,20 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     // value (zero) for which the division would be illegal behavior.
     // If the lhs is undefined, result is undefined.
     if (maybe_lhs_val) |lhs_val| {
-        if (!lhs_val.isUndef()) {
+        if (!lhs_val.isUndef(mod)) {
             if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
-                const zero_val = if (is_vector) b: {
-                    break :b try Value.Tag.repeated.create(sema.arena, Value.zero);
-                } else Value.zero;
+                const scalar_zero = switch (scalar_tag) {
+                    .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
+                    .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+                    else => unreachable,
+                };
+                const zero_val = try sema.splat(resolved_type, scalar_zero);
                 return sema.addConstant(resolved_type, zero_val);
             }
         }
     }
     if (maybe_rhs_val) |rhs_val| {
-        if (rhs_val.isUndef()) {
+        if (rhs_val.isUndef(mod)) {
             return sema.failWithUseOfUndef(block, rhs_src);
         }
         if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -13561,10 +13637,10 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         }
         // TODO: if the RHS is one, return the LHS directly
     }
     if (maybe_lhs_val) |lhs_val| {
-        if (lhs_val.isUndef()) {
-            if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
+        if (lhs_val.isUndef(mod)) {
+            if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                 if (maybe_rhs_val) |rhs_val| {
-                    if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) {
+                    if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
                         return sema.addConstUndef(resolved_type);
                     }
                 }
@@ -13600,6 +13676,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }
 
 fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13610,8 +13687,8 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
 
@@ -13620,20 +13697,17 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag() == .Vector;
-
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const rhs_scalar_ty = rhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .div_trunc);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
@@ -13658,17 +13732,20 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     // value (zero) for which the division would be illegal behavior.
     // If the lhs is undefined, result is undefined.
     if (maybe_lhs_val) |lhs_val| {
-        if (!lhs_val.isUndef()) {
+        if (!lhs_val.isUndef(mod)) {
             if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
-                const zero_val = if (is_vector) b: {
-                    break :b try Value.Tag.repeated.create(sema.arena, Value.zero);
-                } else Value.zero;
+                const scalar_zero = switch (scalar_tag) {
+                    .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
+                    .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+                    else => unreachable,
+                };
+                const zero_val = try sema.splat(resolved_type, scalar_zero);
                 return sema.addConstant(resolved_type, zero_val);
             }
         }
     }
     if (maybe_rhs_val) |rhs_val| {
-        if (rhs_val.isUndef()) {
+        if (rhs_val.isUndef(mod)) {
             return sema.failWithUseOfUndef(block, rhs_src);
         }
         if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -13676,10 +13753,10 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         }
     }
     if (maybe_lhs_val) |lhs_val| {
-        if (lhs_val.isUndef()) {
-            if (lhs_scalar_ty.isSignedInt() and rhs_scalar_ty.isSignedInt()) {
+        if (lhs_val.isUndef(mod)) {
+            if (lhs_scalar_ty.isSignedInt(mod) and rhs_scalar_ty.isSignedInt(mod)) {
                 if (maybe_rhs_val) |rhs_val| {
-                    if (try sema.compareAll(rhs_val, .neq, Value.negative_one, resolved_type)) {
+                    if (try sema.compareAll(rhs_val, .neq, try mod.intValue(resolved_type, -1), resolved_type)) {
                         return sema.addConstUndef(resolved_type);
                     }
                }
@@ -13690,10 +13767,10 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
         if (maybe_rhs_val) |rhs_val| {
             if (is_int) {
-                const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
-                var vector_index: usize = undefined;
-                if (!(try sema.intFitsInType(res, resolved_type, &vector_index))) {
-                    return sema.failWithIntegerOverflow(block, src, resolved_type, res, vector_index);
+                var overflow_idx: ?usize = null;
+                const res = try lhs_val.intDiv(rhs_val, resolved_type, &overflow_idx, sema.arena, mod);
+                if (overflow_idx) |vec_idx| {
+                    return sema.failWithIntegerOverflow(block, src, resolved_type, res, vec_idx);
                }
                 return sema.addConstant(resolved_type, res);
             } else {
@@ -13727,39 +13804,34 @@ fn addDivIntOverflowSafety(
     casted_rhs: Air.Inst.Ref,
     is_int: bool,
 ) CompileError!void {
+    const mod = sema.mod;
     if (!is_int) return;
 
     // If the LHS is unsigned, it cannot cause overflow.
-    if (!lhs_scalar_ty.isSignedInt()) return;
-
-    const mod = sema.mod;
-    const target = mod.getTarget();
+    if (!lhs_scalar_ty.isSignedInt(mod)) return;
 
     // If the LHS is widened to a larger integer type, no overflow is possible.
-    if (lhs_scalar_ty.intInfo(target).bits < resolved_type.intInfo(target).bits) {
+    if (lhs_scalar_ty.intInfo(mod).bits < resolved_type.intInfo(mod).bits) {
         return;
     }
 
-    const min_int = try resolved_type.minInt(sema.arena, target);
-    const neg_one_scalar = try Value.Tag.int_i64.create(sema.arena, -1);
-    const neg_one = if (resolved_type.zigTypeTag() == .Vector)
-        try Value.Tag.repeated.create(sema.arena, neg_one_scalar)
-    else
-        neg_one_scalar;
+    const min_int = try resolved_type.minInt(mod, resolved_type);
+    const neg_one_scalar = try mod.intValue(lhs_scalar_ty, -1);
+    const neg_one = try sema.splat(resolved_type, neg_one_scalar);
 
     // If the LHS is comptime-known to be not equal to the min int,
     // no overflow is possible.
     if (maybe_lhs_val) |lhs_val| {
-        if (lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return;
+        if (try lhs_val.compareAll(.neq, min_int, resolved_type, mod)) return;
     }
 
     // If the RHS is comptime-known to not be equal to -1, no overflow is possible.
     if (maybe_rhs_val) |rhs_val| {
-        if (rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return;
+        if (try rhs_val.compareAll(.neq, neg_one, resolved_type, mod)) return;
     }
 
     var ok: Air.Inst.Ref = .none;
-    if (resolved_type.zigTypeTag() == .Vector) {
+    if (resolved_type.zigTypeTag(mod) == .Vector) {
         if (maybe_lhs_val == null) {
             const min_int_ref = try sema.addConstant(resolved_type, min_int);
             ok = try block.addCmpVector(casted_lhs, min_int_ref, .neq);
@@ -13815,8 +13887,13 @@ fn addDivByZeroSafety(
     // emitted above.
     if (maybe_rhs_val != null) return;
 
-    const ok = if (resolved_type.zigTypeTag() == .Vector) ok: {
-        const zero_val = try Value.Tag.repeated.create(sema.arena, Value.zero);
+    const mod = sema.mod;
+    const scalar_zero = if (is_int)
+        try mod.intValue(resolved_type.scalarType(mod), 0)
+    else
+        try mod.floatValue(resolved_type.scalarType(mod), 0.0);
+    const ok = if (resolved_type.zigTypeTag(mod) == .Vector) ok: {
+        const zero_val = try sema.splat(resolved_type, scalar_zero);
         const zero = try sema.addConstant(resolved_type, zero_val);
         const ok = try block.addCmpVector(casted_rhs, zero, .neq);
         break :ok try block.addInst(.{
@@ -13827,7 +13904,7 @@ fn addDivByZeroSafety(
             } },
         });
     } else ok: {
-        const zero = try sema.addConstant(resolved_type, Value.zero);
+        const zero = try sema.addConstant(resolved_type, scalar_zero);
         break :ok try block.addBinOp(if (is_int) .cmp_neq else .cmp_neq_optimized, casted_rhs, zero);
     };
     try sema.addSafetyCheck(block, ok, .divide_by_zero);
@@ -13842,6 +13919,7 @@ fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst
 }
 
 fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -13852,8 +13930,8 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
 
@@ -13862,20 +13940,19 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag() == .Vector;
+    const is_vector = resolved_type.zigTypeTag(mod) == .Vector;
 
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const lhs_scalar_ty = lhs_ty.scalarType();
-    const rhs_scalar_ty = rhs_ty.scalarType();
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const lhs_scalar_ty = lhs_ty.scalarType(mod);
+    const rhs_scalar_ty = rhs_ty.scalarType(mod);
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod_rem);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
@@ -13895,20 +13972,26 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     // then emit a compile error saying you have to pick one.
     if (is_int) {
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef()) {
+            if (lhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, lhs_src);
             }
             if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
-                const zero_val = if (is_vector) b: {
-                    break :b try Value.Tag.repeated.create(sema.arena, Value.zero);
-                } else Value.zero;
+                const scalar_zero = switch (scalar_tag) {
+                    .ComptimeFloat, .Float => try mod.floatValue(resolved_type.scalarType(mod), 0.0),
+                    .ComptimeInt, .Int => try mod.intValue(resolved_type.scalarType(mod), 0),
+                    else => unreachable,
+                };
+                const zero_val = if (is_vector) (try mod.intern(.{ .aggregate = .{
+                    .ty = resolved_type.toIntern(),
+                    .storage = .{ .repeated_elem = scalar_zero.toIntern() },
+                } })).toValue() else scalar_zero;
                 return sema.addConstant(resolved_type, zero_val);
             }
-        } else if (lhs_scalar_ty.isSignedInt()) {
+        } else if (lhs_scalar_ty.isSignedInt(mod)) {
             return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
         }
         if (maybe_rhs_val) |rhs_val| {
-            if (rhs_val.isUndef()) {
+            if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
             if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -13929,7 +14012,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
                 return sema.addConstant(resolved_type, rem_result);
             }
             break :rs lhs_src;
-        } else if (rhs_scalar_ty.isSignedInt()) {
+        } else if (rhs_scalar_ty.isSignedInt(mod)) {
             return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
         } else {
             break :rs rhs_src;
@@ -13937,7 +14020,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     }
     // float operands
     if (maybe_rhs_val) |rhs_val| {
-        if (rhs_val.isUndef()) {
+        if (rhs_val.isUndef(mod)) {
             return sema.failWithUseOfUndef(block, rhs_src);
         }
         if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -13947,7 +14030,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty);
     }
     if (maybe_lhs_val) |lhs_val| {
-        if (lhs_val.isUndef() or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
+        if (lhs_val.isUndef(mod) or !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema))) {
             return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty);
         }
         return sema.addConstant(
@@ -13978,32 +14061,31 @@ fn intRem(
     lhs: Value,
     rhs: Value,
 ) CompileError!Value {
-    if (ty.zigTypeTag() == .Vector) {
-        const result_data = try sema.arena.alloc(Value, ty.vectorLen());
+    const mod = sema.mod;
+    if (ty.zigTypeTag(mod) == .Vector) {
+        const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod));
+        const scalar_ty = ty.scalarType(mod);
         for (result_data, 0..) |*scalar, i| {
-            var lhs_buf: Value.ElemValueBuffer = undefined;
-            var rhs_buf: Value.ElemValueBuffer = undefined;
-            const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf);
-            const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf);
-            scalar.* = try sema.intRemScalar(lhs_elem, rhs_elem);
+            const lhs_elem = try lhs.elemValue(mod, i);
+            const rhs_elem = try rhs.elemValue(mod, i);
+            scalar.* = try (try sema.intRemScalar(lhs_elem, rhs_elem, scalar_ty)).intern(scalar_ty, mod);
        }
-        return Value.Tag.aggregate.create(sema.arena, result_data);
+        return (try mod.intern(.{ .aggregate = .{
+            .ty = ty.toIntern(),
+            .storage = .{ .elems = result_data },
+        } })).toValue();
     }
-    return sema.intRemScalar(lhs, rhs);
+    return sema.intRemScalar(lhs, rhs, ty);
 }
 
-fn intRemScalar(
-    sema: *Sema,
-    lhs: Value,
-    rhs: Value,
-) CompileError!Value {
-    const target = sema.mod.getTarget();
+fn intRemScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) CompileError!Value {
+    const mod = sema.mod;
     // TODO is this a performance issue? maybe we should try the operation without
     // resorting to BigInt first.
     var lhs_space: Value.BigIntSpace = undefined;
     var rhs_space: Value.BigIntSpace = undefined;
-    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema);
-    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema);
+    const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema);
+    const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema);
     const limbs_q = try sema.arena.alloc(
         math.big.Limb,
         lhs_bigint.limbs.len,
@@ -14021,10 +14103,11 @@ fn intRemScalar(
     var result_q = math.big.int.Mutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
     var result_r = math.big.int.Mutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
     result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
-    return Value.fromBigInt(sema.arena, result_r.toConst());
+    return mod.intValue_big(scalar_ty, result_r.toConst());
 }
 
 fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
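intRemScalar above computes the remainder via big-integer divTrunc, which is exactly the `@rem` rule: the result takes the sign of the dividend, while `@mod` follows the divisor. A minimal illustration, not part of the diff:

    const std = @import("std");

    test "@rem follows the dividend, @mod follows the divisor" {
        try std.testing.expectEqual(@as(i32, -1), @rem(@as(i32, -7), 3));
        try std.testing.expectEqual(@as(i32, 2), @mod(@as(i32, -7), 3));
    }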
@@ -14035,8 +14118,8 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
 
@@ -14048,13 +14131,12 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .mod);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
@@ -14072,12 +14154,12 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     // If the lhs is undefined, result is undefined.
     if (is_int) {
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef()) {
+            if (lhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, lhs_src);
            }
        }
         if (maybe_rhs_val) |rhs_val| {
-            if (rhs_val.isUndef()) {
+            if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
             if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14096,7 +14178,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     }
     // float operands
     if (maybe_rhs_val) |rhs_val| {
-        if (rhs_val.isUndef()) {
+        if (rhs_val.isUndef(mod)) {
             return sema.failWithUseOfUndef(block, rhs_src);
         }
         if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14104,7 +14186,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         }
     }
     if (maybe_lhs_val) |lhs_val| {
-        if (lhs_val.isUndef()) {
+        if (lhs_val.isUndef(mod)) {
             return sema.addConstUndef(resolved_type);
         }
         if (maybe_rhs_val) |rhs_val| {
@@ -14127,6 +14209,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
 }
 
 fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
     sema.src = src;
@@ -14137,8 +14220,8 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const rhs = try sema.resolveInst(extra.rhs);
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
     try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);
 
@@ -14150,13 +14233,12 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const scalar_tag = resolved_type.scalarType(mod).zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, .rem);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
 
@@ -14174,12 +14256,12 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     // If the lhs is undefined, result is undefined.
     if (is_int) {
         if (maybe_lhs_val) |lhs_val| {
-            if (lhs_val.isUndef()) {
+            if (lhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, lhs_src);
             }
         }
         if (maybe_rhs_val) |rhs_val| {
-            if (rhs_val.isUndef()) {
+            if (rhs_val.isUndef(mod)) {
                 return sema.failWithUseOfUndef(block, rhs_src);
             }
             if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14198,7 +14280,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
     }
     // float operands
     if (maybe_rhs_val) |rhs_val| {
-        if (rhs_val.isUndef()) {
+        if (rhs_val.isUndef(mod)) {
             return sema.failWithUseOfUndef(block, rhs_src);
         }
         if (!(try rhs_val.compareAllWithZeroAdvanced(.neq, sema))) {
@@ -14206,7 +14288,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
         }
     }
     if (maybe_lhs_val) |lhs_val| {
-        if (lhs_val.isUndef()) {
+        if (lhs_val.isUndef(mod)) {
             return sema.addConstUndef(resolved_type);
         }
         if (maybe_rhs_val) |rhs_val| {
@@ -14268,7 +14350,7 @@ fn zirOverflowArithmetic(
     const lhs = try sema.coerce(block, dest_ty, uncasted_lhs, lhs_src);
     const rhs = try sema.coerce(block, rhs_dest_ty, uncasted_rhs, rhs_src);
 
-    if (dest_ty.scalarType().zigTypeTag() != .Int) {
+    if (dest_ty.scalarType(mod).zigTypeTag(mod) != .Int) {
         return sema.fail(block, src, "expected vector of integers or integer tag type, found '{}'", .{dest_ty.fmt(mod)});
     }
 
@@ -14276,30 +14358,32 @@ fn zirOverflowArithmetic(
     const maybe_lhs_val = try sema.resolveMaybeUndefVal(lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefVal(rhs);
 
     const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty);
+    const overflow_ty = mod.intern_pool.indexToKey(tuple_ty.toIntern()).anon_struct_type.types[1].toType();
 
     var result: struct {
         inst: Air.Inst.Ref = .none,
-        wrapped: Value = Value.initTag(.unreachable_value),
+        wrapped: Value = Value.@"unreachable",
         overflow_bit: Value,
     } = result: {
+        const zero_bit = try mod.intValue(Type.u1, 0);
         switch (zir_tag) {
             .add_with_overflow => {
                 // If either of the arguments is zero, `false` is returned and the other is stored
                 // to the result, even if it is undefined.
                 // Otherwise, if either of the arguments is undefined, undefined is returned.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
-                        break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs };
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
-                        break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
+                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
                 if (maybe_lhs_val) |lhs_val| {
                     if (maybe_rhs_val) |rhs_val| {
-                        if (lhs_val.isUndef() or rhs_val.isUndef()) {
+                        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
                             break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                         }
@@ -14312,12 +14396,12 @@ fn zirOverflowArithmetic(
                 // If the rhs is zero, then the result is lhs and no overflow occurred.
                 // Otherwise, if either result is undefined, both results are undefined.
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef()) {
+                    if (rhs_val.isUndef(mod)) {
                         break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                     } else if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
-                        break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
+                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     } else if (maybe_lhs_val) |lhs_val| {
-                        if (lhs_val.isUndef()) {
+                        if (lhs_val.isUndef(mod)) {
                             break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                         }
@@ -14330,29 +14414,30 @@ fn zirOverflowArithmetic(
                 // If either of the arguments is zero, the result is zero and no overflow occurred.
                 // If either of the arguments is one, the result is the other and no overflow occurred.
                 // Otherwise, if either of the arguments is undefined, both results are undefined.
+                const scalar_one = try mod.intValue(dest_ty.scalarType(mod), 1);
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef()) {
+                    if (!lhs_val.isUndef(mod)) {
                         if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
-                            break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
-                        } else if (try sema.compareAll(lhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) {
-                            break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs };
+                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
+                        } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
+                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
                        }
                    }
                }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef()) {
+                    if (!rhs_val.isUndef(mod)) {
                         if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
-                            break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = rhs };
-                        } else if (try sema.compareAll(rhs_val, .eq, try maybeRepeated(sema, dest_ty, Value.one), dest_ty)) {
-                            break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
+                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = rhs };
+                        } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
+                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                         }
                     }
                 }
                 if (maybe_lhs_val) |lhs_val| {
                     if (maybe_rhs_val) |rhs_val| {
-                        if (lhs_val.isUndef() or rhs_val.isUndef()) {
+                        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
                             break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                        }
@@ -14366,22 +14451,22 @@ fn zirOverflowArithmetic(
                 // If rhs is zero, the result is lhs (even if undefined) and no overflow occurred.
                 // Otherwise, if either of the arguments is undefined, both results are undefined.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
-                        break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef() and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
-                        break :result .{ .overflow_bit = try maybeRepeated(sema, dest_ty, Value.zero), .inst = lhs };
+                    if (!rhs_val.isUndef(mod) and (try rhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, zero_bit), .inst = lhs };
                     }
                 }
                 if (maybe_lhs_val) |lhs_val| {
                     if (maybe_rhs_val) |rhs_val| {
-                        if (lhs_val.isUndef() or rhs_val.isUndef()) {
+                        if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) {
                             break :result .{ .overflow_bit = Value.undef, .wrapped = Value.undef };
                         }
 
-                        const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, sema.mod);
+                        const result = try lhs_val.shlWithOverflow(rhs_val, dest_ty, sema.arena, mod);
                         break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
                     }
                 }
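The tuple built below by overflowArithmeticTupleType is the same shape the overflow builtins hand back to user code: the wrapped result plus a u1 (or vector of u1) overflow bit. A sketch of that user-facing shape, illustrative only and assuming the 0.11-era tuple-returning form of these builtins:

    const std = @import("std");

    test "@addWithOverflow yields a { result, overflow bit } tuple" {
        const res = @addWithOverflow(@as(u8, 255), @as(u8, 1));
        try std.testing.expectEqual(@as(u8, 0), res[0]); // wrapped result
        try std.testing.expectEqual(@as(u1, 1), res[1]); // overflow occurred
    }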
@@ -14420,40 +14505,46 @@ fn zirOverflowArithmetic(
     }
 
     if (result.inst == .none) {
-        const values = try sema.arena.alloc(Value, 2);
-        values[0] = result.wrapped;
-        values[1] = result.overflow_bit;
-        const tuple_val = try Value.Tag.aggregate.create(sema.arena, values);
-        return sema.addConstant(tuple_ty, tuple_val);
+        return sema.addConstant(tuple_ty, (try mod.intern(.{ .aggregate = .{
+            .ty = tuple_ty.toIntern(),
+            .storage = .{ .elems = &.{
+                result.wrapped.toIntern(),
+                result.overflow_bit.toIntern(),
+            } },
+        } })).toValue());
     }
 
     const element_refs = try sema.arena.alloc(Air.Inst.Ref, 2);
     element_refs[0] = result.inst;
-    element_refs[1] = try sema.addConstant(tuple_ty.structFieldType(1), result.overflow_bit);
+    element_refs[1] = try sema.addConstant(tuple_ty.structFieldType(1, mod), result.overflow_bit);
 
     return block.addAggregateInit(tuple_ty, element_refs);
 }
 
-fn maybeRepeated(sema: *Sema, ty: Type, val: Value) !Value {
-    if (ty.zigTypeTag() != .Vector) return val;
-    return Value.Tag.repeated.create(sema.arena, val);
+fn splat(sema: *Sema, ty: Type, val: Value) !Value {
+    const mod = sema.mod;
+    if (ty.zigTypeTag(mod) != .Vector) return val;
+    const repeated = try mod.intern(.{ .aggregate = .{
+        .ty = ty.toIntern(),
+        .storage = .{ .repeated_elem = val.toIntern() },
+    } });
+    return repeated.toValue();
 }
 
 fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type {
-    const ov_ty = if (ty.zigTypeTag() == .Vector) try Type.vector(sema.arena, ty.vectorLen(), Type.u1) else Type.u1;
+    const mod = sema.mod;
+    const ov_ty = if (ty.zigTypeTag(mod) == .Vector) try mod.vectorType(.{
+        .len = ty.vectorLen(mod),
+        .child = .u1_type,
+    }) else Type.u1;
 
-    const types = try sema.arena.alloc(Type, 2);
-    const values = try sema.arena.alloc(Value, 2);
-    const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
-        .types = types,
-        .values = values,
-    });
-
-    types[0] = ty;
-    types[1] = ov_ty;
-    values[0] = Value.initTag(.unreachable_value);
-    values[1] = Value.initTag(.unreachable_value);
-
-    return tuple_ty;
+    const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() };
+    const values = [2]InternPool.Index{ .none, .none };
+    const tuple_ty = try mod.intern(.{ .anon_struct_type = .{
+        .types = &types,
+        .values = &values,
+        .names = &.{},
+    } });
+    return tuple_ty.toType();
 }
 
 fn analyzeArithmetic(
@@ -14468,13 +14559,14 @@ fn analyzeArithmetic(
     rhs_src: LazySrcLoc,
     want_safety: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const lhs_ty = sema.typeOf(lhs);
     const rhs_ty = sema.typeOf(rhs);
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
 
-    if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) {
+    if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize(mod)) {
         .One, .Slice => {},
         .Many, .C => {
             const air_tag: Air.Inst.Tag = switch (zir_tag) {
@@ -14491,18 +14583,16 @@ fn analyzeArithmetic(
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
 
-    const is_vector = resolved_type.zigTypeTag() == .Vector;
-
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
 
-    const scalar_tag = resolved_type.scalarType().zigTypeTag();
+    const scalar_type = resolved_type.scalarType(mod);
+    const scalar_tag = scalar_type.zigTypeTag(mod);
 
     const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt;
 
     try sema.checkArithmeticOp(block, src, scalar_tag, lhs_zig_ty_tag, rhs_zig_ty_tag, zir_tag);
 
-    const mod = sema.mod;
     const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(casted_lhs);
     const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(casted_rhs);
     const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: {
@@ -14516,12 +14606,12 @@ fn analyzeArithmetic(
                 // overflow (max_int), causing illegal behavior.
                 // For floats: either operand being undef makes the result undef.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                         return casted_rhs;
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef()) {
+                    if (rhs_val.isUndef(mod)) {
                         if (is_int) {
                             return sema.failWithUseOfUndef(block, rhs_src);
                         } else {
                             return sema.addConstUndef(resolved_type);
                         }
                     }
@@ -14534,7 +14624,7 @@ fn analyzeArithmetic(
                 }
                 const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .add_optimized else .add;
                 if (maybe_lhs_val) |lhs_val| {
-                    if (lhs_val.isUndef()) {
+                    if (lhs_val.isUndef(mod)) {
                         if (is_int) {
                             return sema.failWithUseOfUndef(block, lhs_src);
                         } else {
@@ -14543,16 +14633,16 @@ fn analyzeArithmetic(
                     }
                     if (maybe_rhs_val) |rhs_val| {
                         if (is_int) {
-                            const sum = try sema.intAdd(lhs_val, rhs_val, resolved_type);
-                            var vector_index: usize = undefined;
-                            if (!(try sema.intFitsInType(sum, resolved_type, &vector_index))) {
-                                return sema.failWithIntegerOverflow(block, src, resolved_type, sum, vector_index);
+                            var overflow_idx: ?usize = null;
+                            const sum = try sema.intAdd(lhs_val, rhs_val, resolved_type, &overflow_idx);
+                            if (overflow_idx) |vec_idx| {
+                                return sema.failWithIntegerOverflow(block, src, resolved_type, sum, vec_idx);
                             }
                             return sema.addConstant(resolved_type, sum);
                         } else {
                             return sema.addConstant(
                                 resolved_type,
-                                try sema.floatAdd(lhs_val, rhs_val, resolved_type),
+                                try Value.floatAdd(lhs_val, rhs_val, resolved_type, sema.arena, mod),
                             );
                         }
                     } else break :rs .{ .src = rhs_src, .air_tag = air_tag };
@@ -14563,13 +14653,13 @@ fn analyzeArithmetic(
                 // If either of the operands are zero, the other operand is returned.
                 // If either of the operands are undefined, the result is undefined.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                         return casted_rhs;
                     }
                 }
                 const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .addwrap_optimized else .addwrap;
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef()) {
+                    if (rhs_val.isUndef(mod)) {
                         return sema.addConstUndef(resolved_type);
                     }
                     if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
@@ -14588,12 +14678,12 @@ fn analyzeArithmetic(
                 // If either of the operands are zero, then the other operand is returned.
                 // If either of the operands are undefined, the result is undefined.
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef() and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
+                    if (!lhs_val.isUndef(mod) and (try lhs_val.compareAllWithZeroAdvanced(.eq, sema))) {
                         return casted_rhs;
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef()) {
+                    if (rhs_val.isUndef(mod)) {
                         return sema.addConstUndef(resolved_type);
                     }
                     if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
@@ -14601,7 +14691,7 @@ fn analyzeArithmetic(
                     }
                     if (maybe_lhs_val) |lhs_val| {
                         const val = if (scalar_tag == .ComptimeInt)
-                            try sema.intAdd(lhs_val, rhs_val, resolved_type)
+                            try sema.intAdd(lhs_val, rhs_val, resolved_type, undefined)
                         else
                             try lhs_val.intAddSat(rhs_val, resolved_type, sema.arena, mod);
 
@@ -14618,7 +14708,7 @@ fn analyzeArithmetic(
                 // overflow, causing illegal behavior.
                 // For floats: either operand being undef makes the result undef.
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef()) {
+                    if (rhs_val.isUndef(mod)) {
                         if (is_int) {
                             return sema.failWithUseOfUndef(block, rhs_src);
                         } else {
@@ -14631,7 +14721,7 @@ fn analyzeArithmetic(
                 }
                 const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .sub_optimized else .sub;
                 if (maybe_lhs_val) |lhs_val| {
-                    if (lhs_val.isUndef()) {
+                    if (lhs_val.isUndef(mod)) {
                         if (is_int) {
                             return sema.failWithUseOfUndef(block, lhs_src);
                         } else {
@@ -14640,16 +14730,16 @@ fn analyzeArithmetic(
                     }
                     if (maybe_rhs_val) |rhs_val| {
                         if (is_int) {
-                            const diff = try sema.intSub(lhs_val, rhs_val, resolved_type);
-                            var vector_index: usize = undefined;
-                            if (!(try sema.intFitsInType(diff, resolved_type, &vector_index))) {
-                                return sema.failWithIntegerOverflow(block, src, resolved_type, diff, vector_index);
+                            var overflow_idx: ?usize = null;
+                            const diff = try sema.intSub(lhs_val, rhs_val, resolved_type, &overflow_idx);
+                            if (overflow_idx) |vec_idx| {
+                                return sema.failWithIntegerOverflow(block, src, resolved_type, diff, vec_idx);
                             }
                             return sema.addConstant(resolved_type, diff);
                         } else {
                             return sema.addConstant(
                                 resolved_type,
-                                try sema.floatSub(lhs_val, rhs_val, resolved_type),
+                                try Value.floatSub(lhs_val, rhs_val, resolved_type, sema.arena, mod),
                             );
                         }
                     } else break :rs .{ .src = rhs_src, .air_tag = air_tag };
@@ -14660,7 +14750,7 @@ fn analyzeArithmetic(
                 // If the RHS is zero, then the other operand is returned, even if it is undefined.
                 // If either of the operands are undefined, the result is undefined.
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef()) {
+                    if (rhs_val.isUndef(mod)) {
                         return sema.addConstUndef(resolved_type);
                     }
                     if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
@@ -14669,7 +14759,7 @@ fn analyzeArithmetic(
                     }
                 }
                 const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .subwrap_optimized else .subwrap;
                 if (maybe_lhs_val) |lhs_val| {
-                    if (lhs_val.isUndef()) {
+                    if (lhs_val.isUndef(mod)) {
                         return sema.addConstUndef(resolved_type);
                     }
                     if (maybe_rhs_val) |rhs_val| {
@@ -14685,7 +14775,7 @@ fn analyzeArithmetic(
                 // If the RHS is zero, result is LHS.
                 // If either of the operands are undefined, result is undefined.
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef()) {
+                    if (rhs_val.isUndef(mod)) {
                         return sema.addConstUndef(resolved_type);
                     }
                     if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) {
@@ -14693,12 +14783,12 @@ fn analyzeArithmetic(
                     }
                 }
                 if (maybe_lhs_val) |lhs_val| {
-                    if (lhs_val.isUndef()) {
+                    if (lhs_val.isUndef(mod)) {
                         return sema.addConstUndef(resolved_type);
                     }
                     if (maybe_rhs_val) |rhs_val| {
                         const val = if (scalar_tag == .ComptimeInt)
-                            try sema.intSub(lhs_val, rhs_val, resolved_type)
+                            try sema.intSub(lhs_val, rhs_val, resolved_type, undefined)
                         else
                             try lhs_val.intSubSat(rhs_val, resolved_type, sema.arena, mod);
 
@@ -14718,62 +14808,74 @@ fn analyzeArithmetic(
                 // If either of the operands are inf, and the other operand is zero,
                 // the result is nan.
                 // If either of the operands are nan, the result is nan.
+                const scalar_zero = switch (scalar_tag) {
+                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0),
+                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 0),
+                    else => unreachable,
+                };
+                const scalar_one = switch (scalar_tag) {
+                    .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0),
+                    .ComptimeInt, .Int => try mod.intValue(scalar_type, 1),
+                    else => unreachable,
+                };
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef()) {
-                        if (lhs_val.isNan()) {
+                    if (!lhs_val.isUndef(mod)) {
+                        if (lhs_val.isNan(mod)) {
                             return sema.addConstant(resolved_type, lhs_val);
                         }
                         if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) lz: {
                             if (maybe_rhs_val) |rhs_val| {
-                                if (rhs_val.isNan()) {
+                                if (rhs_val.isNan(mod)) {
                                     return sema.addConstant(resolved_type, rhs_val);
                                 }
-                                if (rhs_val.isInf()) {
-                                    return sema.addConstant(resolved_type, try Value.Tag.float_32.create(sema.arena, std.math.nan_f32));
+                                if (rhs_val.isInf(mod)) {
+                                    return sema.addConstant(
+                                        resolved_type,
+                                        try mod.floatValue(resolved_type, std.math.nan_f128),
+                                    );
                                 }
                             } else if (resolved_type.isAnyFloat()) {
                                 break :lz;
                             }
-                            const zero_val = if (is_vector) b: {
-                                break :b try Value.Tag.repeated.create(sema.arena, Value.zero);
-                            } else Value.zero;
+                            const zero_val = try sema.splat(resolved_type, scalar_zero);
                             return sema.addConstant(resolved_type, zero_val);
                         }
-                        if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) {
+                        if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) {
                             return casted_rhs;
                         }
                     }
                 }
                 const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mul_optimized else .mul;
                 if (maybe_rhs_val) |rhs_val| {
-                    if (rhs_val.isUndef()) {
+                    if (rhs_val.isUndef(mod)) {
                         if (is_int) {
                             return sema.failWithUseOfUndef(block, rhs_src);
                         } else {
                             return sema.addConstUndef(resolved_type);
                         }
                     }
-                    if (rhs_val.isNan()) {
+                    if (rhs_val.isNan(mod)) {
                         return sema.addConstant(resolved_type, rhs_val);
                     }
                     if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) rz: {
                         if (maybe_lhs_val) |lhs_val| {
-                            if (lhs_val.isInf()) {
-                                return sema.addConstant(resolved_type, try Value.Tag.float_32.create(sema.arena, std.math.nan_f32));
sema.addConstant(resolved_type, try Value.Tag.float_32.create(sema.arena, std.math.nan_f32)); + if (lhs_val.isInf(mod)) { + return sema.addConstant( + resolved_type, + try mod.floatValue(resolved_type, std.math.nan_f128), + ); } } else if (resolved_type.isAnyFloat()) { break :rz; } - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { if (is_int) { return sema.failWithUseOfUndef(block, lhs_src); } else { @@ -14781,16 +14883,16 @@ fn analyzeArithmetic( } } if (is_int) { - const product = try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod); - var vector_index: usize = undefined; - if (!(try sema.intFitsInType(product, resolved_type, &vector_index))) { - return sema.failWithIntegerOverflow(block, src, resolved_type, product, vector_index); + var overflow_idx: ?usize = null; + const product = try lhs_val.intMul(rhs_val, resolved_type, &overflow_idx, sema.arena, mod); + if (overflow_idx) |vec_idx| { + return sema.failWithIntegerOverflow(block, src, resolved_type, product, vec_idx); } return sema.addConstant(resolved_type, product); } else { return sema.addConstant( resolved_type, - try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, sema.mod), + try lhs_val.floatMul(rhs_val, resolved_type, sema.arena, mod), ); } } else break :rs .{ .src = lhs_src, .air_tag = air_tag }; @@ -14801,40 +14903,46 @@ fn analyzeArithmetic( // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
+ const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), + else => unreachable, + }; + const scalar_one = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 1), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_rhs; } } } const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mulwrap_optimized else .mulwrap; if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } return sema.addConstant( resolved_type, - try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, sema.mod), + try lhs_val.numberMulWrap(rhs_val, resolved_type, sema.arena, mod), ); } else break :rs .{ .src = lhs_src, .air_tag = air_tag }; } else break :rs .{ .src = rhs_src, .air_tag = air_tag }; @@ -14844,41 +14952,47 @@ fn analyzeArithmetic( // If either of the operands are zero, result is zero. // If either of the operands are one, result is the other operand. // If either of the operands are undefined, result is undefined. 
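The `.mulwrap` branch implements the `*%` operator; at the user level the wrapping behavior looks like this (minimal sketch, values illustrative):

const std = @import("std");

test "wrapping multiplication" {
    var a: u8 = 200;
    // 400 mod 2^8 == 144; plain `*` would be safety-checked undefined behavior here.
    a = a *% 2;
    try std.testing.expectEqual(@as(u8, 144), a);
}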
+ const scalar_zero = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 0.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 0), + else => unreachable, + }; + const scalar_one = switch (scalar_tag) { + .ComptimeFloat, .Float => try mod.floatValue(scalar_type, 1.0), + .ComptimeInt, .Int => try mod.intValue(scalar_type, 1), + else => unreachable, + }; if (maybe_lhs_val) |lhs_val| { - if (!lhs_val.isUndef()) { + if (!lhs_val.isUndef(mod)) { if (try lhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(lhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(lhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_rhs; } } } if (maybe_rhs_val) |rhs_val| { - if (rhs_val.isUndef()) { + if (rhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } if (try rhs_val.compareAllWithZeroAdvanced(.eq, sema)) { - const zero_val = if (is_vector) b: { - break :b try Value.Tag.repeated.create(sema.arena, Value.zero); - } else Value.zero; + const zero_val = try sema.splat(resolved_type, scalar_zero); return sema.addConstant(resolved_type, zero_val); } - if (try sema.compareAll(rhs_val, .eq, Value.one, resolved_type)) { + if (try sema.compareAll(rhs_val, .eq, try sema.splat(resolved_type, scalar_one), resolved_type)) { return casted_lhs; } if (maybe_lhs_val) |lhs_val| { - if (lhs_val.isUndef()) { + if (lhs_val.isUndef(mod)) { return sema.addConstUndef(resolved_type); } const val = if (scalar_tag == .ComptimeInt) - try lhs_val.intMul(rhs_val, resolved_type, sema.arena, sema.mod) + try lhs_val.intMul(rhs_val, resolved_type, undefined, sema.arena, mod) else - try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, sema.mod); + try lhs_val.intMulSat(rhs_val, resolved_type, sema.arena, mod); return sema.addConstant(resolved_type, val); } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat }; @@ -14910,7 +15024,7 @@ fn analyzeArithmetic( } }, }); const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty); - const any_ov_bit = if (resolved_type.zigTypeTag() == .Vector) + const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector) try block.addInst(.{ .tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce, .data = .{ .reduce = .{ @@ -14920,7 +15034,7 @@ fn analyzeArithmetic( }) else ov_bit; - const zero_ov = try sema.addConstant(Type.u1, Value.zero); + const zero_ov = try sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); const no_ov = try block.addBinOp(.cmp_eq, any_ov_bit, zero_ov); try sema.addSafetyCheck(block, no_ov, .integer_overflow); @@ -14944,15 +15058,12 @@ fn analyzePtrArithmetic( // TODO if the operand is comptime-known to be negative, or is a negative int, // coerce to isize instead of usize. 
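The `.mul_sat` branch and the safety-checked lowering that follows correspond to `*|` and to the overflow-bit computation that `@mulWithOverflow` exposes; a minimal sketch (assumes the tuple-returning form of `@mulWithOverflow`, consistent with `op_ov_tuple_ty` above):

const std = @import("std");

test "saturating and overflow-checked multiplication" {
    // *| clamps at the bounds of the result type instead of wrapping.
    try std.testing.expectEqual(@as(u8, 255), @as(u8, 200) *| 2);
    // The safety check reduces the overflow bit(s) and compares against zero,
    // i.e. the u1 that @mulWithOverflow returns alongside the result.
    const res = @mulWithOverflow(@as(u8, 200), 2);
    try std.testing.expectEqual(@as(u8, 144), res[0]);
    try std.testing.expectEqual(@as(u1, 1), res[1]);
}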
const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src); - const target = sema.mod.getTarget(); + const mod = sema.mod; const opt_ptr_val = try sema.resolveMaybeUndefVal(ptr); const opt_off_val = try sema.resolveDefinedValue(block, offset_src, offset); const ptr_ty = sema.typeOf(ptr); - const ptr_info = ptr_ty.ptrInfo().data; - const elem_ty = if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Array) - ptr_info.pointee_type.childType() - else - ptr_info.pointee_type; + const ptr_info = ptr_ty.ptrInfo(mod); + assert(ptr_info.size == .Many or ptr_info.size == .C); const new_ptr_ty = t: { // Calculate the new pointer alignment. @@ -14963,9 +15074,9 @@ fn analyzePtrArithmetic( } // If the addend is not a comptime-known value we can still count on // it being a multiple of the type size. - const elem_size = elem_ty.abiSize(target); + const elem_size = ptr_info.pointee_type.abiSize(mod); const addend = if (opt_off_val) |off_val| a: { - const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(target)); + const off_int = try sema.usizeCast(block, offset_src, off_val.toUnsignedInt(mod)); break :a elem_size * off_int; } else elem_size; @@ -14974,7 +15085,7 @@ fn analyzePtrArithmetic( // non zero). const new_align = @as(u32, 1) << @intCast(u5, @ctz(addend | ptr_info.@"align")); - break :t try Type.ptr(sema.arena, sema.mod, .{ + break :t try Type.ptr(sema.arena, mod, .{ .pointee_type = ptr_info.pointee_type, .sentinel = ptr_info.sentinel, .@"align" = new_align, @@ -14989,24 +15100,24 @@ fn analyzePtrArithmetic( const runtime_src = rs: { if (opt_ptr_val) |ptr_val| { if (opt_off_val) |offset_val| { - if (ptr_val.isUndef()) return sema.addConstUndef(new_ptr_ty); + if (ptr_val.isUndef(mod)) return sema.addConstUndef(new_ptr_ty); - const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(target)); + const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt(mod)); if (offset_int == 0) return ptr; - if (try ptr_val.getUnsignedIntAdvanced(target, sema)) |addr| { - const elem_size = elem_ty.abiSize(target); + if (try ptr_val.getUnsignedIntAdvanced(mod, sema)) |addr| { + const elem_size = ptr_info.pointee_type.abiSize(mod); const new_addr = switch (air_tag) { .ptr_add => addr + elem_size * offset_int, .ptr_sub => addr - elem_size * offset_int, else => unreachable, }; - const new_ptr_val = try Value.Tag.int_u64.create(sema.arena, new_addr); + const new_ptr_val = try mod.ptrIntValue(new_ptr_ty, new_addr); return sema.addConstant(new_ptr_ty, new_ptr_val); } if (air_tag == .ptr_sub) { return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{}); } - const new_ptr_val = try ptr_val.elemPtr(ptr_ty, sema.arena, offset_int, sema.mod); + const new_ptr_val = try ptr_val.elemPtr(new_ptr_ty, offset_int, mod); return sema.addConstant(new_ptr_ty, new_ptr_val); } else break :rs offset_src; } else break :rs ptr_src; @@ -15052,7 +15163,7 @@ fn zirAsm( const inputs_len = @truncate(u5, extended.small >> 5); const clobbers_len = @truncate(u5, extended.small >> 10); const is_volatile = @truncate(u1, extended.small >> 15) != 0; - const is_global_assembly = sema.func == null; + const is_global_assembly = sema.func_index == .none; const asm_source: []const u8 = if (tmpl_is_expr) blk: { const tmpl = @intToEnum(Zir.Inst.Ref, extra.data.asm_source); @@ -15116,6 +15227,7 @@ fn zirAsm( const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len); const inputs = try sema.arena.alloc(ConstraintName, 
inputs_len); + const mod = sema.mod; for (args, 0..) |*arg, arg_i| { const input = sema.code.extraData(Zir.Inst.Asm.Input, extra_i); @@ -15123,9 +15235,9 @@ fn zirAsm( const uncasted_arg = try sema.resolveInst(input.data.operand); const uncasted_arg_ty = sema.typeOf(uncasted_arg); - switch (uncasted_arg_ty.zigTypeTag()) { - .ComptimeInt => arg.* = try sema.coerce(block, Type.initTag(.usize), uncasted_arg, src), - .ComptimeFloat => arg.* = try sema.coerce(block, Type.initTag(.f64), uncasted_arg, src), + switch (uncasted_arg_ty.zigTypeTag(mod)) { + .ComptimeInt => arg.* = try sema.coerce(block, Type.usize, uncasted_arg, src), + .ComptimeFloat => arg.* = try sema.coerce(block, Type.f64, uncasted_arg, src), else => { arg.* = uncasted_arg; try sema.queueFullTypeResolution(uncasted_arg_ty); @@ -15205,6 +15317,7 @@ fn zirCmpEq( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src: LazySrcLoc = inst_data.src(); @@ -15215,8 +15328,8 @@ fn zirCmpEq( const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - const lhs_ty_tag = lhs_ty.zigTypeTag(); - const rhs_ty_tag = rhs_ty.zigTypeTag(); + const lhs_ty_tag = lhs_ty.zigTypeTag(mod); + const rhs_ty_tag = rhs_ty.zigTypeTag(mod); if (lhs_ty_tag == .Null and rhs_ty_tag == .Null) { // null == null, null != null if (op == .eq) { @@ -15227,16 +15340,16 @@ fn zirCmpEq( } // comparing null with optionals - if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr())) { + if (lhs_ty_tag == .Null and (rhs_ty_tag == .Optional or rhs_ty.isCPtr(mod))) { return sema.analyzeIsNull(block, src, rhs, op == .neq); } - if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr())) { + if (rhs_ty_tag == .Null and (lhs_ty_tag == .Optional or lhs_ty.isCPtr(mod))) { return sema.analyzeIsNull(block, src, lhs, op == .neq); } if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) { const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty; - return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(sema.mod)}); + return sema.fail(block, src, "comparison of '{}' with null", .{non_null_type.fmt(mod)}); } if (lhs_ty_tag == .Union and (rhs_ty_tag == .EnumLiteral or rhs_ty_tag == .Enum)) { @@ -15250,15 +15363,12 @@ fn zirCmpEq( const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(lhs)) |lval| { if (try sema.resolveMaybeUndefVal(rhs)) |rval| { - if (lval.isUndef() or rval.isUndef()) { + if (lval.isUndef(mod) or rval.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - // TODO optimisation opportunity: evaluate if mem.eql is faster with the names, - // or calling to Module.getErrorValue to get the values and then compare them is - // faster. 
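For reference, the null-comparison cases handled in `zirCmpEq` above are observable at the user level as (minimal sketch):

const std = @import("std");

test "comparing optionals and C pointers against null" {
    const opt: ?u32 = null;
    try std.testing.expect(opt == null);
    // C pointers participate in null comparisons just like optionals.
    const p: [*c]const u8 = null;
    try std.testing.expect(p == null);
}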
- const lhs_name = lval.castTag(.@"error").?.data.name; - const rhs_name = rval.castTag(.@"error").?.data.name; - if (mem.eql(u8, lhs_name, rhs_name) == (op == .eq)) { + const lkey = mod.intern_pool.indexToKey(lval.toIntern()); + const rkey = mod.intern_pool.indexToKey(rval.toIntern()); + if ((lkey.err.name == rkey.err.name) == (op == .eq)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -15276,7 +15386,7 @@ fn zirCmpEq( if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) { const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs); const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs); - if (lhs_as_type.eql(rhs_as_type, sema.mod) == (op == .eq)) { + if (lhs_as_type.eql(rhs_as_type, mod) == (op == .eq)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -15295,12 +15405,13 @@ fn analyzeCmpUnionTag( tag_src: LazySrcLoc, op: std.math.CompareOperator, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const union_ty = try sema.resolveTypeFields(sema.typeOf(un)); - const union_tag_ty = union_ty.unionTagType() orelse { + const union_tag_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(block, un_src, "comparison of union and enum literal is only valid for tagged union types", .{}); errdefer msg.destroy(sema.gpa); - try sema.mod.errNoteNonLazy(union_ty.declSrcLoc(sema.mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(sema.mod)}); + try mod.errNoteNonLazy(union_ty.declSrcLoc(mod), msg, "union '{}' is not a tagged union", .{union_ty.fmt(mod)}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -15311,9 +15422,9 @@ fn analyzeCmpUnionTag( const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src); if (try sema.resolveMaybeUndefVal(coerced_tag)) |enum_val| { - if (enum_val.isUndef()) return sema.addConstUndef(Type.bool); - const field_ty = union_ty.unionFieldType(enum_val, sema.mod); - if (field_ty.zigTypeTag() == .NoReturn) { + if (enum_val.isUndef(mod)) return sema.addConstUndef(Type.bool); + const field_ty = union_ty.unionFieldType(enum_val, mod); + if (field_ty.zigTypeTag(mod) == .NoReturn) { return Air.Inst.Ref.bool_false; } } @@ -15352,34 +15463,35 @@ fn analyzeCmp( rhs_src: LazySrcLoc, is_equality_cmp: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - if (lhs_ty.zigTypeTag() != .Optional and rhs_ty.zigTypeTag() != .Optional) { + if (lhs_ty.zigTypeTag(mod) != .Optional and rhs_ty.zigTypeTag(mod) != .Optional) { try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); } - if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector and rhs_ty.zigTypeTag(mod) == .Vector) { return sema.cmpVector(block, src, lhs, rhs, op, lhs_src, rhs_src); } - if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) { + if (lhs_ty.isNumeric(mod) and rhs_ty.isNumeric(mod)) { // This operation allows any combination of integer and float types, regardless of the // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for // numeric types. 
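The interned error-name comparison and the mixed numeric comparison path above implement the following user-visible rules (minimal sketch):

const std = @import("std");

test "error and mixed numeric comparisons" {
    // Error values compare by name, now an interned-index identity check.
    try std.testing.expect(error.OutOfMemory == error.OutOfMemory);
    // cmpNumeric accepts any mix of integer and float operands,
    // so no peer type resolution is performed for these.
    var i: i64 = 3;
    const f: f32 = 3.5;
    try std.testing.expect(i < f);
    i = 4;
    try std.testing.expect(i > f);
}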
return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src); } - if (is_equality_cmp and lhs_ty.zigTypeTag() == .ErrorUnion and rhs_ty.zigTypeTag() == .ErrorSet) { + if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorUnion and rhs_ty.zigTypeTag(mod) == .ErrorSet) { const casted_lhs = try sema.analyzeErrUnionCode(block, lhs_src, lhs); return sema.cmpSelf(block, src, casted_lhs, rhs, op, lhs_src, rhs_src); } - if (is_equality_cmp and lhs_ty.zigTypeTag() == .ErrorSet and rhs_ty.zigTypeTag() == .ErrorUnion) { + if (is_equality_cmp and lhs_ty.zigTypeTag(mod) == .ErrorSet and rhs_ty.zigTypeTag(mod) == .ErrorUnion) { const casted_rhs = try sema.analyzeErrUnionCode(block, rhs_src, rhs); return sema.cmpSelf(block, src, lhs, casted_rhs, op, lhs_src, rhs_src); } const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]?LazySrcLoc{ lhs_src, rhs_src } }); - if (!resolved_type.isSelfComparable(is_equality_cmp)) { + if (!resolved_type.isSelfComparable(mod, is_equality_cmp)) { return sema.fail(block, src, "operator {s} not allowed for type '{}'", .{ - compareOperatorName(op), resolved_type.fmt(sema.mod), + compareOperatorName(op), resolved_type.fmt(mod), }); } const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); @@ -15408,15 +15520,19 @@ fn cmpSelf( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const resolved_type = sema.typeOf(casted_lhs); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| { - if (lhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (lhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { - if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); - if (resolved_type.zigTypeTag() == .Vector) { - const result_ty = try Type.vector(sema.arena, resolved_type.vectorLen(), Type.bool); + if (resolved_type.zigTypeTag(mod) == .Vector) { + const result_ty = try mod.vectorType(.{ + .len = resolved_type.vectorLen(mod), + .child = .bool_type, + }); const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type); return sema.addConstant(result_ty, cmp_val); } @@ -15427,7 +15543,7 @@ fn cmpSelf( return Air.Inst.Ref.bool_false; } } else { - if (resolved_type.zigTypeTag() == .Bool) { + if (resolved_type.zigTypeTag(mod) == .Bool) { // We can lower bool eq/neq more efficiently. return sema.runtimeBoolCmp(block, src, op, casted_rhs, lhs_val.toBool(), rhs_src); } @@ -15436,9 +15552,9 @@ fn cmpSelf( } else { // For bools, we still check the other operand, because we can lower // bool eq/neq more efficiently. 
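The vector branch of `cmpSelf` above produces a `@Vector(n, bool)` result type; a minimal user-level sketch:

const std = @import("std");

test "vector comparison produces a bool vector" {
    const a: @Vector(4, i32) = .{ 1, 2, 3, 4 };
    const b: @Vector(4, i32) = .{ 4, 3, 2, 1 };
    const lt = a < b; // element-wise; result is @Vector(4, bool)
    try std.testing.expect(lt[0] and lt[1] and !lt[2] and !lt[3]);
}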
- if (resolved_type.zigTypeTag() == .Bool) { + if (resolved_type.zigTypeTag(mod) == .Bool) { if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { - if (rhs_val.isUndef()) return sema.addConstUndef(Type.bool); + if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src); } } @@ -15446,7 +15562,7 @@ fn cmpSelf( } }; try sema.requireRuntimeBlock(block, src, runtime_src); - if (resolved_type.zigTypeTag() == .Vector) { + if (resolved_type.zigTypeTag(mod) == .Vector) { return block.addCmpVector(casted_lhs, casted_rhs, op); } const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized); @@ -15475,16 +15591,17 @@ fn runtimeBoolCmp( } fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ty = try sema.resolveType(block, operand_src, inst_data.operand); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Fn, .NoReturn, .Undefined, .Null, .Opaque, - => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(sema.mod)}), + => return sema.fail(block, operand_src, "no size available for type '{}'", .{ty.fmt(mod)}), .Type, .EnumLiteral, @@ -15509,25 +15626,25 @@ fn zirSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. .AnyFrame, => {}, } - const target = sema.mod.getTarget(); - const val = try ty.lazyAbiSize(target, sema.arena); - if (val.tag() == .lazy_size) { + const val = try ty.lazyAbiSize(mod); + if (val.isLazySize(mod)) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); } fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Fn, .NoReturn, .Undefined, .Null, .Opaque, - => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(sema.mod)}), + => return sema.fail(block, operand_src, "no size available for type '{}'", .{operand_ty.fmt(mod)}), .Type, .EnumLiteral, @@ -15552,8 +15669,7 @@ fn zirBitSizeOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A .AnyFrame, => {}, } - const target = sema.mod.getTarget(); - const bit_size = try operand_ty.bitSizeAdvanced(target, sema); + const bit_size = try operand_ty.bitSizeAdvanced(mod, sema); return sema.addIntUnsigned(Type.comptime_int, bit_size); } @@ -15562,17 +15678,13 @@ fn zirThis( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { - const this_decl_index = block.namespace.getDeclIndex(); + const mod = sema.mod; + const this_decl_index = mod.namespaceDeclIndex(block.namespace); const src = LazySrcLoc.nodeOffset(@bitCast(i32, extended.operand)); return sema.analyzeDeclVal(block, src, this_decl_index); } -fn zirClosureCapture( - sema: *Sema, - block: *Block, - inst: Zir.Inst.Index, -) CompileError!void { - // TODO: Compile error when closed over values are modified +fn zirClosureCapture(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = 
sema.code.instructions.items(.data)[inst].un_tok; // Closures are not necessarily constant values. For example, the // code might do something like this: @@ -15580,26 +15692,24 @@ fn zirClosureCapture( // ...in which case the closure_capture instruction has access to a runtime // value only. In such case we preserve the type and use a dummy runtime value. const operand = try sema.resolveInst(inst_data.operand); - const val = (try sema.resolveMaybeUndefValAllowVariables(operand)) orelse - Value.initTag(.unreachable_value); - - try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, .{ - .ty = try sema.typeOf(operand).copy(sema.perm_arena), - .val = try val.copy(sema.perm_arena), - }); + const ty = sema.typeOf(operand); + const capture: CaptureScope.Capture = blk: { + if (try sema.resolveMaybeUndefValAllowVariables(operand)) |val| { + const ip_index = try val.intern(ty, sema.mod); + break :blk .{ .comptime_val = ip_index }; + } + break :blk .{ .runtime_val = ty.toIntern() }; + }; + try block.wip_capture_scope.captures.putNoClobber(sema.gpa, inst, capture); } -fn zirClosureGet( - sema: *Sema, - block: *Block, - inst: Zir.Inst.Index, -) CompileError!Air.Inst.Ref { - // TODO CLOSURE: Test this with inline functions +fn zirClosureGet(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].inst_node; - var scope: *CaptureScope = sema.mod.declPtr(block.src_decl).src_scope.?; + var scope: *CaptureScope = mod.declPtr(block.src_decl).src_scope.?; // Note: The target closure must be in this scope list. // If it's not here, the zir is invalid, or the list is broken. - const tv = while (true) { + const capture = while (true) { // Note: We don't need to add a dependency here, because // decls always depend on their lexical parents. @@ -15612,17 +15722,17 @@ fn zirClosureGet( } return error.AnalysisFail; } - if (scope.captures.getPtr(inst_data.inst)) |tv| { - break tv; + if (scope.captures.get(inst_data.inst)) |capture| { + break capture; } scope = scope.parent.?; }; - if (tv.val.tag() == .unreachable_value and !block.is_typeof and sema.func == null) { + if (capture == .runtime_val and !block.is_typeof and sema.func_index == .none) { const msg = msg: { const name = name: { - const file = sema.owner_decl.getFileScope(); - const tree = file.getTree(sema.mod.gpa) catch |err| { + const file = sema.owner_decl.getFileScope(mod); + const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15646,11 +15756,11 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.tag() == .unreachable_value and !block.is_typeof and !block.is_comptime and sema.func != null) { + if (capture == .runtime_val and !block.is_typeof and !block.is_comptime and sema.func_index != .none) { const msg = msg: { const name = name: { - const file = sema.owner_decl.getFileScope(); - const tree = file.getTree(sema.mod.gpa) catch |err| { + const file = sema.owner_decl.getFileScope(mod); + const tree = file.getTree(sema.gpa) catch |err| { // In this case we emit a warning + a less precise source location. 
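For context, the `closure_capture`/`closure_get` plumbing above (together with `zirThis` and the lazy-size queries earlier in this hunk) backs ordinary comptime parameter capture; a minimal sketch, with `Box` as an illustrative name:

const std = @import("std");

// `T` is captured at declaration (closure_capture) and read back inside the
// returned type (closure_get); @This() resolves to the enclosing struct.
fn Box(comptime T: type) type {
    return struct {
        value: T,
        const Self = @This();
        fn init(v: T) Self {
            return .{ .value = v };
        }
    };
}

test "captured comptime values and lazy sizes" {
    const b = Box(u8).init(42);
    try std.testing.expectEqual(@as(u8, 42), b.value);
    // @sizeOf/@bitSizeOf are resolved lazily: u7 occupies 1 byte but 7 bits.
    try std.testing.expectEqual(@as(usize, 1), @sizeOf(u7));
    try std.testing.expectEqual(@as(u16, 7), @bitSizeOf(u7));
}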
log.warn("unable to load {s}: {s}", .{ file.sub_file_path, @errorName(err), @@ -15676,13 +15786,17 @@ fn zirClosureGet( return sema.failWithOwnedErrorMsg(msg); } - if (tv.val.tag() == .unreachable_value) { - assert(block.is_typeof); - // We need a dummy runtime instruction with the correct type. - return block.addTy(.alloc, tv.ty); + switch (capture) { + .runtime_val => |ty_ip_index| { + assert(block.is_typeof); + // We need a dummy runtime instruction with the correct type. + return block.addTy(.alloc, ty_ip_index.toType()); + }, + .comptime_val => |val_ip_index| { + const ty = mod.intern_pool.typeOf(val_ip_index).toType(); + return sema.addConstant(ty, val_ip_index.toValue()); + }, } - - return sema.addConstant(tv.ty, tv.val); } fn zirRetAddr( @@ -15717,345 +15831,422 @@ fn zirBuiltinSrc( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{}); - const fn_owner_decl = sema.mod.declPtr(func.owner_decl); + const fn_owner_decl = mod.declPtr(func.owner_decl); const func_name_val = blk: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const name = std.mem.span(fn_owner_decl.name); - const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, mod.intern_pool.stringToSlice(fn_owner_decl.name)); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len - 1), - try Value.Tag.bytes.create(anon_decl.arena(), bytes), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :blk try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_sentinel_0_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; const file_name_val = blk: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); // The compiler must not call realpath anywhere. - const name = try fn_owner_decl.getFileScope().fullPathZ(anon_decl.arena()); + const name = try fn_owner_decl.getFileScope(mod).fullPathZ(sema.arena); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + .sentinel = .zero_u8, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), name.len), - try Value.Tag.bytes.create(anon_decl.arena(), name[0 .. 
name.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :blk try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :blk try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_sentinel_0_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; - const field_values = try sema.arena.alloc(Value, 4); - // file: [:0]const u8, - field_values[0] = file_name_val; - // fn_name: [:0]const u8, - field_values[1] = func_name_val; - // line: u32 - field_values[2] = try Value.Tag.runtime_value.create(sema.arena, try Value.Tag.int_u64.create(sema.arena, extra.line + 1)); - // column: u32, - field_values[3] = try Value.Tag.int_u64.create(sema.arena, extra.column + 1); - - return sema.addConstant( - try sema.getBuiltinType("SourceLocation"), - try Value.Tag.aggregate.create(sema.arena, field_values), - ); + const src_loc_ty = try sema.getBuiltinType("SourceLocation"); + const fields = .{ + // file: [:0]const u8, + file_name_val, + // fn_name: [:0]const u8, + func_name_val, + // line: u32, + try mod.intern(.{ .runtime_value = .{ + .ty = .u32_type, + .val = (try mod.intValue(Type.u32, extra.line + 1)).toIntern(), + } }), + // column: u32, + (try mod.intValue(Type.u32, extra.column + 1)).toIntern(), + }; + return sema.addConstant(src_loc_ty, (try mod.intern(.{ .aggregate = .{ + .ty = src_loc_ty.toIntern(), + .storage = .{ .elems = &fields }, + } })).toValue()); } fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); const type_info_ty = try sema.getBuiltinType("Type"); - const target = sema.mod.getTarget(); + const type_info_tag_ty = type_info_ty.unionTagType(mod).?; - switch (ty.zigTypeTag()) { - .Type => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Type)), - .val = Value.void, - }), - ), - .Void => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Void)), - .val = Value.void, - }), - ), - .Bool => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Bool)), - .val = Value.void, - }), - ), - .NoReturn => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.NoReturn)), - .val = Value.void, - }), - ), - .ComptimeFloat => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeFloat)), - .val = Value.void, - }), - ), - .ComptimeInt => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ComptimeInt)), - .val = Value.void, - }), - ), - .Undefined => return sema.addConstant( - type_info_ty, - try 
Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Undefined)), - .val = Value.void, - }), - ), - .Null => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Null)), - .val = Value.void, - }), - ), - .EnumLiteral => return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.EnumLiteral)), - .val = Value.void, - }), - ), + switch (ty.zigTypeTag(mod)) { + .Type, + .Void, + .Bool, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .EnumLiteral, + => |type_info_tag| return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(type_info_tag))).toIntern(), + .val = .void_value, + } })).toValue()), .Fn => { // TODO: look into memoizing this result. - const info = ty.fnInfo(); - var params_anon_decl = try block.startAnonDecl(); defer params_anon_decl.deinit(); - const param_vals = try params_anon_decl.arena().alloc(Value, info.param_types.len); + const fn_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Fn"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); + try sema.ensureDeclAnalyzed(fn_info_decl_index); + const fn_info_decl = mod.declPtr(fn_info_decl_index); + const fn_info_ty = fn_info_decl.val.toType(); + + const param_info_decl_index = (try sema.namespaceLookup( + block, + src, + fn_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Param"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); + try sema.ensureDeclAnalyzed(param_info_decl_index); + const param_info_decl = mod.declPtr(param_info_decl_index); + const param_info_ty = param_info_decl.val.toType(); + + const param_vals = try sema.arena.alloc(InternPool.Index, mod.typeToFunc(ty).?.param_types.len); for (param_vals, 0..) 
|*param_val, i| { + const info = mod.typeToFunc(ty).?; const param_ty = info.param_types[i]; - const is_generic = param_ty.tag() == .generic_poison; - const param_ty_val = if (is_generic) - Value.null - else - try Value.Tag.opt_payload.create( - params_anon_decl.arena(), - try Value.Tag.ty.create(params_anon_decl.arena(), try param_ty.copy(params_anon_decl.arena())), - ); + const is_generic = param_ty == .generic_poison_type; + const param_ty_val = try ip.get(gpa, .{ .opt = .{ + .ty = try ip.get(gpa, .{ .opt_type = .type_type }), + .val = if (is_generic) .none else param_ty, + } }); const is_noalias = blk: { const index = std.math.cast(u5, i) orelse break :blk false; break :blk @truncate(u1, info.noalias_bits >> index) != 0; }; - const param_fields = try params_anon_decl.arena().create([3]Value); - param_fields.* = .{ + const param_fields = .{ // is_generic: bool, - Value.makeBool(is_generic), + Value.makeBool(is_generic).toIntern(), // is_noalias: bool, - Value.makeBool(is_noalias), + Value.makeBool(is_noalias).toIntern(), // type: ?type, param_ty_val, }; - param_val.* = try Value.Tag.aggregate.create(params_anon_decl.arena(), param_fields); + param_val.* = try mod.intern(.{ .aggregate = .{ + .ty = param_info_ty.toIntern(), + .storage = .{ .elems = ¶m_fields }, + } }); } const args_val = v: { - const fn_info_decl_index = (try sema.namespaceLookup( - block, - src, - type_info_ty.getNamespace().?, - "Fn", - )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, fn_info_decl_index); - try sema.ensureDeclAnalyzed(fn_info_decl_index); - const fn_info_decl = sema.mod.declPtr(fn_info_decl_index); - var fn_ty_buffer: Value.ToTypeBuffer = undefined; - const fn_ty = fn_info_decl.val.toType(&fn_ty_buffer); - const param_info_decl_index = (try sema.namespaceLookup( - block, - src, - fn_ty.getNamespace().?, - "Param", - )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, param_info_decl_index); - try sema.ensureDeclAnalyzed(param_info_decl_index); - const param_info_decl = sema.mod.declPtr(param_info_decl_index); - var param_buffer: Value.ToTypeBuffer = undefined; - const param_ty = param_info_decl.val.toType(¶m_buffer); + const new_decl_ty = try mod.arrayType(.{ + .len = param_vals.len, + .child = param_info_ty.toIntern(), + }); const new_decl = try params_anon_decl.finish( - try Type.Tag.array.create(params_anon_decl.arena(), .{ - .len = param_vals.len, - .elem_type = try param_ty.copy(params_anon_decl.arena()), - }), - try Value.Tag.aggregate.create( - params_anon_decl.arena(), - param_vals, - ), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .elems = param_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, param_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .child = param_info_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, + })).toIntern(), + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, param_vals.len)).toIntern(), + } }); }; - const ret_ty_opt = if (info.return_type.tag() != .generic_poison) - try Value.Tag.opt_payload.create( - sema.arena, - try Value.Tag.ty.create(sema.arena, info.return_type), - ) - else - Value.null; + const info = mod.typeToFunc(ty).?; + const ret_ty_opt = try mod.intern(.{ .opt = .{ + .ty = try ip.get(gpa, .{ .opt_type = .type_type }), + .val = 
if (info.return_type == .generic_poison_type) .none else info.return_type, + } }); - const field_values = try sema.arena.create([6]Value); - field_values.* = .{ + const callconv_ty = try sema.getBuiltinType("CallingConvention"); + + const field_values = .{ // calling_convention: CallingConvention, - try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.cc)), + (try mod.enumValueFieldIndex(callconv_ty, @enumToInt(info.cc))).toIntern(), // alignment: comptime_int, - try Value.Tag.int_u64.create(sema.arena, ty.abiAlignment(target)), + (try mod.intValue(Type.comptime_int, ty.abiAlignment(mod))).toIntern(), // is_generic: bool, - Value.makeBool(info.is_generic), + Value.makeBool(info.is_generic).toIntern(), // is_var_args: bool, - Value.makeBool(info.is_var_args), + Value.makeBool(info.is_var_args).toIntern(), // return_type: ?type, ret_ty_opt, // args: []const Fn.Param, args_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Fn)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Fn))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = fn_info_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Int => { - const info = ty.intInfo(target); - const field_values = try sema.arena.alloc(Value, 2); - // signedness: Signedness, - field_values[0] = try Value.Tag.enum_field_index.create( - sema.arena, - @enumToInt(info.signedness), - ); - // bits: comptime_int, - field_values[1] = try Value.Tag.int_u64.create(sema.arena, info.bits); + const int_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Int"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, int_info_decl_index); + try sema.ensureDeclAnalyzed(int_info_decl_index); + const int_info_decl = mod.declPtr(int_info_decl_index); + const int_info_ty = int_info_decl.val.toType(); - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Int)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const signedness_ty = try sema.getBuiltinType("Signedness"); + const info = ty.intInfo(mod); + const field_values = .{ + // signedness: Signedness, + try (try mod.enumValueFieldIndex(signedness_ty, @enumToInt(info.signedness))).intern(signedness_ty, mod), + // bits: u16, + (try mod.intValue(Type.u16, info.bits)).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Int))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = int_info_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Float => { - const field_values = try sema.arena.alloc(Value, 1); - // bits: comptime_int, - field_values[0] = try Value.Tag.int_u64.create(sema.arena, ty.bitSize(target)); + const float_info_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Float"), + 
)).?; + try mod.declareDeclDependency(sema.owner_decl_index, float_info_decl_index); + try sema.ensureDeclAnalyzed(float_info_decl_index); + const float_info_decl = mod.declPtr(float_info_decl_index); + const float_info_ty = float_info_decl.val.toType(); - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Float)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_vals = .{ + // bits: u16, + (try mod.intValue(Type.u16, ty.bitSize(mod))).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Float))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = float_info_ty.toIntern(), + .storage = .{ .elems = &field_vals }, + } }), + } })).toValue()); }, .Pointer => { - const info = ty.ptrInfo().data; + const info = ty.ptrInfo(mod); const alignment = if (info.@"align" != 0) - try Value.Tag.int_u64.create(sema.arena, info.@"align") + try mod.intValue(Type.comptime_int, info.@"align") else - try info.pointee_type.lazyAbiAlignment(target, sema.arena); + try info.pointee_type.lazyAbiAlignment(mod); - const field_values = try sema.arena.create([8]Value); - field_values.* = .{ - // size: Size, - try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.size)), - // is_const: bool, - Value.makeBool(!info.mutable), - // is_volatile: bool, - Value.makeBool(info.@"volatile"), - // alignment: comptime_int, - alignment, - // address_space: AddressSpace - try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(info.@"addrspace")), - // child: type, - try Value.Tag.ty.create(sema.arena, info.pointee_type), - // is_allowzero: bool, - Value.makeBool(info.@"allowzero"), - // sentinel: ?*const anyopaque, - try sema.optRefValue(block, info.pointee_type, info.sentinel), + const addrspace_ty = try sema.getBuiltinType("AddressSpace"); + const pointer_ty = t: { + const decl_index = (try sema.namespaceLookup( + block, + src, + (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Pointer"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + try sema.ensureDeclAnalyzed(decl_index); + const decl = mod.declPtr(decl_index); + break :t decl.val.toType(); + }; + const ptr_size_ty = t: { + const decl_index = (try sema.namespaceLookup( + block, + src, + pointer_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Size"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + try sema.ensureDeclAnalyzed(decl_index); + const decl = mod.declPtr(decl_index); + break :t decl.val.toType(); }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Pointer)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // size: Size, + try (try mod.enumValueFieldIndex(ptr_size_ty, @enumToInt(info.size))).intern(ptr_size_ty, mod), + // is_const: bool, + Value.makeBool(!info.mutable).toIntern(), + // is_volatile: bool, + Value.makeBool(info.@"volatile").toIntern(), + // alignment: comptime_int, + alignment.toIntern(), + // address_space: AddressSpace + try (try mod.enumValueFieldIndex(addrspace_ty, 
@enumToInt(info.@"addrspace"))).intern(addrspace_ty, mod), + // child: type, + info.pointee_type.toIntern(), + // is_allowzero: bool, + Value.makeBool(info.@"allowzero").toIntern(), + // sentinel: ?*const anyopaque, + (try sema.optRefValue(block, info.pointee_type, info.sentinel)).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Pointer))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = pointer_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Array => { - const info = ty.arrayInfo(); - const field_values = try sema.arena.alloc(Value, 3); - // len: comptime_int, - field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len); - // child: type, - field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type); - // sentinel: ?*const anyopaque, - field_values[2] = try sema.optRefValue(block, info.elem_type, info.sentinel); + const array_field_ty = t: { + const array_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Array"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, array_field_ty_decl_index); + try sema.ensureDeclAnalyzed(array_field_ty_decl_index); + const array_field_ty_decl = mod.declPtr(array_field_ty_decl_index); + break :t array_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Array)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const info = ty.arrayInfo(mod); + const field_values = .{ + // len: comptime_int, + (try mod.intValue(Type.comptime_int, info.len)).toIntern(), + // child: type, + info.elem_type.toIntern(), + // sentinel: ?*const anyopaque, + (try sema.optRefValue(block, info.elem_type, info.sentinel)).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Array))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = array_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Vector => { - const info = ty.arrayInfo(); - const field_values = try sema.arena.alloc(Value, 2); - // len: comptime_int, - field_values[0] = try Value.Tag.int_u64.create(sema.arena, info.len); - // child: type, - field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type); + const vector_field_ty = t: { + const vector_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Vector"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, vector_field_ty_decl_index); + try sema.ensureDeclAnalyzed(vector_field_ty_decl_index); + const vector_field_ty_decl = mod.declPtr(vector_field_ty_decl_index); + break :t vector_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Vector)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const info = ty.arrayInfo(mod); + const field_values = .{ + // len: 
comptime_int, + (try mod.intValue(Type.comptime_int, info.len)).toIntern(), + // child: type, + info.elem_type.toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Vector))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = vector_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Optional => { - const field_values = try sema.arena.alloc(Value, 1); - // child: type, - field_values[0] = try Value.Tag.ty.create(sema.arena, try ty.optionalChildAlloc(sema.arena)); + const optional_field_ty = t: { + const optional_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Optional"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, optional_field_ty_decl_index); + try sema.ensureDeclAnalyzed(optional_field_ty_decl_index); + const optional_field_ty_decl = mod.declPtr(optional_field_ty_decl_index); + break :t optional_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Optional)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // child: type, + ty.optionalChild(mod).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Optional))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = optional_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .ErrorSet => { var fields_anon_decl = try block.startAnonDecl(); @@ -16066,17 +16257,16 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const set_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, - "Error", + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Error"), )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, set_field_ty_decl_index); try sema.ensureDeclAnalyzed(set_field_ty_decl_index); - const set_field_ty_decl = sema.mod.declPtr(set_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try set_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + const set_field_ty_decl = mod.declPtr(set_field_ty_decl_index); + break :t set_field_ty_decl.val.toType(); }; - try sema.queueFullTypeResolution(try error_field_ty.copy(sema.arena)); + try sema.queueFullTypeResolution(error_field_ty); // If the error set is inferred it must be resolved at this point try sema.resolveInferredErrorSetTy(block, src, ty); @@ -16084,90 +16274,119 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // Build our list of Error values // Optional value is only null if anyerror // Value can be zero-length slice otherwise - const error_field_vals: ?[]Value = if (ty.isAnyError()) null else blk: { - const names = ty.errorSetNames(); - const vals = try fields_anon_decl.arena().alloc(Value, names.len); + const error_field_vals = if (ty.isAnyError(mod)) null else blk: { + const vals = try 
sema.arena.alloc(InternPool.Index, ty.errorSetNames(mod).len); for (vals, 0..) |*field_val, i| { - const name = names[i]; + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(ty.errorSetNames(mod)[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; - const error_field_fields = try fields_anon_decl.arena().create([1]Value); - error_field_fields.* = .{ + const error_field_fields = .{ // name: []const u8, name_val, }; - - field_val.* = try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - error_field_fields, - ); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = error_field_ty.toIntern(), + .storage = .{ .elems = &error_field_fields }, + } }); } break :blk vals; }; // Build our ?[]const Error value - const errors_val = if (error_field_vals) |vals| v: { + const slice_errors_ty = try mod.ptrType(.{ + .child = error_field_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, + }); + const opt_slice_errors_ty = try mod.optionalType(slice_errors_ty.toIntern()); + const errors_payload_val: InternPool.Index = if (error_field_vals) |vals| v: { + const array_errors_ty = try mod.arrayType(.{ + .len = vals.len, + .child = error_field_ty.toIntern(), + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try Type.Tag.array.create(fields_anon_decl.arena(), .{ - .len = vals.len, - .elem_type = error_field_ty, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - vals, - ), + array_errors_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_errors_ty.toIntern(), + .storage = .{ .elems = vals }, + } })).toValue(), 0, // default alignment ); - - const new_decl_val = try Value.Tag.decl_ref.create(sema.arena, new_decl); - const slice_val = try Value.Tag.slice.create(sema.arena, .{ - .ptr = new_decl_val, - .len = try Value.Tag.int_u64.create(sema.arena, vals.len), - }); - break :v try Value.Tag.opt_payload.create(sema.arena, slice_val); - } else Value.null; + break :v try mod.intern(.{ .ptr = .{ + .ty = slice_errors_ty.toIntern(), + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, vals.len)).toIntern(), + } }); + } else .none; + const errors_val = try mod.intern(.{ .opt = .{ + .ty = opt_slice_errors_ty.toIntern(), + .val = errors_payload_val, + } }); // Construct Type{ .ErrorSet = errors_val } - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorSet)), - .val = errors_val, - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, 
@enumToInt(std.builtin.TypeId.ErrorSet))).toIntern(), + .val = errors_val, + } })).toValue()); }, .ErrorUnion => { - const field_values = try sema.arena.alloc(Value, 2); - // error_set: type, - field_values[0] = try Value.Tag.ty.create(sema.arena, ty.errorUnionSet()); - // payload: type, - field_values[1] = try Value.Tag.ty.create(sema.arena, ty.errorUnionPayload()); + const error_union_field_ty = t: { + const error_union_field_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "ErrorUnion"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, error_union_field_ty_decl_index); + try sema.ensureDeclAnalyzed(error_union_field_ty_decl_index); + const error_union_field_ty_decl = mod.declPtr(error_union_field_ty_decl_index); + break :t error_union_field_ty_decl.val.toType(); + }; - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.ErrorUnion)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + const field_values = .{ + // error_set: type, + ty.errorUnionSet(mod).toIntern(), + // payload: type, + ty.errorUnionPayload(mod).toIntern(), + }; + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.ErrorUnion))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = error_union_field_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Enum => { // TODO: look into memoizing this result. - var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = try ty.intTagType(&int_tag_type_buffer).copy(sema.arena); - - const is_exhaustive = Value.makeBool(!ty.isNonexhaustiveEnum()); + const is_exhaustive = Value.makeBool(ip.indexToKey(ty.toIntern()).enum_type.tag_mode != .nonexhaustive); var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); @@ -16176,88 +16395,121 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const enum_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, - "EnumField", + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "EnumField"), )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, enum_field_ty_decl_index); try sema.ensureDeclAnalyzed(enum_field_ty_decl_index); - const enum_field_ty_decl = sema.mod.declPtr(enum_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try enum_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + const enum_field_ty_decl = mod.declPtr(enum_field_ty_decl_index); + break :t enum_field_ty_decl.val.toType(); }; - const enum_fields = ty.enumFields(); - const enum_field_vals = try fields_anon_decl.arena().alloc(Value, enum_fields.count()); - + const enum_field_vals = try sema.arena.alloc(InternPool.Index, ip.indexToKey(ty.toIntern()).enum_type.names.len); for (enum_field_vals, 0..) 
|*field_val, i| { - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, i), - }; - const tag_val = Value.initPayload(&tag_val_payload.base); - - var buffer: Value.Payload.U64 = undefined; - const int_val = try tag_val.enumToInt(ty, &buffer).copy(fields_anon_decl.arena()); - - const name = enum_fields.keys()[i]; + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; + const value_val = if (enum_type.values.len > 0) + try mod.intern_pool.getCoerced(gpa, enum_type.values[i], .comptime_int_type) + else + try mod.intern(.{ .int = .{ + .ty = .comptime_int_type, + .storage = .{ .u64 = @intCast(u64, i) }, + } }); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(enum_type.names[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; - const enum_field_fields = try fields_anon_decl.arena().create([2]Value); - enum_field_fields.* = .{ + const enum_field_fields = .{ // name: []const u8, name_val, // value: comptime_int, - int_val, + value_val, }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), enum_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = enum_field_ty.toIntern(), + .storage = .{ .elems = &enum_field_fields }, + } }); } const fields_val = v: { + const fields_array_ty = try mod.arrayType(.{ + .len = enum_field_vals.len, + .child = enum_field_ty.toIntern(), + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try Type.Tag.array.create(fields_anon_decl.arena(), .{ - .len = enum_field_vals.len, - .elem_type = enum_field_ty, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - enum_field_vals, - ), + fields_array_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = fields_array_ty.toIntern(), + .storage = .{ .elems = enum_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(sema.arena, new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .child = enum_field_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, + })).toIntern(), + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, enum_field_vals.len)).toIntern(), + } }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, ip.indexToKey(ty.toIntern()).enum_type.namespace); - const field_values = try sema.arena.create([4]Value); - field_values.* = .{ + const type_enum_ty = t: { + const type_enum_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Enum"), 
+ )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_enum_ty_decl_index); + try sema.ensureDeclAnalyzed(type_enum_ty_decl_index); + const type_enum_ty_decl = mod.declPtr(type_enum_ty_decl_index); + break :t type_enum_ty_decl.val.toType(); + }; + + const field_values = .{ // tag_type: type, - try Value.Tag.ty.create(sema.arena, int_tag_ty), + ip.indexToKey(ty.toIntern()).enum_type.tag_ty, // fields: []const EnumField, fields_val, // decls: []const Declaration, decls_val, // is_exhaustive: bool, - is_exhaustive, + is_exhaustive.toIntern(), }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Enum)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Enum))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_enum_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Union => { // TODO: look into memoizing this result. @@ -16265,91 +16517,135 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); + const type_union_ty = t: { + const type_union_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Union"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_union_ty_decl_index); + try sema.ensureDeclAnalyzed(type_union_ty_decl_index); + const type_union_ty_decl = mod.declPtr(type_union_ty_decl_index); + break :t type_union_ty_decl.val.toType(); + }; + const union_field_ty = t: { const union_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, - "UnionField", + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "UnionField"), )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, union_field_ty_decl_index); try sema.ensureDeclAnalyzed(union_field_ty_decl_index); - const union_field_ty_decl = sema.mod.declPtr(union_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try union_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + const union_field_ty_decl = mod.declPtr(union_field_ty_decl_index); + break :t union_field_ty_decl.val.toType(); }; const union_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout - const layout = union_ty.containerLayout(); + const layout = union_ty.containerLayout(mod); - const union_fields = union_ty.unionFields(); - const union_field_vals = try fields_anon_decl.arena().alloc(Value, union_fields.count()); + const union_fields = union_ty.unionFields(mod); + const union_field_vals = try gpa.alloc(InternPool.Index, union_fields.count()); + defer gpa.free(union_field_vals); for (union_field_vals, 0..) 
|*field_val, i| { const field = union_fields.values()[i]; - const name = union_fields.keys()[i]; + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(union_fields.keys()[i])); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; - const union_field_fields = try fields_anon_decl.arena().create([3]Value); const alignment = switch (layout) { .Auto, .Extern => try sema.unionFieldAlignment(field), .Packed => 0, }; - union_field_fields.* = .{ + const union_field_fields = .{ // name: []const u8, name_val, // type: type, - try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty), + field.ty.toIntern(), // alignment: comptime_int, - try Value.Tag.int_u64.create(fields_anon_decl.arena(), alignment), + (try mod.intValue(Type.comptime_int, alignment)).toIntern(), }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), union_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = union_field_ty.toIntern(), + .storage = .{ .elems = &union_field_fields }, + } }); } const fields_val = v: { + const array_fields_ty = try mod.arrayType(.{ + .len = union_field_vals.len, + .child = union_field_ty.toIntern(), + .sentinel = .none, + }); const new_decl = try fields_anon_decl.finish( - try Type.Tag.array.create(fields_anon_decl.arena(), .{ - .len = union_field_vals.len, - .elem_type = union_field_ty, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - try fields_anon_decl.arena().dupe(Value, union_field_vals), - ), + array_fields_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_fields_ty.toIntern(), + .storage = .{ .elems = union_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, union_field_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .child = union_field_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, + })).toIntern(), + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, union_field_vals.len)).toIntern(), + } }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, union_ty.getNamespaceIndex(mod)); - const enum_tag_ty_val = if (union_ty.unionTagType()) |tag_ty| v: { - const ty_val = try Value.Tag.ty.create(sema.arena, tag_ty); - break :v try Value.Tag.opt_payload.create(sema.arena, ty_val); - } else Value.null; + const enum_tag_ty_val = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType(.type_type)).toIntern(), + .val = if 
(union_ty.unionTagType(mod)) |tag_ty| tag_ty.toIntern() else .none, + } }); - const field_values = try sema.arena.create([4]Value); - field_values.* = .{ + const container_layout_ty = t: { + const decl_index = (try sema.namespaceLookup( + block, + src, + (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "ContainerLayout"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + try sema.ensureDeclAnalyzed(decl_index); + const decl = mod.declPtr(decl_index); + break :t decl.val.toType(); + }; + + const field_values = .{ // layout: ContainerLayout, - try Value.Tag.enum_field_index.create( - sema.arena, - @enumToInt(layout), - ), + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).toIntern(), // tag_type: ?type, enum_tag_ty_val, @@ -16358,14 +16654,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Union)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Union))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_union_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Struct => { // TODO: look into memoizing this result. @@ -16373,154 +16669,212 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai var fields_anon_decl = try block.startAnonDecl(); defer fields_anon_decl.deinit(); + const type_struct_ty = t: { + const type_struct_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Struct"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_struct_ty_decl_index); + try sema.ensureDeclAnalyzed(type_struct_ty_decl_index); + const type_struct_ty_decl = mod.declPtr(type_struct_ty_decl_index); + break :t type_struct_ty_decl.val.toType(); + }; + const struct_field_ty = t: { const struct_field_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, - "StructField", + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "StructField"), )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, struct_field_ty_decl_index); try sema.ensureDeclAnalyzed(struct_field_ty_decl_index); - const struct_field_ty_decl = sema.mod.declPtr(struct_field_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try struct_field_ty_decl.val.toType(&buffer).copy(fields_anon_decl.arena()); + const struct_field_ty_decl = mod.declPtr(struct_field_ty_decl_index); + break :t struct_field_ty_decl.val.toType(); }; + const struct_ty = try sema.resolveTypeFields(ty); try sema.resolveTypeLayout(ty); // Getting alignment requires type layout - const layout = struct_ty.containerLayout(); + const layout = struct_ty.containerLayout(mod); - const struct_field_vals = fv: { - if (struct_ty.isSimpleTupleOrAnonStruct()) { - const tuple = struct_ty.tupleFields(); - const field_types = tuple.types; - const struct_field_vals = try 
fields_anon_decl.arena().alloc(Value, field_types.len); - for (struct_field_vals, 0..) |*struct_field_val, i| { - const field_ty = field_types[i]; - const name_val = v: { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - const bytes = if (struct_ty.castTag(.anon_struct)) |payload| - try anon_decl.arena().dupeZ(u8, payload.data.names[i]) - else - try std.fmt.allocPrintZ(anon_decl.arena(), "{d}", .{i}); - const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), - 0, // default alignment - ); - break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ - .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try Value.Tag.int_u64.create(fields_anon_decl.arena(), bytes.len), - }); - }; + var struct_field_vals: []InternPool.Index = &.{}; + defer gpa.free(struct_field_vals); + fv: { + const struct_type = switch (ip.indexToKey(struct_ty.toIntern())) { + .anon_struct_type => |tuple| { + struct_field_vals = try gpa.alloc(InternPool.Index, tuple.types.len); + for (struct_field_vals, 0..) |*struct_field_val, i| { + const anon_struct_type = ip.indexToKey(struct_ty.toIntern()).anon_struct_type; + const field_ty = anon_struct_type.types[i]; + const field_val = anon_struct_type.values[i]; + const name_val = v: { + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + // TODO: write something like getCoercedInts to avoid needing to dupe + const bytes = if (tuple.names.len != 0) + // https://github.com/ziglang/zig/issues/15709 + try sema.arena.dupe(u8, ip.stringToSlice(ip.indexToKey(struct_ty.toIntern()).anon_struct_type.names[i])) + else + try std.fmt.allocPrint(sema.arena, "{d}", .{i}); + const new_decl_ty = try mod.arrayType(.{ + .len = bytes.len, + .child = .u8_type, + }); + const new_decl = try anon_decl.finish( + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = bytes }, + } })).toValue(), + 0, // default alignment + ); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, bytes.len)).toIntern(), + } }); + }; - const struct_field_fields = try fields_anon_decl.arena().create([5]Value); - const field_val = tuple.values[i]; - const is_comptime = field_val.tag() != .unreachable_value; - const opt_default_val = if (is_comptime) field_val else null; - const default_val_ptr = try sema.optRefValue(block, field_ty, opt_default_val); - struct_field_fields.* = .{ - // name: []const u8, - name_val, - // type: type, - try Value.Tag.ty.create(fields_anon_decl.arena(), field_ty), - // default_value: ?*const anyopaque, - try default_val_ptr.copy(fields_anon_decl.arena()), - // is_comptime: bool, - Value.makeBool(is_comptime), - // alignment: comptime_int, - try field_ty.lazyAbiAlignment(target, fields_anon_decl.arena()), - }; - struct_field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); - } - break :fv struct_field_vals; - } - const struct_fields = struct_ty.structFields(); - const struct_field_vals = try fields_anon_decl.arena().alloc(Value, struct_fields.count()); + const is_comptime = field_val != .none; + const opt_default_val = if (is_comptime) field_val.toValue() else null; + const default_val_ptr = try sema.optRefValue(block, field_ty.toType(), opt_default_val); + const struct_field_fields = .{ + // name: []const u8, + 
name_val, + // type: type, + field_ty, + // default_value: ?*const anyopaque, + default_val_ptr.toIntern(), + // is_comptime: bool, + Value.makeBool(is_comptime).toIntern(), + // alignment: comptime_int, + (try mod.intValue(Type.comptime_int, field_ty.toType().abiAlignment(mod))).toIntern(), + }; + struct_field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = struct_field_ty.toIntern(), + .storage = .{ .elems = &struct_field_fields }, + } }); + } + break :fv; + }, + .struct_type => |s| s, + else => unreachable, + }; + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :fv; + struct_field_vals = try gpa.alloc(InternPool.Index, struct_obj.fields.count()); - for (struct_field_vals, 0..) |*field_val, i| { - const field = struct_fields.values()[i]; - const name = struct_fields.keys()[i]; + for ( + struct_field_vals, + struct_obj.fields.keys(), + struct_obj.fields.values(), + ) |*field_val, name_nts, field| { + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(name_nts)); const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, name); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(fields_anon_decl.arena(), .{ - .ptr = try Value.Tag.decl_ref.create(fields_anon_decl.arena(), new_decl), - .len = try Value.Tag.int_u64.create(fields_anon_decl.arena(), bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; - const struct_field_fields = try fields_anon_decl.arena().create([5]Value); - const opt_default_val = if (field.default_val.tag() == .unreachable_value) + const opt_default_val = if (field.default_val == .none) null else - field.default_val; + field.default_val.toValue(); const default_val_ptr = try sema.optRefValue(block, field.ty, opt_default_val); - const alignment = field.alignment(target, layout); + const alignment = field.alignment(mod, layout); - struct_field_fields.* = .{ + const struct_field_fields = .{ // name: []const u8, name_val, // type: type, - try Value.Tag.ty.create(fields_anon_decl.arena(), field.ty), + field.ty.toIntern(), // default_value: ?*const anyopaque, - try default_val_ptr.copy(fields_anon_decl.arena()), + default_val_ptr.toIntern(), // is_comptime: bool, - Value.makeBool(field.is_comptime), + Value.makeBool(field.is_comptime).toIntern(), // alignment: comptime_int, - try Value.Tag.int_u64.create(fields_anon_decl.arena(), alignment), + (try mod.intValue(Type.comptime_int, alignment)).toIntern(), }; - field_val.* = try Value.Tag.aggregate.create(fields_anon_decl.arena(), struct_field_fields); + field_val.* = try mod.intern(.{ .aggregate = .{ + .ty = struct_field_ty.toIntern(), + .storage = .{ .elems = &struct_field_fields }, + } }); } - break :fv struct_field_vals; - }; + } const fields_val = v: { + const array_fields_ty = try mod.arrayType(.{ + .len = struct_field_vals.len, + .child = struct_field_ty.toIntern(), + .sentinel = .none, + }); 
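
The struct hunks above rebuild the `fields: []const StructField` slice via interned aggregates instead of arena-allocated `Value` tags; the `@typeInfo` contract (field names, optional default values, the `is_tuple` flag) is unchanged. A minimal runnable check of that contract, written against the field names this diff itself documents (`name`, `default_value`, `is_tuple`):

    const std = @import("std");

    test "@typeInfo(Struct) exposes names, defaults, and is_tuple" {
        const S = struct { a: u8, b: bool = true };
        const info = @typeInfo(S).Struct;
        try std.testing.expectEqual(@as(usize, 2), info.fields.len);
        try std.testing.expectEqualStrings("a", info.fields[0].name);
        // `default_value` is a `?*const anyopaque`; only `b` carries one.
        try std.testing.expect(info.fields[0].default_value == null);
        try std.testing.expect(info.fields[1].default_value != null);
        try std.testing.expect(!info.is_tuple);
    }
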
const new_decl = try fields_anon_decl.finish( - try Type.Tag.array.create(fields_anon_decl.arena(), .{ - .len = struct_field_vals.len, - .elem_type = struct_field_ty, - }), - try Value.Tag.aggregate.create( - fields_anon_decl.arena(), - try fields_anon_decl.arena().dupe(Value, struct_field_vals), - ), + array_fields_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_fields_ty.toIntern(), + .storage = .{ .elems = struct_field_vals }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, struct_field_vals.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .child = struct_field_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, + })).toIntern(), + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, struct_field_vals.len)).toIntern(), + } }); }; - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespace()); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, struct_ty.getNamespaceIndex(mod)); - const backing_integer_val = blk: { - if (layout == .Packed) { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const backing_integer_val = try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType(.type_type)).toIntern(), + .val = if (layout == .Packed) val: { + const struct_obj = mod.typeToStruct(struct_ty).?; assert(struct_obj.haveLayout()); - assert(struct_obj.backing_int_ty.isInt()); - const backing_int_ty_val = try Value.Tag.ty.create(sema.arena, struct_obj.backing_int_ty); - break :blk try Value.Tag.opt_payload.create(sema.arena, backing_int_ty_val); - } else { - break :blk Value.initTag(.null_value); - } + assert(struct_obj.backing_int_ty.isInt(mod)); + break :val struct_obj.backing_int_ty.toIntern(); + } else .none, + } }); + + const container_layout_ty = t: { + const decl_index = (try sema.namespaceLookup( + block, + src, + (try sema.getBuiltinType("Type")).getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "ContainerLayout"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); + try sema.ensureDeclAnalyzed(decl_index); + const decl = mod.declPtr(decl_index); + break :t decl.val.toType(); }; - const field_values = try sema.arena.create([5]Value); - field_values.* = .{ + const field_values = [_]InternPool.Index{ // layout: ContainerLayout, - try Value.Tag.enum_field_index.create( - sema.arena, - @enumToInt(layout), - ), + (try mod.enumValueFieldIndex(container_layout_ty, @enumToInt(layout))).toIntern(), // backing_integer: ?type, backing_integer_val, // fields: []const StructField, @@ -16528,36 +16882,48 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai // decls: []const Declaration, decls_val, // is_tuple: bool, - Value.makeBool(struct_ty.isTuple()), + Value.makeBool(struct_ty.isTuple(mod)).toIntern(), }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Struct)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Struct))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = 
type_struct_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Opaque => { // TODO: look into memoizing this result. - const opaque_ty = try sema.resolveTypeFields(ty); - const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespace()); + const type_opaque_ty = t: { + const type_opaque_ty_decl_index = (try sema.namespaceLookup( + block, + src, + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, "Opaque"), + )).?; + try mod.declareDeclDependency(sema.owner_decl_index, type_opaque_ty_decl_index); + try sema.ensureDeclAnalyzed(type_opaque_ty_decl_index); + const type_opaque_ty_decl = mod.declPtr(type_opaque_ty_decl_index); + break :t type_opaque_ty_decl.val.toType(); + }; - const field_values = try sema.arena.create([1]Value); - field_values.* = .{ + const opaque_ty = try sema.resolveTypeFields(ty); + const decls_val = try sema.typeInfoDecls(block, src, type_info_ty, opaque_ty.getNamespaceIndex(mod)); + + const field_values = .{ // decls: []const Declaration, decls_val, }; - - return sema.addConstant( - type_info_ty, - try Value.Tag.@"union".create(sema.arena, .{ - .tag = try Value.Tag.enum_field_index.create(sema.arena, @enumToInt(std.builtin.TypeId.Opaque)), - .val = try Value.Tag.aggregate.create(sema.arena, field_values), - }), - ); + return sema.addConstant(type_info_ty, (try mod.intern(.{ .un = .{ + .ty = type_info_ty.toIntern(), + .tag = (try mod.enumValueFieldIndex(type_info_tag_ty, @enumToInt(std.builtin.TypeId.Opaque))).toIntern(), + .val = try mod.intern(.{ .aggregate = .{ + .ty = type_opaque_ty.toIntern(), + .storage = .{ .elems = &field_values }, + } }), + } })).toValue()); }, .Frame => return sema.failWithUseOfAsync(block, src), .AnyFrame => return sema.failWithUseOfAsync(block, src), @@ -16569,8 +16935,11 @@ fn typeInfoDecls( block: *Block, src: LazySrcLoc, type_info_ty: Type, - opt_namespace: ?*Module.Namespace, -) CompileError!Value { + opt_namespace: Module.Namespace.OptionalIndex, +) CompileError!InternPool.Index { + const mod = sema.mod; + const gpa = sema.gpa; + var decls_anon_decl = try block.startAnonDecl(); defer decls_anon_decl.deinit(); @@ -16578,89 +16947,110 @@ fn typeInfoDecls( const declaration_ty_decl_index = (try sema.namespaceLookup( block, src, - type_info_ty.getNamespace().?, - "Declaration", + type_info_ty.getNamespaceIndex(mod).unwrap().?, + try mod.intern_pool.getOrPutString(gpa, "Declaration"), )).?; - try sema.mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, declaration_ty_decl_index); try sema.ensureDeclAnalyzed(declaration_ty_decl_index); - const declaration_ty_decl = sema.mod.declPtr(declaration_ty_decl_index); - var buffer: Value.ToTypeBuffer = undefined; - break :t try declaration_ty_decl.val.toType(&buffer).copy(decls_anon_decl.arena()); + const declaration_ty_decl = mod.declPtr(declaration_ty_decl_index); + break :t declaration_ty_decl.val.toType(); }; - try sema.queueFullTypeResolution(try declaration_ty.copy(sema.arena)); + try sema.queueFullTypeResolution(declaration_ty); - var decl_vals = std.ArrayList(Value).init(sema.gpa); + var decl_vals = std.ArrayList(InternPool.Index).init(gpa); defer decl_vals.deinit(); - var seen_namespaces = std.AutoHashMap(*Namespace, void).init(sema.gpa); + var seen_namespaces = std.AutoHashMap(*Namespace, void).init(gpa); defer seen_namespaces.deinit(); - if (opt_namespace) |some| { - try sema.typeInfoNamespaceDecls(block, 
decls_anon_decl.arena(), some, &decl_vals, &seen_namespaces); + if (opt_namespace.unwrap()) |namespace_index| { + const namespace = mod.namespacePtr(namespace_index); + try sema.typeInfoNamespaceDecls(block, namespace, declaration_ty, &decl_vals, &seen_namespaces); } + const array_decl_ty = try mod.arrayType(.{ + .len = decl_vals.items.len, + .child = declaration_ty.toIntern(), + .sentinel = .none, + }); const new_decl = try decls_anon_decl.finish( - try Type.Tag.array.create(decls_anon_decl.arena(), .{ - .len = decl_vals.items.len, - .elem_type = declaration_ty, - }), - try Value.Tag.aggregate.create( - decls_anon_decl.arena(), - try decls_anon_decl.arena().dupe(Value, decl_vals.items), - ), + array_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = array_decl_ty.toIntern(), + .storage = .{ .elems = decl_vals.items }, + } })).toValue(), 0, // default alignment ); - return try Value.Tag.slice.create(sema.arena, .{ - .ptr = try Value.Tag.decl_ref.create(sema.arena, new_decl), - .len = try Value.Tag.int_u64.create(sema.arena, decl_vals.items.len), - }); + return try mod.intern(.{ .ptr = .{ + .ty = (try mod.ptrType(.{ + .child = declaration_ty.toIntern(), + .flags = .{ + .size = .Slice, + .is_const = true, + }, + })).toIntern(), + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, decl_vals.items.len)).toIntern(), + } }); } fn typeInfoNamespaceDecls( sema: *Sema, block: *Block, - decls_anon_decl: Allocator, namespace: *Namespace, - decl_vals: *std.ArrayList(Value), + declaration_ty: Type, + decl_vals: *std.ArrayList(InternPool.Index), seen_namespaces: *std.AutoHashMap(*Namespace, void), ) !void { + const mod = sema.mod; + const ip = &mod.intern_pool; const gop = try seen_namespaces.getOrPut(namespace); if (gop.found_existing) return; const decls = namespace.decls.keys(); for (decls) |decl_index| { - const decl = sema.mod.declPtr(decl_index); + const decl = mod.declPtr(decl_index); if (decl.kind == .@"usingnamespace") { if (decl.analysis == .in_progress) continue; - try sema.mod.ensureDeclAnalyzed(decl_index); - var buf: Value.ToTypeBuffer = undefined; - const new_ns = decl.val.toType(&buf).getNamespace().?; - try sema.typeInfoNamespaceDecls(block, decls_anon_decl, new_ns, decl_vals, seen_namespaces); + try mod.ensureDeclAnalyzed(decl_index); + const new_ns = decl.val.toType().getNamespace(mod).?; + try sema.typeInfoNamespaceDecls(block, new_ns, declaration_ty, decl_vals, seen_namespaces); continue; } if (decl.kind != .named) continue; const name_val = v: { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); - const bytes = try anon_decl.arena().dupeZ(u8, mem.sliceTo(decl.name, 0)); + // TODO: write something like getCoercedInts to avoid needing to dupe + const name = try sema.arena.dupe(u8, ip.stringToSlice(decl.name)); + const new_decl_ty = try mod.arrayType(.{ + .len = name.len, + .child = .u8_type, + }); const new_decl = try anon_decl.finish( - try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len), - try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. 
bytes.len + 1]), + new_decl_ty, + (try mod.intern(.{ .aggregate = .{ + .ty = new_decl_ty.toIntern(), + .storage = .{ .bytes = name }, + } })).toValue(), 0, // default alignment ); - break :v try Value.Tag.slice.create(decls_anon_decl, .{ - .ptr = try Value.Tag.decl_ref.create(decls_anon_decl, new_decl), - .len = try Value.Tag.int_u64.create(decls_anon_decl, bytes.len), - }); + break :v try mod.intern(.{ .ptr = .{ + .ty = .slice_const_u8_type, + .addr = .{ .decl = new_decl }, + .len = (try mod.intValue(Type.usize, name.len)).toIntern(), + } }); }; - const fields = try decls_anon_decl.create([2]Value); - fields.* = .{ + const fields = .{ //name: []const u8, name_val, //is_pub: bool, - Value.makeBool(decl.is_pub), + Value.makeBool(decl.is_pub).toIntern(), }; - try decl_vals.append(try Value.Tag.aggregate.create(decls_anon_decl, fields)); + try decl_vals.append(try mod.intern(.{ .aggregate = .{ + .ty = declaration_ty.toIntern(), + .storage = .{ .elems = &fields }, + } })); } } @@ -16695,7 +17085,7 @@ fn zirTypeofBuiltin(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr const operand = try sema.resolveBody(&child_block, body, inst); const operand_ty = sema.typeOf(operand); - if (operand_ty.tag() == .generic_poison) return error.GenericPoison; + if (operand_ty.isGenericPoison()) return error.GenericPoison; return sema.addType(operand_ty); } @@ -16709,10 +17099,11 @@ fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil } fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) CompileError!Type { - switch (operand.zigTypeTag()) { + const mod = sema.mod; + switch (operand.zigTypeTag(mod)) { .ComptimeInt => return Type.comptime_int, .Int => { - const bits = operand.bitSize(sema.mod.getTarget()); + const bits = operand.bitSize(mod); const count = if (bits == 0) 0 else blk: { @@ -16723,14 +17114,14 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi } break :blk count; }; - return Module.makeIntType(sema.arena, .unsigned, count); + return mod.intType(.unsigned, count); }, .Vector => { - const elem_ty = operand.elemType2(); + const elem_ty = operand.elemType2(mod); const log2_elem_ty = try sema.log2IntType(block, elem_ty, src); - return Type.Tag.vector.create(sema.arena, .{ - .len = operand.vectorLen(), - .elem_type = log2_elem_ty, + return mod.vectorType(.{ + .len = operand.vectorLen(mod), + .child = log2_elem_ty.toIntern(), }); }, else => {}, @@ -16739,7 +17130,7 @@ fn log2IntType(sema: *Sema, block: *Block, operand: Type, src: LazySrcLoc) Compi block, src, "bit shifting operation expected integer type, found '{}'", - .{operand.fmt(sema.mod)}, + .{operand.fmt(mod)}, ); } @@ -16790,6 +17181,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_un_op = inst_data.src_node }; @@ -16797,7 +17189,7 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const operand = try sema.coerce(block, Type.bool, uncasted_operand, operand_src); if (try sema.resolveMaybeUndefVal(operand)) |val| { - return if (val.isUndef()) + return if (val.isUndef(mod)) sema.addConstUndef(Type.bool) else if (val.toBool()) Air.Inst.Ref.bool_false @@ -16817,6 +17209,7 @@ fn zirBoolBr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const datas = 
sema.code.instructions.items(.data); const inst_data = datas[inst].bool_br; const lhs = try sema.resolveInst(inst_data.lhs); @@ -16865,12 +17258,12 @@ fn zirBoolBr( _ = try lhs_block.addBr(block_inst, lhs_result); const rhs_result = try sema.resolveBody(rhs_block, body, inst); - if (!sema.typeOf(rhs_result).isNoReturn()) { + if (!sema.typeOf(rhs_result).isNoReturn(mod)) { _ = try rhs_block.addBr(block_inst, rhs_result); } const result = sema.finishCondBr(parent_block, &child_block, &then_block, &else_block, lhs, block_inst); - if (!sema.typeOf(rhs_result).isNoReturn()) { + if (!sema.typeOf(rhs_result).isNoReturn(mod)) { if (try sema.resolveDefinedValue(rhs_block, sema.src, rhs_result)) |rhs_val| { if (is_bool_or and rhs_val.toBool()) { return Air.Inst.Ref.bool_true; @@ -16920,9 +17313,10 @@ fn finishCondBr( } fn checkNullableType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Optional, .Null, .Undefined => return, - .Pointer => if (ty.isPtrLikeOptional()) return, + .Pointer => if (ty.isPtrLikeOptional(mod)) return, else => {}, } return sema.failWithExpectedOptionalType(block, src, ty); @@ -16951,10 +17345,11 @@ fn zirIsNonNullPtr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr = try sema.resolveInst(inst_data.operand); - try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2()); + try sema.checkNullableType(block, src, sema.typeOf(ptr).elemType2(mod)); if ((try sema.resolveMaybeUndefVal(ptr)) == null) { return block.addUnOp(.is_non_null_ptr, ptr); } @@ -16963,10 +17358,11 @@ fn zirIsNonNullPtr( } fn checkErrorType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .ErrorSet, .ErrorUnion, .Undefined => return, else => return sema.fail(block, src, "expected error union type, found '{}'", .{ - ty.fmt(sema.mod), + ty.fmt(mod), }), } } @@ -16986,10 +17382,11 @@ fn zirIsNonErrPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr = try sema.resolveInst(inst_data.operand); - try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2()); + try sema.checkErrorType(block, src, sema.typeOf(ptr).elemType2(mod)); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNonErr(block, src, loaded); } @@ -17012,6 +17409,7 @@ fn zirCondbr( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); @@ -17052,8 +17450,8 @@ fn zirCondbr( const err_inst_data = sema.code.instructions.items(.data)[index].un_node; const err_operand = try sema.resolveInst(err_inst_data.operand); const operand_ty = sema.typeOf(err_operand); - assert(operand_ty.zigTypeTag() == .ErrorUnion); - const result_ty = operand_ty.errorUnionSet(); + assert(operand_ty.zigTypeTag(mod) == .ErrorUnion); + const result_ty = operand_ty.errorUnionSet(mod); break :blk try sub_block.addTyOp(.unwrap_errunion_err, result_ty, err_operand); }; @@ -17079,7 +17477,7 @@ fn 
zirCondbr( return always_noreturn; } -fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Ref { +fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -17087,9 +17485,10 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! const body = sema.code.extra[extra.end..][0..extra.data.body_len]; const err_union = try sema.resolveInst(extra.data.operand); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + const mod = sema.mod; + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); @@ -17124,7 +17523,7 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError! return try_inst; } -fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Ref { +fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -17133,9 +17532,10 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr const operand = try sema.resolveInst(extra.data.operand); const err_union = try sema.analyzeLoad(parent_block, src, operand, operand_src); const err_union_ty = sema.typeOf(err_union); - if (err_union_ty.zigTypeTag() != .ErrorUnion) { + const mod = sema.mod; + if (err_union_ty.zigTypeTag(mod) != .ErrorUnion) { return sema.fail(parent_block, operand_src, "expected error union type, found '{}'", .{ - err_union_ty.fmt(sema.mod), + err_union_ty.fmt(mod), }); } const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union); @@ -17156,9 +17556,9 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr _ = try sema.analyzeBodyInner(&sub_block, body); const operand_ty = sema.typeOf(operand); - const ptr_info = operand_ty.ptrInfo().data; - const res_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = err_union_ty.errorUnionPayload(), + const ptr_info = operand_ty.ptrInfo(mod); + const res_ty = try Type.ptr(sema.arena, mod, .{ + .pointee_type = err_union_ty.errorUnionPayload(mod), .@"addrspace" = ptr_info.@"addrspace", .mutable = ptr_info.mutable, .@"allowzero" = ptr_info.@"allowzero", @@ -17254,16 +17654,17 @@ fn zirRetErrValue( block: *Block, inst: Zir.Inst.Index, ) CompileError!Zir.Inst.Index { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const err_name = inst_data.get(sema.code); + const err_name = try mod.intern_pool.getOrPutString(sema.gpa, inst_data.get(sema.code)); + _ = try mod.getErrorValue(err_name); const src = inst_data.src(); - // Return the error code from the function. 
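
In the `zirRetErrValue` hunk here, `return error.Foo` now interns the error name and builds `mod.singleErrorSetType(err_name)` rather than `Type.Tag.error_set_single`. The language semantics being preserved, as a self-contained user-level test (not compiler-internal):

    const std = @import("std");

    test "a bare error literal has a single-element error set type" {
        const E = @TypeOf(error.Oops);
        // `ErrorSet` info is `?[]const Error`; null would mean `anyerror`.
        const names = @typeInfo(E).ErrorSet.?;
        try std.testing.expectEqual(@as(usize, 1), names.len);
        try std.testing.expectEqualStrings("Oops", names[0].name);
    }
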
- const kv = try sema.mod.getErrorValue(err_name); - const result_inst = try sema.addConstant( - try Type.Tag.error_set_single.create(sema.arena, kv.key), - try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), - ); + const error_set_type = try mod.singleErrorSetType(err_name); + const result_inst = try sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.toIntern(), + .name = err_name, + } })).toValue()); return sema.analyzeRet(block, result_inst, src); } @@ -17275,16 +17676,17 @@ fn zirRetImplicit( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const operand = try sema.resolveInst(inst_data.operand); const r_brace_src = inst_data.src(); const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 }; - const base_tag = sema.fn_ret_ty.baseZigTypeTag(); + const base_tag = sema.fn_ret_ty.baseZigTypeTag(mod); if (base_tag == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "function declared '{}' implicitly returns", .{ - sema.fn_ret_ty.fmt(sema.mod), + sema.fn_ret_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{}); @@ -17294,7 +17696,7 @@ fn zirRetImplicit( } else if (base_tag != .Void) { const msg = msg: { const msg = try sema.errMsg(block, ret_ty_src, "function with non-void return type '{}' implicitly returns", .{ - sema.fn_ret_ty.fmt(sema.mod), + sema.fn_ret_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{}); @@ -17346,6 +17748,7 @@ fn retWithErrTracing( ret_tag: Air.Inst.Tag, operand: Air.Inst.Ref, ) CompileError!Zir.Inst.Index { + const mod = sema.mod; const need_check = switch (is_non_err) { .bool_true => { _ = try block.addUnOp(ret_tag, operand); @@ -17357,7 +17760,7 @@ fn retWithErrTracing( const gpa = sema.gpa; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Type.Tag.single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); const err_return_trace = try block.addTy(.err_return_trace, ptr_stack_trace_ty); const return_err_fn = try sema.getBuiltin("returnError"); const args: [1]Air.Inst.Ref = .{err_return_trace}; @@ -17397,17 +17800,19 @@ fn retWithErrTracing( } fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool { - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return false; + const mod = sema.mod; + if (!mod.backendSupportsFeature(.error_return_trace)) return false; - return fn_ret_ty.isError() and - sema.mod.comp.bin_file.options.error_return_tracing; + return fn_ret_ty.isError(mod) and + mod.comp.bin_file.options.error_return_tracing; } fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].save_err_ret_index; - if (!sema.mod.backendSupportsFeature(.error_return_trace)) return; - if (!sema.mod.comp.bin_file.options.error_return_tracing) return; + if (!mod.backendSupportsFeature(.error_return_trace)) return; + if (!mod.comp.bin_file.options.error_return_tracing) return; // This is only relevant at runtime. 
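
`zirSaveErrRetIndex` (and `zirRestoreErrRetIndex` below) only do work when error return tracing is live: a backend that supports the feature, `error_return_tracing` enabled, and a runtime (not comptime/typeof) block. A sketch of the user-facing feature these instructions maintain, assuming a Debug build on a supported backend:

    const std = @import("std");

    fn mayFail() !void {
        return error.Oops;
    }

    pub fn main() void {
        mayFail() catch {
            // Non-null only when the compiler emitted the trace bookkeeping
            // that zirSaveErrRetIndex guards on.
            if (@errorReturnTrace()) |trace| {
                std.debug.dumpStackTrace(trace.*);
            }
        };
    }
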
if (block.is_comptime or block.is_typeof) return; @@ -17415,7 +17820,7 @@ fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const save_index = inst_data.operand == .none or b: { const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); - break :b operand_ty.isError(); + break :b operand_ty.isError(mod); }; if (save_index) @@ -17436,7 +17841,7 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) const tracy = trace(@src()); defer tracy.end(); - const saved_index = if (Zir.refToIndex(inst_data.block)) |zir_block| b: { + const saved_index = if (Zir.refToIndexAllowNone(inst_data.block)) |zir_block| b: { var block = start_block; while (true) { if (block.label) |label| { @@ -17462,22 +17867,21 @@ fn zirRestoreErrRetIndex(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) assert(saved_index != .none); // The .error_return_trace_index field was dropped somewhere - const operand = try sema.resolveInst(inst_data.operand); + const operand = try sema.resolveInstAllowNone(inst_data.operand); return sema.popErrorReturnTrace(start_block, src, operand, saved_index); } fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void { - assert(sema.fn_ret_ty.zigTypeTag() == .ErrorUnion); + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; + assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion); - if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| { + if (mod.typeToInferredErrorSet(sema.fn_ret_ty.errorUnionSet(mod))) |ies| { const op_ty = sema.typeOf(uncasted_operand); - switch (op_ty.zigTypeTag()) { - .ErrorSet => { - try payload.data.addErrorSet(sema.gpa, op_ty); - }, - .ErrorUnion => { - try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet()); - }, + switch (op_ty.zigTypeTag(mod)) { + .ErrorSet => try ies.addErrorSet(op_ty, ip, gpa), + .ErrorUnion => try ies.addErrorSet(op_ty.errorUnionSet(mod), ip, gpa), else => {}, } } @@ -17492,7 +17896,8 @@ fn analyzeRet( // Special case for returning an error to an inferred error set; we need to // add the error tag to the inferred error set of the in-scope function, so // that the coercion below works correctly. 
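
`addToInferredErrorSet` is what lets an inferred error set (a bare `!T` return type) accumulate each error a function can return before the operand is coerced to `sema.fn_ret_ty`. A runnable illustration of the behavior the `ies.addErrorSet` migration must keep intact:

    const std = @import("std");

    // The error set here is inferred: `error.NotADigit` is added to it when
    // the `return` is analyzed, which is what makes the coercion at each
    // call site type-check.
    fn parseDigit(c: u8) !u4 {
        if (c < '0' or c > '9') return error.NotADigit;
        return @intCast(u4, c - '0');
    }

    test "inferred error set contains the returned error" {
        try std.testing.expectError(error.NotADigit, parseDigit('x'));
        try std.testing.expectEqual(@as(u4, 7), try parseDigit('7'));
    }
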
- if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) { + const mod = sema.mod; + if (sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion) { try sema.addToInferredErrorSet(uncasted_operand); } const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) { @@ -17540,6 +17945,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].ptr_type; const extra = sema.code.extraData(Zir.Inst.PtrType, inst_data.payload_index); const elem_ty_src: LazySrcLoc = .{ .node_offset_ptr_elem = extra.data.src_node }; @@ -17552,46 +17958,54 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const elem_ty = blk: { const air_inst = try sema.resolveInst(extra.data.elem_type); const ty = sema.analyzeAsType(block, elem_ty_src, air_inst) catch |err| { - if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer()) { + if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer(mod)) { try sema.errNote(block, elem_ty_src, sema.err.?, "use '.*' to dereference pointer", .{}); } return err; }; - if (ty.tag() == .generic_poison) return error.GenericPoison; + if (ty.isGenericPoison()) return error.GenericPoison; break :blk ty; }; - const target = sema.mod.getTarget(); + + if (elem_ty.zigTypeTag(mod) == .NoReturn) + return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{}); + + const target = mod.getTarget(); var extra_i = extra.end; const sentinel = if (inst_data.flags.has_sentinel) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; - break :blk (try sema.resolveInstConst(block, sentinel_src, ref, "pointer sentinel value must be comptime-known")).val; - } else null; + const coerced = try sema.coerce(block, elem_ty, try sema.resolveInst(ref), sentinel_src); + const val = try sema.resolveConstValue(block, sentinel_src, coerced, "pointer sentinel value must be comptime-known"); + break :blk val.toIntern(); + } else .none; - const abi_align: u32 = if (inst_data.flags.has_align) blk: { + const abi_align: InternPool.Alignment = if (inst_data.flags.has_align) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; const coerced = try sema.coerce(block, Type.u32, try sema.resolveInst(ref), align_src); const val = try sema.resolveConstValue(block, align_src, coerced, "pointer alignment must be comptime-known"); // Check if this happens to be the lazy alignment of our element type, in // which case we can make this 0 without resolving it. 
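
The lazy-alignment special case above now yields `.none` (the default) rather than `0` when an `align(...)` annotation merely restates the element type's ABI alignment, so no layout resolution is forced. The observable consequence is that such an annotation produces the canonical pointer type; sketched as a test using `@alignOf` to stay target-independent:

    const std = @import("std");

    test "align(@alignOf(T)) collapses to the default pointer type" {
        const P = *align(@alignOf(u32)) u32;
        try std.testing.expect(P == *u32);
        try std.testing.expect(@typeInfo(P).Pointer.alignment == @alignOf(u32));
    }
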
- if (val.castTag(.lazy_align)) |payload| { - if (payload.data.eql(elem_ty, sema.mod)) { - break :blk 0; - } + switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .lazy_align => |lazy_ty| if (lazy_ty == elem_ty.toIntern()) break :blk .none, + else => {}, + }, + else => {}, } - const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(target, sema)).?); + const abi_align = @intCast(u32, (try val.getUnsignedIntAdvanced(mod, sema)).?); try sema.validateAlign(block, align_src, abi_align); - break :blk abi_align; - } else 0; + break :blk InternPool.Alignment.fromByteUnits(abi_align); + } else .none; const address_space: std.builtin.AddressSpace = if (inst_data.flags.has_addrspace) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; break :blk try sema.analyzeAddressSpace(block, addrspace_src, ref, .pointer); - } else if (elem_ty.zigTypeTag() == .Fn and target.cpu.arch == .avr) .flash else .generic; + } else if (elem_ty.zigTypeTag(mod) == .Fn and target.cpu.arch == .avr) .flash else .generic; const bit_offset = if (inst_data.flags.has_bit_range) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); @@ -17611,50 +18025,52 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return sema.fail(block, bitoffset_src, "bit offset starts after end of host integer", .{}); } - if (elem_ty.zigTypeTag() == .NoReturn) { - return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{}); - } else if (elem_ty.zigTypeTag() == .Fn) { + if (elem_ty.zigTypeTag(mod) == .Fn) { if (inst_data.size != .One) { return sema.fail(block, elem_ty_src, "function pointers must be single pointers", .{}); } - const fn_align = elem_ty.fnInfo().alignment; - if (inst_data.flags.has_align and abi_align != 0 and fn_align != 0 and + const fn_align = mod.typeToFunc(elem_ty).?.alignment; + if (inst_data.flags.has_align and abi_align != .none and fn_align != .none and abi_align != fn_align) { return sema.fail(block, align_src, "function pointer alignment disagrees with function alignment", .{}); } - } else if (inst_data.size == .Many and elem_ty.zigTypeTag() == .Opaque) { + } else if (inst_data.size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_ty_src, "unknown-length pointer to opaque not allowed", .{}); } else if (inst_data.size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(block, elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(block, elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src.toSrcLoc(src_decl), elem_ty, .other); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsNotExtern(msg, elem_ty_src.toSrcLoc(src_decl, mod), elem_ty, .other); try sema.addDeclaredHereNote(msg, elem_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - if (elem_ty.zigTypeTag() == .Opaque) { + if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, elem_ty_src, "C pointers cannot point to opaque types", .{}); } } - const ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = elem_ty, + const ty = try mod.ptrType(.{ + .child = elem_ty.toIntern(), .sentinel = sentinel, - .@"align" = abi_align, - 
.@"addrspace" = address_space, - .bit_offset = bit_offset, - .host_size = host_size, - .mutable = inst_data.flags.is_mutable, - .@"allowzero" = inst_data.flags.is_allowzero, - .@"volatile" = inst_data.flags.is_volatile, - .size = inst_data.size, + .flags = .{ + .alignment = abi_align, + .address_space = address_space, + .is_const = !inst_data.flags.is_mutable, + .is_allowzero = inst_data.flags.is_allowzero, + .is_volatile = inst_data.flags.is_volatile, + .size = inst_data.size, + }, + .packed_offset = .{ + .bit_offset = bit_offset, + .host_size = host_size, + }, }); return sema.addType(ty); } @@ -17666,8 +18082,9 @@ fn zirStructInitEmpty(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const obj_ty = try sema.resolveType(block, src, inst_data.operand); + const mod = sema.mod; - switch (obj_ty.zigTypeTag()) { + switch (obj_ty.zigTypeTag(mod)) { .Struct => return sema.structInitEmpty(block, obj_ty, src, src), .Array, .Vector => return sema.arrayInitEmpty(block, src, obj_ty), .Void => return sema.addConstant(obj_ty, Value.void), @@ -17683,12 +18100,13 @@ fn structInitEmpty( dest_src: LazySrcLoc, init_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; // This logic must be synchronized with that in `zirStructInit`. const struct_ty = try sema.resolveTypeFields(obj_ty); // The init values to use for the struct instance. - const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount()); + const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount(mod)); defer gpa.free(field_inits); @memset(field_inits, .none); @@ -17696,20 +18114,19 @@ fn structInitEmpty( } fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) CompileError!Air.Inst.Ref { - const arr_len = obj_ty.arrayLen(); + const mod = sema.mod; + const arr_len = obj_ty.arrayLen(mod); if (arr_len != 0) { - if (obj_ty.zigTypeTag() == .Array) { + if (obj_ty.zigTypeTag(mod) == .Array) { return sema.fail(block, src, "expected {d} array elements; found 0", .{arr_len}); } else { return sema.fail(block, src, "expected {d} vector elements; found 0", .{arr_len}); } } - if (obj_ty.sentinel()) |sentinel| { - const val = try Value.Tag.empty_array_sentinel.create(sema.arena, sentinel); - return sema.addConstant(obj_ty, val); - } else { - return sema.addConstant(obj_ty, Value.initTag(.empty_array)); - } + return sema.addConstant(obj_ty, (try mod.intern(.{ .aggregate = .{ + .ty = obj_ty.toIntern(), + .storage = .{ .elems = &.{} }, + } })).toValue()); } fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -17719,7 +18136,7 @@ fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const init_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.UnionInit, inst_data.payload_index).data; const union_ty = try sema.resolveType(block, ty_src, extra.union_type); - const field_name = try sema.resolveConstString(block, field_src, extra.field_name, "name of field being initialized must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, "name of field being initialized must be comptime-known"); const init = try sema.resolveInst(extra.init); return sema.unionInit(block, init, init_src, union_ty, ty_src, field_name, field_src); } @@ -17731,21 +18148,23 @@ fn unionInit( init_src: 
LazySrcLoc, union_ty: Type, union_ty_src: LazySrcLoc, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src); - const field = union_ty.unionFields().values()[field_index]; + const field = union_ty.unionFields(mod).values()[field_index]; const init = try sema.coerce(block, field.ty, uncasted_init, init_src); if (try sema.resolveMaybeUndefVal(init)) |init_val| { - const tag_ty = union_ty.unionTagTypeHypothetical(); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); - const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); - return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = init_val, - })); + const tag_ty = union_ty.unionTagTypeHypothetical(mod); + const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); + return sema.addConstant(union_ty, (try mod.intern(.{ .un = .{ + .ty = union_ty.toIntern(), + .tag = try tag_val.intern(tag_ty, mod), + .val = try init_val.intern(field.ty, mod), + } })).toValue()); } try sema.requireRuntimeBlock(block, init_src, null); @@ -17766,29 +18185,30 @@ fn zirStructInit( const extra = sema.code.extraData(Zir.Inst.StructInit, inst_data.payload_index); const src = inst_data.src(); + const mod = sema.mod; const first_item = sema.code.extraData(Zir.Inst.StructInit.Item, extra.end).data; const first_field_type_data = zir_datas[first_item.field_type].pl_node; const first_field_type_extra = sema.code.extraData(Zir.Inst.FieldType, first_field_type_data.payload_index).data; const resolved_ty = try sema.resolveType(block, src, first_field_type_extra.container_type); try sema.resolveTypeLayout(resolved_ty); - if (resolved_ty.zigTypeTag() == .Struct) { + if (resolved_ty.zigTypeTag(mod) == .Struct) { // This logic must be synchronized with that in `zirStructInitEmpty`. // Maps field index to field_type index of where it was already initialized. // For making sure all fields are accounted for and no fields are duplicated. - const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount()); + const found_fields = try gpa.alloc(Zir.Inst.Index, resolved_ty.structFieldCount(mod)); defer gpa.free(found_fields); // The init values to use for the struct instance. 
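// (one slot per declared field, preset to .none; any slot still .none after the
// explicit initializers are applied is later filled from the field's default value,
// or reported as a missing field, in finishStructInit)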
- const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount()); + const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount(mod)); defer gpa.free(field_inits); @memset(field_inits, .none); var field_i: u32 = 0; var extra_index = extra.end; - const is_packed = resolved_ty.containerLayout() == .Packed; + const is_packed = resolved_ty.containerLayout(mod) == .Packed; while (field_i < extra.data.fields_len) : (field_i += 1) { const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index); extra_index = item.end; @@ -17796,8 +18216,8 @@ fn zirStructInit( const field_type_data = zir_datas[item.data.field_type].pl_node; const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node }; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); - const field_index = if (resolved_ty.isTuple()) + const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start)); + const field_index = if (resolved_ty.isTuple(mod)) try sema.tupleFieldIndex(block, resolved_ty, field_name, field_src) else try sema.structFieldIndex(block, resolved_ty, field_name, field_src); @@ -17815,19 +18235,19 @@ fn zirStructInit( } found_fields[field_index] = item.data.field_type; field_inits[field_index] = try sema.resolveInst(item.data.init); - if (!is_packed) if (resolved_ty.structFieldValueComptime(field_index)) |default_value| { + if (!is_packed) if (try resolved_ty.structFieldValueComptime(mod, field_index)) |default_value| { const init_val = (try sema.resolveMaybeUndefVal(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index), sema.mod)) { + if (!init_val.eql(default_value, resolved_ty.structFieldType(field_index, mod), mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, resolved_ty, field_index); } }; } return sema.finishStructInit(block, src, src, field_inits, resolved_ty, is_ref); - } else if (resolved_ty.zigTypeTag() == .Union) { + } else if (resolved_ty.zigTypeTag(mod) == .Union) { if (extra.data.fields_len != 1) { return sema.fail(block, src, "union initialization expects exactly one field", .{}); } @@ -17837,32 +18257,32 @@ fn zirStructInit( const field_type_data = zir_datas[item.data.field_type].pl_node; const field_src: LazySrcLoc = .{ .node_offset_initializer = field_type_data.src_node }; const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data; - const field_name = sema.code.nullTerminatedString(field_type_extra.name_start); + const field_name = try mod.intern_pool.getOrPutString(gpa, sema.code.nullTerminatedString(field_type_extra.name_start)); const field_index = try sema.unionFieldIndex(block, resolved_ty, field_name, field_src); - const tag_ty = resolved_ty.unionTagTypeHypothetical(); - const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); - const tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const tag_ty = resolved_ty.unionTagTypeHypothetical(mod); + const enum_field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name, mod).?); + const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index); const init_inst = try sema.resolveInst(item.data.init); if (try 
sema.resolveMaybeUndefVal(init_inst)) |val| { - return sema.addConstantMaybeRef( - block, - resolved_ty, - try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = val }), - is_ref, - ); + const field = resolved_ty.unionFields(mod).values()[field_index]; + return sema.addConstantMaybeRef(block, resolved_ty, (try mod.intern(.{ .un = .{ + .ty = resolved_ty.toIntern(), + .tag = try tag_val.intern(tag_ty, mod), + .val = try val.intern(field.ty, mod), + } })).toValue(), is_ref); } if (is_ref) { - const target = sema.mod.getTarget(); - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const target = mod.getTarget(); + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = resolved_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); const field_ptr = try sema.unionFieldPtr(block, field_src, alloc, field_name, field_src, resolved_ty, true); try sema.storePtr(block, src, field_ptr, init_inst); - const new_tag = try sema.addConstant(resolved_ty.unionTagTypeHypothetical(), tag_val); + const new_tag = try sema.addConstant(resolved_ty.unionTagTypeHypothetical(mod), tag_val); _ = try block.addBinOp(.set_union_tag, alloc, new_tag); return sema.makePtrConst(block, alloc); } @@ -17870,7 +18290,7 @@ fn zirStructInit( try sema.requireRuntimeBlock(block, src, null); try sema.queueFullTypeResolution(resolved_ty); return block.addUnionInit(resolved_ty, field_index, init_inst); - } else if (resolved_ty.isAnonStruct()) { + } else if (resolved_ty.isAnonStruct(mod)) { return sema.fail(block, src, "TODO anon struct init validation", .{}); } unreachable; @@ -17885,76 +18305,70 @@ fn finishStructInit( struct_ty: Type, is_ref: bool, ) CompileError!Air.Inst.Ref { - const gpa = sema.gpa; + const mod = sema.mod; + const ip = &mod.intern_pool; var root_msg: ?*Module.ErrorMsg = null; errdefer if (root_msg) |msg| msg.destroy(sema.gpa); - if (struct_ty.isAnonStruct()) { - const struct_obj = struct_ty.castTag(.anon_struct).?.data; - for (struct_obj.values, 0..) |default_val, i| { - if (field_inits[i] != .none) continue; + switch (ip.indexToKey(struct_ty.toIntern())) { + .anon_struct_type => |anon_struct| { + for (anon_struct.types, anon_struct.values, 0..) 
|field_ty, default_val, i| { + if (field_inits[i] != .none) continue; - if (default_val.tag() == .unreachable_value) { - const field_name = struct_obj.names[i]; - const template = "missing struct field: {s}"; - const args = .{field_name}; - if (root_msg) |msg| { - try sema.errNote(block, init_src, msg, template, args); + if (default_val == .none) { + if (anon_struct.names.len == 0) { + const template = "missing tuple field with index {d}"; + if (root_msg) |msg| { + try sema.errNote(block, init_src, msg, template, .{i}); + } else { + root_msg = try sema.errMsg(block, init_src, template, .{i}); + } + } else { + const field_name = anon_struct.names[i]; + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; + if (root_msg) |msg| { + try sema.errNote(block, init_src, msg, template, args); + } else { + root_msg = try sema.errMsg(block, init_src, template, args); + } + } } else { - root_msg = try sema.errMsg(block, init_src, template, args); + field_inits[i] = try sema.addConstant(field_ty.toType(), default_val.toValue()); } - } else { - field_inits[i] = try sema.addConstant(struct_obj.types[i], default_val); } - } - } else if (struct_ty.isTuple()) { - var i: u32 = 0; - const len = struct_ty.structFieldCount(); - while (i < len) : (i += 1) { - if (field_inits[i] != .none) continue; + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + for (struct_obj.fields.values(), 0..) |field, i| { + if (field_inits[i] != .none) continue; - const default_val = struct_ty.structFieldDefaultValue(i); - if (default_val.tag() == .unreachable_value) { - const template = "missing tuple field with index {d}"; - if (root_msg) |msg| { - try sema.errNote(block, init_src, msg, template, .{i}); + if (field.default_val == .none) { + const field_name = struct_obj.fields.keys()[i]; + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; + if (root_msg) |msg| { + try sema.errNote(block, init_src, msg, template, args); + } else { + root_msg = try sema.errMsg(block, init_src, template, args); + } } else { - root_msg = try sema.errMsg(block, init_src, template, .{i}); + field_inits[i] = try sema.addConstant(field.ty, field.default_val.toValue()); } - } else { - field_inits[i] = try sema.addConstant(struct_ty.structFieldType(i), default_val); } - } - } else { - const struct_obj = struct_ty.castTag(.@"struct").?.data; - for (struct_obj.fields.values(), 0..) 
|field, i| { - if (field_inits[i] != .none) continue; - - if (field.default_val.tag() == .unreachable_value) { - const field_name = struct_obj.fields.keys()[i]; - const template = "missing struct field: {s}"; - const args = .{field_name}; - if (root_msg) |msg| { - try sema.errNote(block, init_src, msg, template, args); - } else { - root_msg = try sema.errMsg(block, init_src, template, args); - } - } else { - field_inits[i] = try sema.addConstant(field.ty, field.default_val); - } - } + }, + else => unreachable, } if (root_msg) |msg| { - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const fqn = try struct_obj.data.getFullyQualifiedName(sema.mod); - defer gpa.free(fqn); - try sema.mod.errNoteNonLazy( - struct_obj.data.srcLoc(sema.mod), + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const fqn = try struct_obj.getFullyQualifiedName(mod); + try mod.errNoteNonLazy( + struct_obj.srcLoc(mod), msg, - "struct '{s}' declared here", - .{fqn}, + "struct '{}' declared here", + .{fqn.fmt(ip)}, ); } root_msg = null; @@ -17969,18 +18383,22 @@ fn finishStructInit( } else null; const runtime_index = opt_runtime_index orelse { - const values = try sema.arena.alloc(Value, field_inits.len); - for (field_inits, 0..) |field_init, i| { - values[i] = (sema.resolveMaybeUndefVal(field_init) catch unreachable).?; + const elems = try sema.arena.alloc(InternPool.Index, field_inits.len); + for (elems, field_inits, 0..) |*elem, field_init, field_i| { + elem.* = try (sema.resolveMaybeUndefVal(field_init) catch unreachable).? + .intern(struct_ty.structFieldType(field_i, mod), mod); } - const struct_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstantMaybeRef(block, struct_ty, struct_val, is_ref); + const struct_val = try mod.intern(.{ .aggregate = .{ + .ty = struct_ty.toIntern(), + .storage = .{ .elems = elems }, + } }); + return sema.addConstantMaybeRef(block, struct_ty, struct_val.toValue(), is_ref); }; if (is_ref) { try sema.resolveStructLayout(struct_ty); const target = sema.mod.getTarget(); - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = struct_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); @@ -17997,8 +18415,8 @@ fn finishStructInit( sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(dest_src.node_offset.x, sema.gpa, decl, runtime_index); + const decl = mod.declPtr(block.src_decl); + const field_src = mod.initSrc(dest_src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, dest_src, field_src); unreachable; }, @@ -18014,79 +18432,85 @@ fn zirStructInitAnon( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; + const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index); - const types = try sema.arena.alloc(Type, extra.data.fields_len); - const values = try sema.arena.alloc(Value, types.len); - var fields = std.StringArrayHashMapUnmanaged(u32){}; - defer fields.deinit(sema.gpa); - try fields.ensureUnusedCapacity(sema.gpa, types.len); + const types = try sema.arena.alloc(InternPool.Index, extra.data.fields_len); + const values = try sema.arena.alloc(InternPool.Index, types.len); + var fields = 
std.AutoArrayHashMap(InternPool.NullTerminatedString, u32).init(sema.arena); + try fields.ensureUnusedCapacity(types.len); // Find which field forces the expression to be runtime, if any. const opt_runtime_index = rs: { var runtime_index: ?usize = null; var extra_index = extra.end; - for (types, 0..) |*field_ty, i| { + for (types, 0..) |*field_ty, i_usize| { + const i = @intCast(u32, i_usize); const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); extra_index = item.end; const name = sema.code.nullTerminatedString(item.data.field_name); - const gop = fields.getOrPutAssumeCapacity(name); + const name_ip = try mod.intern_pool.getOrPutString(gpa, name); + const gop = fields.getOrPutAssumeCapacity(name_ip); if (gop.found_existing) { const msg = msg: { - const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); + const decl = mod.declPtr(block.src_decl); + const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "duplicate field", .{}); - errdefer msg.destroy(sema.gpa); + errdefer msg.destroy(gpa); - const prev_source = Module.initSrc(src.node_offset.x, sema.gpa, decl, gop.value_ptr.*); + const prev_source = mod.initSrc(src.node_offset.x, decl, gop.value_ptr.*); try sema.errNote(block, prev_source, msg, "other field here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - gop.value_ptr.* = @intCast(u32, i); + gop.value_ptr.* = i; const init = try sema.resolveInst(item.data.init); - field_ty.* = sema.typeOf(init); - if (types[i].zigTypeTag() == .Opaque) { + field_ty.* = sema.typeOf(init).toIntern(); + if (field_ty.toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { - const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); + const decl = mod.declPtr(block.src_decl); + const field_src = mod.initSrc(src.node_offset.x, decl, i); const msg = try sema.errMsg(block, field_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, types[i]); + try sema.addDeclaredHereNote(msg, field_ty.toType()); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(init)) |init_val| { - values[i] = init_val; + values[i] = try init_val.intern(field_ty.toType(), mod); } else { - values[i] = Value.initTag(.unreachable_value); + values[i] = .none; runtime_index = i; } } break :rs runtime_index; }; - const tuple_ty = try Type.Tag.anon_struct.create(sema.arena, .{ - .names = try sema.arena.dupe([]const u8, fields.keys()), + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ + .names = fields.keys(), .types = types, .values = values, - }); + } }); const runtime_index = opt_runtime_index orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstantMaybeRef(block, tuple_ty, tuple_val, is_ref); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .storage = .{ .elems = values }, + } }); + return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref); }; sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const field_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, runtime_index); + const decl = mod.declPtr(block.src_decl); + const field_src = 
mod.initSrc(src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, src, field_src); unreachable; }, @@ -18094,9 +18518,9 @@ fn zirStructInitAnon( }; if (is_ref) { - const target = sema.mod.getTarget(); - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = tuple_ty, + const target = mod.getTarget(); + const alloc_ty = try Type.ptr(sema.arena, mod, .{ + .pointee_type = tuple_ty.toType(), .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); @@ -18106,12 +18530,12 @@ fn zirStructInitAnon( const item = sema.code.extraData(Zir.Inst.StructInitAnon.Item, extra_index); extra_index = item.end; - const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const field_ptr_ty = try Type.ptr(sema.arena, mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = field_ty, + .pointee_type = field_ty.toType(), }); - if (values[i].tag() == .unreachable_value) { + if (values[i] == .none) { const init = try sema.resolveInst(item.data.init); const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); _ = try block.addBinOp(.store, field_ptr, init); @@ -18129,7 +18553,7 @@ fn zirStructInitAnon( element_refs[i] = try sema.resolveInst(item.data.init); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn zirArrayInit( @@ -18138,6 +18562,7 @@ fn zirArrayInit( inst: Zir.Inst.Index, is_ref: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -18147,20 +18572,20 @@ fn zirArrayInit( assert(args.len >= 2); // array_ty + at least one element const array_ty = try sema.resolveType(block, src, args[0]); - const sentinel_val = array_ty.sentinel(); + const sentinel_val = array_ty.sentinel(mod); const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len - 1 + @boolToInt(sentinel_val != null)); defer gpa.free(resolved_args); for (args[1..], 0..) |arg, i| { const resolved_arg = try sema.resolveInst(arg); - const elem_ty = if (array_ty.zigTypeTag() == .Struct) - array_ty.structFieldType(i) + const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct) + array_ty.structFieldType(i, mod) else - array_ty.elemType2(); + array_ty.elemType2(mod); resolved_args[i] = sema.coerce(block, elem_ty, resolved_arg, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const elem_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, i); + const decl = mod.declPtr(block.src_decl); + const elem_src = mod.initSrc(src.node_offset.x, decl, i); _ = try sema.coerce(block, elem_ty, resolved_arg, elem_src); unreachable; }, @@ -18169,7 +18594,7 @@ fn zirArrayInit( } if (sentinel_val) |some| { - resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(), some); + resolved_args[resolved_args.len - 1] = try sema.addConstant(array_ty.elemType2(mod), some); } const opt_runtime_index: ?u32 = for (resolved_args, 0..) |arg, i| { @@ -18178,21 +18603,25 @@ fn zirArrayInit( } else null; const runtime_index = opt_runtime_index orelse { - const elem_vals = try sema.arena.alloc(Value, resolved_args.len); - - for (resolved_args, 0..) |arg, i| { + const elem_vals = try sema.arena.alloc(InternPool.Index, resolved_args.len); + for (elem_vals, resolved_args, 0..) 
|*val, arg, i| { + const elem_ty = if (array_ty.zigTypeTag(mod) == .Struct) + array_ty.structFieldType(i, mod) + else + array_ty.elemType2(mod); // We checked that all args are comptime above. - elem_vals[i] = (sema.resolveMaybeUndefVal(arg) catch unreachable).?; + val.* = try ((sema.resolveMaybeUndefVal(arg) catch unreachable).?).intern(elem_ty, mod); } - - const array_val = try Value.Tag.aggregate.create(sema.arena, elem_vals); - return sema.addConstantMaybeRef(block, array_ty, array_val, is_ref); + return sema.addConstantMaybeRef(block, array_ty, (try mod.intern(.{ .aggregate = .{ + .ty = array_ty.toIntern(), + .storage = .{ .elems = elem_vals }, + } })).toValue(), is_ref); }; sema.requireRuntimeBlock(block, .unneeded, null) catch |err| switch (err) { error.NeededSourceLocation => { - const decl = sema.mod.declPtr(block.src_decl); - const elem_src = Module.initSrc(src.node_offset.x, sema.gpa, decl, runtime_index); + const decl = mod.declPtr(block.src_decl); + const elem_src = mod.initSrc(src.node_offset.x, decl, runtime_index); try sema.requireRuntimeBlock(block, src, elem_src); unreachable; }, @@ -18201,19 +18630,19 @@ fn zirArrayInit( try sema.queueFullTypeResolution(array_ty); if (is_ref) { - const target = sema.mod.getTarget(); - const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ + const target = mod.getTarget(); + const alloc_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = array_ty, .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); - if (array_ty.isTuple()) { + if (array_ty.isTuple(mod)) { for (resolved_args, 0..) |arg, i| { - const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = array_ty.structFieldType(i), + .pointee_type = array_ty.structFieldType(i, mod), }); const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty); @@ -18224,10 +18653,10 @@ fn zirArrayInit( return sema.makePtrConst(block, alloc); } - const elem_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ + const elem_ptr_ty = try Type.ptr(sema.arena, mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = array_ty.elemType2(), + .pointee_type = array_ty.elemType2(mod), }); const elem_ptr_ty_ref = try sema.addType(elem_ptr_ty); @@ -18252,44 +18681,49 @@ fn zirArrayInitAnon( const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index); const operands = sema.code.refSlice(extra.end, extra.data.operands_len); + const mod = sema.mod; - const types = try sema.arena.alloc(Type, operands.len); - const values = try sema.arena.alloc(Value, operands.len); + const types = try sema.arena.alloc(InternPool.Index, operands.len); + const values = try sema.arena.alloc(InternPool.Index, operands.len); const opt_runtime_src = rs: { var runtime_src: ?LazySrcLoc = null; for (operands, 0..) 
|operand, i| { const operand_src = src; // TODO better source location const elem = try sema.resolveInst(operand); - types[i] = sema.typeOf(elem); - if (types[i].zigTypeTag() == .Opaque) { + types[i] = sema.typeOf(elem).toIntern(); + if (types[i].toType().zigTypeTag(mod) == .Opaque) { const msg = msg: { const msg = try sema.errMsg(block, operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, types[i]); + try sema.addDeclaredHereNote(msg, types[i].toType()); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } if (try sema.resolveMaybeUndefVal(elem)) |val| { - values[i] = val; + values[i] = val.toIntern(); } else { - values[i] = Value.initTag(.unreachable_value); + values[i] = .none; runtime_src = operand_src; } } break :rs runtime_src; }; - const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{ + const tuple_ty = try mod.intern(.{ .anon_struct_type = .{ .types = types, .values = values, - }); + .names = &.{}, + } }); const runtime_src = opt_runtime_src orelse { - const tuple_val = try Value.Tag.aggregate.create(sema.arena, values); - return sema.addConstantMaybeRef(block, tuple_ty, tuple_val, is_ref); + const tuple_val = try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty, + .storage = .{ .elems = values }, + } }); + return sema.addConstantMaybeRef(block, tuple_ty.toType(), tuple_val.toValue(), is_ref); }; try sema.requireRuntimeBlock(block, src, runtime_src); @@ -18297,7 +18731,7 @@ fn zirArrayInitAnon( if (is_ref) { const target = sema.mod.getTarget(); const alloc_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = tuple_ty, + .pointee_type = tuple_ty.toType(), .@"addrspace" = target_util.defaultAddressSpace(target, .local), }); const alloc = try block.addTy(.alloc, alloc_ty); @@ -18306,9 +18740,9 @@ fn zirArrayInitAnon( const field_ptr_ty = try Type.ptr(sema.arena, sema.mod, .{ .mutable = true, .@"addrspace" = target_util.defaultAddressSpace(target, .local), - .pointee_type = types[i], + .pointee_type = types[i].toType(), }); - if (values[i].tag() == .unreachable_value) { + if (values[i] == .none) { const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty); _ = try block.addBinOp(.store, field_ptr, try sema.resolveInst(operand)); } @@ -18322,7 +18756,7 @@ fn zirArrayInitAnon( element_refs[i] = try sema.resolveInst(operand); } - return block.addAggregateInit(tuple_ty, element_refs); + return block.addAggregateInit(tuple_ty.toType(), element_refs); } fn addConstantMaybeRef( @@ -18337,8 +18771,8 @@ fn addConstantMaybeRef( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const decl = try anon_decl.finish( - try ty.copy(anon_decl.arena()), - try val.copy(anon_decl.arena()), + ty, + val, 0, // default alignment ); return sema.analyzeDeclRef(decl); @@ -18350,11 +18784,13 @@ fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro const ty_src = inst_data.src(); const field_src = inst_data.src(); const aggregate_ty = try sema.resolveType(block, ty_src, extra.container_type); - const field_name = try sema.resolveConstString(block, field_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, "field name must be comptime-known"); return sema.fieldType(block, aggregate_ty, field_name, field_src, ty_src); } fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = 
sema.mod; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const ty_src = inst_data.src(); @@ -18367,7 +18803,8 @@ fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A error.GenericPoison => return Air.Inst.Ref.generic_poison_type, else => |e| return e, }; - const field_name = sema.code.nullTerminatedString(extra.name_start); + const zir_field_name = sema.code.nullTerminatedString(extra.name_start); + const field_name = try ip.getOrPutString(sema.gpa, zir_field_name); return sema.fieldType(block, aggregate_ty, field_name, field_name_src, ty_src); } @@ -18375,41 +18812,43 @@ fn fieldType( sema: *Sema, block: *Block, aggregate_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ty_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; var cur_ty = aggregate_ty; while (true) { const resolved_ty = try sema.resolveTypeFields(cur_ty); cur_ty = resolved_ty; - switch (cur_ty.zigTypeTag()) { - .Struct => { - if (cur_ty.isAnonStruct()) { + switch (cur_ty.zigTypeTag(mod)) { + .Struct => switch (mod.intern_pool.indexToKey(cur_ty.toIntern())) { + .anon_struct_type => |anon_struct| { const field_index = try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); - return sema.addType(cur_ty.tupleFields().types[field_index]); - } - const struct_obj = cur_ty.castTag(.@"struct").?.data; - const field = struct_obj.fields.get(field_name) orelse - return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); - return sema.addType(field.ty); + return sema.addType(anon_struct.types[field_index].toType()); + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + const field = struct_obj.fields.get(field_name) orelse + return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); + return sema.addType(field.ty); + }, + else => unreachable, }, .Union => { - const union_obj = cur_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(cur_ty).?; const field = union_obj.fields.get(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name); return sema.addType(field.ty); }, .Optional => { - if (cur_ty.castTag(.optional)) |some| { - // Struct/array init through optional requires the child type to not be a pointer. - // If the child of .optional is a pointer it'll error on the next loop. - cur_ty = some.data; - continue; - } + // Struct/array init through optional requires the child type to not be a pointer. + // If the child of .optional is a pointer it'll error on the next loop. 
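+ // (the optional's child type is now read straight off the interned key,
+ // replacing the old castTag(.optional) payload access)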
+ cur_ty = mod.intern_pool.indexToKey(cur_ty.toIntern()).opt_type.toType(); + continue; }, .ErrorUnion => { - cur_ty = cur_ty.errorUnionPayload(); + cur_ty = cur_ty.errorUnionPayload(mod); continue; }, else => {}, @@ -18425,18 +18864,23 @@ fn zirErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { } fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { + const mod = sema.mod; const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); - const opt_ptr_stack_trace_ty = try Type.Tag.optional_single_mut_pointer.create(sema.arena, stack_trace_ty); + const ptr_stack_trace_ty = try mod.singleMutPtrType(stack_trace_ty); + const opt_ptr_stack_trace_ty = try Type.optional(sema.arena, ptr_stack_trace_ty, mod); if (sema.owner_func != null and sema.owner_func.?.calls_or_awaits_errorable_fn and - sema.mod.comp.bin_file.options.error_return_tracing and - sema.mod.backendSupportsFeature(.error_return_trace)) + mod.comp.bin_file.options.error_return_tracing and + mod.backendSupportsFeature(.error_return_trace)) { return block.addTy(.err_return_trace, opt_ptr_stack_trace_ty); } - return sema.addConstant(opt_ptr_stack_trace_ty, Value.null); + return sema.addConstant(opt_ptr_stack_trace_ty, (try mod.intern(.{ .opt = .{ + .ty = opt_ptr_stack_trace_ty.toIntern(), + .val = .none, + } })).toValue()); } fn zirFrame( @@ -18449,27 +18893,28 @@ fn zirFrame( } fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const ty = try sema.resolveType(block, operand_src, inst_data.operand); - if (ty.isNoReturn()) { + if (ty.isNoReturn(mod)) { return sema.fail(block, operand_src, "no align available for type '{}'", .{ty.fmt(sema.mod)}); } - const target = sema.mod.getTarget(); - const val = try ty.lazyAbiAlignment(target, sema.arena); - if (val.tag() == .lazy_align) { + const val = try ty.lazyAbiAlignment(mod); + if (val.isLazyAlign(mod)) { try sema.queueFullTypeResolution(ty); } return sema.addConstant(Type.comptime_int, val); } fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(Type.u1); - if (val.toBool()) return sema.addConstant(Type.u1, Value.one); - return sema.addConstant(Type.u1, Value.zero); + if (val.isUndef(mod)) return sema.addConstUndef(Type.u1); + if (val.toBool()) return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 1)); + return sema.addConstant(Type.u1, try mod.intValue(Type.u1, 0)); } return block.addUnOp(.bool_to_int, operand); } @@ -18480,8 +18925,8 @@ fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { - const bytes = val.castTag(.@"error").?.data.name; - return sema.addStrLit(block, bytes); + const err_name = sema.mod.intern_pool.indexToKey(val.toIntern()).err.name; + return sema.addStrLit(block, sema.mod.intern_pool.stringToSlice(err_name)); } // Similar to zirTagName, we 
have a special AIR instruction for the error name in case an optimization pass @@ -18499,16 +18944,17 @@ fn zirUnaryMath( const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = sema.typeOf(operand); - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, .Vector => { - const scalar_ty = operand_ty.scalarType(); - switch (scalar_ty.zigTypeTag()) { + const scalar_ty = operand_ty.scalarType(mod); + switch (scalar_ty.zigTypeTag(mod)) { .ComptimeFloat, .Float => {}, else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{scalar_ty.fmt(sema.mod)}), } @@ -18516,25 +18962,27 @@ fn zirUnaryMath( else => return sema.fail(block, operand_src, "expected vector of floats or float type, found '{}'", .{operand_ty.fmt(sema.mod)}), } - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Vector => { - const scalar_ty = operand_ty.scalarType(); - const vec_len = operand_ty.vectorLen(); - const result_ty = try Type.vector(sema.arena, vec_len, scalar_ty); + const scalar_ty = operand_ty.scalarType(mod); + const vec_len = operand_ty.vectorLen(mod); + const result_ty = try mod.vectorType(.{ + .len = vec_len, + .child = scalar_ty.toIntern(), + }); if (try sema.resolveMaybeUndefVal(operand)) |val| { - if (val.isUndef()) + if (val.isUndef(mod)) return sema.addConstUndef(result_ty); - var elem_buf: Value.ElemValueBuffer = undefined; - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..) 
|*elem, i| { - const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf); - elem.* = try eval(elem_val, scalar_ty, sema.arena, sema.mod); + const elem_val = try val.elemValue(sema.mod, i); + elem.* = try (try eval(elem_val, scalar_ty, sema.arena, sema.mod)).intern(scalar_ty, mod); } - return sema.addConstant( - result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } try sema.requireRuntimeBlock(block, operand_src, null); @@ -18542,7 +18990,7 @@ fn zirUnaryMath( }, .ComptimeFloat, .Float => { if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { - if (operand_val.isUndef()) + if (operand_val.isUndef(mod)) return sema.addConstUndef(operand_ty); const result_val = try eval(operand_val, operand_ty, sema.arena, sema.mod); return sema.addConstant(operand_ty, result_val); @@ -18562,16 +19010,17 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const operand = try sema.resolveInst(inst_data.operand); const operand_ty = sema.typeOf(operand); const mod = sema.mod; + const ip = &mod.intern_pool; try sema.resolveTypeLayout(operand_ty); - const enum_ty = switch (operand_ty.zigTypeTag()) { + const enum_ty = switch (operand_ty.zigTypeTag(mod)) { .EnumLiteral => { const val = try sema.resolveConstValue(block, .unneeded, operand, ""); - const bytes = val.castTag(.enum_literal).?.data; - return sema.addStrLit(block, bytes); + const tag_name = ip.indexToKey(val.toIntern()).enum_literal; + return sema.addStrLit(block, ip.stringToSlice(tag_name)); }, .Enum => operand_ty, - .Union => operand_ty.unionTagType() orelse { + .Union => operand_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(block, src, "union '{}' is untagged", .{ operand_ty.fmt(sema.mod), @@ -18586,30 +19035,31 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air operand_ty.fmt(mod), }), }; - if (enum_ty.enumFieldCount() == 0) { + if (enum_ty.enumFieldCount(mod) == 0) { // TODO I don't think this is the correct way to handle this but // it prevents a crash. 
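// (an operand whose type has zero fields, e.g. 'const E = enum {};', has no tag
// names at all, hence the early compile error)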
return sema.fail(block, operand_src, "cannot get @tagName of empty enum '{}'", .{ enum_ty.fmt(mod), }); } - const enum_decl_index = enum_ty.getOwnerDecl(); + const enum_decl_index = enum_ty.getOwnerDecl(mod); const casted_operand = try sema.coerce(block, enum_ty, operand, operand_src); if (try sema.resolveDefinedValue(block, operand_src, casted_operand)) |val| { const field_index = enum_ty.enumTagFieldIndex(val, mod) orelse { const enum_decl = mod.declPtr(enum_decl_index); const msg = msg: { - const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{s}'", .{ - val.fmtValue(enum_ty, sema.mod), enum_decl.name, + const msg = try sema.errMsg(block, src, "no field with value '{}' in enum '{}'", .{ + val.fmtValue(enum_ty, sema.mod), enum_decl.name.fmt(ip), }); errdefer msg.destroy(sema.gpa); - try mod.errNoteNonLazy(enum_decl.srcLoc(), msg, "declared here", .{}); + try mod.errNoteNonLazy(enum_decl.srcLoc(mod), msg, "declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); }; - const field_name = enum_ty.enumFieldName(field_index); - return sema.addStrLit(block, field_name); + // TODO: write something like getCoercedInts to avoid needing to dupe + const field_name = enum_ty.enumFieldName(field_index, mod); + return sema.addStrLit(block, ip.stringToSlice(field_name)); } try sema.requireRuntimeBlock(block, src, operand_src); if (block.wantSafety() and sema.mod.backendSupportsFeature(.is_named_enum_value)) { @@ -18622,8 +19072,15 @@ fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air return block.addUnOp(.tag_name, casted_operand); } -fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { +fn zirReify( + sema: *Sema, + block: *Block, + extended: Zir.Inst.Extended.InstData, + inst: Zir.Inst.Index, +) CompileError!Air.Inst.Ref { const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const name_strategy = @intToEnum(Zir.Inst.NameStrategy, extended.small); const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src = LazySrcLoc.nodeOffset(extra.node); @@ -18632,10 +19089,10 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src); const val = try sema.resolveConstValue(block, operand_src, type_info, "operand to @Type must be comptime-known"); - const union_val = val.cast(Value.Payload.Union).?.data; + const union_val = ip.indexToKey(val.toIntern()).un; const target = mod.getTarget(); - const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag, mod).?; - if (union_val.val.anyUndef(mod)) return sema.failWithUseOfUndef(block, src); + if (try union_val.val.toValue().anyUndef(mod)) return sema.failWithUseOfUndef(block, src); + const tag_index = type_info_ty.unionTagFieldIndex(union_val.tag.toValue(), mod).?; switch (@intToEnum(std.builtin.TypeId, tag_index)) { .Type => return Air.Inst.Ref.type_type, .Void => return Air.Inst.Ref.void_type, @@ -18648,41 +19105,48 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in .AnyFrame => return sema.failWithUseOfAsync(block, src), .EnumLiteral => return Air.Inst.Ref.enum_literal_type, .Int => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - const signedness_val = struct_val[0]; - 
const bits_val = struct_val[1]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const signedness_val = try union_val.val.toValue().fieldValue( + mod, + fields.getIndex(try ip.getOrPutString(gpa, "signedness")).?, + ); + const bits_val = try union_val.val.toValue().fieldValue( + mod, + fields.getIndex(try ip.getOrPutString(gpa, "bits")).?, + ); - const signedness = signedness_val.toEnum(std.builtin.Signedness); - const bits = @intCast(u16, bits_val.toUnsignedInt(target)); - const ty = switch (signedness) { - .signed => try Type.Tag.int_signed.create(sema.arena, bits), - .unsigned => try Type.Tag.int_unsigned.create(sema.arena, bits), - }; + const signedness = mod.toEnum(std.builtin.Signedness, signedness_val); + const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); + const ty = try mod.intType(signedness, bits); return sema.addType(ty); }, .Vector => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - const len_val = struct_val[0]; - const child_val = struct_val[1]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "len"), + ).?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); - const len = len_val.toUnsignedInt(target); - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = child_val.toType(&buffer); + const len = @intCast(u32, len_val.toUnsignedInt(mod)); + const child_ty = child_val.toType(); try sema.checkVectorElemType(block, src, child_ty); - const ty = try Type.vector(sema.arena, len, try child_ty.copy(sema.arena)); + const ty = try mod.vectorType(.{ + .len = len, + .child = child_ty.toIntern(), + }); return sema.addType(ty); }, .Float => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // bits: comptime_int, - const bits_val = struct_val[0]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const bits_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "bits"), + ).?); - const bits = @intCast(u16, bits_val.toUnsignedInt(target)); + const bits = @intCast(u16, bits_val.toUnsignedInt(mod)); const ty = switch (bits) { 16 => Type.f16, 32 => Type.f32, @@ -18694,25 +19158,42 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in return sema.addType(ty); }, .Pointer => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - const size_val = struct_val[0]; - const is_const_val = struct_val[1]; - const is_volatile_val = struct_val[2]; - const alignment_val = struct_val[3]; - const address_space_val = struct_val[4]; - const child_val = struct_val[5]; - const is_allowzero_val = struct_val[6]; - const sentinel_val = struct_val[7]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const size_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "size"), + ).?); + const is_const_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_const"), + ).?); + const is_volatile_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_volatile"), + ).?); + const alignment_val = try 
union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "alignment"), + ).?); + const address_space_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "address_space"), + ).?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); + const is_allowzero_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_allowzero"), + ).?); + const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "sentinel"), + ).?); if (!try sema.intFitsInType(alignment_val, Type.u32, null)) { return sema.fail(block, src, "alignment must fit in 'u32'", .{}); } - const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?); - var buffer: Value.ToTypeBuffer = undefined; - const unresolved_elem_ty = child_val.toType(&buffer); - const elem_ty = if (abi_align == 0) + const abi_align = InternPool.Alignment.fromByteUnits( + (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?, + ); + + const unresolved_elem_ty = child_val.toType(); + const elem_ty = if (abi_align == .none) unresolved_elem_ty else t: { const elem_ty = try sema.resolveTypeFields(unresolved_elem_ty); @@ -18720,301 +19201,282 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in break :t elem_ty; }; - const ptr_size = size_val.toEnum(std.builtin.Type.Pointer.Size); + const ptr_size = mod.toEnum(std.builtin.Type.Pointer.Size, size_val); - var actual_sentinel: ?Value = null; - if (!sentinel_val.isNull()) { - if (ptr_size == .One or ptr_size == .C) { - return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); + const actual_sentinel: InternPool.Index = s: { + if (!sentinel_val.isNull(mod)) { + if (ptr_size == .One or ptr_size == .C) { + return sema.fail(block, src, "sentinels are only allowed on slices and unknown-length pointers", .{}); + } + const sentinel_ptr_val = sentinel_val.optionalValue(mod).?; + const ptr_ty = try Type.ptr(sema.arena, mod, .{ + .@"addrspace" = .generic, + .pointee_type = elem_ty, + }); + const sent_val = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; + break :s sent_val.toIntern(); } - const sentinel_ptr_val = sentinel_val.castTag(.opt_payload).?.data; - const ptr_ty = try Type.ptr(sema.arena, mod, .{ - .@"addrspace" = .generic, - .pointee_type = try elem_ty.copy(sema.arena), - }); - actual_sentinel = (try sema.pointerDeref(block, src, sentinel_ptr_val, ptr_ty)).?; - } + break :s .none; + }; - if (elem_ty.zigTypeTag() == .NoReturn) { + if (elem_ty.zigTypeTag(mod) == .NoReturn) { return sema.fail(block, src, "pointer to noreturn not allowed", .{}); - } else if (elem_ty.zigTypeTag() == .Fn) { + } else if (elem_ty.zigTypeTag(mod) == .Fn) { if (ptr_size != .One) { return sema.fail(block, src, "function pointers must be single pointers", .{}); } - const fn_align = elem_ty.fnInfo().alignment; - if (abi_align != 0 and fn_align != 0 and + const fn_align = mod.typeToFunc(elem_ty).?.alignment; + if (abi_align != .none and fn_align != .none and abi_align != fn_align) { return sema.fail(block, src, "function pointer alignment disagrees with function alignment", .{}); } - } else if (ptr_size == .Many and elem_ty.zigTypeTag() == .Opaque) { + } else if (ptr_size == .Many and elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, src, "unknown-length pointer to opaque not 
allowed", .{}); } else if (ptr_size == .C) { if (!try sema.validateExternType(elem_ty, .other)) { const msg = msg: { - const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(sema.mod)}); - errdefer msg.destroy(sema.gpa); + const msg = try sema.errMsg(block, src, "C pointers cannot point to non-C-ABI-compatible type '{}'", .{elem_ty.fmt(mod)}); + errdefer msg.destroy(gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), elem_ty, .other); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), elem_ty, .other); try sema.addDeclaredHereNote(msg, elem_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - if (elem_ty.zigTypeTag() == .Opaque) { + if (elem_ty.zigTypeTag(mod) == .Opaque) { return sema.fail(block, src, "C pointers cannot point to opaque types", .{}); } } - const ty = try Type.ptr(sema.arena, mod, .{ - .size = ptr_size, - .mutable = !is_const_val.toBool(), - .@"volatile" = is_volatile_val.toBool(), - .@"align" = abi_align, - .@"addrspace" = address_space_val.toEnum(std.builtin.AddressSpace), - .pointee_type = try elem_ty.copy(sema.arena), - .@"allowzero" = is_allowzero_val.toBool(), + const ty = try mod.ptrType(.{ + .child = elem_ty.toIntern(), .sentinel = actual_sentinel, + .flags = .{ + .size = ptr_size, + .is_const = is_const_val.toBool(), + .is_volatile = is_volatile_val.toBool(), + .alignment = abi_align, + .address_space = mod.toEnum(std.builtin.AddressSpace, address_space_val), + .is_allowzero = is_allowzero_val.toBool(), + }, }); return sema.addType(ty); }, .Array => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // len: comptime_int, - const len_val = struct_val[0]; - // child: type, - const child_val = struct_val[1]; - // sentinel: ?*const anyopaque, - const sentinel_val = struct_val[2]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const len_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "len"), + ).?); + const child_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); + const sentinel_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "sentinel"), + ).?); - const len = len_val.toUnsignedInt(target); - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = try child_val.toType(&buffer).copy(sema.arena); - const sentinel = if (sentinel_val.castTag(.opt_payload)) |p| blk: { + const len = len_val.toUnsignedInt(mod); + const child_ty = child_val.toType(); + const sentinel = if (sentinel_val.optionalValue(mod)) |p| blk: { const ptr_ty = try Type.ptr(sema.arena, mod, .{ .@"addrspace" = .generic, .pointee_type = child_ty, }); - break :blk (try sema.pointerDeref(block, src, p.data, ptr_ty)).?; + break :blk (try sema.pointerDeref(block, src, p, ptr_ty)).?; } else null; - const ty = try Type.array(sema.arena, len, sentinel, child_ty, sema.mod); + const ty = try Type.array(sema.arena, len, sentinel, child_ty, mod); return sema.addType(ty); }, .Optional => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // child: type, - const child_val = struct_val[0]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const child_val = try 
union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "child"), + ).?); - var buffer: Value.ToTypeBuffer = undefined; - const child_ty = try child_val.toType(&buffer).copy(sema.arena); + const child_ty = child_val.toType(); - const ty = try Type.optional(sema.arena, child_ty); + const ty = try Type.optional(sema.arena, child_ty, mod); return sema.addType(ty); }, .ErrorUnion => { - const struct_val = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // error_set: type, - const error_set_val = struct_val[0]; - // payload: type, - const payload_val = struct_val[1]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const error_set_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "error_set"), + ).?); + const payload_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "payload"), + ).?); - var buffer: Value.ToTypeBuffer = undefined; - const error_set_ty = try error_set_val.toType(&buffer).copy(sema.arena); - const payload_ty = try payload_val.toType(&buffer).copy(sema.arena); + const error_set_ty = error_set_val.toType(); + const payload_ty = payload_val.toType(); - if (error_set_ty.zigTypeTag() != .ErrorSet) { + if (error_set_ty.zigTypeTag(mod) != .ErrorSet) { return sema.fail(block, src, "Type.ErrorUnion.error_set must be an error set type", .{}); } - const ty = try Type.Tag.error_union.create(sema.arena, .{ - .error_set = error_set_ty, - .payload = payload_ty, - }); + const ty = try mod.errorUnionType(error_set_ty, payload_ty); return sema.addType(ty); }, .ErrorSet => { - const payload_val = union_val.val.optionalValue() orelse - return sema.addType(Type.initTag(.anyerror)); - const slice_val = payload_val.castTag(.slice).?.data; + const payload_val = union_val.val.toValue().optionalValue(mod) orelse + return sema.addType(Type.anyerror); - const len = try sema.usizeCast(block, src, slice_val.len.toUnsignedInt(mod.getTarget())); - var names: Module.ErrorSet.NameMap = .{}; + const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod)); + var names: Module.Fn.InferredErrorSet.NameMap = .{}; try names.ensureUnusedCapacity(sema.arena, len); - var i: usize = 0; - while (i < len) : (i += 1) { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = slice_val.ptr.elemValueBuffer(mod, i, &buf); - const struct_val = elem_val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // error_set: type, - const name_val = struct_val[0]; - const name_str = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, sema.mod); + for (0..len) |i| { + const elem_val = try payload_val.elemValue(mod, i); + const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod); + const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex( + try ip.getOrPutString(gpa, "name"), + ).?); - const kv = try mod.getErrorValue(name_str); - const gop = names.getOrPutAssumeCapacity(kv.key); + const name = try name_val.toIpString(Type.slice_const_u8, mod); + _ = try mod.getErrorValue(name); + const gop = names.getOrPutAssumeCapacity(name); if (gop.found_existing) { - return sema.fail(block, src, "duplicate error '{s}'", .{name_str}); + return sema.fail(block, src, "duplicate error '{}'", .{ + name.fmt(ip), + }); } } - // names must be sorted - Module.ErrorSet.sortNames(&names); - const ty = try Type.Tag.error_set_merged.create(sema.arena, names); + const ty = 
try mod.errorSetFromUnsortedNames(names.keys()); return sema.addType(ty); }, .Struct => { - // TODO use reflection instead of magic numbers here - const struct_val = union_val.val.castTag(.aggregate).?.data; - // layout: containerlayout, - const layout_val = struct_val[0]; - // backing_int: ?type, - const backing_int_val = struct_val[1]; - // fields: []const enumfield, - const fields_val = struct_val[2]; - // decls: []const declaration, - const decls_val = struct_val[3]; - // is_tuple: bool, - const is_tuple_val = struct_val[4]; - assert(struct_val.len == 5); + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "layout"), + ).?); + const backing_integer_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "backing_integer"), + ).?); + const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "fields"), + ).?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "decls"), + ).?); + const is_tuple_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_tuple"), + ).?); - const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout); + const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val); // Decls if (decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified structs must have no decls", .{}); } - if (layout != .Packed and !backing_int_val.isNull()) { + if (layout != .Packed and !backing_integer_val.isNull(mod)) { return sema.fail(block, src, "non-packed struct does not support backing integer type", .{}); } - return try sema.reifyStruct(block, inst, src, layout, backing_int_val, fields_val, name_strategy, is_tuple_val.toBool()); + return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_val, name_strategy, is_tuple_val.toBool()); }, .Enum => { - const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data; - // TODO use reflection instead of magic numbers here - // tag_type: type, - const tag_type_val = struct_val[0]; - // fields: []const EnumField, - const fields_val = struct_val[1]; - // decls: []const Declaration, - const decls_val = struct_val[2]; - // is_exhaustive: bool, - const is_exhaustive_val = struct_val[3]; + const fields = ip.typeOf(union_val.val).toType().structFields(mod); + const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "tag_type"), + ).?); + const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "fields"), + ).?); + const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "decls"), + ).?); + const is_exhaustive_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex( + try ip.getOrPutString(gpa, "is_exhaustive"), + ).?); // Decls if (decls_val.sliceLen(mod) > 0) { return sema.fail(block, src, "reified enums must have no decls", .{}); } - const gpa = sema.gpa; - var new_decl_arena = std.heap.ArenaAllocator.init(gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); + const int_tag_ty = tag_type_val.toType(); + if (int_tag_ty.zigTypeTag(mod) != .Int) { + return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{}); + } + + // Because these 
+            // placeholders are used before being set after the enum type gains
+            // an InternPool index.
-            // Define our empty enum decl
-            const enum_obj = try new_decl_arena_allocator.create(Module.EnumFull);
-            const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumFull);
-            enum_ty_payload.* = .{
-                .base = .{
-                    .tag = if (!is_exhaustive_val.toBool())
-                        .enum_nonexhaustive
-                    else
-                        .enum_full,
-                },
-                .data = enum_obj,
-            };
-            const enum_ty = Type.initPayload(&enum_ty_payload.base);
-            const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
             const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
-                .ty = Type.type,
-                .val = enum_val,
+                .ty = Type.noreturn,
+                .val = Value.@"unreachable",
             }, name_strategy, "enum", inst);
             const new_decl = mod.declPtr(new_decl_index);
             new_decl.owns_tv = true;
-            errdefer mod.abortAnonDecl(new_decl_index);
-
-            enum_obj.* = .{
-                .owner_decl = new_decl_index,
-                .tag_ty = Type.null,
-                .tag_ty_inferred = false,
-                .fields = .{},
-                .values = .{},
-                .namespace = .{
-                    .parent = block.namespace,
-                    .ty = enum_ty,
-                    .file_scope = block.getFileScope(),
-                },
-            };
-
-            // Enum tag type
-            var buffer: Value.ToTypeBuffer = undefined;
-            const int_tag_ty = try tag_type_val.toType(&buffer).copy(new_decl_arena_allocator);
-
-            if (int_tag_ty.zigTypeTag() != .Int) {
-                return sema.fail(block, src, "Type.Enum.tag_type must be an integer type", .{});
+            errdefer {
+                new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
+                mod.abortAnonDecl(new_decl_index);
             }
-            enum_obj.tag_ty = int_tag_ty;
-            // Fields
-            const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
-            try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
-            try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{
-                .ty = enum_obj.tag_ty,
-                .mod = mod,
+            // Define our empty enum decl
+            const fields_len = @intCast(u32, try sema.usizeCast(block, src, fields_val.sliceLen(mod)));
+            const incomplete_enum = try ip.getIncompleteEnum(gpa, .{
+                .decl = new_decl_index,
+                .namespace = .none,
+                .fields_len = fields_len,
+                .has_values = true,
+                .tag_mode = if (!is_exhaustive_val.toBool())
+                    .nonexhaustive
+                else
+                    .explicit,
+                .tag_ty = int_tag_ty.toIntern(),
             });
+            // TODO: figure out InternPool removals for incremental compilation
+            //errdefer ip.remove(incomplete_enum.index);
-            var field_i: usize = 0;
-            while (field_i < fields_len) : (field_i += 1) {
-                const elem_val = try fields_val.elemValue(sema.mod, sema.arena, field_i);
-                const field_struct_val: []const Value = elem_val.castTag(.aggregate).?.data;
-                // TODO use reflection instead of magic numbers here
-                // name: []const u8
-                const name_val = field_struct_val[0];
-                // value: comptime_int
-                const value_val = field_struct_val[1];
+            new_decl.ty = Type.type;
+            new_decl.val = incomplete_enum.index.toValue();
-                const field_name = try name_val.toAllocatedBytes(
-                    Type.initTag(.const_slice_u8),
-                    new_decl_arena_allocator,
-                    sema.mod,
-                );
+            for (0..fields_len) |field_i| {
+                const elem_val = try fields_val.elemValue(mod, field_i);
+                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
+                const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "name"),
+                ).?);
+                const value_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "value"),
+                ).?);
-                if (!try sema.intFitsInType(value_val, enum_obj.tag_ty, null)) {
+                const field_name = try name_val.toIpString(Type.slice_const_u8, mod);
+
+                if (!try sema.intFitsInType(value_val, int_tag_ty, null)) {
                     // TODO: better source location
-                    return sema.fail(block, src, "field '{s}' with enumeration value '{}' is too large for backing int type '{}'", .{
-                        field_name,
+                    return sema.fail(block, src, "field '{}' with enumeration value '{}' is too large for backing int type '{}'", .{
+                        field_name.fmt(ip),
                         value_val.fmtValue(Type.comptime_int, mod),
-                        enum_obj.tag_ty.fmt(mod),
+                        int_tag_ty.fmt(mod),
                     });
                 }
-                const gop_field = enum_obj.fields.getOrPutAssumeCapacity(field_name);
-                if (gop_field.found_existing) {
+                if (try incomplete_enum.addFieldName(ip, gpa, field_name)) |other_index| {
                     const msg = msg: {
-                        const msg = try sema.errMsg(block, src, "duplicate enum field '{s}'", .{field_name});
+                        const msg = try sema.errMsg(block, src, "duplicate enum field '{}'", .{
+                            field_name.fmt(ip),
+                        });
                         errdefer msg.destroy(gpa);
+                        _ = other_index; // TODO: this note is incorrect
                         try sema.errNote(block, src, msg, "other field here", .{});
                         break :msg msg;
                     };
                     return sema.failWithOwnedErrorMsg(msg);
                 }
-                const copied_tag_val = try value_val.copy(new_decl_arena_allocator);
-                const gop_val = enum_obj.values.getOrPutAssumeCapacityContext(copied_tag_val, .{
-                    .ty = enum_obj.tag_ty,
-                    .mod = mod,
-                });
-                if (gop_val.found_existing) {
+                if (try incomplete_enum.addFieldValue(ip, gpa, (try mod.getCoerced(value_val, int_tag_ty)).toIntern())) |other| {
                     const msg = msg: {
                         const msg = try sema.errMsg(block, src, "enum tag value {} already taken", .{value_val.fmtValue(Type.comptime_int, mod)});
                         errdefer msg.destroy(gpa);
+                        _ = other; // TODO: this note is incorrect
                         try sema.errNote(block, src, msg, "other enum tag value here", .{});
                         break :msg msg;
                     };
@@ -19022,182 +19484,209 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 }
             }
-            try new_decl.finalizeNewArena(&new_decl_arena);
-            return sema.analyzeDeclVal(block, src, new_decl_index);
+            const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
+            try mod.finalizeAnonDecl(new_decl_index);
+            return decl_val;
         },
         .Opaque => {
-            const struct_val = union_val.val.castTag(.aggregate).?.data;
-            // decls: []const Declaration,
-            const decls_val = struct_val[0];
+            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
+            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "decls"),
+            ).?);
             // Decls
             if (decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified opaque must have no decls", .{});
             }
-            var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
-            errdefer new_decl_arena.deinit();
-            const new_decl_arena_allocator = new_decl_arena.allocator();
+            // Because these three things each reference each other,
+            // `undefined` placeholders are used in two places before being set
+            // after the opaque type gains an InternPool index.
-            const opaque_obj = try new_decl_arena_allocator.create(Module.Opaque);
-            const opaque_ty_payload = try new_decl_arena_allocator.create(Type.Payload.Opaque);
-            opaque_ty_payload.* = .{
-                .base = .{ .tag = .@"opaque" },
-                .data = opaque_obj,
-            };
-            const opaque_ty = Type.initPayload(&opaque_ty_payload.base);
-            const opaque_val = try Value.Tag.ty.create(new_decl_arena_allocator, opaque_ty);
             const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
-                .ty = Type.type,
-                .val = opaque_val,
+                .ty = Type.noreturn,
+                .val = Value.@"unreachable",
             }, name_strategy, "opaque", inst);
             const new_decl = mod.declPtr(new_decl_index);
             new_decl.owns_tv = true;
-            errdefer mod.abortAnonDecl(new_decl_index);
+            errdefer {
+                new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
+                mod.abortAnonDecl(new_decl_index);
+            }
-            opaque_obj.* = .{
-                .owner_decl = new_decl_index,
-                .namespace = .{
-                    .parent = block.namespace,
-                    .ty = opaque_ty,
-                    .file_scope = block.getFileScope(),
-                },
-            };
+            const new_namespace_index = try mod.createNamespace(.{
+                .parent = block.namespace.toOptional(),
+                .ty = undefined,
+                .file_scope = block.getFileScope(mod),
+            });
+            const new_namespace = mod.namespacePtr(new_namespace_index);
+            errdefer mod.destroyNamespace(new_namespace_index);
-            try new_decl.finalizeNewArena(&new_decl_arena);
-            return sema.analyzeDeclVal(block, src, new_decl_index);
+            const opaque_ty = try mod.intern(.{ .opaque_type = .{
+                .decl = new_decl_index,
+                .namespace = new_namespace_index,
+            } });
+            // TODO: figure out InternPool removals for incremental compilation
+            //errdefer ip.remove(opaque_ty);
+
+            new_decl.ty = Type.type;
+            new_decl.val = opaque_ty.toValue();
+            new_namespace.ty = opaque_ty.toType();
+
+            const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
+            try mod.finalizeAnonDecl(new_decl_index);
+            return decl_val;
         },
         .Union => {
-            // TODO use reflection instead of magic numbers here
-            const struct_val = union_val.val.castTag(.aggregate).?.data;
-            // layout: containerlayout,
-            const layout_val = struct_val[0];
-            // tag_type: ?type,
-            const tag_type_val = struct_val[1];
-            // fields: []const enumfield,
-            const fields_val = struct_val[2];
-            // decls: []const declaration,
-            const decls_val = struct_val[3];
+            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
+            const layout_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "layout"),
+            ).?);
+            const tag_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "tag_type"),
+            ).?);
+            const fields_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "fields"),
+            ).?);
+            const decls_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "decls"),
+            ).?);
             // Decls
             if (decls_val.sliceLen(mod) > 0) {
                 return sema.fail(block, src, "reified unions must have no decls", .{});
             }
-            const layout = layout_val.toEnum(std.builtin.Type.ContainerLayout);
+            const layout = mod.toEnum(std.builtin.Type.ContainerLayout, layout_val);
-            var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
-            errdefer new_decl_arena.deinit();
-            const new_decl_arena_allocator = new_decl_arena.allocator();
+            // Because these three things each reference each other, `undefined`
+            // placeholders are used before being set after the union type gains an
+            // InternPool index.
-            const union_obj = try new_decl_arena_allocator.create(Module.Union);
-            const type_tag = if (!tag_type_val.isNull())
-                Type.Tag.union_tagged
-            else if (layout != .Auto)
-                Type.Tag.@"union"
-            else switch (block.sema.mod.optimizeMode()) {
-                .Debug, .ReleaseSafe => Type.Tag.union_safety_tagged,
-                .ReleaseFast, .ReleaseSmall => Type.Tag.@"union",
-            };
-            const union_payload = try new_decl_arena_allocator.create(Type.Payload.Union);
-            union_payload.* = .{
-                .base = .{ .tag = type_tag },
-                .data = union_obj,
-            };
-            const union_ty = Type.initPayload(&union_payload.base);
-            const new_union_val = try Value.Tag.ty.create(new_decl_arena_allocator, union_ty);
             const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
-                .ty = Type.type,
-                .val = new_union_val,
+                .ty = Type.noreturn,
+                .val = Value.@"unreachable",
             }, name_strategy, "union", inst);
             const new_decl = mod.declPtr(new_decl_index);
             new_decl.owns_tv = true;
-            errdefer mod.abortAnonDecl(new_decl_index);
-            union_obj.* = .{
+            errdefer {
+                new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
+                mod.abortAnonDecl(new_decl_index);
+            }
+
+            const new_namespace_index = try mod.createNamespace(.{
+                .parent = block.namespace.toOptional(),
+                .ty = undefined,
+                .file_scope = block.getFileScope(mod),
+            });
+            const new_namespace = mod.namespacePtr(new_namespace_index);
+            errdefer mod.destroyNamespace(new_namespace_index);
+
+            const union_index = try mod.createUnion(.{
                 .owner_decl = new_decl_index,
-                .tag_ty = Type.initTag(.null),
+                .tag_ty = Type.null,
                 .fields = .{},
                 .zir_index = inst,
                 .layout = layout,
                 .status = .have_field_types,
-                .namespace = .{
-                    .parent = block.namespace,
-                    .ty = union_ty,
-                    .file_scope = block.getFileScope(),
+                .namespace = new_namespace_index,
+            });
+            const union_obj = mod.unionPtr(union_index);
+            errdefer mod.destroyUnion(union_index);
+
+            const union_ty = try ip.get(gpa, .{ .union_type = .{
+                .index = union_index,
+                .runtime_tag = if (!tag_type_val.isNull(mod))
+                    .tagged
+                else if (layout != .Auto)
+                    .none
+                else switch (mod.optimizeMode()) {
+                    .Debug, .ReleaseSafe => .safety,
+                    .ReleaseFast, .ReleaseSmall => .none,
                 },
-            };
+            } });
+            // TODO: figure out InternPool removals for incremental compilation
+            //errdefer ip.remove(union_ty);
+
+            new_decl.ty = Type.type;
+            new_decl.val = union_ty.toValue();
+            new_namespace.ty = union_ty.toType();
             // Tag type
-            var tag_ty_field_names: ?Module.EnumFull.NameMap = null;
-            var enum_field_names: ?*Module.EnumNumbered.NameMap = null;
             const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
-            if (tag_type_val.optionalValue()) |payload_val| {
-                var buffer: Value.ToTypeBuffer = undefined;
-                union_obj.tag_ty = try payload_val.toType(&buffer).copy(new_decl_arena_allocator);
+            var explicit_tags_seen: []bool = &.{};
+            var enum_field_names: []InternPool.NullTerminatedString = &.{};
+            if (tag_type_val.optionalValue(mod)) |payload_val| {
+                union_obj.tag_ty = payload_val.toType();
-                if (union_obj.tag_ty.zigTypeTag() != .Enum) {
-                    return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{});
-                }
-                tag_ty_field_names = try union_obj.tag_ty.enumFields().clone(sema.arena);
+                const enum_type = switch (ip.indexToKey(union_obj.tag_ty.toIntern())) {
+                    .enum_type => |x| x,
+                    else => return sema.fail(block, src, "Type.Union.tag_type must be an enum type", .{}),
+                };
+
+                explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len);
+                @memset(explicit_tags_seen, false);
             } else {
-                union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, fields_len, null);
-                enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields;
+                enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
             }
             // Fields
-            try union_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
+            try union_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);
-            var i: usize = 0;
-            while (i < fields_len) : (i += 1) {
-                const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
-                const field_struct_val = elem_val.castTag(.aggregate).?.data;
-                // TODO use reflection instead of magic numbers here
-                // name: []const u8
-                const name_val = field_struct_val[0];
-                // type: type,
-                const type_val = field_struct_val[1];
-                // alignment: comptime_int,
-                const alignment_val = field_struct_val[2];
+            for (0..fields_len) |i| {
+                const elem_val = try fields_val.elemValue(mod, i);
+                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
+                const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "name"),
+                ).?);
+                const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "type"),
+                ).?);
+                const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "alignment"),
+                ).?);
-                const field_name = try name_val.toAllocatedBytes(
-                    Type.initTag(.const_slice_u8),
-                    new_decl_arena_allocator,
-                    sema.mod,
-                );
+                const field_name = try name_val.toIpString(Type.slice_const_u8, mod);
-                if (enum_field_names) |set| {
-                    set.putAssumeCapacity(field_name, {});
+                if (enum_field_names.len != 0) {
+                    enum_field_names[i] = field_name;
                 }
-                if (tag_ty_field_names) |*names| {
-                    const enum_has_field = names.orderedRemove(field_name);
-                    if (!enum_has_field) {
+                if (explicit_tags_seen.len > 0) {
+                    const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type;
+                    const enum_index = tag_info.nameIndex(ip, field_name) orelse {
                         const msg = msg: {
-                            const msg = try sema.errMsg(block, src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) });
-                            errdefer msg.destroy(sema.gpa);
+                            const msg = try sema.errMsg(block, src, "no field named '{}' in enum '{}'", .{
+                                field_name.fmt(ip),
+                                union_obj.tag_ty.fmt(mod),
+                            });
+                            errdefer msg.destroy(gpa);
                             try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
                            break :msg msg;
                        };
                         return sema.failWithOwnedErrorMsg(msg);
-                    }
+                    };
+                    // No check for duplicate because the check already happened in order
+                    // to create the enum type in the first place.
+                    assert(!explicit_tags_seen[enum_index]);
+                    explicit_tags_seen[enum_index] = true;
                 }
                 const gop = union_obj.fields.getOrPutAssumeCapacity(field_name);
                 if (gop.found_existing) {
                     // TODO: better source location
-                    return sema.fail(block, src, "duplicate union field {s}", .{field_name});
+                    return sema.fail(block, src, "duplicate union field {}", .{field_name.fmt(ip)});
                 }
-                var buffer: Value.ToTypeBuffer = undefined;
-                const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator);
+                const field_ty = type_val.toType();
                 gop.value_ptr.* = .{
                     .ty = field_ty,
-                    .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?),
+                    .abi_align = @intCast(u32, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?),
                 };
-                if (field_ty.zigTypeTag() == .Opaque) {
+                if (field_ty.zigTypeTag(mod) == .Opaque) {
                     const msg = msg: {
                         const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{});
-                        errdefer msg.destroy(sema.gpa);
+                        errdefer msg.destroy(gpa);
                         try sema.addDeclaredHereNote(msg, field_ty);
                         break :msg msg;
@@ -19206,23 +19695,23 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 }
                 if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
                     const msg = msg: {
-                        const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
-                        errdefer msg.destroy(sema.gpa);
+                        const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
+                        errdefer msg.destroy(gpa);
-                        const src_decl = sema.mod.declPtr(block.src_decl);
-                        try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), field_ty, .union_field);
+                        const src_decl = mod.declPtr(block.src_decl);
+                        try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .union_field);
                         try sema.addDeclaredHereNote(msg, field_ty);
                         break :msg msg;
                     };
                     return sema.failWithOwnedErrorMsg(msg);
-                } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) {
+                } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
                     const msg = msg: {
-                        const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
-                        errdefer msg.destroy(sema.gpa);
+                        const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
+                        errdefer msg.destroy(gpa);
-                        const src_decl = sema.mod.declPtr(block.src_decl);
-                        try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl), field_ty);
+                        const src_decl = mod.declPtr(block.src_decl);
+                        try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty);
                         try sema.addDeclaredHereNote(msg, field_ty);
                         break :msg msg;
@@ -19231,47 +19720,61 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 }
             }
-            if (tag_ty_field_names) |names| {
-                if (names.count() > 0) {
+            if (explicit_tags_seen.len > 0) {
+                const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type;
+                if (tag_info.names.len > fields_len) {
                     const msg = msg: {
                         const msg = try sema.errMsg(block, src, "enum field(s) missing in union", .{});
-                        errdefer msg.destroy(sema.gpa);
+                        errdefer msg.destroy(gpa);
                         const enum_ty = union_obj.tag_ty;
-                        for (names.keys()) |field_name| {
-                            const field_index = enum_ty.enumFieldIndex(field_name).?;
-                            try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{field_name});
+                        for (tag_info.names, 0..) |field_name, field_index| {
+                            if (explicit_tags_seen[field_index]) continue;
+                            try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{}' missing, declared here", .{
+                                field_name.fmt(ip),
+                            });
                         }
                         try sema.addDeclaredHereNote(msg, union_obj.tag_ty);
                         break :msg msg;
                     };
                     return sema.failWithOwnedErrorMsg(msg);
                 }
+            } else {
+                union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, enum_field_names, null);
             }
-            try new_decl.finalizeNewArena(&new_decl_arena);
-            return sema.analyzeDeclVal(block, src, new_decl_index);
+            const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
+            try mod.finalizeAnonDecl(new_decl_index);
+            return decl_val;
         },
         .Fn => {
-            const struct_val: []const Value = union_val.val.castTag(.aggregate).?.data;
-            // TODO use reflection instead of magic numbers here
-            // calling_convention: CallingConvention,
-            const cc = struct_val[0].toEnum(std.builtin.CallingConvention);
-            // alignment: comptime_int,
-            const alignment_val = struct_val[1];
-            // is_generic: bool,
-            const is_generic = struct_val[2].toBool();
-            // is_var_args: bool,
-            const is_var_args = struct_val[3].toBool();
-            // return_type: ?type,
-            const return_type_val = struct_val[4];
-            // args: []const Param,
-            const args_val = struct_val[5];
+            const fields = ip.typeOf(union_val.val).toType().structFields(mod);
+            const calling_convention_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "calling_convention"),
+            ).?);
+            const alignment_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "alignment"),
+            ).?);
+            const is_generic_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "is_generic"),
+            ).?);
+            const is_var_args_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "is_var_args"),
+            ).?);
+            const return_type_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "return_type"),
+            ).?);
+            const params_val = try union_val.val.toValue().fieldValue(mod, fields.getIndex(
+                try ip.getOrPutString(gpa, "params"),
+            ).?);
+            const is_generic = is_generic_val.toBool();
             if (is_generic) {
                 return sema.fail(block, src, "Type.Fn.is_generic must be false for @Type", .{});
             }
+            const is_var_args = is_var_args_val.toBool();
+            const cc = mod.toEnum(std.builtin.CallingConvention, calling_convention_val);
             if (is_var_args and cc != .C) {
                 return sema.fail(block, src, "varargs functions must have C calling convention", .{});
             }
@@ -19280,63 +19783,55 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
                     return sema.fail(block, src, "alignment must fit in 'u32'", .{});
                 }
-                const alignment = @intCast(u29, alignment_val.toUnsignedInt(target));
+                const alignment = @intCast(u29, alignment_val.toUnsignedInt(mod));
                 if (alignment == target_util.defaultFunctionAlignment(target)) {
-                    break :alignment 0;
+                    break :alignment .none;
                 } else {
-                    break :alignment alignment;
+                    break :alignment InternPool.Alignment.fromByteUnits(alignment);
                }
             };
-            const return_type = return_type_val.optionalValue() orelse
+            const return_type = return_type_val.optionalValue(mod) orelse
                 return sema.fail(block, src, "Type.Fn.return_type must be non-null for @Type", .{});
-            var buf: Value.ToTypeBuffer = undefined;
-
-            const args_slice_val = args_val.castTag(.slice).?.data;
-            const args_len = try sema.usizeCast(block, src, args_slice_val.len.toUnsignedInt(mod.getTarget()));
-
-            const param_types = try sema.arena.alloc(Type, args_len);
-            const comptime_params = try sema.arena.alloc(bool, args_len);
+            const args_len = try sema.usizeCast(block, src, params_val.sliceLen(mod));
+            const param_types = try sema.arena.alloc(InternPool.Index, args_len);
             var noalias_bits: u32 = 0;
-            var i: usize = 0;
-            while (i < args_len) : (i += 1) {
-                var arg_buf: Value.ElemValueBuffer = undefined;
-                const arg = args_slice_val.ptr.elemValueBuffer(mod, i, &arg_buf);
-                const arg_val = arg.castTag(.aggregate).?.data;
-                // TODO use reflection instead of magic numbers here
-                // is_generic: bool,
-                const arg_is_generic = arg_val[0].toBool();
-                // is_noalias: bool,
-                const arg_is_noalias = arg_val[1].toBool();
-                // type: ?type,
-                const param_type_opt_val = arg_val[2];
+            for (param_types, 0..) |*param_type, i| {
+                const elem_val = try params_val.elemValue(mod, i);
+                const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
+                const param_is_generic_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "is_generic"),
+                ).?);
+                const param_is_noalias_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "is_noalias"),
+                ).?);
+                const opt_param_type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+                    try ip.getOrPutString(gpa, "type"),
+                ).?);
-                if (arg_is_generic) {
+                if (param_is_generic_val.toBool()) {
                     return sema.fail(block, src, "Type.Fn.Param.is_generic must be false for @Type", .{});
                 }
-                const param_type_val = param_type_opt_val.optionalValue() orelse
+                const param_type_val = opt_param_type_val.optionalValue(mod) orelse
                     return sema.fail(block, src, "Type.Fn.Param.arg_type must be non-null for @Type", .{});
-                const param_type = try param_type_val.toType(&buf).copy(sema.arena);
+                param_type.* = param_type_val.toIntern();
-                if (arg_is_noalias) {
-                    if (!param_type.isPtrAtRuntime()) {
+                if (param_is_noalias_val.toBool()) {
+                    if (!param_type.toType().isPtrAtRuntime(mod)) {
                         return sema.fail(block, src, "non-pointer parameter declared noalias", .{});
                     }
                     noalias_bits |= @as(u32, 1) <<
                         (std.math.cast(u5, i) orelse
                        return sema.fail(block, src, "this compiler implementation only supports 'noalias' on the first 32 parameters", .{}));
                 }
-
-                param_types[i] = param_type;
-                comptime_params[i] = false;
             }
-            var fn_info = Type.Payload.Function.Data{
+            const ty = try mod.funcType(.{
                 .param_types = param_types,
-                .comptime_params = comptime_params.ptr,
+                .comptime_bits = 0,
                 .noalias_bits = noalias_bits,
-                .return_type = try return_type.toType(&buf).copy(sema.arena),
+                .return_type = return_type.toIntern(),
                 .alignment = alignment,
                 .cc = cc,
                 .is_var_args = is_var_args,
@@ -19346,9 +19841,7 @@ fn zirReify(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData, in
                 .cc_is_generic = false,
                 .section_is_generic = false,
                 .addrspace_is_generic = false,
-            };
-
-            const ty = try Type.Tag.function.create(sema.arena, fn_info);
+            });
             return sema.addType(ty);
         },
         .Frame => return sema.failWithUseOfAsync(block, src),
@@ -19366,22 +19859,34 @@ fn reifyStruct(
     name_strategy: Zir.Inst.NameStrategy,
     is_tuple: bool,
 ) CompileError!Air.Inst.Ref {
-    var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
-    errdefer new_decl_arena.deinit();
-    const new_decl_arena_allocator = new_decl_arena.allocator();
-
-    const struct_obj = try new_decl_arena_allocator.create(Module.Struct);
-    const struct_ty = try Type.Tag.@"struct".create(new_decl_arena_allocator, struct_obj);
-    const new_struct_val = try Value.Tag.ty.create(new_decl_arena_allocator, struct_ty);
     const mod = sema.mod;
+    const gpa = sema.gpa;
+    const ip = &mod.intern_pool;
+
+    // Because these three things each reference each other, `undefined`
+    // placeholders are used before being set after the struct type gains an
+    // InternPool index.
+
     const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
-        .ty = Type.type,
-        .val = new_struct_val,
+        .ty = Type.noreturn,
+        .val = Value.@"unreachable",
     }, name_strategy, "struct", inst);
     const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
-    errdefer mod.abortAnonDecl(new_decl_index);
-    struct_obj.* = .{
+    errdefer {
+        new_decl.has_tv = false; // namespace and val were destroyed by later errdefers
+        mod.abortAnonDecl(new_decl_index);
+    }
+
+    const new_namespace_index = try mod.createNamespace(.{
+        .parent = block.namespace.toOptional(),
+        .ty = undefined,
+        .file_scope = block.getFileScope(mod),
+    });
+    const new_namespace = mod.namespacePtr(new_namespace_index);
+    errdefer mod.destroyNamespace(new_namespace_index);
+
+    const struct_index = try mod.createStruct(.{
         .owner_decl = new_decl_index,
         .fields = .{},
         .zir_index = inst,
@@ -19389,38 +19894,49 @@ fn reifyStruct(
         .status = .have_field_types,
         .known_non_opv = false,
         .is_tuple = is_tuple,
-        .namespace = .{
-            .parent = block.namespace,
-            .ty = struct_ty,
-            .file_scope = block.getFileScope(),
-        },
-    };
+        .namespace = new_namespace_index,
+    });
+    const struct_obj = mod.structPtr(struct_index);
+    errdefer mod.destroyStruct(struct_index);
-    const target = mod.getTarget();
+    const struct_ty = try ip.get(gpa, .{ .struct_type = .{
+        .index = struct_index.toOptional(),
+        .namespace = new_namespace_index.toOptional(),
+    } });
+    // TODO: figure out InternPool removals for incremental compilation
+    //errdefer ip.remove(struct_ty);
+
+    new_decl.ty = Type.type;
+    new_decl.val = struct_ty.toValue();
+    new_namespace.ty = struct_ty.toType();
     // Fields
     const fields_len = try sema.usizeCast(block, src, fields_val.sliceLen(mod));
-    try struct_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
+    try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len);
     var i: usize = 0;
     while (i < fields_len) : (i += 1) {
-        const elem_val = try fields_val.elemValue(sema.mod, sema.arena, i);
-        const field_struct_val = elem_val.castTag(.aggregate).?.data;
-        // TODO use reflection instead of magic numbers here
-        // name: []const u8
-        const name_val = field_struct_val[0];
-        // type: type,
-        const type_val = field_struct_val[1];
-        // default_value: ?*const anyopaque,
-        const default_value_val = field_struct_val[2];
-        // is_comptime: bool,
-        const is_comptime_val = field_struct_val[3];
-        // alignment: comptime_int,
-        const alignment_val = field_struct_val[4];
+        const elem_val = try fields_val.elemValue(mod, i);
+        const elem_fields = ip.typeOf(elem_val.toIntern()).toType().structFields(mod);
+        const name_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+            try ip.getOrPutString(gpa, "name"),
+        ).?);
+        const type_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+            try ip.getOrPutString(gpa, "type"),
+        ).?);
+        const default_value_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+            try ip.getOrPutString(gpa, "default_value"),
+        ).?);
+        const is_comptime_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+            try ip.getOrPutString(gpa, "is_comptime"),
+        ).?);
+        const alignment_val = try elem_val.fieldValue(mod, elem_fields.getIndex(
+            try ip.getOrPutString(gpa, "alignment"),
+        ).?);
         if (!try sema.intFitsInType(alignment_val, Type.u32, null)) {
             return sema.fail(block, src, "alignment must fit in 'u32'", .{});
         }
-        const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(target, sema)).?);
+        const abi_align = @intCast(u29, (try alignment_val.getUnsignedIntAdvanced(mod, sema)).?);
         if (layout == .Packed) {
             if (abi_align != 0) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
@@ -19430,21 +19946,15 @@ fn reifyStruct(
             return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{});
         }
-        const field_name = try name_val.toAllocatedBytes(
-            Type.initTag(.const_slice_u8),
-            new_decl_arena_allocator,
-            mod,
-        );
+        const field_name = try name_val.toIpString(Type.slice_const_u8, mod);
         if (is_tuple) {
-            const field_index = std.fmt.parseUnsigned(u32, field_name, 10) catch {
-                return sema.fail(
-                    block,
-                    src,
-                    "tuple cannot have non-numeric field '{s}'",
-                    .{field_name},
-                );
-            };
+            const field_index = field_name.toUnsigned(ip) orelse return sema.fail(
+                block,
+                src,
+                "tuple cannot have non-numeric field '{}'",
+                .{field_name.fmt(ip)},
+            );
             if (field_index >= fields_len) {
                 return sema.fail(
@@ -19458,22 +19968,19 @@ fn reifyStruct(
         const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name);
         if (gop.found_existing) {
             // TODO: better source location
-            return sema.fail(block, src, "duplicate struct field {s}", .{field_name});
+            return sema.fail(block, src, "duplicate struct field {}", .{field_name.fmt(ip)});
         }
-        const default_val = if (default_value_val.optionalValue()) |opt_val| blk: {
-            const payload_val = if (opt_val.pointerDecl()) |opt_decl|
-                mod.declPtr(opt_decl).val
-            else
-                opt_val;
-            break :blk try payload_val.copy(new_decl_arena_allocator);
-        } else Value.initTag(.unreachable_value);
-        if (is_comptime_val.toBool() and default_val.tag() == .unreachable_value) {
+        const field_ty = type_val.toType();
+        const default_val = if (default_value_val.optionalValue(mod)) |opt_val|
+            (try sema.pointerDeref(block, src, opt_val, try mod.singleConstPtrType(field_ty)) orelse
+                return sema.failWithNeededComptime(block, src, "struct field default value must be comptime-known")).toIntern()
+        else
+            .none;
+        if (is_comptime_val.toBool() and default_val == .none) {
             return sema.fail(block, src, "comptime field without default initialization value", .{});
         }
-        var buffer: Value.ToTypeBuffer = undefined;
-        const field_ty = try type_val.toType(&buffer).copy(new_decl_arena_allocator);
         gop.value_ptr.* = .{
             .ty = field_ty,
             .abi_align = abi_align,
@@ -19482,20 +19989,20 @@ fn reifyStruct(
             .offset = undefined,
         };
-        if (field_ty.zigTypeTag() == .Opaque) {
+        if (field_ty.zigTypeTag(mod) == .Opaque) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);
                 try sema.addDeclaredHereNote(msg, field_ty);
                 break :msg msg;
             };
             return sema.failWithOwnedErrorMsg(msg);
         }
-        if (field_ty.zigTypeTag() == .NoReturn) {
+        if (field_ty.zigTypeTag(mod) == .NoReturn) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "struct fields cannot be 'noreturn'", .{});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);
                 try sema.addDeclaredHereNote(msg, field_ty);
                 break :msg msg;
@@ -19505,22 +20012,22 @@ fn reifyStruct(
         if (struct_obj.layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);
                 const src_decl = sema.mod.declPtr(block.src_decl);
-                try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), field_ty, .struct_field);
+                try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), field_ty, .struct_field);
                 try sema.addDeclaredHereNote(msg, field_ty);
                 break :msg msg;
             };
             return sema.failWithOwnedErrorMsg(msg);
-        } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty))) {
+        } else if (struct_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
-                errdefer msg.destroy(sema.gpa);
+                errdefer msg.destroy(gpa);
                 const src_decl = sema.mod.declPtr(block.src_decl);
-                try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl), field_ty);
+                try sema.explainWhyTypeIsNotPacked(msg, src.toSrcLoc(src_decl, mod), field_ty);
                 try sema.addDeclaredHereNote(msg, field_ty);
                 break :msg msg;
@@ -19536,7 +20043,7 @@ fn reifyStruct(
         sema.resolveTypeLayout(field.ty) catch |err| switch (err) {
             error.AnalysisFail => {
                 const msg = sema.err orelse return err;
-                try sema.addFieldErrNote(struct_ty, index, msg, "while checking this field", .{});
+                try sema.addFieldErrNote(struct_ty.toType(), index, msg, "while checking this field", .{});
                 return err;
             },
             else => return err,
@@ -19545,30 +20052,27 @@ fn reifyStruct(
         var fields_bit_sum: u64 = 0;
         for (struct_obj.fields.values()) |field| {
-            fields_bit_sum += field.ty.bitSize(target);
+            fields_bit_sum += field.ty.bitSize(mod);
        }
-        if (backing_int_val.optionalValue()) |payload| {
-            var buf: Value.ToTypeBuffer = undefined;
-            const backing_int_ty = payload.toType(&buf);
+        if (backing_int_val.optionalValue(mod)) |payload| {
+            const backing_int_ty = payload.toType();
             try sema.checkBackingIntType(block, src, backing_int_ty, fields_bit_sum);
-            struct_obj.backing_int_ty = try backing_int_ty.copy(new_decl_arena_allocator);
+            struct_obj.backing_int_ty = backing_int_ty;
         } else {
-            var buf: Type.Payload.Bits = .{
-                .base = .{ .tag = .int_unsigned },
-                .data = @intCast(u16, fields_bit_sum),
-            };
-            struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(new_decl_arena_allocator);
+            struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum));
        }
         struct_obj.status = .have_layout;
     }
-    try new_decl.finalizeNewArena(&new_decl_arena);
-    return sema.analyzeDeclVal(block, src, new_decl_index);
+    const decl_val = sema.analyzeDeclVal(block, src, new_decl_index);
+    try mod.finalizeAnonDecl(new_decl_index);
+    return decl_val;
 }

 fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const addrspace_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -19580,7 +20084,7 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
     try sema.checkPtrOperand(block, ptr_src, ptr_ty);
-    var ptr_info = ptr_ty.ptrInfo().data;
+    var ptr_info = ptr_ty.ptrInfo(mod);
     const src_addrspace = ptr_info.@"addrspace";
     if (!target_util.addrSpaceCastIsValid(sema.mod.getTarget(), src_addrspace, dest_addrspace)) {
         const msg = msg: {
@@ -19594,8 +20098,8 @@ fn zirAddrSpaceCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
     ptr_info.@"addrspace" = dest_addrspace;
     const dest_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
-    const dest_ty = if (ptr_ty.zigTypeTag() == .Optional)
-        try Type.optional(sema.arena, dest_ptr_ty)
+    const dest_ty = if (ptr_ty.zigTypeTag(mod) == .Optional)
+        try Type.optional(sema.arena, dest_ptr_ty, mod)
     else
         dest_ptr_ty;
@@ -19624,6 +20128,7 @@ fn resolveVaListRef(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.In
 }

 fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const va_list_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -19638,7 +20143,7 @@ fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
             errdefer msg.destroy(sema.gpa);
             const src_decl = sema.mod.declPtr(block.src_decl);
-            try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl), arg_ty, .param_ty);
+            try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl, mod), arg_ty, .param_ty);
             try sema.addDeclaredHereNote(msg, arg_ty);
             break :msg msg;
@@ -19685,6 +20190,7 @@ fn zirCVaStart(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData)
 }

 fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const ty = try sema.resolveType(block, ty_src, inst_data.operand);
@@ -19692,11 +20198,19 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     var anon_decl = try block.startAnonDecl();
     defer anon_decl.deinit();
-    const bytes = try ty.nameAllocArena(anon_decl.arena(), sema.mod);
+    const bytes = try ty.nameAllocArena(sema.arena, mod);
+    const decl_ty = try mod.arrayType(.{
+        .len = bytes.len,
+        .child = .u8_type,
+        .sentinel = .zero_u8,
+    });
     const new_decl = try anon_decl.finish(
-        try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), bytes.len),
-        try Value.Tag.bytes.create(anon_decl.arena(), bytes[0 .. bytes.len + 1]),
+        decl_ty,
+        (try mod.intern(.{ .aggregate = .{
+            .ty = decl_ty.toIntern(),
+            .storage = .{ .bytes = bytes },
+        } })).toValue(),
         0, // default alignment
     );
@@ -19716,6 +20230,7 @@ fn zirFrameSize(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
 }

 fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -19730,24 +20245,24 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     if (try sema.resolveMaybeUndefVal(operand)) |val| {
         const result_val = try sema.floatToInt(block, operand_src, val, operand_ty, dest_ty);
         return sema.addConstant(dest_ty, result_val);
-    } else if (dest_ty.zigTypeTag() == .ComptimeInt) {
+    } else if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
         return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_int' must be comptime-known");
     }

     try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
-    if (dest_ty.intInfo(sema.mod.getTarget()).bits == 0) {
+    if (dest_ty.intInfo(mod).bits == 0) {
         if (block.wantSafety()) {
-            const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, Value.zero));
+            const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, try sema.addConstant(operand_ty, try mod.intValue(operand_ty, 0)));
             try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds);
         }
-        return sema.addConstant(dest_ty, Value.zero);
+        return sema.addConstant(dest_ty, try mod.intValue(dest_ty, 0));
     }
     const result = try block.addTyOp(if (block.float_mode == .Optimized) .float_to_int_optimized else .float_to_int, dest_ty, operand);
     if (block.wantSafety()) {
         const back = try block.addTyOp(.int_to_float, operand_ty, result);
         const diff = try block.addBinOp(.sub, operand, back);
-        const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, Value.one));
-        const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, Value.negative_one));
+        const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, try sema.addConstant(operand_ty, try mod.floatValue(operand_ty, 1.0)));
+        const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, try sema.addConstant(operand_ty, try mod.floatValue(operand_ty, -1.0)));
         const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
         try sema.addSafetyCheck(block, ok, .integer_part_out_of_bounds);
     }
@@ -19755,6 +20270,7 @@ fn zirFloatToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
 }

 fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -19769,7 +20285,7 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     if (try sema.resolveMaybeUndefVal(operand)) |val| {
         const result_val = try val.intToFloatAdvanced(sema.arena, operand_ty, dest_ty, sema.mod, sema);
         return sema.addConstant(dest_ty, result_val);
-    } else if (dest_ty.zigTypeTag() == .ComptimeFloat) {
+    } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
         return sema.failWithNeededComptime(block, operand_src, "value being casted to 'comptime_float' must be comptime-known");
     }
@@ -19778,6 +20294,7 @@
 }

 fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
@@ -19790,11 +20307,10 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const ptr_ty = try sema.resolveType(block, src, extra.lhs);
     try sema.checkPtrType(block, type_src, ptr_ty);
-    const elem_ty = ptr_ty.elemType2();
-    const target = sema.mod.getTarget();
-    const ptr_align = try ptr_ty.ptrAlignmentAdvanced(target, sema);
+    const elem_ty = ptr_ty.elemType2(mod);
+    const ptr_align = try ptr_ty.ptrAlignmentAdvanced(mod, sema);
-    if (ptr_ty.isSlice()) {
+    if (ptr_ty.isSlice(mod)) {
         const msg = msg: {
             const msg = try sema.errMsg(block, type_src, "integer cannot be converted to slice type '{}'", .{ptr_ty.fmt(sema.mod)});
             errdefer msg.destroy(sema.gpa);
@@ -19805,36 +20321,26 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
     if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| {
-        const addr = val.toUnsignedInt(target);
-        if (!ptr_ty.isAllowzeroPtr() and addr == 0)
+        const addr = val.toUnsignedInt(mod);
+        if (!ptr_ty.isAllowzeroPtr(mod) and addr == 0)
             return sema.fail(block, operand_src, "pointer type '{}' does not allow address zero", .{ptr_ty.fmt(sema.mod)});
         if (addr != 0 and ptr_align != 0 and addr % ptr_align != 0)
             return sema.fail(block, operand_src, "pointer type '{}' requires aligned address", .{ptr_ty.fmt(sema.mod)});
-        const val_payload = try sema.arena.create(Value.Payload.U64);
-        val_payload.* = .{
-            .base = .{ .tag = .int_u64 },
-            .data = addr,
-        };
-        return sema.addConstant(ptr_ty, Value.initPayload(&val_payload.base));
+        return sema.addConstant(ptr_ty, try mod.ptrIntValue(ptr_ty, addr));
     }
     try sema.requireRuntimeBlock(block, src, operand_src);
-    if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag() == .Fn)) {
-        if (!ptr_ty.isAllowzeroPtr()) {
+    if (block.wantSafety() and (try sema.typeHasRuntimeBits(elem_ty) or elem_ty.zigTypeTag(mod) == .Fn)) {
+        if (!ptr_ty.isAllowzeroPtr(mod)) {
             const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize);
             try sema.addSafetyCheck(block, is_non_zero, .cast_to_null);
         }
         if (ptr_align > 1) {
-            const val_payload = try sema.arena.create(Value.Payload.U64);
-            val_payload.* = .{
-                .base = .{ .tag = .int_u64 },
-                .data = ptr_align - 1,
-            };
             const align_minus_1 = try sema.addConstant(
                 Type.usize,
-                Value.initPayload(&val_payload.base),
+                try mod.intValue(Type.usize, ptr_align - 1),
             );
             const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
             const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
@@ -19845,6 +20351,8 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }

 fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
+    const ip = &mod.intern_pool;
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -19860,22 +20368,27 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
     if (disjoint: {
         // Try avoiding resolving inferred error sets if we can
-        if (!dest_ty.isAnyError() and dest_ty.errorSetNames().len == 0) break :disjoint true;
-        if (!operand_ty.isAnyError() and operand_ty.errorSetNames().len == 0) break :disjoint true;
-        if (dest_ty.isAnyError()) break :disjoint false;
-        if (operand_ty.isAnyError()) break :disjoint false;
-        for (dest_ty.errorSetNames()) |dest_err_name|
-            if (operand_ty.errorSetHasField(dest_err_name))
+        if (!dest_ty.isAnyError(mod) and dest_ty.errorSetNames(mod).len == 0) break :disjoint true;
+        if (!operand_ty.isAnyError(mod) and operand_ty.errorSetNames(mod).len == 0) break :disjoint true;
+        if (dest_ty.isAnyError(mod)) break :disjoint false;
+        if (operand_ty.isAnyError(mod)) break :disjoint false;
+        for (dest_ty.errorSetNames(mod)) |dest_err_name| {
+            if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
                 break :disjoint false;
+        }
-        if (dest_ty.tag() != .error_set_inferred and operand_ty.tag() != .error_set_inferred)
+        if (!ip.isInferredErrorSetType(dest_ty.toIntern()) and
+            !ip.isInferredErrorSetType(operand_ty.toIntern()))
+        {
             break :disjoint true;
+        }
         try sema.resolveInferredErrorSetTy(block, dest_ty_src, dest_ty);
         try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty);
-        for (dest_ty.errorSetNames()) |dest_err_name|
-            if (operand_ty.errorSetHasField(dest_err_name))
+        for (dest_ty.errorSetNames(mod)) |dest_err_name| {
+            if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
                 break :disjoint false;
+        }
         break :disjoint true;
     }) {
@@ -19895,15 +20408,15 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
     }
     if (maybe_operand_val) |val| {
-        if (!dest_ty.isAnyError()) {
-            const error_name = val.castTag(.@"error").?.data.name;
-            if (!dest_ty.errorSetHasField(error_name)) {
+        if (!dest_ty.isAnyError(mod)) {
+            const error_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
+            if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), error_name)) {
                 const msg = msg: {
                     const msg = try sema.errMsg(
                         block,
                         src,
-                        "'error.{s}' not a member of error set '{}'",
-                        .{ error_name, dest_ty.fmt(sema.mod) },
+                        "'error.{}' not a member of error set '{}'",
+                        .{ error_name.fmt(ip), dest_ty.fmt(sema.mod) },
                     );
                     errdefer msg.destroy(sema.gpa);
                     try sema.addDeclaredHereNote(msg, dest_ty);
@@ -19913,11 +20426,11 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
             }
         }
-        return sema.addConstant(dest_ty, val);
+        return sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty));
     }
     try sema.requireRuntimeBlock(block, src, operand_src);
-    if (block.wantSafety() and !dest_ty.isAnyError() and sema.mod.backendSupportsFeature(.error_set_has_value)) {
+    if (block.wantSafety() and !dest_ty.isAnyError(mod) and sema.mod.backendSupportsFeature(.error_set_has_value)) {
         const err_int_inst = try block.addBitCast(Type.err_int, operand);
         const ok = try block.addTyOp(.error_set_has_value, dest_ty, err_int_inst);
         try sema.addSafetyCheck(block, ok, .invalid_error_code);
@@ -19926,6 +20439,7 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
 }

 fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -19934,13 +20448,12 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
     const operand = try sema.resolveInst(extra.rhs);
     const operand_ty = sema.typeOf(operand);
-    const target = sema.mod.getTarget();
     try sema.checkPtrType(block, dest_ty_src, dest_ty);
     try sema.checkPtrOperand(block, operand_src, operand_ty);
-    const operand_info = operand_ty.ptrInfo().data;
-    const dest_info = dest_ty.ptrInfo().data;
+    const operand_info = operand_ty.ptrInfo(mod);
+    const dest_info = dest_ty.ptrInfo(mod);
     if (!operand_info.mutable and dest_info.mutable) {
         const msg = msg: {
             const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
@@ -19972,8 +20485,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
         return sema.failWithOwnedErrorMsg(msg);
     }
-    const dest_is_slice = dest_ty.isSlice();
-    const operand_is_slice = operand_ty.isSlice();
+    const dest_is_slice = dest_ty.isSlice(mod);
+    const operand_is_slice = operand_ty.isSlice(mod);
     if (dest_is_slice and !operand_is_slice) {
         return sema.fail(block, dest_ty_src, "illegal pointer cast to slice", .{});
     }
@@ -19982,32 +20495,31 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     else
         operand;
-    const dest_elem_ty = dest_ty.elemType2();
+    const dest_elem_ty = dest_ty.elemType2(mod);
     try sema.resolveTypeLayout(dest_elem_ty);
-    const dest_align = dest_ty.ptrAlignment(target);
+    const dest_align = dest_ty.ptrAlignment(mod);
-    const operand_elem_ty = operand_ty.elemType2();
+    const operand_elem_ty = operand_ty.elemType2(mod);
     try sema.resolveTypeLayout(operand_elem_ty);
-    const operand_align = operand_ty.ptrAlignment(target);
+    const operand_align = operand_ty.ptrAlignment(mod);
     // If the destination is less aligned than the source, preserve the source alignment
     const aligned_dest_ty = if (operand_align <= dest_align) dest_ty else blk: {
         // Unwrap the pointer (or pointer-like optional) type, set alignment, and re-wrap into result
-        if (dest_ty.zigTypeTag() == .Optional) {
-            var buf: Type.Payload.ElemType = undefined;
-            var dest_ptr_info = dest_ty.optionalChild(&buf).ptrInfo().data;
+        if (dest_ty.zigTypeTag(mod) == .Optional) {
+            var dest_ptr_info = dest_ty.optionalChild(mod).ptrInfo(mod);
             dest_ptr_info.@"align" = operand_align;
-            break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, sema.mod, dest_ptr_info));
+            break :blk try Type.optional(sema.arena, try Type.ptr(sema.arena, mod, dest_ptr_info), mod);
         } else {
-            var dest_ptr_info = dest_ty.ptrInfo().data;
+            var dest_ptr_info = dest_ty.ptrInfo(mod);
             dest_ptr_info.@"align" = operand_align;
-            break :blk try Type.ptr(sema.arena, sema.mod, dest_ptr_info);
+            break :blk try Type.ptr(sema.arena, mod, dest_ptr_info);
         }
     };
     if (dest_is_slice) {
-        const operand_elem_size = operand_elem_ty.abiSize(target);
-        const dest_elem_size = dest_elem_ty.abiSize(target);
+        const operand_elem_size = operand_elem_ty.abiSize(mod);
+        const dest_elem_size = dest_elem_ty.abiSize(mod);
         if (operand_elem_size != dest_elem_size) {
             return sema.fail(block, dest_ty_src, "TODO: implement @ptrCast between slices changing the length", .{});
         }
@@ -20019,10 +20531,10 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
             errdefer msg.destroy(sema.gpa);
             try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{
-                operand_ty.fmt(sema.mod), operand_align,
+                operand_ty.fmt(mod), operand_align,
             });
             try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{
-                dest_ty.fmt(sema.mod), dest_align,
+                dest_ty.fmt(mod), dest_align,
             });
             try sema.errNote(block, src, msg, "consider using '@alignCast'", .{});
@@ -20032,21 +20544,18 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     }
     if (try sema.resolveMaybeUndefVal(ptr)) |operand_val| {
-        if (!dest_ty.ptrAllowsZero() and operand_val.isUndef()) {
+        if (!dest_ty.ptrAllowsZero(mod) and operand_val.isUndef(mod)) {
             return sema.failWithUseOfUndef(block, operand_src);
         }
-        if (!dest_ty.ptrAllowsZero() and operand_val.isNull()) {
-            return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)});
+        if (!dest_ty.ptrAllowsZero(mod) and operand_val.isNull(mod)) {
+            return sema.fail(block, operand_src, "null pointer casted to type '{}'", .{dest_ty.fmt(mod)});
         }
-        if (dest_ty.zigTypeTag() == .Optional and sema.typeOf(ptr).zigTypeTag() != .Optional) {
-            return sema.addConstant(dest_ty, try Value.Tag.opt_payload.create(sema.arena, operand_val));
-        }
-        return sema.addConstant(aligned_dest_ty, operand_val);
+        return sema.addConstant(aligned_dest_ty, try mod.getCoerced(operand_val, aligned_dest_ty));
     }
     try sema.requireRuntimeBlock(block, src, null);
-    if (block.wantSafety() and operand_ty.ptrAllowsZero() and !dest_ty.ptrAllowsZero() and
-        (try sema.typeHasRuntimeBits(dest_ty.elemType2()) or dest_ty.elemType2().zigTypeTag() == .Fn))
+    if (block.wantSafety() and operand_ty.ptrAllowsZero(mod) and !dest_ty.ptrAllowsZero(mod) and
+        (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn))
     {
         const ptr_int = try block.addUnOp(.ptrtoint, ptr);
         const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize);
@@ -20062,6 +20571,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
 }

 fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -20069,12 +20579,12 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
     const operand_ty = sema.typeOf(operand);
     try sema.checkPtrOperand(block, operand_src, operand_ty);
-    var ptr_info = operand_ty.ptrInfo().data;
+    var ptr_info = operand_ty.ptrInfo(mod);
     ptr_info.mutable = true;
-    const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
+    const dest_ty = try Type.ptr(sema.arena, mod, ptr_info);
     if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
-        return sema.addConstant(dest_ty, operand_val);
+        return sema.addConstant(dest_ty, try mod.getCoerced(operand_val, dest_ty));
     }
     try sema.requireRuntimeBlock(block, src, null);
@@ -20082,6 +20592,7 @@ fn zirConstCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
 }

 fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -20089,9 +20600,9 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
     const operand_ty = sema.typeOf(operand);
     try sema.checkPtrOperand(block, operand_src, operand_ty);
-    var ptr_info = operand_ty.ptrInfo().data;
+    var ptr_info = operand_ty.ptrInfo(mod);
     ptr_info.@"volatile" = false;
-    const dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
+    const dest_ty = try Type.ptr(sema.arena, mod, ptr_info);
Type.ptr(sema.arena, mod, ptr_info); if (try sema.resolveMaybeUndefVal(operand)) |operand_val| { return sema.addConstant(dest_ty, operand_val); @@ -20102,6 +20613,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD } fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -20112,9 +20624,12 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_scalar_ty); const operand_ty = sema.typeOf(operand); const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src); - const is_vector = operand_ty.zigTypeTag() == .Vector; + const is_vector = operand_ty.zigTypeTag(mod) == .Vector; const dest_ty = if (is_vector) - try Type.vector(sema.arena, operand_ty.vectorLen(), dest_scalar_ty) + try mod.vectorType(.{ + .len = operand_ty.vectorLen(mod), + .child = dest_scalar_ty.toIntern(), + }) else dest_scalar_ty; @@ -20122,22 +20637,21 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai return sema.coerce(block, dest_ty, operand, operand_src); } - const target = sema.mod.getTarget(); - const dest_info = dest_scalar_ty.intInfo(target); + const dest_info = dest_scalar_ty.intInfo(mod); if (try sema.typeHasOnePossibleValue(dest_ty)) |val| { return sema.addConstant(dest_ty, val); } - if (operand_scalar_ty.zigTypeTag() != .ComptimeInt) { - const operand_info = operand_ty.intInfo(target); + if (operand_scalar_ty.zigTypeTag(mod) != .ComptimeInt) { + const operand_info = operand_ty.intInfo(mod); if (try sema.typeHasOnePossibleValue(operand_ty)) |val| { return sema.addConstant(operand_ty, val); } if (operand_info.signedness != dest_info.signedness) { return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{ - @tagName(dest_info.signedness), operand_ty.fmt(sema.mod), + @tagName(dest_info.signedness), operand_ty.fmt(mod), }); } if (operand_info.bits < dest_info.bits) { @@ -20146,7 +20660,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai block, src, "destination type '{}' has more bits than source type '{}'", - .{ dest_ty.fmt(sema.mod), operand_ty.fmt(sema.mod) }, + .{ dest_ty.fmt(mod), operand_ty.fmt(mod) }, ); errdefer msg.destroy(sema.gpa); try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{ @@ -20162,23 +20676,22 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } if (try sema.resolveMaybeUndefValIntable(operand)) |val| { - if (val.isUndef()) return sema.addConstUndef(dest_ty); + if (val.isUndef(mod)) return sema.addConstUndef(dest_ty); if (!is_vector) { - return sema.addConstant( + return sema.addConstant(dest_ty, try mod.getCoerced( + try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod), dest_ty, - try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod), - ); + )); } - var elem_buf: Value.ElemValueBuffer = undefined; - const elems = try sema.arena.alloc(Value, operand_ty.vectorLen()); + const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod)); for (elems, 0..) 
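Reviewer sketch (illustrative, not part of the patch): the zirPtrCast/zirConstCast/zirVolatileCast hunks above only thread `mod` through existing checks; the user-facing rules they enforce are unchanged. Assuming a 2023-era Zig toolchain (two-argument `@ptrCast`), those rules look like this:

    const std = @import("std");

    test "@ptrCast keeps the const qualifier honest" {
        var x: u32 = 0x11223344;
        // Reinterpreting as bytes is fine: the alignment only shrinks (4 -> 1).
        const bytes = @ptrCast(*const [4]u8, &x);
        try std.testing.expect(bytes.len == 4);
        // Dropping const is rejected by the check in the hunk above:
        // const bad = @ptrCast(*u32, @as(*const u32, &x)); // error: cast discards const qualifier
    }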
@@ -20102,6 +20613,7 @@ fn zirVolatileCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
 }
 
 fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -20112,9 +20624,12 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_scalar_ty);
     const operand_ty = sema.typeOf(operand);
     const operand_scalar_ty = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
-    const is_vector = operand_ty.zigTypeTag() == .Vector;
+    const is_vector = operand_ty.zigTypeTag(mod) == .Vector;
     const dest_ty = if (is_vector)
-        try Type.vector(sema.arena, operand_ty.vectorLen(), dest_scalar_ty)
+        try mod.vectorType(.{
+            .len = operand_ty.vectorLen(mod),
+            .child = dest_scalar_ty.toIntern(),
+        })
     else
         dest_scalar_ty;
 
@@ -20122,22 +20637,21 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         return sema.coerce(block, dest_ty, operand, operand_src);
     }
 
-    const target = sema.mod.getTarget();
-    const dest_info = dest_scalar_ty.intInfo(target);
+    const dest_info = dest_scalar_ty.intInfo(mod);
 
     if (try sema.typeHasOnePossibleValue(dest_ty)) |val| {
         return sema.addConstant(dest_ty, val);
     }
 
-    if (operand_scalar_ty.zigTypeTag() != .ComptimeInt) {
-        const operand_info = operand_ty.intInfo(target);
+    if (operand_scalar_ty.zigTypeTag(mod) != .ComptimeInt) {
+        const operand_info = operand_ty.intInfo(mod);
         if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
             return sema.addConstant(operand_ty, val);
         }
 
         if (operand_info.signedness != dest_info.signedness) {
             return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{
-                @tagName(dest_info.signedness), operand_ty.fmt(sema.mod),
+                @tagName(dest_info.signedness), operand_ty.fmt(mod),
             });
         }
         if (operand_info.bits < dest_info.bits) {
@@ -20146,7 +20660,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 block,
                 src,
                 "destination type '{}' has more bits than source type '{}'",
-                .{ dest_ty.fmt(sema.mod), operand_ty.fmt(sema.mod) },
+                .{ dest_ty.fmt(mod), operand_ty.fmt(mod) },
             );
             errdefer msg.destroy(sema.gpa);
             try sema.errNote(block, dest_ty_src, msg, "destination type has {d} bits", .{
@@ -20162,23 +20676,22 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     }
 
     if (try sema.resolveMaybeUndefValIntable(operand)) |val| {
-        if (val.isUndef()) return sema.addConstUndef(dest_ty);
+        if (val.isUndef(mod)) return sema.addConstUndef(dest_ty);
         if (!is_vector) {
-            return sema.addConstant(
+            return sema.addConstant(dest_ty, try mod.getCoerced(
+                try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, mod),
                 dest_ty,
-                try val.intTrunc(operand_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod),
-            );
+            ));
         }
-        var elem_buf: Value.ElemValueBuffer = undefined;
-        const elems = try sema.arena.alloc(Value, operand_ty.vectorLen());
+        const elems = try sema.arena.alloc(InternPool.Index, operand_ty.vectorLen(mod));
         for (elems, 0..) |*elem, i| {
-            const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
-            elem.* = try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, sema.mod);
+            const elem_val = try val.elemValue(mod, i);
+            elem.* = try (try elem_val.intTrunc(operand_scalar_ty, sema.arena, dest_info.signedness, dest_info.bits, mod)).intern(dest_scalar_ty, mod);
         }
-        return sema.addConstant(
-            dest_ty,
-            try Value.Tag.aggregate.create(sema.arena, elems),
-        );
+        return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{
+            .ty = dest_ty.toIntern(),
+            .storage = .{ .elems = elems },
+        } })).toValue());
     }
 
     try sema.requireRuntimeBlock(block, src, operand_src);
@@ -20186,6 +20699,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 }
 
 fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -20196,43 +20710,38 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
 
     try sema.checkPtrOperand(block, ptr_src, ptr_ty);
 
-    var ptr_info = ptr_ty.ptrInfo().data;
+    var ptr_info = ptr_ty.ptrInfo(mod);
     ptr_info.@"align" = dest_align;
-    var dest_ty = try Type.ptr(sema.arena, sema.mod, ptr_info);
-    if (ptr_ty.zigTypeTag() == .Optional) {
-        dest_ty = try Type.Tag.optional.create(sema.arena, dest_ty);
+    var dest_ty = try Type.ptr(sema.arena, mod, ptr_info);
+    if (ptr_ty.zigTypeTag(mod) == .Optional) {
+        dest_ty = try mod.optionalType(dest_ty.toIntern());
     }
 
     if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |val| {
-        if (try val.getUnsignedIntAdvanced(sema.mod.getTarget(), null)) |addr| {
+        if (try val.getUnsignedIntAdvanced(mod, null)) |addr| {
            if (addr % dest_align != 0) {
                return sema.fail(block, ptr_src, "pointer address 0x{X} is not aligned to {d} bytes", .{ addr, dest_align });
            }
         }
-        return sema.addConstant(dest_ty, val);
+        return sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty));
     }
 
     try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src);
     if (block.wantSafety() and dest_align > 1 and
         try sema.typeHasRuntimeBits(ptr_info.pointee_type))
     {
-        const val_payload = try sema.arena.create(Value.Payload.U64);
-        val_payload.* = .{
-            .base = .{ .tag = .int_u64 },
-            .data = dest_align - 1,
-        };
         const align_minus_1 = try sema.addConstant(
             Type.usize,
-            Value.initPayload(&val_payload.base),
+            try mod.intValue(Type.usize, dest_align - 1),
         );
-        const actual_ptr = if (ptr_ty.isSlice())
+        const actual_ptr = if (ptr_ty.isSlice(mod))
             try sema.analyzeSlicePtr(block, ptr_src, ptr, ptr_ty)
         else
             ptr;
         const ptr_int = try block.addUnOp(.ptrtoint, actual_ptr);
         const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
         const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
-        const ok = if (ptr_ty.isSlice()) ok: {
+        const ok = if (ptr_ty.isSlice(mod)) ok: {
             const len = try sema.analyzeSliceLen(block, ptr_src, ptr);
             const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize);
             break :ok try block.addBinOp(.bit_or, len_zero, is_aligned);
@@ -20247,51 +20756,52 @@ fn zirBitCount(
     block: *Block,
     inst: Zir.Inst.Index,
     air_tag: Air.Inst.Tag,
-    comptime comptimeOp: fn (val: Value, ty: Type, target: std.Target) u64,
+    comptime comptimeOp: fn (val: Value, ty: Type, mod: *Module) u64,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
     _ = try sema.checkIntOrVector(block, operand, operand_src);
-    const target = sema.mod.getTarget();
-    const bits = operand_ty.intInfo(target).bits;
+    const bits = operand_ty.intInfo(mod).bits;
 
     if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
         return sema.addConstant(operand_ty, val);
     }
 
-    const result_scalar_ty = try Type.smallestUnsignedInt(sema.arena, bits);
-    switch (operand_ty.zigTypeTag()) {
+    const result_scalar_ty = try mod.smallestUnsignedInt(bits);
+    switch (operand_ty.zigTypeTag(mod)) {
         .Vector => {
-            const vec_len = operand_ty.vectorLen();
-            const result_ty = try Type.vector(sema.arena, vec_len, result_scalar_ty);
+            const vec_len = operand_ty.vectorLen(mod);
+            const result_ty = try mod.vectorType(.{
+                .len = vec_len,
+                .child = result_scalar_ty.toIntern(),
+            });
             if (try sema.resolveMaybeUndefVal(operand)) |val| {
-                if (val.isUndef()) return sema.addConstUndef(result_ty);
+                if (val.isUndef(mod)) return sema.addConstUndef(result_ty);
 
-                var elem_buf: Value.ElemValueBuffer = undefined;
-                const elems = try sema.arena.alloc(Value, vec_len);
-                const scalar_ty = operand_ty.scalarType();
+                const elems = try sema.arena.alloc(InternPool.Index, vec_len);
+                const scalar_ty = operand_ty.scalarType(mod);
                 for (elems, 0..) |*elem, i| {
-                    const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
-                    const count = comptimeOp(elem_val, scalar_ty, target);
-                    elem.* = try Value.Tag.int_u64.create(sema.arena, count);
+                    const elem_val = try val.elemValue(mod, i);
+                    const count = comptimeOp(elem_val, scalar_ty, mod);
+                    elem.* = (try mod.intValue(result_scalar_ty, count)).toIntern();
                 }
-                return sema.addConstant(
-                    result_ty,
-                    try Value.Tag.aggregate.create(sema.arena, elems),
-                );
+                return sema.addConstant(result_ty, (try mod.intern(.{ .aggregate = .{
+                    .ty = result_ty.toIntern(),
+                    .storage = .{ .elems = elems },
+                } })).toValue());
             } else {
                 try sema.requireRuntimeBlock(block, src, operand_src);
                 return block.addTyOp(air_tag, result_ty, operand);
             }
         },
         .Int => {
-            if (try sema.resolveMaybeUndefVal(operand)) |val| {
-                if (val.isUndef()) return sema.addConstUndef(result_scalar_ty);
-                try sema.resolveLazyValue(val);
-                return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, target));
+            if (try sema.resolveMaybeUndefLazyVal(operand)) |val| {
+                if (val.isUndef(mod)) return sema.addConstUndef(result_scalar_ty);
+                return sema.addIntUnsigned(result_scalar_ty, comptimeOp(val, operand_ty, mod));
             } else {
                 try sema.requireRuntimeBlock(block, src, operand_src);
                 return block.addTyOp(air_tag, result_scalar_ty, operand);
@@ -20302,20 +20812,20 @@ fn zirBitCount(
 }
 
 fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const operand = try sema.resolveInst(inst_data.operand);
     const operand_ty = sema.typeOf(operand);
     const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);
-    const target = sema.mod.getTarget();
-    const bits = scalar_ty.intInfo(target).bits;
+    const bits = scalar_ty.intInfo(mod).bits;
     if (bits % 8 != 0) {
         return sema.fail(
             block,
             operand_src,
             "@byteSwap requires the number of bits to be evenly divisible by 8, but {} has {} bits",
-            .{ scalar_ty.fmt(sema.mod), bits },
+            .{ scalar_ty.fmt(mod), bits },
         );
     }
 
@@ -20323,11 +20833,11 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         return sema.addConstant(operand_ty, val);
     }
 
-    switch (operand_ty.zigTypeTag()) {
+    switch (operand_ty.zigTypeTag(mod)) {
         .Int => {
             const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
-                if (val.isUndef()) return sema.addConstUndef(operand_ty);
-                const result_val = try val.byteSwap(operand_ty, target, sema.arena);
+                if (val.isUndef(mod)) return sema.addConstUndef(operand_ty);
+                const result_val = try val.byteSwap(operand_ty, mod, sema.arena);
                 return sema.addConstant(operand_ty, result_val);
             } else operand_src;
@@ -20336,20 +20846,19 @@ fn zirByteSwap(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         },
         .Vector => {
             const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
-                if (val.isUndef())
+                if (val.isUndef(mod))
                     return sema.addConstUndef(operand_ty);
 
-                const vec_len = operand_ty.vectorLen();
-                var elem_buf: Value.ElemValueBuffer = undefined;
-                const elems = try sema.arena.alloc(Value, vec_len);
+                const vec_len = operand_ty.vectorLen(mod);
+                const elems = try sema.arena.alloc(InternPool.Index, vec_len);
                 for (elems, 0..) |*elem, i| {
-                    const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
-                    elem.* = try elem_val.byteSwap(operand_ty, target, sema.arena);
+                    const elem_val = try val.elemValue(mod, i);
+                    elem.* = try (try elem_val.byteSwap(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod);
                 }
-                return sema.addConstant(
-                    operand_ty,
-                    try Value.Tag.aggregate.create(sema.arena, elems),
-                );
+                return sema.addConstant(operand_ty, (try mod.intern(.{ .aggregate = .{
+                    .ty = operand_ty.toIntern(),
+                    .storage = .{ .elems = elems },
+                } })).toValue());
             } else operand_src;
 
             try sema.requireRuntimeBlock(block, src, runtime_src);
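Reviewer sketch (illustrative, not part of the patch): zirTruncate and zirByteSwap above keep their semantics; only the comptime value plumbing moves onto the intern pool. Assuming a 2023-era toolchain (two-argument `@truncate`, single-argument `@byteSwap`):

    const std = @import("std");

    test "@truncate drops high bits; @byteSwap needs whole bytes" {
        const wide: u16 = 0xABCD;
        // Same signedness required, and the destination may not have more bits.
        try std.testing.expect(@truncate(u8, wide) == 0xCD);
        // @byteSwap is only accepted when the bit width is a multiple of 8.
        try std.testing.expect(@byteSwap(wide) == 0xCDAB);
    }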
@@ -20371,12 +20880,12 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
         return sema.addConstant(operand_ty, val);
     }
 
-    const target = sema.mod.getTarget();
-    switch (operand_ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (operand_ty.zigTypeTag(mod)) {
         .Int => {
             const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
-                if (val.isUndef()) return sema.addConstUndef(operand_ty);
-                const result_val = try val.bitReverse(operand_ty, target, sema.arena);
+                if (val.isUndef(mod)) return sema.addConstUndef(operand_ty);
+                const result_val = try val.bitReverse(operand_ty, mod, sema.arena);
                 return sema.addConstant(operand_ty, result_val);
             } else operand_src;
@@ -20385,20 +20894,19 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
         },
         .Vector => {
             const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
-                if (val.isUndef())
+                if (val.isUndef(mod))
                     return sema.addConstUndef(operand_ty);
 
-                const vec_len = operand_ty.vectorLen();
-                var elem_buf: Value.ElemValueBuffer = undefined;
-                const elems = try sema.arena.alloc(Value, vec_len);
+                const vec_len = operand_ty.vectorLen(mod);
+                const elems = try sema.arena.alloc(InternPool.Index, vec_len);
                 for (elems, 0..) |*elem, i| {
-                    const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
-                    elem.* = try elem_val.bitReverse(scalar_ty, target, sema.arena);
+                    const elem_val = try val.elemValue(mod, i);
+                    elem.* = try (try elem_val.bitReverse(scalar_ty, mod, sema.arena)).intern(scalar_ty, mod);
                 }
-                return sema.addConstant(
-                    operand_ty,
-                    try Value.Tag.aggregate.create(sema.arena, elems),
-                );
+                return sema.addConstant(operand_ty, (try mod.intern(.{ .aggregate = .{
+                    .ty = operand_ty.toIntern(),
+                    .storage = .{ .elems = elems },
+                } })).toValue());
             } else operand_src;
 
             try sema.requireRuntimeBlock(block, src, runtime_src);
@@ -20428,15 +20936,15 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
 
     const ty = try sema.resolveType(block, lhs_src, extra.lhs);
-    const field_name = try sema.resolveConstString(block, rhs_src, extra.rhs, "name of field must be comptime-known");
-    const target = sema.mod.getTarget();
+    const field_name = try sema.resolveConstStringIntern(block, rhs_src, extra.rhs, "name of field must be comptime-known");
+    const mod = sema.mod;
 
     try sema.resolveTypeLayout(ty);
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Struct => {},
         else => {
             const msg = msg: {
-                const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(sema.mod)});
+                const msg = try sema.errMsg(block, lhs_src, "expected struct type, found '{}'", .{ty.fmt(mod)});
                 errdefer msg.destroy(sema.gpa);
                 try sema.addDeclaredHereNote(msg, ty);
                 break :msg msg;
@@ -20445,45 +20953,47 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
         },
     }
 
-    const field_index = if (ty.isTuple()) blk: {
-        if (mem.eql(u8, field_name, "len")) {
+    const field_index = if (ty.isTuple(mod)) blk: {
+        if (mod.intern_pool.stringEqlSlice(field_name, "len")) {
             return sema.fail(block, src, "no offset available for 'len' field of tuple", .{});
         }
         break :blk try sema.tupleFieldIndex(block, ty, field_name, rhs_src);
     } else try sema.structFieldIndex(block, ty, field_name, rhs_src);
 
-    if (ty.structFieldIsComptime(field_index)) {
+    if (ty.structFieldIsComptime(field_index, mod)) {
         return sema.fail(block, src, "no offset available for comptime field", .{});
     }
 
-    switch (ty.containerLayout()) {
+    switch (ty.containerLayout(mod)) {
         .Packed => {
             var bit_sum: u64 = 0;
-            const fields = ty.structFields();
+            const fields = ty.structFields(mod);
             for (fields.values(), 0..) |field, i| {
                 if (i == field_index) {
                     return bit_sum;
                 }
-                bit_sum += field.ty.bitSize(target);
+                bit_sum += field.ty.bitSize(mod);
             } else unreachable;
         },
-        else => return ty.structFieldOffset(field_index, target) * 8,
+        else => return ty.structFieldOffset(field_index, mod) * 8,
     }
 }
 
 fn checkNamespaceType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .Struct, .Enum, .Union, .Opaque => return,
-        else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(sema.mod)}),
+        else => return sema.fail(block, src, "expected struct, enum, union, or opaque; found '{}'", .{ty.fmt(mod)}),
     }
 }
 
 /// Returns `true` if the type was a comptime_int.
 fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
-    switch (try ty.zigTypeTagOrPoison()) {
+    const mod = sema.mod;
+    switch (try ty.zigTypeTagOrPoison(mod)) {
         .ComptimeInt => return true,
         .Int => return false,
-        else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(sema.mod)}),
+        else => return sema.fail(block, src, "expected integer type, found '{}'", .{ty.fmt(mod)}),
     }
 }
 
@@ -20493,8 +21003,9 @@ fn checkInvalidPtrArithmetic(
     src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    switch (try ty.zigTypeTagOrPoison()) {
-        .Pointer => switch (ty.ptrSize()) {
+    const mod = sema.mod;
+    switch (try ty.zigTypeTagOrPoison(mod)) {
+        .Pointer => switch (ty.ptrSize(mod)) {
             .One, .Slice => return,
             .Many, .C => return sema.fail(
                 block,
@@ -20532,7 +21043,8 @@ fn checkPtrOperand(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .Pointer => return,
         .Fn => {
             const msg = msg: {
@@ -20540,7 +21052,7 @@ fn checkPtrOperand(
                     block,
                     ty_src,
                     "expected pointer, found '{}'",
-                    .{ty.fmt(sema.mod)},
+                    .{ty.fmt(mod)},
                 );
                 errdefer msg.destroy(sema.gpa);
@@ -20550,10 +21062,10 @@ fn checkPtrOperand(
             };
             return sema.failWithOwnedErrorMsg(msg);
         },
-        .Optional => if (ty.isPtrLikeOptional()) return,
+        .Optional => if (ty.isPtrLikeOptional(mod)) return,
         else => {},
     }
-    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
+    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)});
 }
 
 fn checkPtrType(
@@ -20562,7 +21074,8 @@ fn checkPtrType(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .Pointer => return,
         .Fn => {
             const msg = msg: {
@@ -20570,7 +21083,7 @@ fn checkPtrType(
                     block,
                     ty_src,
                     "expected pointer type, found '{}'",
-                    .{ty.fmt(sema.mod)},
+                    .{ty.fmt(mod)},
                 );
                 errdefer msg.destroy(sema.gpa);
@@ -20580,10 +21093,10 @@ fn checkPtrType(
             };
             return sema.failWithOwnedErrorMsg(msg);
         },
-        .Optional => if (ty.isPtrLikeOptional()) return,
+        .Optional => if (ty.isPtrLikeOptional(mod)) return,
         else => {},
     }
-    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(sema.mod)});
+    return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty.fmt(mod)});
 }
 
 fn checkVectorElemType(
@@ -20592,11 +21105,12 @@ fn checkVectorElemType(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .Int, .Float, .Bool => return,
-        else => if (ty.isPtrAtRuntime()) return,
+        else => if (ty.isPtrAtRuntime(mod)) return,
     }
-    return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(sema.mod)});
+    return sema.fail(block, ty_src, "expected integer, float, bool, or pointer for the vector element type; found '{}'", .{ty.fmt(mod)});
 }
 
 fn checkFloatType(
@@ -20605,9 +21119,10 @@ fn checkFloatType(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .ComptimeInt, .ComptimeFloat, .Float => {},
-        else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(sema.mod)}),
+        else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty.fmt(mod)}),
     }
 }
 
@@ -20617,13 +21132,14 @@ fn checkNumericType(
     ty_src: LazySrcLoc,
     ty: Type,
 ) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
-        .Vector => switch (ty.childType().zigTypeTag()) {
+        .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
             .ComptimeFloat, .Float, .ComptimeInt, .Int => {},
             else => |t| return sema.fail(block, ty_src, "expected number, found '{}'", .{t}),
         },
-        else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(sema.mod)}),
+        else => return sema.fail(block, ty_src, "expected number, found '{}'", .{ty.fmt(mod)}),
     }
 }
 
@@ -20637,9 +21153,10 @@ fn checkAtomicPtrOperand(
     ptr_src: LazySrcLoc,
     ptr_const: bool,
 ) CompileError!Air.Inst.Ref {
-    const target = sema.mod.getTarget();
-    var diag: target_util.AtomicPtrAlignmentDiagnostics = .{};
-    const alignment = target_util.atomicPtrAlignment(target, elem_ty, &diag) catch |err| switch (err) {
+    const mod = sema.mod;
+    var diag: Module.AtomicPtrAlignmentDiagnostics = .{};
+    const alignment = mod.atomicPtrAlignment(elem_ty, &diag) catch |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
         error.FloatTooBig => return sema.fail(
             block,
             elem_ty_src,
@@ -20656,7 +21173,7 @@ fn checkAtomicPtrOperand(
             block,
             elem_ty_src,
             "expected bool, integer, float, enum, or pointer type; found '{}'",
-            .{elem_ty.fmt(sema.mod)},
+            .{elem_ty.fmt(mod)},
         ),
     };
 
@@ -20668,10 +21185,10 @@ fn checkAtomicPtrOperand(
     };
 
     const ptr_ty = sema.typeOf(ptr);
-    const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison()) {
-        .Pointer => ptr_ty.ptrInfo().data,
+    const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison(mod)) {
+        .Pointer => ptr_ty.ptrInfo(mod),
         else => {
-            const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
+            const wanted_ptr_ty = try Type.ptr(sema.arena, mod, wanted_ptr_data);
             _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
             unreachable;
         },
@@ -20681,7 +21198,7 @@ fn checkAtomicPtrOperand(
     wanted_ptr_data.@"allowzero" = ptr_data.@"allowzero";
     wanted_ptr_data.@"volatile" = ptr_data.@"volatile";
 
-    const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
+    const wanted_ptr_ty = try Type.ptr(sema.arena, mod, wanted_ptr_data);
     const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
 
     return casted_ptr;
@@ -20695,7 +21212,7 @@ fn checkPtrIsNotComptimeMutable(
     operand_src: LazySrcLoc,
 ) CompileError!void {
     _ = operand_src;
-    if (ptr_val.isComptimeMutablePtr()) {
+    if (ptr_val.isComptimeMutablePtr(sema.mod)) {
         return sema.fail(block, ptr_src, "cannot store runtime value in compile time variable", .{});
     }
 }
@@ -20704,7 +21221,7 @@ fn checkComptimeVarStore(
     sema: *Sema,
     block: *Block,
     src: LazySrcLoc,
-    decl_ref_mut: Value.Payload.DeclRefMut.Data,
+    decl_ref_mut: InternPool.Key.Ptr.Addr.MutDecl,
 ) CompileError!void {
     if (@enumToInt(decl_ref_mut.runtime_index) < @enumToInt(block.runtime_index)) {
         if (block.runtime_cond) |cond_src| {
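Reviewer sketch (illustrative, not part of the patch): the bitOffsetOf hunk above sums preceding field bit sizes for packed layouts and otherwise falls back to the byte offset times eight. Assuming a 2023-era toolchain:

    const std = @import("std");

    const Flags = packed struct {
        a: u1,
        b: u3,
        c: u4,
    };

    test "@bitOffsetOf sums preceding field bit sizes in a packed struct" {
        try std.testing.expect(@bitOffsetOf(Flags, "a") == 0);
        try std.testing.expect(@bitOffsetOf(Flags, "b") == 1);
        try std.testing.expect(@bitOffsetOf(Flags, "c") == 4);
    }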
@@ -20735,20 +21252,21 @@ fn checkIntOrVector(
     operand: Air.Inst.Ref,
     operand_src: LazySrcLoc,
 ) CompileError!Type {
+    const mod = sema.mod;
     const operand_ty = sema.typeOf(operand);
-    switch (try operand_ty.zigTypeTagOrPoison()) {
+    switch (try operand_ty.zigTypeTagOrPoison(mod)) {
         .Int => return operand_ty,
         .Vector => {
-            const elem_ty = operand_ty.childType();
-            switch (try elem_ty.zigTypeTagOrPoison()) {
+            const elem_ty = operand_ty.childType(mod);
+            switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                 .Int => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
-                    elem_ty.fmt(sema.mod),
+                    elem_ty.fmt(mod),
                 }),
             }
         },
         else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
-            operand_ty.fmt(sema.mod),
+            operand_ty.fmt(mod),
         }),
     }
 }
@@ -20759,27 +21277,29 @@ fn checkIntOrVectorAllowComptime(
     operand_ty: Type,
     operand_src: LazySrcLoc,
 ) CompileError!Type {
-    switch (try operand_ty.zigTypeTagOrPoison()) {
+    const mod = sema.mod;
+    switch (try operand_ty.zigTypeTagOrPoison(mod)) {
         .Int, .ComptimeInt => return operand_ty,
         .Vector => {
-            const elem_ty = operand_ty.childType();
-            switch (try elem_ty.zigTypeTagOrPoison()) {
+            const elem_ty = operand_ty.childType(mod);
+            switch (try elem_ty.zigTypeTagOrPoison(mod)) {
                 .Int, .ComptimeInt => return elem_ty,
                 else => return sema.fail(block, operand_src, "expected vector of integers; found vector of '{}'", .{
-                    elem_ty.fmt(sema.mod),
+                    elem_ty.fmt(mod),
                 }),
             }
         },
         else => return sema.fail(block, operand_src, "expected integer or vector, found '{}'", .{
-            operand_ty.fmt(sema.mod),
+            operand_ty.fmt(mod),
         }),
     }
 }
 
 fn checkErrorSetType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
-    switch (ty.zigTypeTag()) {
+    const mod = sema.mod;
+    switch (ty.zigTypeTag(mod)) {
         .ErrorSet => return,
-        else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(sema.mod)}),
+        else => return sema.fail(block, src, "expected error set type, found '{}'", .{ty.fmt(mod)}),
     }
 }
 
@@ -20805,11 +21325,12 @@ fn checkSimdBinOp(
     lhs_src: LazySrcLoc,
     rhs_src: LazySrcLoc,
 ) CompileError!SimdBinOp {
+    const mod = sema.mod;
     const lhs_ty = sema.typeOf(uncasted_lhs);
     const rhs_ty = sema.typeOf(uncasted_rhs);
 
     try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
-    var vec_len: ?usize = if (lhs_ty.zigTypeTag() == .Vector) lhs_ty.vectorLen() else null;
+    var vec_len: ?usize = if (lhs_ty.zigTypeTag(mod) == .Vector) lhs_ty.vectorLen(mod) else null;
     const result_ty = try sema.resolvePeerTypes(block, src, &.{ uncasted_lhs, uncasted_rhs }, .{
         .override = &[_]?LazySrcLoc{ lhs_src, rhs_src },
     });
@@ -20823,7 +21344,7 @@ fn checkSimdBinOp(
         .lhs_val = try sema.resolveMaybeUndefVal(lhs),
         .rhs_val = try sema.resolveMaybeUndefVal(rhs),
         .result_ty = result_ty,
-        .scalar_ty = result_ty.scalarType(),
+        .scalar_ty = result_ty.scalarType(mod),
     };
 }
 
@@ -20836,8 +21357,9 @@ fn checkVectorizableBinaryOperands(
     lhs_src: LazySrcLoc,
     rhs_src: LazySrcLoc,
 ) CompileError!void {
-    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
-    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
+    const mod = sema.mod;
+    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(mod);
+    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison(mod);
     if (lhs_zig_ty_tag != .Vector and rhs_zig_ty_tag != .Vector) return;
 
     const lhs_is_vector = switch (lhs_zig_ty_tag) {
@@ -20850,8 +21372,8 @@ fn checkVectorizableBinaryOperands(
     };
 
     if (lhs_is_vector and rhs_is_vector) {
-        const lhs_len = lhs_ty.arrayLen();
-        const rhs_len = rhs_ty.arrayLen();
+        const lhs_len = lhs_ty.arrayLen(mod);
+        const rhs_len = rhs_ty.arrayLen(mod);
         if (lhs_len != rhs_len) {
             const msg = msg: {
                 const msg = try sema.errMsg(block, src, "vector length mismatch", .{});
@@ -20865,7 +21387,7 @@ fn checkVectorizableBinaryOperands(
     } else {
         const msg = msg: {
             const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: '{}' and '{}'", .{
-                lhs_ty.fmt(sema.mod), rhs_ty.fmt(sema.mod),
+                lhs_ty.fmt(mod), rhs_ty.fmt(mod),
             });
             errdefer msg.destroy(sema.gpa);
             if (lhs_is_vector) {
@@ -20883,7 +21405,8 @@ fn checkVectorizableBinaryOperands(
 
 fn maybeOptionsSrc(sema: *Sema, block: *Block, base_src: LazySrcLoc, wanted: []const u8) LazySrcLoc {
     if (base_src == .unneeded) return .unneeded;
-    return Module.optionsSrc(sema.gpa, sema.mod.declPtr(block.src_decl), base_src, wanted);
+    const mod = sema.mod;
+    return mod.optionsSrc(mod.declPtr(block.src_decl), base_src, wanted);
 }
 
 fn resolveExportOptions(
@@ -20891,7 +21414,10 @@ fn resolveExportOptions(
     block: *Block,
     src: LazySrcLoc,
     zir_ref: Zir.Inst.Ref,
-) CompileError!std.builtin.ExportOptions {
+) CompileError!Module.Export.Options {
+    const mod = sema.mod;
+    const gpa = sema.gpa;
+    const ip = &mod.intern_pool;
     const export_options_ty = try sema.getBuiltinType("ExportOptions");
     const air_ref = try sema.resolveInst(zir_ref);
     const options = try sema.coerce(block, export_options_ty, air_ref, src);
@@ -20901,26 +21427,26 @@ fn resolveExportOptions(
     const section_src = sema.maybeOptionsSrc(block, src, "section");
     const visibility_src = sema.maybeOptionsSrc(block, src, "visibility");
 
-    const name_operand = try sema.fieldVal(block, src, options, "name", name_src);
+    const name_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src);
     const name_val = try sema.resolveConstValue(block, name_src, name_operand, "name of exported value must be comptime-known");
-    const name_ty = Type.initTag(.const_slice_u8);
-    const name = try name_val.toAllocatedBytes(name_ty, sema.arena, sema.mod);
+    const name_ty = Type.slice_const_u8;
+    const name = try name_val.toAllocatedBytes(name_ty, sema.arena, mod);
 
-    const linkage_operand = try sema.fieldVal(block, src, options, "linkage", linkage_src);
+    const linkage_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src);
     const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_operand, "linkage of exported value must be comptime-known");
-    const linkage = linkage_val.toEnum(std.builtin.GlobalLinkage);
+    const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val);
 
-    const section_operand = try sema.fieldVal(block, src, options, "section", section_src);
+    const section_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "section"), section_src);
     const section_opt_val = try sema.resolveConstValue(block, section_src, section_operand, "linksection of exported value must be comptime-known");
-    const section_ty = Type.initTag(.const_slice_u8);
-    const section = if (section_opt_val.optionalValue()) |section_val|
-        try section_val.toAllocatedBytes(section_ty, sema.arena, sema.mod)
+    const section_ty = Type.slice_const_u8;
+    const section = if (section_opt_val.optionalValue(mod)) |section_val|
+        try section_val.toAllocatedBytes(section_ty, sema.arena, mod)
     else
         null;
 
-    const visibility_operand = try sema.fieldVal(block, src, options, "visibility", visibility_src);
+    const visibility_operand = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "visibility"), visibility_src);
     const visibility_val = try sema.resolveConstValue(block, visibility_src, visibility_operand, "visibility of exported value must be comptime-known");
-    const visibility = visibility_val.toEnum(std.builtin.SymbolVisibility);
+    const visibility = mod.toEnum(std.builtin.SymbolVisibility, visibility_val);
 
     if (name.len < 1) {
         return sema.fail(block, name_src, "exported symbol name cannot be empty", .{});
@@ -20932,10 +21458,10 @@ fn resolveExportOptions(
         });
     }
 
-    return std.builtin.ExportOptions{
-        .name = name,
+    return .{
+        .name = try ip.getOrPutString(gpa, name),
         .linkage = linkage,
-        .section = section,
+        .section = try ip.getOrPutStringOpt(gpa, section),
         .visibility = visibility,
     };
 }
@@ -20948,11 +21474,12 @@ fn resolveBuiltinEnum(
     comptime name: []const u8,
     reason: []const u8,
 ) CompileError!@field(std.builtin, name) {
+    const mod = sema.mod;
     const ty = try sema.getBuiltinType(name);
     const air_ref = try sema.resolveInst(zir_ref);
     const coerced = try sema.coerce(block, ty, air_ref, src);
     const val = try sema.resolveConstValue(block, src, coerced, reason);
-    return val.toEnum(@field(std.builtin, name));
+    return mod.toEnum(@field(std.builtin, name), val);
 }
 
 fn resolveAtomicOrder(
@@ -20979,6 +21506,7 @@ fn zirCmpxchg(
     block: *Block,
     extended: Zir.Inst.Extended.InstData,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.Cmpxchg, extended.operand).data;
     const air_tag: Air.Inst.Tag = switch (extended.small) {
         0 => .cmpxchg_weak,
@@ -20996,12 +21524,12 @@ fn zirCmpxchg(
     // zig fmt: on
     const expected_value = try sema.resolveInst(extra.expected_value);
     const elem_ty = sema.typeOf(expected_value);
-    if (elem_ty.zigTypeTag() == .Float) {
+    if (elem_ty.zigTypeTag(mod) == .Float) {
         return sema.fail(
             block,
             elem_ty_src,
             "expected bool, integer, enum, or pointer type; found '{}'",
-            .{elem_ty.fmt(sema.mod)},
+            .{elem_ty.fmt(mod)},
         );
     }
     const uncasted_ptr = try sema.resolveInst(extra.ptr);
@@ -21023,29 +21551,34 @@ fn zirCmpxchg(
         return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{});
     }
 
-    const result_ty = try Type.optional(sema.arena, elem_ty);
+    const result_ty = try Type.optional(sema.arena, elem_ty, mod);
 
     // special case zero bit types
     if ((try sema.typeHasOnePossibleValue(elem_ty)) != null) {
-        return sema.addConstant(result_ty, Value.null);
+        return sema.addConstant(result_ty, (try mod.intern(.{ .opt = .{
+            .ty = result_ty.toIntern(),
+            .val = .none,
+        } })).toValue());
    }
 
     const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
         if (try sema.resolveMaybeUndefVal(expected_value)) |expected_val| {
             if (try sema.resolveMaybeUndefVal(new_value)) |new_val| {
-                if (expected_val.isUndef() or new_val.isUndef()) {
+                if (expected_val.isUndef(mod) or new_val.isUndef(mod)) {
                     // TODO: this should probably cause the memory stored at the pointer
                     // to become undef as well
                     return sema.addConstUndef(result_ty);
                 }
                 const ptr_ty = sema.typeOf(ptr);
                 const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
-                const result_val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: {
-                    try sema.storePtr(block, src, ptr, new_value);
-                    break :blk Value.null;
-                } else try Value.Tag.opt_payload.create(sema.arena, stored_val);
-
-                return sema.addConstant(result_ty, result_val);
+                const result_val = try mod.intern(.{ .opt = .{
+                    .ty = result_ty.toIntern(),
+                    .val = if (stored_val.eql(expected_val, elem_ty, mod)) blk: {
+                        try sema.storePtr(block, src, ptr, new_value);
+                        break :blk .none;
+                    } else stored_val.toIntern(),
+                } });
+                return sema.addConstant(result_ty, result_val.toValue());
             } else break :rs new_value_src;
         } else break :rs expected_src;
     } else ptr_src;
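Reviewer sketch (illustrative, not part of the patch): the zirCmpxchg hunks above now build the optional result through `mod.intern`, with `.none` standing for the null-on-success case; the user-facing contract is unchanged. Assuming a 2023-era toolchain:

    const std = @import("std");

    test "@cmpxchgStrong returns null on success, the stored value on failure" {
        var x: u32 = 5;
        // Succeeds: x was 5, so x becomes 6 and null is returned.
        try std.testing.expect(@cmpxchgStrong(u32, &x, 5, 6, .SeqCst, .SeqCst) == null);
        // Fails: x is now 6; the current value comes back and x is unchanged.
        try std.testing.expectEqual(@as(?u32, 6), @cmpxchgStrong(u32, &x, 5, 7, .SeqCst, .SeqCst));
        try std.testing.expect(x == 6);
    }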
@@ -21069,6 +21602,7 @@ fn zirCmpxchg(
 }
 
 fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const len_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
@@ -21077,17 +21611,13 @@ fn zirSplat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
     const scalar = try sema.resolveInst(extra.rhs);
     const scalar_ty = sema.typeOf(scalar);
     try sema.checkVectorElemType(block, scalar_src, scalar_ty);
-    const vector_ty = try Type.Tag.vector.create(sema.arena, .{
+    const vector_ty = try mod.vectorType(.{
         .len = len,
-        .elem_type = scalar_ty,
+        .child = scalar_ty.toIntern(),
     });
     if (try sema.resolveMaybeUndefVal(scalar)) |scalar_val| {
-        if (scalar_val.isUndef()) return sema.addConstUndef(vector_ty);
-
-        return sema.addConstant(
-            vector_ty,
-            try Value.Tag.repeated.create(sema.arena, scalar_val),
-        );
+        if (scalar_val.isUndef(mod)) return sema.addConstUndef(vector_ty);
+        return sema.addConstant(vector_ty, try sema.splat(vector_ty, scalar_val));
     }
 
     try sema.requireRuntimeBlock(block, inst_data.src(), scalar_src);
@@ -21102,31 +21632,31 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     const operation = try sema.resolveBuiltinEnum(block, op_src, extra.lhs, "ReduceOp", "@reduce operation must be comptime-known");
     const operand = try sema.resolveInst(extra.rhs);
     const operand_ty = sema.typeOf(operand);
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
 
-    if (operand_ty.zigTypeTag() != .Vector) {
-        return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(sema.mod)});
+    if (operand_ty.zigTypeTag(mod) != .Vector) {
+        return sema.fail(block, operand_src, "expected vector, found '{}'", .{operand_ty.fmt(mod)});
     }
 
-    const scalar_ty = operand_ty.childType();
+    const scalar_ty = operand_ty.childType(mod);
 
     // Type-check depending on operation.
     switch (operation) {
-        .And, .Or, .Xor => switch (scalar_ty.zigTypeTag()) {
+        .And, .Or, .Xor => switch (scalar_ty.zigTypeTag(mod)) {
             .Int, .Bool => {},
             else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or boolean operand; found '{}'", .{
-                @tagName(operation), operand_ty.fmt(sema.mod),
+                @tagName(operation), operand_ty.fmt(mod),
             }),
         },
-        .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag()) {
+        .Min, .Max, .Add, .Mul => switch (scalar_ty.zigTypeTag(mod)) {
             .Int, .Float => {},
             else => return sema.fail(block, operand_src, "@reduce operation '{s}' requires integer or float operand; found '{}'", .{
-                @tagName(operation), operand_ty.fmt(sema.mod),
+                @tagName(operation), operand_ty.fmt(mod),
             }),
         },
     }
 
-    const vec_len = operand_ty.vectorLen();
+    const vec_len = operand_ty.vectorLen(mod);
     if (vec_len == 0) {
         // TODO re-evaluate if we should introduce a "neutral value" for some operations,
         // e.g. zero for add and one for mul.
@@ -21134,21 +21664,20 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
     }
 
     if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
-        if (operand_val.isUndef()) return sema.addConstUndef(scalar_ty);
+        if (operand_val.isUndef(mod)) return sema.addConstUndef(scalar_ty);
 
-        var accum: Value = try operand_val.elemValue(sema.mod, sema.arena, 0);
-        var elem_buf: Value.ElemValueBuffer = undefined;
+        var accum: Value = try operand_val.elemValue(mod, 0);
         var i: u32 = 1;
         while (i < vec_len) : (i += 1) {
-            const elem_val = operand_val.elemValueBuffer(sema.mod, i, &elem_buf);
+            const elem_val = try operand_val.elemValue(mod, i);
             switch (operation) {
-                .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, sema.mod),
-                .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, sema.mod),
-                .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, sema.mod),
-                .Min => accum = accum.numberMin(elem_val, target),
-                .Max => accum = accum.numberMax(elem_val, target),
+                .And => accum = try accum.bitwiseAnd(elem_val, scalar_ty, sema.arena, mod),
+                .Or => accum = try accum.bitwiseOr(elem_val, scalar_ty, sema.arena, mod),
+                .Xor => accum = try accum.bitwiseXor(elem_val, scalar_ty, sema.arena, mod),
+                .Min => accum = accum.numberMin(elem_val, mod),
+                .Max => accum = accum.numberMax(elem_val, mod),
                 .Add => accum = try sema.numberAddWrapScalar(accum, elem_val, scalar_ty),
-                .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, sema.mod),
+                .Mul => accum = try accum.numberMulWrap(elem_val, scalar_ty, sema.arena, mod),
             }
         }
         return sema.addConstant(scalar_ty, accum);
@@ -21165,6 +21694,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
 }
 
 fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Shuffle, inst_data.payload_index).data;
     const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -21177,13 +21707,13 @@ fn zirShuffle(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     var mask = try sema.resolveInst(extra.mask);
     var mask_ty = sema.typeOf(mask);
 
-    const mask_len = switch (sema.typeOf(mask).zigTypeTag()) {
-        .Array, .Vector => sema.typeOf(mask).arrayLen(),
+    const mask_len = switch (sema.typeOf(mask).zigTypeTag(mod)) {
+        .Array, .Vector => sema.typeOf(mask).arrayLen(mod),
         else => return sema.fail(block, mask_src, "expected vector or array, found '{}'", .{sema.typeOf(mask).fmt(sema.mod)}),
     };
-    mask_ty = try Type.Tag.vector.create(sema.arena, .{
-        .len = mask_len,
-        .elem_type = Type.i32,
+    mask_ty = try mod.vectorType(.{
+        .len = @intCast(u32, mask_len),
+        .child = .i32_type,
     });
     mask = try sema.coerce(block, mask_ty, mask, mask_src);
     const mask_val = try sema.resolveConstMaybeUndefVal(block, mask_src, mask, "shuffle mask must be comptime-known");
@@ -21200,27 +21730,28 @@ fn analyzeShuffle(
     mask: Value,
     mask_len: u32,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const a_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = src_node };
     const b_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = src_node };
     const mask_src: LazySrcLoc = .{ .node_offset_builtin_call_arg3 = src_node };
     var a = a_arg;
     var b = b_arg;
 
-    const res_ty = try Type.Tag.vector.create(sema.arena, .{
+    const res_ty = try mod.vectorType(.{
         .len = mask_len,
-        .elem_type = elem_ty,
+        .child = elem_ty.toIntern(),
     });
 
-    var maybe_a_len = switch (sema.typeOf(a).zigTypeTag()) {
-        .Array, .Vector => sema.typeOf(a).arrayLen(),
+    var maybe_a_len = switch (sema.typeOf(a).zigTypeTag(mod)) {
+        .Array, .Vector => sema.typeOf(a).arrayLen(mod),
         .Undefined => null,
         else => return sema.fail(block, a_src, "expected vector or array with element type '{}', found '{}'", .{
             elem_ty.fmt(sema.mod),
             sema.typeOf(a).fmt(sema.mod),
         }),
     };
-    var maybe_b_len = switch (sema.typeOf(b).zigTypeTag()) {
-        .Array, .Vector => sema.typeOf(b).arrayLen(),
+    var maybe_b_len = switch (sema.typeOf(b).zigTypeTag(mod)) {
        .Array, .Vector => sema.typeOf(b).arrayLen(mod),
         .Undefined => null,
         else => return sema.fail(block, b_src, "expected vector or array with element type '{}', found '{}'", .{
             elem_ty.fmt(sema.mod),
@@ -21230,16 +21761,16 @@ fn analyzeShuffle(
     if (maybe_a_len == null and maybe_b_len == null) {
         return sema.addConstUndef(res_ty);
     }
-    const a_len = maybe_a_len orelse maybe_b_len.?;
-    const b_len = maybe_b_len orelse a_len;
+    const a_len = @intCast(u32, maybe_a_len orelse maybe_b_len.?);
+    const b_len = @intCast(u32, maybe_b_len orelse a_len);
 
-    const a_ty = try Type.Tag.vector.create(sema.arena, .{
+    const a_ty = try mod.vectorType(.{
         .len = a_len,
-        .elem_type = elem_ty,
+        .child = elem_ty.toIntern(),
     });
-    const b_ty = try Type.Tag.vector.create(sema.arena, .{
+    const b_ty = try mod.vectorType(.{
         .len = b_len,
-        .elem_type = elem_ty,
+        .child = elem_ty.toIntern(),
     });
 
     if (maybe_a_len == null) a = try sema.addConstUndef(a_ty) else a = try sema.coerce(block, a_ty, a, a_src);
@@ -21250,12 +21781,10 @@ fn analyzeShuffle(
         .{ b_len, b_src, b_ty },
     };
 
-    var i: usize = 0;
-    while (i < mask_len) : (i += 1) {
-        var buf: Value.ElemValueBuffer = undefined;
-        const elem = mask.elemValueBuffer(sema.mod, i, &buf);
-        if (elem.isUndef()) continue;
-        const int = elem.toSignedInt(sema.mod.getTarget());
+    for (0..@intCast(usize, mask_len)) |i| {
+        const elem = try mask.elemValue(sema.mod, i);
+        if (elem.isUndef(mod)) continue;
+        const int = elem.toSignedInt(mod);
         var unsigned: u32 = undefined;
         var chosen: u32 = undefined;
         if (int >= 0) {
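Reviewer sketch (illustrative, not part of the patch): zirSplat, zirReduce, and analyzeShuffle above implement the vector builtins below. Note the shuffle convention that a negative mask entry `m` selects element `~m` of `b`, which is exactly the `@intCast(u32, ~int)` in the hunk. Assuming a 2023-era toolchain (two-argument `@splat`):

    const std = @import("std");

    test "splat, reduce, and shuffle" {
        const ones: @Vector(4, u32) = @splat(4, @as(u32, 1));
        try std.testing.expect(@reduce(.Add, ones) == 4);

        const a = @Vector(2, u8){ 10, 20 };
        const b = @Vector(2, u8){ 30, 40 };
        // 0 and 1 pick from `a`; -1 picks b[0] and -2 picks b[1].
        const mask = @Vector(4, i32){ 0, 1, -1, -2 };
        const shuffled = @shuffle(u8, a, b, mask);
        try std.testing.expect(@reduce(.Add, shuffled) == 100);
    }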
@@ -21287,26 +21816,21 @@ fn analyzeShuffle(
     if (try sema.resolveMaybeUndefVal(a)) |a_val| {
         if (try sema.resolveMaybeUndefVal(b)) |b_val| {
-            const values = try sema.arena.alloc(Value, mask_len);
-
-            i = 0;
-            while (i < mask_len) : (i += 1) {
-                var buf: Value.ElemValueBuffer = undefined;
-                const mask_elem_val = mask.elemValueBuffer(sema.mod, i, &buf);
-                if (mask_elem_val.isUndef()) {
-                    values[i] = Value.undef;
+            const values = try sema.arena.alloc(InternPool.Index, mask_len);
+            for (values, 0..) |*value, i| {
+                const mask_elem_val = try mask.elemValue(sema.mod, i);
+                if (mask_elem_val.isUndef(mod)) {
+                    value.* = try mod.intern(.{ .undef = elem_ty.toIntern() });
                     continue;
                 }
-                const int = mask_elem_val.toSignedInt(sema.mod.getTarget());
+                const int = mask_elem_val.toSignedInt(mod);
                 const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int);
-                if (int >= 0) {
-                    values[i] = try a_val.elemValue(sema.mod, sema.arena, unsigned);
-                } else {
-                    values[i] = try b_val.elemValue(sema.mod, sema.arena, unsigned);
-                }
+                values[i] = try (try (if (int >= 0) a_val else b_val).elemValue(mod, unsigned)).intern(elem_ty, mod);
             }
-            const res_val = try Value.Tag.aggregate.create(sema.arena, values);
-            return sema.addConstant(res_ty, res_val);
+            return sema.addConstant(res_ty, (try mod.intern(.{ .aggregate = .{
+                .ty = res_ty.toIntern(),
+                .storage = .{ .elems = values },
+            } })).toValue());
         }
     }
 
@@ -21320,27 +21844,27 @@ fn analyzeShuffle(
         const max_src = if (a_len > b_len) a_src else b_src;
         const max_len = try sema.usizeCast(block, max_src, std.math.max(a_len, b_len));
 
-        const expand_mask_values = try sema.arena.alloc(Value, max_len);
-        i = 0;
-        while (i < min_len) : (i += 1) {
-            expand_mask_values[i] = try Value.Tag.int_u64.create(sema.arena, i);
+        const expand_mask_values = try sema.arena.alloc(InternPool.Index, max_len);
+        for (@intCast(usize, 0)..@intCast(usize, min_len)) |i| {
+            expand_mask_values[i] = (try mod.intValue(Type.comptime_int, i)).toIntern();
         }
-        while (i < max_len) : (i += 1) {
-            expand_mask_values[i] = Value.negative_one;
+        for (@intCast(usize, min_len)..@intCast(usize, max_len)) |i| {
+            expand_mask_values[i] = (try mod.intValue(Type.comptime_int, -1)).toIntern();
         }
-        const expand_mask = try Value.Tag.aggregate.create(sema.arena, expand_mask_values);
+        const expand_mask = try mod.intern(.{ .aggregate = .{
+            .ty = (try mod.vectorType(.{ .len = @intCast(u32, max_len), .child = .comptime_int_type })).toIntern(),
+            .storage = .{ .elems = expand_mask_values },
+        } });
 
         if (a_len < b_len) {
             const undef = try sema.addConstUndef(a_ty);
-            a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask, @intCast(u32, max_len));
+            a = try sema.analyzeShuffle(block, src_node, elem_ty, a, undef, expand_mask.toValue(), @intCast(u32, max_len));
         } else {
             const undef = try sema.addConstUndef(b_ty);
-            b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask, @intCast(u32, max_len));
+            b = try sema.analyzeShuffle(block, src_node, elem_ty, b, undef, expand_mask.toValue(), @intCast(u32, max_len));
         }
     }
 
-    const mask_index = @intCast(u32, sema.air_values.items.len);
-    try sema.air_values.append(sema.gpa, mask);
     return block.addInst(.{
         .tag = .shuffle,
         .data = .{ .ty_pl = .{
@@ -21348,7 +21872,7 @@ fn analyzeShuffle(
             .payload = try block.sema.addExtra(Air.Shuffle{
                 .a = a,
                 .b = b,
-                .mask = mask_index,
+                .mask = mask.toIntern(),
                 .mask_len = mask_len,
             }),
         } },
@@ -21356,6 +21880,7 @@ fn analyzeShuffle(
 }
 
 fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const extra = sema.code.extraData(Zir.Inst.Select, extended.operand).data;
     const src = LazySrcLoc.nodeOffset(extra.node);
 
@@ -21369,16 +21894,22 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
     const pred_uncoerced = try sema.resolveInst(extra.pred);
     const pred_ty = sema.typeOf(pred_uncoerced);
 
-    const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison()) {
-        .Vector, .Array => pred_ty.arrayLen(),
-        else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(sema.mod)}),
+    const vec_len_u64 = switch (try pred_ty.zigTypeTagOrPoison(mod)) {
+        .Vector, .Array => pred_ty.arrayLen(mod),
+        else => return sema.fail(block, pred_src, "expected vector or array, found '{}'", .{pred_ty.fmt(mod)}),
     };
-    const vec_len = try sema.usizeCast(block, pred_src, vec_len_u64);
+    const vec_len = @intCast(u32, try sema.usizeCast(block, pred_src, vec_len_u64));
 
-    const bool_vec_ty = try Type.vector(sema.arena, vec_len, Type.bool);
+    const bool_vec_ty = try mod.vectorType(.{
+        .len = vec_len,
+        .child = .bool_type,
+    });
     const pred = try sema.coerce(block, bool_vec_ty, pred_uncoerced, pred_src);
 
-    const vec_ty = try Type.vector(sema.arena, vec_len, elem_ty);
+    const vec_ty = try mod.vectorType(.{
        .len = vec_len,
+        .child = elem_ty.toIntern(),
+    });
     const a = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.a), a_src);
     const b = try sema.coerce(block, vec_ty, try sema.resolveInst(extra.b), b_src);
 
@@ -21387,45 +21918,40 @@ fn zirSelect(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C
     const maybe_b = try sema.resolveMaybeUndefVal(b);
 
     const runtime_src = if (maybe_pred) |pred_val| rs: {
-        if (pred_val.isUndef()) return sema.addConstUndef(vec_ty);
+        if (pred_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
 
         if (maybe_a) |a_val| {
-            if (a_val.isUndef()) return sema.addConstUndef(vec_ty);
+            if (a_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
 
             if (maybe_b) |b_val| {
-                if (b_val.isUndef()) return sema.addConstUndef(vec_ty);
+                if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
 
-                var buf: Value.ElemValueBuffer = undefined;
-                const elems = try sema.gpa.alloc(Value, vec_len);
+                const elems = try sema.gpa.alloc(InternPool.Index, vec_len);
                 for (elems, 0..) |*elem, i| {
-                    const pred_elem_val = pred_val.elemValueBuffer(sema.mod, i, &buf);
+                    const pred_elem_val = try pred_val.elemValue(mod, i);
                     const should_choose_a = pred_elem_val.toBool();
-                    if (should_choose_a) {
-                        elem.* = a_val.elemValueBuffer(sema.mod, i, &buf);
-                    } else {
-                        elem.* = b_val.elemValueBuffer(sema.mod, i, &buf);
-                    }
+                    elem.* = try (try (if (should_choose_a) a_val else b_val).elemValue(mod, i)).intern(elem_ty, mod);
                 }
 
-                return sema.addConstant(
-                    vec_ty,
-                    try Value.Tag.aggregate.create(sema.arena, elems),
-                );
+                return sema.addConstant(vec_ty, (try mod.intern(.{ .aggregate = .{
+                    .ty = vec_ty.toIntern(),
+                    .storage = .{ .elems = elems },
+                } })).toValue());
             } else {
                 break :rs b_src;
             }
         } else {
             if (maybe_b) |b_val| {
-                if (b_val.isUndef()) return sema.addConstUndef(vec_ty);
+                if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
             }
             break :rs a_src;
         }
     } else rs: {
         if (maybe_a) |a_val| {
-            if (a_val.isUndef()) return sema.addConstUndef(vec_ty);
+            if (a_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
         }
         if (maybe_b) |b_val| {
-            if (b_val.isUndef()) return sema.addConstUndef(vec_ty);
+            if (b_val.isUndef(mod)) return sema.addConstUndef(vec_ty);
         }
         break :rs pred_src;
     };
@@ -21489,6 +22015,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
 }
 
 fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
     const src = inst_data.src();
@@ -21505,7 +22032,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
     const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);
 
-    switch (elem_ty.zigTypeTag()) {
+    switch (elem_ty.zigTypeTag(mod)) {
         .Enum => if (op != .Xchg) {
             return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{});
         },
@@ -21535,8 +22062,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
         break :rs operand_src;
     };
-    if (ptr_val.isComptimeMutablePtr()) {
-        const target = sema.mod.getTarget();
+    if (ptr_val.isComptimeMutablePtr(mod)) {
         const ptr_ty = sema.typeOf(ptr);
         const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
         const new_val = switch (op) {
@@ -21544,12 +22070,12 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
             .Xchg => operand_val,
             .Add  => try sema.numberAddWrapScalar(stored_val, operand_val, elem_ty),
             .Sub  => try sema.numberSubWrapScalar(stored_val, operand_val, elem_ty),
-            .And  => try stored_val.bitwiseAnd   (operand_val, elem_ty, sema.arena, sema.mod),
-            .Nand => try stored_val.bitwiseNand  (operand_val, elem_ty, sema.arena, sema.mod),
-            .Or   => try stored_val.bitwiseOr    (operand_val, elem_ty, sema.arena, sema.mod),
-            .Xor  => try stored_val.bitwiseXor   (operand_val, elem_ty, sema.arena, sema.mod),
-            .Max  => stored_val.numberMax        (operand_val, target),
-            .Min  => stored_val.numberMin        (operand_val, target),
+            .And  => try stored_val.bitwiseAnd   (operand_val, elem_ty, sema.arena, mod),
+            .Nand => try stored_val.bitwiseNand  (operand_val, elem_ty, sema.arena, mod),
+            .Or   => try stored_val.bitwiseOr    (operand_val, elem_ty, sema.arena, mod),
+            .Xor  => try stored_val.bitwiseXor   (operand_val, elem_ty, sema.arena, mod),
+            .Max  => stored_val.numberMax        (operand_val, mod),
+            .Min  => stored_val.numberMin        (operand_val, mod),
             // zig fmt: on
         };
         try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty);
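Reviewer sketch (illustrative, not part of the patch): the zirSelect hunks above fold `@select` at comptime by walking the predicate vector lane by lane, mirroring the runtime behavior. Assuming a 2023-era toolchain:

    const std = @import("std");

    test "@select picks each lane from a or b using a bool vector" {
        const pred = @Vector(4, bool){ true, false, true, false };
        const a = @Vector(4, f32){ 1, 2, 3, 4 };
        const b = @Vector(4, f32){ -1, -2, -3, -4 };
        const c = @select(f32, pred, a, b);
        try std.testing.expect(c[0] == 1 and c[1] == -2 and c[2] == 3 and c[3] == -4);
    }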
const maybe_mulend1 = try sema.resolveMaybeUndefVal(mulend1); const maybe_mulend2 = try sema.resolveMaybeUndefVal(mulend2); const maybe_addend = try sema.resolveMaybeUndefVal(addend); + const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .ComptimeFloat, .Float, .Vector => {}, else => return sema.fail(block, src, "expected vector of floats or float type, found '{}'", .{ty.fmt(sema.mod)}), } const runtime_src = if (maybe_mulend1) |mulend1_val| rs: { if (maybe_mulend2) |mulend2_val| { - if (mulend2_val.isUndef()) return sema.addConstUndef(ty); + if (mulend2_val.isUndef(mod)) return sema.addConstUndef(ty); if (maybe_addend) |addend_val| { - if (addend_val.isUndef()) return sema.addConstUndef(ty); + if (addend_val.isUndef(mod)) return sema.addConstUndef(ty); const result_val = try Value.mulAdd(ty, mulend1_val, mulend2_val, addend_val, sema.arena, sema.mod); return sema.addConstant(ty, result_val); } else { @@ -21642,16 +22169,16 @@ fn zirMulAdd(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. } } else { if (maybe_addend) |addend_val| { - if (addend_val.isUndef()) return sema.addConstUndef(ty); + if (addend_val.isUndef(mod)) return sema.addConstUndef(ty); } break :rs mulend2_src; } } else rs: { if (maybe_mulend2) |mulend2_val| { - if (mulend2_val.isUndef()) return sema.addConstUndef(ty); + if (mulend2_val.isUndef(mod)) return sema.addConstUndef(ty); } if (maybe_addend) |addend_val| { - if (addend_val.isUndef()) return sema.addConstUndef(ty); + if (addend_val.isUndef(mod)) return sema.addConstUndef(ty); } break :rs mulend1_src; }; @@ -21673,6 +22200,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const modifier_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const func_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -21686,7 +22214,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const air_ref = try sema.resolveInst(extra.modifier); const modifier_ref = try sema.coerce(block, modifier_ty, air_ref, modifier_src); const modifier_val = try sema.resolveConstValue(block, modifier_src, modifier_ref, "call modifier must be comptime-known"); - var modifier = modifier_val.toEnum(std.builtin.CallModifier); + var modifier = mod.toEnum(std.builtin.CallModifier, modifier_val); switch (modifier) { // These can be upgraded to comptime or nosuspend calls. .auto, .never_tail, .no_async => { @@ -21732,18 +22260,17 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const args = try sema.resolveInst(extra.args); const args_ty = sema.typeOf(args); - if (!args_ty.isTuple() and args_ty.tag() != .empty_struct_literal) { + if (!args_ty.isTuple(mod) and args_ty.toIntern() != .empty_struct_type) { return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(sema.mod)}); } - var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount()); + var resolved_args: []Air.Inst.Ref = try sema.arena.alloc(Air.Inst.Ref, args_ty.structFieldCount(mod)); for (resolved_args, 0..) 
|*resolved, i| { resolved.* = try sema.tupleFieldValByIndex(block, args_src, args, @intCast(u32, i), args_ty); } const callee_ty = sema.typeOf(func); const func_ty = try sema.checkCallArgumentCount(block, func, func_src, callee_ty, resolved_args.len, false); - const ensure_result_used = extra.flags.ensure_result_used; return sema.analyzeCall(block, func, func_ty, func_src, call_src, modifier, ensure_result_used, resolved_args, null, null); } @@ -21757,19 +22284,21 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const parent_ty = try sema.resolveType(block, ty_src, extra.parent_type); - const field_name = try sema.resolveConstString(block, name_src, extra.field_name, "field name must be comptime-known"); + const field_name = try sema.resolveConstStringIntern(block, name_src, extra.field_name, "field name must be comptime-known"); const field_ptr = try sema.resolveInst(extra.field_ptr); const field_ptr_ty = sema.typeOf(field_ptr); + const mod = sema.mod; + const ip = &mod.intern_pool; - if (parent_ty.zigTypeTag() != .Struct and parent_ty.zigTypeTag() != .Union) { + if (parent_ty.zigTypeTag(mod) != .Struct and parent_ty.zigTypeTag(mod) != .Union) { return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)}); } try sema.resolveTypeLayout(parent_ty); - const field_index = switch (parent_ty.zigTypeTag()) { + const field_index = switch (parent_ty.zigTypeTag(mod)) { .Struct => blk: { - if (parent_ty.isTuple()) { - if (mem.eql(u8, field_name, "len")) { + if (parent_ty.isTuple(mod)) { + if (ip.stringEqlSlice(field_name, "len")) { return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{}); } break :blk try sema.tupleFieldIndex(block, parent_ty, field_name, name_src); @@ -21781,27 +22310,27 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr else => unreachable, }; - if (parent_ty.zigTypeTag() == .Struct and parent_ty.structFieldIsComptime(field_index)) { + if (parent_ty.zigTypeTag(mod) == .Struct and parent_ty.structFieldIsComptime(field_index, mod)) { return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{}); } try sema.checkPtrOperand(block, ptr_src, field_ptr_ty); - const field_ptr_ty_info = field_ptr_ty.ptrInfo().data; + const field_ptr_ty_info = field_ptr_ty.ptrInfo(mod); var ptr_ty_data: Type.Payload.Pointer.Data = .{ - .pointee_type = parent_ty.structFieldType(field_index), + .pointee_type = parent_ty.structFieldType(field_index, mod), .mutable = field_ptr_ty_info.mutable, .@"addrspace" = field_ptr_ty_info.@"addrspace", }; - if (parent_ty.containerLayout() == .Packed) { + if (parent_ty.containerLayout(mod) == .Packed) { return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{}); } else { ptr_ty_data.@"align" = blk: { - if (parent_ty.castTag(.@"struct")) |struct_obj| { - break :blk struct_obj.data.fields.values()[field_index].abi_align; - } else if (parent_ty.cast(Type.Payload.Union)) |union_obj| { - break :blk union_obj.data.fields.values()[field_index].abi_align; + if (mod.typeToStruct(parent_ty)) |struct_obj| { + break :blk struct_obj.fields.values()[field_index].abi_align; + } else if (mod.typeToUnion(parent_ty)) |union_obj| { + break :blk union_obj.fields.values()[field_index].abi_align; } else { break :blk 0; } @@ -21815,19 +22344,24 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) 
CompileEr const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| { - const payload = field_ptr_val.castTag(.field_ptr) orelse { - return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{}); - }; - if (payload.data.field_index != field_index) { + const field = switch (ip.indexToKey(field_ptr_val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .field => |field| field, + else => null, + }, + else => null, + } orelse return sema.fail(block, ptr_src, "pointer value not based on parent struct", .{}); + + if (field.index != field_index) { const msg = msg: { const msg = try sema.errMsg( block, src, - "field '{s}' has index '{d}' but pointer value is index '{d}' of struct '{}'", + "field '{}' has index '{d}' but pointer value is index '{d}' of struct '{}'", .{ - field_name, + field_name.fmt(ip), field_index, - payload.data.field_index, + field.index, parent_ty.fmt(sema.mod), }, ); @@ -21837,7 +22371,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr }; return sema.failWithOwnedErrorMsg(msg); } - return sema.addConstant(result_ptr, payload.data.container_ptr); + return sema.addConstant(result_ptr, field.base.toValue()); } try sema.requireRuntimeBlock(block, src, ptr_src); @@ -21913,15 +22447,14 @@ fn analyzeMinMax( ) CompileError!Air.Inst.Ref { assert(operands.len == operand_srcs.len); assert(operands.len > 0); + const mod = sema.mod; if (operands.len == 1) return operands[0]; - const mod = sema.mod; - const target = mod.getTarget(); const opFunc = switch (air_tag) { .min => Value.numberMin, .max => Value.numberMax, else => unreachable, }; // First, find all comptime-known arguments, and get their min/max @@ -21939,32 +22472,30 @@ fn analyzeMinMax( runtime_known.unset(operand_idx); - if (cur_val.isUndef()) continue; // result is also undef - if (operand_val.isUndef()) { + if (cur_val.isUndef(mod)) continue; // result is also undef + if (operand_val.isUndef(mod)) { cur_minmax = try sema.addConstUndef(simd_op.result_ty); continue; } - try sema.resolveLazyValue(cur_val); - try sema.resolveLazyValue(operand_val); + const resolved_cur_val = try sema.resolveLazyValue(cur_val); + const resolved_operand_val = try sema.resolveLazyValue(operand_val); const vec_len = simd_op.len orelse { - const result_val = opFunc(cur_val, operand_val, target); + const result_val = opFunc(resolved_cur_val, resolved_operand_val, mod); cur_minmax = try sema.addConstant(simd_op.result_ty, result_val); continue; }; - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const elems = try sema.arena.alloc(Value, vec_len); + const elems = try sema.arena.alloc(InternPool.Index, vec_len); for (elems, 0..)
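The zirFieldParentPtr hunks above move the field-pointer inspection onto the InternPool (a `ptr` key with `.addr == .field`), but the user-facing semantics are unchanged. A minimal sketch of what is being analyzed, using the three-argument builtin arity of this era; the `Node` type is illustrative only:

const std = @import("std");

const Node = struct {
    prev: ?*Node = null,
    data: u32 = 0,
};

test "recover a parent pointer from a field pointer" {
    var node: Node = .{ .data = 42 };
    const data_ptr: *u32 = &node.data;
    // Sema checks (at comptime, when the pointer value is known) that
    // `data_ptr` really is the `data` field before folding this.
    const parent = @fieldParentPtr(Node, "data", data_ptr);
    try std.testing.expect(parent == &node);
}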
|*elem, i| { - const lhs_elem_val = cur_val.elemValueBuffer(mod, i, &lhs_buf); - const rhs_elem_val = operand_val.elemValueBuffer(mod, i, &rhs_buf); - elem.* = opFunc(lhs_elem_val, rhs_elem_val, target); + const lhs_elem_val = try resolved_cur_val.elemValue(mod, i); + const rhs_elem_val = try resolved_operand_val.elemValue(mod, i); + elem.* = try opFunc(lhs_elem_val, rhs_elem_val, mod).intern(simd_op.scalar_ty, mod); } - cur_minmax = try sema.addConstant( - simd_op.result_ty, - try Value.Tag.aggregate.create(sema.arena, elems), - ); + cur_minmax = try sema.addConstant(simd_op.result_ty, (try mod.intern(.{ .aggregate = .{ + .ty = simd_op.result_ty.toIntern(), + .storage = .{ .elems = elems }, + } })).toValue()); } else { runtime_known.unset(operand_idx); cur_minmax = try sema.addConstant(sema.typeOf(operand), uncasted_operand_val); @@ -21984,28 +22515,31 @@ fn analyzeMinMax( break :refined orig_ty; } - const refined_ty = if (orig_ty.zigTypeTag() == .Vector) blk: { - const elem_ty = orig_ty.childType(); - const len = orig_ty.vectorLen(); + const refined_ty = if (orig_ty.zigTypeTag(mod) == .Vector) blk: { + const elem_ty = orig_ty.childType(mod); + const len = orig_ty.vectorLen(mod); if (len == 0) break :blk orig_ty; if (elem_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats - var cur_min: Value = try val.elemValue(mod, sema.arena, 0); + var cur_min: Value = try val.elemValue(mod, 0); var cur_max: Value = cur_min; for (1..len) |idx| { - const elem_val = try val.elemValue(mod, sema.arena, idx); - if (elem_val.isUndef()) break :blk orig_ty; // can't refine undef - if (Value.order(elem_val, cur_min, target).compare(.lt)) cur_min = elem_val; - if (Value.order(elem_val, cur_max, target).compare(.gt)) cur_max = elem_val; + const elem_val = try val.elemValue(mod, idx); + if (elem_val.isUndef(mod)) break :blk orig_ty; // can't refine undef + if (Value.order(elem_val, cur_min, mod).compare(.lt)) cur_min = elem_val; + if (Value.order(elem_val, cur_max, mod).compare(.gt)) cur_max = elem_val; } - const refined_elem_ty = try Type.intFittingRange(target, sema.arena, cur_min, cur_max); - break :blk try Type.vector(sema.arena, len, refined_elem_ty); + const refined_elem_ty = try mod.intFittingRange(cur_min, cur_max); + break :blk try mod.vectorType(.{ + .len = len, + .child = refined_elem_ty.toIntern(), + }); } else blk: { if (orig_ty.isAnyFloat()) break :blk orig_ty; // can't refine floats - if (val.isUndef()) break :blk orig_ty; // can't refine undef - break :blk try Type.intFittingRange(target, sema.arena, val, val); + if (val.isUndef(mod)) break :blk orig_ty; // can't refine undef + break :blk try mod.intFittingRange(val, val); }; // Apply the refined type to the current value - this isn't strictly necessary in the @@ -22016,7 +22550,7 @@ fn analyzeMinMax( if (std.debug.runtime_safety) { assert(try sema.intFitsInType(val, refined_ty, null)); } - cur_minmax = try sema.addConstant(refined_ty, val); + cur_minmax = try sema.coerceInMemory(block, val, orig_ty, refined_ty, src); } break :refined refined_ty; @@ -22032,7 +22566,7 @@ fn analyzeMinMax( // If the comptime-known part is undef we can avoid emitting actual instructions later const known_undef = if (cur_minmax) |operand| blk: { const val = (try sema.resolveMaybeUndefVal(operand)).?; - break :blk val.isUndef(); + break :blk val.isUndef(mod); } else false; if (cur_minmax == null) { @@ -22061,29 +22595,32 @@ fn analyzeMinMax( // Finally, refine the type based on the comptime-known bound. 
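For comptime-known vector operands, the loop above now interns one element per lane instead of building an arena-backed aggregate. A small user-level sketch of the behavior being folded:

const std = @import("std");

test "comptime @min on vectors folds elementwise" {
    const a: @Vector(4, i32) = .{ 1, 5, -2, 7 };
    const b: @Vector(4, i32) = .{ 3, 4, 0, 7 };
    const m = @min(a, b); // each lane folded by Value.numberMin
    try std.testing.expect(@reduce(.And, m == @Vector(4, i32){ 1, 4, -2, 7 }));
}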
if (known_undef) break :refine; // can't refine undef const unrefined_ty = sema.typeOf(cur_minmax.?); - const is_vector = unrefined_ty.zigTypeTag() == .Vector; - const comptime_elem_ty = if (is_vector) comptime_ty.childType() else comptime_ty; - const unrefined_elem_ty = if (is_vector) unrefined_ty.childType() else unrefined_ty; + const is_vector = unrefined_ty.zigTypeTag(mod) == .Vector; + const comptime_elem_ty = if (is_vector) comptime_ty.childType(mod) else comptime_ty; + const unrefined_elem_ty = if (is_vector) unrefined_ty.childType(mod) else unrefined_ty; if (unrefined_elem_ty.isAnyFloat()) break :refine; // we can't refine floats // Compute the final bounds based on the runtime type and the comptime-known bound type const min_val = switch (air_tag) { - .min => try unrefined_elem_ty.minInt(sema.arena, target), - .max => try comptime_elem_ty.minInt(sema.arena, target), // @max(ct, rt) >= ct + .min => try unrefined_elem_ty.minInt(mod, unrefined_elem_ty), + .max => try comptime_elem_ty.minInt(mod, comptime_elem_ty), // @max(ct, rt) >= ct else => unreachable, }; const max_val = switch (air_tag) { - .min => try comptime_elem_ty.maxInt(sema.arena, target), // @min(ct, rt) <= ct - .max => try unrefined_elem_ty.maxInt(sema.arena, target), + .min => try comptime_elem_ty.maxInt(mod, comptime_elem_ty), // @min(ct, rt) <= ct + .max => try unrefined_elem_ty.maxInt(mod, unrefined_elem_ty), else => unreachable, }; // Find the smallest type which can contain these bounds - const final_elem_ty = try Type.intFittingRange(target, sema.arena, min_val, max_val); + const final_elem_ty = try mod.intFittingRange(min_val, max_val); const final_ty = if (is_vector) - try Type.vector(sema.arena, unrefined_ty.vectorLen(), final_elem_ty) + try mod.vectorType(.{ + .len = unrefined_ty.vectorLen(mod), + .child = final_elem_ty.toIntern(), + }) else final_elem_ty; @@ -22098,7 +22635,7 @@ fn analyzeMinMax( fn upgradeToArrayPtr(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, len: u64) !Air.Inst.Ref { const mod = sema.mod; - const info = sema.typeOf(ptr).ptrInfo().data; + const info = sema.typeOf(ptr).ptrInfo(mod); if (info.size == .One) { // Already an array pointer. return ptr; @@ -22132,8 +22669,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_len = try indexablePtrLenOrNone(sema, block, dest_src, dest_ptr); const src_len = try indexablePtrLenOrNone(sema, block, src_src, src_ptr); const target = sema.mod.getTarget(); + const mod = sema.mod; - if (dest_ty.isConstPtr()) { + if (dest_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memcpy to constant pointer", .{}); } @@ -22194,9 +22732,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void } const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |dest_ptr_val| rs: { - if (!dest_ptr_val.isComptimeMutablePtr()) break :rs dest_src; + if (!dest_ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src; if (try sema.resolveDefinedValue(block, src_src, src_ptr)) |_| { - const len_u64 = (try len_val.?.getUnsignedIntAdvanced(target, sema)).?; + const len_u64 = (try len_val.?.getUnsignedIntAdvanced(mod, sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); for (0..len) |i| { const elem_index = try sema.addIntUnsigned(Type.usize, i); @@ -22239,12 +22777,12 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void // lowering. The AIR instruction requires pointers with element types of // equal ABI size. 
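The refinement above narrows the result type of `@min`/`@max` via `intFittingRange` over the comptime-known bound and the runtime operand's type. A hedged sketch of the observable effect; since the exact refined width is an implementation detail, the test only checks that narrowing happened:

const std = @import("std");

test "@min result type is refined by a comptime bound" {
    var x: u32 = 123; // runtime-known operand
    const y = @min(x, 10); // result can never exceed 10
    // Refined type must be narrower than u32.
    try std.testing.expect(@typeInfo(@TypeOf(y)).Int.bits < 32);
    try std.testing.expectEqual(@as(@TypeOf(y), 10), y);
}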
- if (dest_ty.zigTypeTag() != .Pointer or src_ty.zigTypeTag() != .Pointer) { + if (dest_ty.zigTypeTag(mod) != .Pointer or src_ty.zigTypeTag(mod) != .Pointer) { return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the source or destination iterable is a tuple", .{}); } - const dest_elem_ty = dest_ty.elemType2(); - const src_elem_ty = src_ty.elemType2(); + const dest_elem_ty = dest_ty.elemType2(mod); + const src_elem_ty = src_ty.elemType2(mod); if (.ok != try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, true, target, dest_src, src_src)) { return sema.fail(block, src, "TODO: lower @memcpy to a for loop because the element types have different ABI sizes", .{}); } @@ -22255,7 +22793,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void var new_dest_ptr = dest_ptr; var new_src_ptr = src_ptr; if (len_val) |val| { - const len = val.toUnsignedInt(target); + const len = val.toUnsignedInt(mod); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known. return; @@ -22268,7 +22806,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void // Change the src from slice to a many pointer, to avoid multiple ptr // slice extractions in AIR instructions. const new_src_ptr_ty = sema.typeOf(new_src_ptr); - if (new_src_ptr_ty.isSlice()) { + if (new_src_ptr_ty.isSlice(mod)) { new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty); } } else if (dest_len == .none and len_val == null) { @@ -22276,7 +22814,7 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_ptr_ptr = try sema.analyzeRef(block, dest_src, new_dest_ptr); new_dest_ptr = try sema.analyzeSlice(block, dest_src, dest_ptr_ptr, .zero, src_len, .none, .unneeded, dest_src, dest_src, dest_src, false); const new_src_ptr_ty = sema.typeOf(new_src_ptr); - if (new_src_ptr_ty.isSlice()) { + if (new_src_ptr_ty.isSlice(mod)) { new_src_ptr = try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty); } } @@ -22295,14 +22833,30 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void // Extract raw pointer from dest slice. The AIR instructions could support them, but // it would cause redundant machine code instructions. 
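The `dest_len`/`src_len` handling above lets one side carry the length while the other is a bare many-pointer. A sketch, assuming the `@memcpy(dest, source)` rules of this release (exactly one operand may omit its length):

const std = @import("std");

test "@memcpy length can come from the destination alone" {
    var src = [_]u8{ 9, 8, 7 };
    var dst: [3]u8 = undefined;
    const src_many: [*]const u8 = &src; // no length in the type
    @memcpy(dst[0..3], src_many); // dest slice supplies the length
    try std.testing.expectEqualSlices(u8, &src, &dst);
}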
const new_dest_ptr_ty = sema.typeOf(new_dest_ptr); - const raw_dest_ptr = if (new_dest_ptr_ty.isSlice()) + const raw_dest_ptr = if (new_dest_ptr_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, dest_src, new_dest_ptr, new_dest_ptr_ty) - else - new_dest_ptr; + else if (new_dest_ptr_ty.ptrSize(mod) == .One) ptr: { + var dest_manyptr_ty_key = mod.intern_pool.indexToKey(new_dest_ptr_ty.toIntern()).ptr_type; + assert(dest_manyptr_ty_key.flags.size == .One); + dest_manyptr_ty_key.child = dest_elem_ty.toIntern(); + dest_manyptr_ty_key.flags.size = .Many; + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(dest_manyptr_ty_key), new_dest_ptr, dest_src); + } else new_dest_ptr; + + const new_src_ptr_ty = sema.typeOf(new_src_ptr); + const raw_src_ptr = if (new_src_ptr_ty.isSlice(mod)) + try sema.analyzeSlicePtr(block, src_src, new_src_ptr, new_src_ptr_ty) + else if (new_src_ptr_ty.ptrSize(mod) == .One) ptr: { + var src_manyptr_ty_key = mod.intern_pool.indexToKey(new_src_ptr_ty.toIntern()).ptr_type; + assert(src_manyptr_ty_key.flags.size == .One); + src_manyptr_ty_key.child = src_elem_ty.toIntern(); + src_manyptr_ty_key.flags.size = .Many; + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(src_manyptr_ty_key), new_src_ptr, src_src); + } else new_src_ptr; // ok1: dest >= src + len // ok2: src >= dest + len - const src_plus_len = try sema.analyzePtrArithmetic(block, src, new_src_ptr, len, .ptr_add, src_src, src); + const src_plus_len = try sema.analyzePtrArithmetic(block, src, raw_src_ptr, len, .ptr_add, src_src, src); const dest_plus_len = try sema.analyzePtrArithmetic(block, src, raw_dest_ptr, len, .ptr_add, dest_src, src); const ok1 = try block.addBinOp(.cmp_gte, raw_dest_ptr, src_plus_len); - const ok2 = try block.addBinOp(.cmp_gte, new_src_ptr, dest_plus_len); + const ok2 = try block.addBinOp(.cmp_gte, raw_src_ptr, dest_plus_len); @@ -22320,6 +22874,9 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void } fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -22330,25 +22887,24 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void const dest_ptr_ty = sema.typeOf(dest_ptr); try checkMemOperand(sema, block, dest_src, dest_ptr_ty); - if (dest_ptr_ty.isConstPtr()) { + if (dest_ptr_ty.isConstPtr(mod)) { return sema.fail(block, dest_src, "cannot memset constant pointer", .{}); } - const dest_elem_ty = dest_ptr_ty.elemType2(); - const target = sema.mod.getTarget(); + const dest_elem_ty = dest_ptr_ty.elemType2(mod); const runtime_src = if (try sema.resolveDefinedValue(block, dest_src, dest_ptr)) |ptr_val| rs: { - const len_air_ref = try sema.fieldVal(block, src, dest_ptr, "len", dest_src); + const len_air_ref = try sema.fieldVal(block, src, dest_ptr, try ip.getOrPutString(gpa, "len"), dest_src); const len_val = (try sema.resolveDefinedValue(block, dest_src, len_air_ref)) orelse break :rs dest_src; - const len_u64 = (try len_val.getUnsignedIntAdvanced(target, sema)).?; + const len_u64 = (try len_val.getUnsignedIntAdvanced(mod, sema)).?; const len = try sema.usizeCast(block, dest_src, len_u64); if (len == 0) { // This AIR instruction guarantees length > 0 if it is comptime-known.
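The runtime safety check built above lowers to `dest >= src + len or src >= dest + len`, i.e. the two regions must not overlap. A user-level sketch; an overlapping pair would trip this check in a safe build:

const std = @import("std");

test "@memcpy arguments must not alias" {
    var buf = [_]u8{ 1, 2, 3, 4, 5, 6 };
    // Disjoint halves of the same array are fine.
    @memcpy(buf[3..6], buf[0..3]);
    try std.testing.expectEqualSlices(u8, &[_]u8{ 1, 2, 3, 1, 2, 3 }, &buf);
}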
return; } - if (!ptr_val.isComptimeMutablePtr()) break :rs dest_src; + if (!ptr_val.isComptimeMutablePtr(mod)) break :rs dest_src; if (try sema.resolveMaybeUndefVal(uncoerced_elem)) |_| { for (0..len) |i| { const elem_index = try sema.addIntUnsigned(Type.usize, i); @@ -22426,6 +22982,7 @@ fn zirVarExtended( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 }; const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 }; @@ -22461,47 +23018,33 @@ fn zirVarExtended( else uncasted_init; - break :blk (try sema.resolveMaybeUndefVal(init)) orelse - return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime-known"); - } else Value.initTag(.unreachable_value); + break :blk ((try sema.resolveMaybeUndefVal(init)) orelse + return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime-known")).toIntern(); + } else .none; try sema.validateVarType(block, ty_src, var_ty, small.is_extern); - const new_var = try sema.gpa.create(Module.Var); - errdefer sema.gpa.destroy(new_var); - - log.debug("created variable {*} owner_decl: {*} ({s})", .{ - new_var, sema.owner_decl, sema.owner_decl.name, - }); - - new_var.* = .{ - .owner_decl = sema.owner_decl_index, + return sema.addConstant(var_ty, (try mod.intern(.{ .variable = .{ + .ty = var_ty.toIntern(), .init = init_val, + .decl = sema.owner_decl_index, + .lib_name = if (lib_name) |lname| (try mod.intern_pool.getOrPutString( + sema.gpa, + try sema.handleExternLibName(block, ty_src, lname), + )).toOptional() else .none, .is_extern = small.is_extern, - .is_mutable = true, .is_threadlocal = small.is_threadlocal, - .is_weak_linkage = false, - .lib_name = null, - }; - - if (lib_name) |lname| { - new_var.lib_name = try sema.handleExternLibName(block, ty_src, lname); - } - - const result = try sema.addConstant( - var_ty, - try Value.Tag.variable.create(sema.arena, new_var), - ); - return result; + } })).toValue()); } fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FuncFancy, inst_data.payload_index); - const target = sema.mod.getTarget(); + const target = mod.getTarget(); const align_src: LazySrcLoc = .{ .node_offset_fn_type_align = inst_data.src_node }; const addrspace_src: LazySrcLoc = .{ .node_offset_fn_type_addrspace = inst_data.src_node }; @@ -22532,10 +23075,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A extra_index += body.len; const val = try sema.resolveGenericBody(block, align_src, body, inst, Type.u29, "alignment must be comptime-known"); - if (val.tag() == .generic_poison) { + if (val.isGenericPoison()) { break :blk null; } - const alignment = @intCast(u32, val.toUnsignedInt(target)); + const alignment = @intCast(u32, val.toUnsignedInt(mod)); try sema.validateAlign(block, align_src, alignment); if (alignment == target_util.defaultFunctionAlignment(target)) { break :blk 0; @@ -22551,7 +23094,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - const alignment = @intCast(u32, align_tv.val.toUnsignedInt(target)); + const alignment = @intCast(u32, 
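zirMemset above resolves the destination's `len` through `fieldVal` and, for comptime-mutable destinations, stores each element at comptime; otherwise it falls back to a runtime loop or AIR instruction. Runtime sketch:

const std = @import("std");

test "@memset fills every element" {
    var buf: [8]u8 = undefined;
    @memset(&buf, 0xaa);
    for (buf) |b| try std.testing.expect(b == 0xaa);
}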
align_tv.val.toUnsignedInt(mod)); try sema.validateAlign(block, align_src, alignment); if (alignment == target_util.defaultFunctionAlignment(target)) { break :blk 0; @@ -22568,10 +23111,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const addrspace_ty = try sema.getBuiltinType("AddressSpace"); const val = try sema.resolveGenericBody(block, addrspace_src, body, inst, addrspace_ty, "addrspace must be comptime-known"); - if (val.tag() == .generic_poison) { + if (val.isGenericPoison()) { break :blk null; } - break :blk val.toEnum(std.builtin.AddressSpace); + break :blk mod.toEnum(std.builtin.AddressSpace, val); } else if (extra.data.bits.has_addrspace_ref) blk: { const addrspace_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; @@ -22581,7 +23124,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - break :blk addrspace_tv.val.toEnum(std.builtin.AddressSpace); + break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val); } else target_util.defaultAddressSpace(target, .function); const @"linksection": FuncLinkSection = if (extra.data.bits.has_section_body) blk: { @@ -22590,16 +23133,16 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body.len; - const ty = Type.initTag(.const_slice_u8); + const ty = Type.slice_const_u8; const val = try sema.resolveGenericBody(block, section_src, body, inst, ty, "linksection must be comptime-known"); - if (val.tag() == .generic_poison) { + if (val.isGenericPoison()) { break :blk FuncLinkSection{ .generic = {} }; } - break :blk FuncLinkSection{ .explicit = try val.toAllocatedBytes(ty, sema.arena, sema.mod) }; + break :blk FuncLinkSection{ .explicit = try val.toIpString(ty, mod) }; } else if (extra.data.bits.has_section_ref) blk: { const section_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; - const section_name = sema.resolveConstString(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) { + const section_name = sema.resolveConstStringIntern(block, section_src, section_ref, "linksection must be comptime-known") catch |err| switch (err) { error.GenericPoison => { break :blk FuncLinkSection{ .generic = {} }; }, @@ -22616,10 +23159,10 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A const cc_ty = try sema.getBuiltinType("CallingConvention"); const val = try sema.resolveGenericBody(block, cc_src, body, inst, cc_ty, "calling convention must be comptime-known"); - if (val.tag() == .generic_poison) { + if (val.isGenericPoison()) { break :blk null; } - break :blk val.toEnum(std.builtin.CallingConvention); + break :blk mod.toEnum(std.builtin.CallingConvention, val); } else if (extra.data.bits.has_cc_ref) blk: { const cc_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; @@ -22629,7 +23172,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A }, else => |e| return e, }; - break :blk cc_tv.val.toEnum(std.builtin.CallingConvention); + break :blk mod.toEnum(std.builtin.CallingConvention, cc_tv.val); } else if (sema.owner_decl.is_exported and has_body) .C else @@ -22642,20 +23185,18 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A extra_index += body.len; const val = try sema.resolveGenericBody(block, ret_src, body, inst,
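zirFuncFancy above resolves the optional `align`, `addrspace`, `linksection` and `callconv` clauses of a function prototype, each of which must be comptime-known (generic poison aside). A sketch of source that exercises these paths; the section name ".hot" is arbitrary:

const std = @import("std");

fn hot() align(16) linksection(".hot") callconv(.C) void {}

test "function attribute clauses are comptime-known" {
    hot();
    try std.testing.expect(@ptrToInt(&hot) % 16 == 0);
}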
Type.type, "return type must be comptime-known"); - var buffer: Value.ToTypeBuffer = undefined; - const ty = try val.toType(&buffer).copy(sema.arena); + const ty = val.toType(); break :blk ty; } else if (extra.data.bits.has_ret_ty_ref) blk: { const ret_ty_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; const ret_ty_tv = sema.resolveInstConst(block, ret_src, ret_ty_ref, "return type must be comptime-known") catch |err| switch (err) { error.GenericPoison => { - break :blk Type.initTag(.generic_poison); + break :blk Type.generic_poison; }, else => |e| return e, }; - var buffer: Value.ToTypeBuffer = undefined; - const ty = try ret_ty_tv.val.toType(&buffer).copy(sema.arena); + const ty = ret_ty_tv.val.toType(); break :blk ty; } else Type.void; @@ -22727,13 +23268,14 @@ fn zirCDefine( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const val_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; const name = try sema.resolveConstString(block, name_src, extra.lhs, "name of macro being undefined must be comptime-known"); const rhs = try sema.resolveInst(extra.rhs); - if (sema.typeOf(rhs).zigTypeTag() != .Void) { + if (sema.typeOf(rhs).zigTypeTag(mod) != .Void) { const value = try sema.resolveConstString(block, val_src, extra.rhs, "value of macro being undefined must be comptime-known"); try block.c_import_buf.?.writer().print("#define {s} {s}\n", .{ name, value }); } else { @@ -22799,27 +23341,29 @@ fn resolvePrefetchOptions( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) CompileError!std.builtin.PrefetchOptions { + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const options_ty = try sema.getBuiltinType("PrefetchOptions"); const options = try sema.coerce(block, options_ty, try sema.resolveInst(zir_ref), src); - const target = sema.mod.getTarget(); const rw_src = sema.maybeOptionsSrc(block, src, "rw"); const locality_src = sema.maybeOptionsSrc(block, src, "locality"); const cache_src = sema.maybeOptionsSrc(block, src, "cache"); - const rw = try sema.fieldVal(block, src, options, "rw", rw_src); + const rw = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "rw"), rw_src); const rw_val = try sema.resolveConstValue(block, rw_src, rw, "prefetch read/write must be comptime-known"); - const locality = try sema.fieldVal(block, src, options, "locality", locality_src); + const locality = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "locality"), locality_src); const locality_val = try sema.resolveConstValue(block, locality_src, locality, "prefetch locality must be comptime-known"); - const cache = try sema.fieldVal(block, src, options, "cache", cache_src); + const cache = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "cache"), cache_src); const cache_val = try sema.resolveConstValue(block, cache_src, cache, "prefetch cache must be comptime-known"); return std.builtin.PrefetchOptions{ - .rw = rw_val.toEnum(std.builtin.PrefetchOptions.Rw), - .locality = @intCast(u2, locality_val.toUnsignedInt(target)), - .cache = cache_val.toEnum(std.builtin.PrefetchOptions.Cache), + .rw = mod.toEnum(std.builtin.PrefetchOptions.Rw, rw_val), + .locality = @intCast(u2, locality_val.toUnsignedInt(mod)), + .cache = mod.toEnum(std.builtin.PrefetchOptions.Cache, cache_val), }; } @@ -22862,34 
+23406,40 @@ fn resolveExternOptions( block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, -) CompileError!std.builtin.ExternOptions { +) CompileError!struct { + name: InternPool.NullTerminatedString, + library_name: InternPool.OptionalNullTerminatedString = .none, + linkage: std.builtin.GlobalLinkage = .Strong, + is_thread_local: bool = false, +} { + const mod = sema.mod; + const gpa = sema.gpa; + const ip = &mod.intern_pool; const options_inst = try sema.resolveInst(zir_ref); const extern_options_ty = try sema.getBuiltinType("ExternOptions"); const options = try sema.coerce(block, extern_options_ty, options_inst, src); - const mod = sema.mod; const name_src = sema.maybeOptionsSrc(block, src, "name"); const library_src = sema.maybeOptionsSrc(block, src, "library"); const linkage_src = sema.maybeOptionsSrc(block, src, "linkage"); const thread_local_src = sema.maybeOptionsSrc(block, src, "thread_local"); - const name_ref = try sema.fieldVal(block, src, options, "name", name_src); + const name_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "name"), name_src); const name_val = try sema.resolveConstValue(block, name_src, name_ref, "name of the extern symbol must be comptime-known"); - const name = try name_val.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod); + const name = try name_val.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); - const library_name_inst = try sema.fieldVal(block, src, options, "library_name", library_src); + const library_name_inst = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "library_name"), library_src); const library_name_val = try sema.resolveConstValue(block, library_src, library_name_inst, "library in which extern symbol is must be comptime-known"); - const linkage_ref = try sema.fieldVal(block, src, options, "linkage", linkage_src); + const linkage_ref = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "linkage"), linkage_src); const linkage_val = try sema.resolveConstValue(block, linkage_src, linkage_ref, "linkage of the extern symbol must be comptime-known"); - const linkage = linkage_val.toEnum(std.builtin.GlobalLinkage); + const linkage = mod.toEnum(std.builtin.GlobalLinkage, linkage_val); - const is_thread_local = try sema.fieldVal(block, src, options, "is_thread_local", thread_local_src); + const is_thread_local = try sema.fieldVal(block, src, options, try ip.getOrPutString(gpa, "is_thread_local"), thread_local_src); const is_thread_local_val = try sema.resolveConstValue(block, thread_local_src, is_thread_local, "threadlocality of the extern symbol must be comptime-known"); - const library_name = if (!library_name_val.isNull()) blk: { - const payload = library_name_val.castTag(.opt_payload).?.data; - const library_name = try payload.toAllocatedBytes(Type.initTag(.const_slice_u8), sema.arena, mod); + const library_name = if (library_name_val.optionalValue(mod)) |payload| blk: { + const library_name = try payload.toAllocatedBytes(Type.slice_const_u8, sema.arena, mod); if (library_name.len == 0) { return sema.fail(block, library_src, "library name cannot be empty", .{}); } @@ -22904,9 +23454,9 @@ fn resolveExternOptions( return sema.fail(block, linkage_src, "extern symbol must use strong or weak linkage", .{}); } - return std.builtin.ExternOptions{ - .name = name, - .library_name = library_name, + return .{ + .name = try ip.getOrPutString(gpa, name), + .library_name = try ip.getOrPutStringOpt(gpa, library_name), .linkage = linkage, .is_thread_local = 
is_thread_local_val.toBool(), }; } @@ -22917,21 +23467,21 @@ fn zirBuiltinExtern( block: *Block, extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node }; const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node }; var ty = try sema.resolveType(block, ty_src, extra.lhs); - if (!ty.isPtrAtRuntime()) { + if (!ty.isPtrAtRuntime(mod)) { return sema.fail(block, ty_src, "expected (optional) pointer", .{}); } - if (!try sema.validateExternType(ty.childType(), .other)) { + if (!try sema.validateExternType(ty.childType(mod), .other)) { const msg = msg: { - const mod = sema.mod; const msg = try sema.errMsg(block, ty_src, "extern symbol cannot have type '{}'", .{ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl), ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, ty_src.toSrcLoc(src_decl, mod), ty, .other); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -22945,52 +23495,51 @@ fn zirBuiltinExtern( else => |e| return e, }; - if (options.linkage == .Weak and !ty.ptrAllowsZero()) { - ty = try Type.optional(sema.arena, ty); + if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) { + ty = try Type.optional(sema.arena, ty, mod); } // TODO check duplicate extern - const new_decl_index = try sema.mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null); - errdefer sema.mod.destroyDecl(new_decl_index); - const new_decl = sema.mod.declPtr(new_decl_index); - new_decl.name = try sema.gpa.dupeZ(u8, options.name); + const new_decl_index = try mod.allocateNewDecl(sema.owner_decl.src_namespace, sema.owner_decl.src_node, null); + errdefer mod.destroyDecl(new_decl_index); + const new_decl = mod.declPtr(new_decl_index); + new_decl.name = options.name; { - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const new_var = try new_decl_arena_allocator.create(Module.Var); - new_var.* = .{ - .owner_decl = sema.owner_decl_index, - .init = Value.initTag(.unreachable_value), + const new_var = try mod.intern(.{ .variable = .{ + .ty = ty.toIntern(), + .init = .none, + .decl = sema.owner_decl_index, .is_extern = true, - .is_mutable = false, + .is_const = true, .is_threadlocal = options.is_thread_local, .is_weak_linkage = options.linkage == .Weak, - .lib_name = null, - }; + } }); new_decl.src_line = sema.owner_decl.src_line; // We only access this decl through the decl_ref with the correct type created // below, so this type doesn't matter - new_decl.ty = Type.initTag(.anyopaque); - new_decl.val = try Value.Tag.variable.create(new_decl_arena_allocator, new_var); + new_decl.ty = ty; + new_decl.val = new_var.toValue(); new_decl.@"align" = 0; - new_decl.@"linksection" = null; + new_decl.@"linksection" = .none; new_decl.has_tv = true; new_decl.analysis = .complete; - new_decl.generation = sema.mod.generation; - - try new_decl.finalizeNewArena(&new_decl_arena); + new_decl.generation = mod.generation; } - try sema.mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); + try mod.declareDeclDependency(sema.owner_decl_index, new_decl_index); try sema.ensureDeclAnalyzed(new_decl_index); - const ref = try Value.Tag.decl_ref.create(sema.arena,
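zirBuiltinExtern above materializes the `@extern` callee as an anonymous extern variable Decl and returns a typed decl pointer. A sketch of the surface syntax it implements; the assumption here is that the test is built with libc linked (e.g. `zig test -lc`) so "strlen" resolves at link time:

const std = @import("std");

// Assumption: libc is linked, so the symbol exists.
const strlen = @extern(*const fn ([*:0]const u8) callconv(.C) usize, .{ .name = "strlen" });

test "@extern yields a typed pointer to the symbol" {
    try std.testing.expectEqual(@as(usize, 5), strlen("hello"));
}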
new_decl_index); - return sema.addConstant(ty, ref); + return sema.addConstant(ty, try mod.getCoerced((try mod.intern(.{ .ptr = .{ + .ty = switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => ty.toIntern(), + .opt_type => |child_type| child_type, + else => unreachable, + }, + .addr = .{ .decl = new_decl_index }, + } })).toValue(), ty)); } fn zirWorkItem( @@ -23073,7 +23622,7 @@ fn validateVarType( const msg = try sema.errMsg(block, src, "extern variable cannot have type '{}'", .{var_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl), var_ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, src.toSrcLoc(src_decl, mod), var_ty, .other); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -23086,8 +23635,8 @@ fn validateVarType( errdefer msg.destroy(sema.gpa); const src_decl = mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl), var_ty); - if (var_ty.zigTypeTag() == .ComptimeInt or var_ty.zigTypeTag() == .ComptimeFloat) { + try sema.explainWhyTypeIsComptime(msg, src.toSrcLoc(src_decl, mod), var_ty); + if (var_ty.zigTypeTag(mod) == .ComptimeInt or var_ty.zigTypeTag(mod) == .ComptimeFloat) { try sema.errNote(block, src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{}); } @@ -23101,8 +23650,9 @@ fn validateRunTimeType( var_ty: Type, is_extern: bool, ) CompileError!bool { + const mod = sema.mod; var ty = var_ty; - while (true) switch (ty.zigTypeTag()) { + while (true) switch (ty.zigTypeTag(mod)) { .Bool, .Int, .Float, @@ -23125,23 +23675,22 @@ fn validateRunTimeType( => return false, .Pointer => { - const elem_ty = ty.childType(); - switch (elem_ty.zigTypeTag()) { + const elem_ty = ty.childType(mod); + switch (elem_ty.zigTypeTag(mod)) { .Opaque => return true, - .Fn => return elem_ty.isFnOrHasRuntimeBits(), + .Fn => return elem_ty.isFnOrHasRuntimeBits(mod), else => ty = elem_ty, } }, .Opaque => return is_extern, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); + const child_ty = ty.optionalChild(mod); return sema.validateRunTimeType(child_ty, is_extern); }, - .Array, .Vector => ty = ty.elemType(), + .Array, .Vector => ty = ty.childType(mod), - .ErrorUnion => ty = ty.errorUnionPayload(), + .ErrorUnion => ty = ty.errorUnionPayload(mod), .Struct, .Union => { const resolved_ty = try sema.resolveTypeFields(ty); @@ -23151,7 +23700,7 @@ fn validateRunTimeType( }; } -const TypeSet = std.HashMapUnmanaged(Type, void, Type.HashContext64, std.hash_map.default_max_load_percentage); +const TypeSet = std.AutoHashMapUnmanaged(InternPool.Index, void); fn explainWhyTypeIsComptime( sema: *Sema, @@ -23174,7 +23723,7 @@ fn explainWhyTypeIsComptimeInner( type_set: *TypeSet, ) CompileError!void { const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Bool, .Int, .Float, @@ -23208,12 +23757,12 @@ fn explainWhyTypeIsComptimeInner( }, .Array, .Vector => { - try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set); + try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set); }, .Pointer => { - const elem_ty = ty.elemType2(); - if (elem_ty.zigTypeTag() == .Fn) { - const fn_info = elem_ty.fnInfo(); + const elem_ty = ty.elemType2(mod); + if (elem_ty.zigTypeTag(mod) == .Fn) { + const fn_info = mod.typeToFunc(elem_ty).?; if (fn_info.is_generic) { try mod.errNoteNonLazy(src_loc, 
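validateVarType/validateRunTimeType above reject comptime-only types for runtime variables, with the extra note emitted for `comptime_int`/`comptime_float`. User-level sketch; the rejected declaration is left commented out with the diagnostics it would produce:

test "comptime-only types cannot back runtime variables" {
    comptime var T: type = u8; // fine: comptime var
    _ = T;
    // var bad: comptime_int = 1;
    // error: variable of type 'comptime_int' must be const or comptime
    // note: to modify this variable at runtime, it must be given an explicit fixed-size number type
}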
msg, "function is generic", .{}); } @@ -23221,29 +23770,27 @@ fn explainWhyTypeIsComptimeInner( .Inline => try mod.errNoteNonLazy(src_loc, msg, "function has inline calling convention", .{}), else => {}, } - if (fn_info.return_type.comptimeOnly()) { + if (fn_info.return_type.toType().comptimeOnly(mod)) { try mod.errNoteNonLazy(src_loc, msg, "function has a comptime-only return type", .{}); } return; } - try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.elemType(), type_set); + try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.childType(mod), type_set); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(&buf), type_set); + try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.optionalChild(mod), type_set); }, .ErrorUnion => { - try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(), type_set); + try sema.explainWhyTypeIsComptimeInner(msg, src_loc, ty.errorUnionPayload(mod), type_set); }, .Struct => { - if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return; + if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return; - if (ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; + if (mod.typeToStruct(ty)) |struct_obj| { for (struct_obj.fields.values(), 0..) |field, i| { - const field_src_loc = struct_obj.fieldSrcLoc(sema.mod, .{ + const field_src_loc = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = i, .range = .type, }); @@ -23258,12 +23805,11 @@ fn explainWhyTypeIsComptimeInner( }, .Union => { - if ((try type_set.getOrPutContext(sema.gpa, ty, .{ .mod = mod })).found_existing) return; + if ((try type_set.getOrPut(sema.gpa, ty.toIntern())).found_existing) return; - if (ty.cast(Type.Payload.Union)) |payload| { - const union_obj = payload.data; + if (mod.typeToUnion(ty)) |union_obj| { for (union_obj.fields.values(), 0..) |field, i| { - const field_src_loc = union_obj.fieldSrcLoc(sema.mod, .{ + const field_src_loc = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = i, .range = .type, }); @@ -23295,7 +23841,8 @@ fn validateExternType( ty: Type, position: ExternPosition, ) !bool { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeFloat, .ComptimeInt, @@ -23313,8 +23860,8 @@ fn validateExternType( .Float, .AnyFrame, => return true, - .Pointer => return !(ty.isSlice() or try sema.typeRequiresComptime(ty)), - .Int => switch (ty.intInfo(sema.mod.getTarget()).bits) { + .Pointer => return !(ty.isSlice(mod) or try sema.typeRequiresComptime(ty)), + .Int => switch (ty.intInfo(mod).bits) { 8, 16, 32, 64, 128 => return true, else => return false, }, @@ -23323,20 +23870,18 @@ fn validateExternType( const target = sema.mod.getTarget(); // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI. // The goal is to experiment with more integrated CPU/GPU code. 
- if (ty.fnCallingConvention() == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) { + if (ty.fnCallingConvention(mod) == .Kernel and (target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64)) { return true; } - return !Type.fnCallingConventionAllowsZigTypes(target, ty.fnCallingConvention()); + return !target_util.fnCallConvAllowsZigTypes(target, ty.fnCallingConvention(mod)); }, .Enum => { - var buf: Type.Payload.Bits = undefined; - return sema.validateExternType(ty.intTagType(&buf), position); + return sema.validateExternType(ty.intTagType(mod), position); }, - .Struct, .Union => switch (ty.containerLayout()) { + .Struct, .Union => switch (ty.containerLayout(mod)) { .Extern => return true, .Packed => { - const target = sema.mod.getTarget(); - const bit_size = try ty.bitSizeAdvanced(target, sema); + const bit_size = try ty.bitSizeAdvanced(mod, sema); switch (bit_size) { 8, 16, 32, 64, 128 => return true, else => return false, @@ -23346,10 +23891,10 @@ fn validateExternType( }, .Array => { if (position == .ret_ty or position == .param_ty) return false; - return sema.validateExternType(ty.elemType2(), .element); + return sema.validateExternType(ty.elemType2(mod), .element); }, - .Vector => return sema.validateExternType(ty.elemType2(), .element), - .Optional => return ty.isPtrLikeOptional(), + .Vector => return sema.validateExternType(ty.elemType2(mod), .element), + .Optional => return ty.isPtrLikeOptional(mod), } } @@ -23361,7 +23906,7 @@ fn explainWhyTypeIsNotExtern( position: ExternPosition, ) CompileError!void { const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Opaque, .Bool, .Float, @@ -23380,17 +23925,17 @@ fn explainWhyTypeIsNotExtern( => return, .Pointer => { - if (ty.isSlice()) { + if (ty.isSlice(mod)) { try mod.errNoteNonLazy(src_loc, msg, "slices have no guaranteed in-memory representation", .{}); } else { - const pointee_ty = ty.childType(); + const pointee_ty = ty.childType(mod); try mod.errNoteNonLazy(src_loc, msg, "pointer to comptime-only type '{}'", .{pointee_ty.fmt(sema.mod)}); try sema.explainWhyTypeIsComptime(msg, src_loc, pointee_ty); } }, .Void => try mod.errNoteNonLazy(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}), .NoReturn => try mod.errNoteNonLazy(src_loc, msg, "'noreturn' is only allowed as a return type", .{}), - .Int => if (!std.math.isPowerOfTwo(ty.intInfo(sema.mod.getTarget()).bits)) { + .Int => if (!std.math.isPowerOfTwo(ty.intInfo(mod).bits)) { try mod.errNoteNonLazy(src_loc, msg, "only integers with power of two bits are extern compatible", .{}); } else { try mod.errNoteNonLazy(src_loc, msg, "only integers with 8, 16, 32, 64 and 128 bits are extern compatible", .{}); @@ -23401,7 +23946,7 @@ fn explainWhyTypeIsNotExtern( try mod.errNoteNonLazy(src_loc, msg, "use '*const ' to make a function pointer type", .{}); return; } - switch (ty.fnCallingConvention()) { + switch (ty.fnCallingConvention(mod)) { .Unspecified => try mod.errNoteNonLazy(src_loc, msg, "extern function must specify calling convention", .{}), .Async => try mod.errNoteNonLazy(src_loc, msg, "async function cannot be extern", .{}), .Inline => try mod.errNoteNonLazy(src_loc, msg, "inline function cannot be extern", .{}), @@ -23409,8 +23954,7 @@ fn explainWhyTypeIsNotExtern( } }, .Enum => { - var buf: Type.Payload.Bits = undefined; - const tag_ty = ty.intTagType(&buf); + const tag_ty = ty.intTagType(mod); try mod.errNoteNonLazy(src_loc, msg, "enum tag type '{}' is not extern compatible", 
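A sketch of the rules validateExternType encodes: fixed power-of-two-width integers, non-slice pointers, and pointer-like optionals pass, while slices and odd-width integers are rejected (with the notes produced by explainWhyTypeIsNotExtern):

const CCompatible = extern struct {
    count: u32, // 8/16/32/64/128-bit ints are extern compatible
    next: ?*anyopaque, // pointer-like optional: allowed
    // name: []const u8, // error: slices have no guaranteed in-memory representation
    // odd: u7, // error: only integers with power of two bits are extern compatible
};

test {
    _ = CCompatible;
}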
.{tag_ty.fmt(sema.mod)}); try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position); }, @@ -23422,17 +23966,17 @@ fn explainWhyTypeIsNotExtern( } else if (position == .param_ty) { return mod.errNoteNonLazy(src_loc, msg, "arrays are not allowed as a parameter type", .{}); } - try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(), .element); + try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element); }, - .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(), .element), + .Vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.elemType2(mod), .element), .Optional => try mod.errNoteNonLazy(src_loc, msg, "only pointer like optionals are extern compatible", .{}), } } /// Returns true if `ty` is allowed in packed types. /// Does *NOT* require `ty` to be resolved in any way. -fn validatePackedType(ty: Type) bool { - switch (ty.zigTypeTag()) { +fn validatePackedType(ty: Type, mod: *Module) bool { + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeFloat, .ComptimeInt, @@ -23448,7 +23992,7 @@ fn validatePackedType(ty: Type) bool { .Fn, .Array, => return false, - .Optional => return ty.isPtrLikeOptional(), + .Optional => return ty.isPtrLikeOptional(mod), .Void, .Bool, .Float, @@ -23456,8 +24000,8 @@ fn validatePackedType(ty: Type) bool { .Vector, .Enum, => return true, - .Pointer => return !ty.isSlice(), - .Struct, .Union => return ty.containerLayout() == .Packed, + .Pointer => return !ty.isSlice(mod), + .Struct, .Union => return ty.containerLayout(mod) == .Packed, } } @@ -23468,7 +24012,7 @@ fn explainWhyTypeIsNotPacked( ty: Type, ) CompileError!void { const mod = sema.mod; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Void, .Bool, .Float, @@ -23616,7 +24160,6 @@ fn panicWithMsg( msg_inst: Air.Inst.Ref, ) !void { const mod = sema.mod; - const arena = sema.arena; if (!mod.backendSupportsFeature(.panic_fn)) { _ = try block.addNoOp(.trap); @@ -23626,16 +24169,24 @@ fn panicWithMsg( const unresolved_stack_trace_ty = try sema.getBuiltinType("StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(unresolved_stack_trace_ty); const target = mod.getTarget(); - const ptr_stack_trace_ty = try Type.ptr(arena, mod, .{ - .pointee_type = stack_trace_ty, - .@"addrspace" = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic + const ptr_stack_trace_ty = try mod.ptrType(.{ + .child = stack_trace_ty.toIntern(), + .flags = .{ + .address_space = target_util.defaultAddressSpace(target, .global_constant), // TODO might need a place that is more dynamic + }, }); - const null_stack_trace = try sema.addConstant( - try Type.optional(arena, ptr_stack_trace_ty), - Value.null, - ); - const args: [3]Air.Inst.Ref = .{ msg_inst, null_stack_trace, .null_value }; - try sema.callBuiltin(block, panic_fn, .auto, &args); + const opt_ptr_stack_trace_ty = try mod.optionalType(ptr_stack_trace_ty.toIntern()); + const null_stack_trace = try sema.addConstant(opt_ptr_stack_trace_ty, (try mod.intern(.{ .opt = .{ + .ty = opt_ptr_stack_trace_ty.toIntern(), + .val = .none, + } })).toValue()); + + const opt_usize_ty = try mod.optionalType(.usize_type); + const null_ret_addr = try sema.addConstant(opt_usize_ty, (try mod.intern(.{ .opt = .{ + .ty = opt_usize_ty.toIntern(), + .val = .none, + } })).toValue()); + try sema.callBuiltin(block, panic_fn, .auto, &.{ msg_inst, null_stack_trace, null_ret_addr }); } fn panicUnwrapError( @@ -23694,20 +24245,6 @@ fn panicIndexOutOfBounds( try 
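validatePackedType above mirrors these user-facing rules for `packed struct` fields; a sketch (field names are illustrative):

const Flags = packed struct {
    ready: bool, // 1 bit
    kind: u3,
    ptr: ?*u8, // pointer-like optional: allowed
    // items: [2]u8, // error: arrays are not allowed in packed types
    // name: []u8, // error: slices are not allowed in packed types
};

test {
    _ = Flags;
}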
sema.safetyCheckFormatted(parent_block, ok, "panicOutOfBounds", &.{ index, len }); } -fn panicStartGreaterThanEnd( - sema: *Sema, - parent_block: *Block, - start: Air.Inst.Ref, - end: Air.Inst.Ref, -) !void { - assert(!parent_block.is_comptime); - const ok = try parent_block.addBinOp(.cmp_lte, start, end); - if (!sema.mod.comp.formatted_panics) { - return sema.addSafetyCheck(parent_block, ok, .start_index_greater_than_end); - } - try sema.safetyCheckFormatted(parent_block, ok, "panicStartGreaterThanEnd", &.{ start, end }); -} - fn panicInactiveUnionField( sema: *Sema, parent_block: *Block, @@ -23731,11 +24268,12 @@ fn panicSentinelMismatch( sentinel_index: Air.Inst.Ref, ) !void { assert(!parent_block.is_comptime); + const mod = sema.mod; const expected_sentinel_val = maybe_sentinel orelse return; const expected_sentinel = try sema.addConstant(sentinel_ty, expected_sentinel_val); const ptr_ty = sema.typeOf(ptr); - const actual_sentinel = if (ptr_ty.isSlice()) + const actual_sentinel = if (ptr_ty.isSlice(mod)) try parent_block.addBinOp(.slice_elem_val, ptr, sentinel_index) else blk: { const elem_ptr_ty = try sema.elemPtrType(ptr_ty, null); @@ -23743,7 +24281,7 @@ fn panicSentinelMismatch( break :blk try parent_block.addTyOp(.load, sentinel_ty, sentinel_ptr); }; - const ok = if (sentinel_ty.zigTypeTag() == .Vector) ok: { + const ok = if (sentinel_ty.zigTypeTag(mod) == .Vector) ok: { const eql = try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq); break :ok try parent_block.addInst(.{ @@ -23753,7 +24291,7 @@ fn panicSentinelMismatch( .operation = .And, } }, }); - } else if (sentinel_ty.isSelfComparable(true)) + } else if (sentinel_ty.isSelfComparable(mod, true)) try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel) else { const panic_fn = try sema.getBuiltin("checkNonScalarSentinel"); @@ -23805,12 +24343,14 @@ fn safetyPanic( block: *Block, panic_id: PanicId, ) CompileError!void { + const mod = sema.mod; + const gpa = sema.gpa; const panic_messages_ty = try sema.getBuiltinType("panic_messages"); const msg_decl_index = (try sema.namespaceLookup( block, sema.src, - panic_messages_ty.getNamespace().?, - @tagName(panic_id), + panic_messages_ty.getNamespaceIndex(mod).unwrap().?, + try mod.intern_pool.getOrPutString(gpa, @tagName(panic_id)), )).?; const msg_inst = try sema.analyzeDeclVal(block, sema.src, msg_decl_index); @@ -23842,37 +24382,38 @@ fn fieldVal( block: *Block, src: LazySrcLoc, object: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { // When editing this function, note that there is corresponding logic to be edited // in `fieldPtr`. This function takes a value and returns a value. - const arena = sema.arena; + const mod = sema.mod; + const ip = &mod.intern_pool; const object_src = src; // TODO better source location const object_ty = sema.typeOf(object); // Zig allows dereferencing a single pointer during field lookup. Note that // we don't actually need to generate the dereference some field lookups, like the // length of arrays and other comptime operations. 
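panicWithMsg above now passes an explicit null return address as a third argument, so the arguments it builds line up with the root panic handler's signature in this era: message, optional error return trace, optional return address. A sketch of a custom handler with that shape; it must live in the root source file to take effect:

const std = @import("std");

pub fn panic(msg: []const u8, trace: ?*std.builtin.StackTrace, ret_addr: ?usize) noreturn {
    _ = msg;
    _ = trace;
    _ = ret_addr;
    // A real handler would report `msg` somewhere target-appropriate.
    while (true) {}
}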
- const is_pointer_to = object_ty.isSinglePointer(); + const is_pointer_to = object_ty.isSinglePointer(mod); const inner_ty = if (is_pointer_to) - object_ty.childType() + object_ty.childType(mod) else object_ty; - switch (inner_ty.zigTypeTag()) { + switch (inner_ty.zigTypeTag(mod)) { .Array => { - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { return sema.addConstant( Type.usize, - try Value.Tag.int_u64.create(arena, inner_ty.arrayLen()), + try mod.intValue(Type.usize, inner_ty.arrayLen(mod)), ); - } else if (mem.eql(u8, field_name, "ptr") and is_pointer_to) { - const ptr_info = object_ty.ptrInfo().data; - const result_ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = ptr_info.pointee_type.childType(), + } else if (ip.stringEqlSlice(field_name, "ptr") and is_pointer_to) { + const ptr_info = object_ty.ptrInfo(mod); + const result_ty = try Type.ptr(sema.arena, mod, .{ + .pointee_type = ptr_info.pointee_type.childType(mod), .sentinel = ptr_info.sentinel, .@"align" = ptr_info.@"align", .@"addrspace" = ptr_info.@"addrspace", @@ -23889,21 +24430,21 @@ fn fieldVal( return sema.fail( block, field_name_src, - "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + "no member named '{}' in '{}'", + .{ field_name.fmt(ip), object_ty.fmt(mod) }, ); } }, .Pointer => { - const ptr_info = inner_ty.ptrInfo().data; + const ptr_info = inner_ty.ptrInfo(mod); if (ptr_info.size == .Slice) { - if (mem.eql(u8, field_name, "ptr")) { + if (ip.stringEqlSlice(field_name, "ptr")) { const slice = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else object; return sema.analyzeSlicePtr(block, object_src, slice, inner_ty); - } else if (mem.eql(u8, field_name, "len")) { + } else if (ip.stringEqlSlice(field_name, "len")) { const slice = if (is_pointer_to) try sema.analyzeLoad(block, src, object, object_src) else @@ -23913,8 +24454,8 @@ fn fieldVal( return sema.fail( block, field_name_src, - "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + "no member named '{}' in '{}'", + .{ field_name.fmt(ip), object_ty.fmt(mod) }, ); } } @@ -23926,66 +24467,74 @@ fn fieldVal( object; const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?; - var to_type_buffer: Value.ToTypeBuffer = undefined; - const child_type = val.toType(&to_type_buffer); + const child_type = val.toType(); - switch (try child_type.zigTypeTagOrPoison()) { + switch (try child_type.zigTypeTagOrPoison(mod)) { .ErrorSet => { - const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { - if (payload.data.names.getEntry(field_name)) |entry| { - break :blk entry.key_ptr.*; - } - const msg = msg: { - const msg = try sema.errMsg(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(sema.mod), - }); - errdefer msg.destroy(sema.gpa); - try sema.addDeclaredHereNote(msg, child_type); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); - } else (try sema.mod.getErrorValue(field_name)).key; + switch (ip.indexToKey(child_type.toIntern())) { + .error_set_type => |error_set_type| blk: { + if (error_set_type.nameIndex(ip, field_name) != null) break :blk; + const msg = msg: { + const msg = try sema.errMsg(block, src, "no error named '{}' in '{}'", .{ + field_name.fmt(ip), child_type.fmt(mod), + }); + errdefer msg.destroy(sema.gpa); + try sema.addDeclaredHereNote(msg, child_type); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(msg); + }, + .inferred_error_set_type => { + return 
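fieldVal above services `.len`/`.ptr` member access on arrays, single pointers to arrays, and slices without materializing a dereference where it can avoid one. Sketch:

const std = @import("std");

test "len and ptr members resolved by fieldVal" {
    const arr = [_]u8{ 1, 2, 3 };
    try std.testing.expect(arr.len == 3); // array "len": comptime constant
    const s: []const u8 = &arr;
    try std.testing.expect(s.len == 3); // slice "len"
    try std.testing.expect(s.ptr == @as([*]const u8, &arr)); // slice "ptr"
}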
sema.fail(block, src, "TODO handle inferred error sets here", .{}); + }, + .simple_type => |t| { + assert(t == .anyerror); + _ = try mod.getErrorValue(field_name); + }, + else => unreachable, + } - return sema.addConstant( - if (!child_type.isAnyError()) - try child_type.copy(arena) - else - try Type.Tag.error_set_single.create(arena, name), - try Value.Tag.@"error".create(arena, .{ .name = name }), - ); + const error_set_type = if (!child_type.isAnyError(mod)) + child_type + else + try mod.singleErrorSetType(field_name); + return sema.addConstant(error_set_type, (try mod.intern(.{ .err = .{ + .ty = error_set_type.toIntern(), + .name = field_name, + } })).toValue()); }, .Union => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } } const union_ty = try sema.resolveTypeFields(child_type); - if (union_ty.unionTagType()) |enum_ty| { - if (enum_ty.enumFieldIndex(field_name)) |field_index_usize| { + if (union_ty.unionTagType(mod)) |enum_ty| { + if (enum_ty.enumFieldIndex(field_name, mod)) |field_index_usize| { const field_index = @intCast(u32, field_index_usize); return sema.addConstant( enum_ty, - try Value.Tag.enum_field_index.create(sema.arena, field_index), + try mod.enumValueFieldIndex(enum_ty, field_index), ); } } return sema.failWithBadMemberAccess(block, union_ty, field_name_src, field_name); }, .Enum => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } } - const field_index_usize = child_type.enumFieldIndex(field_name) orelse + const field_index_usize = child_type.enumFieldIndex(field_name, mod) orelse return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); const field_index = @intCast(u32, field_index_usize); - const enum_val = try Value.Tag.enum_field_index.create(arena, field_index); - return sema.addConstant(try child_type.copy(arena), enum_val); + const enum_val = try mod.enumValueFieldIndex(child_type, field_index); + return sema.addConstant(child_type, enum_val); }, .Struct, .Opaque => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| { return inst; } @@ -23994,10 +24543,10 @@ fn fieldVal( }, else => { const msg = msg: { - const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}); + const msg = try sema.errMsg(block, src, "type '{}' has no members", .{child_type.fmt(mod)}); errdefer msg.destroy(sema.gpa); - if (child_type.isSlice()) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{}); - if (child_type.zigTypeTag() == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{}); + if (child_type.isSlice(mod)) try sema.errNote(block, src, msg, "slice values have 'len' and 'ptr' members", .{}); + if (child_type.zigTypeTag(mod) == .Array) try sema.errNote(block, src, msg, "array values have 'len' member", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -24028,50 +24577,52 @@ fn fieldPtr( block: *Block, src: LazySrcLoc, object_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, initializing: bool, ) CompileError!Air.Inst.Ref { // When editing 
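The `.ErrorSet`, `.Union` and `.Enum` branches above resolve member access performed on a type (for example `E.NotFound`), now keyed through the InternPool. Sketch, using the pre-rename `@enumToInt` builtin consistent with the rest of this diff:

const std = @import("std");

const E = error{ NotFound, Busy };
const Color = enum { red, green };

test "member access on error set and enum types" {
    const e: E = E.NotFound; // error-set member -> error value
    try std.testing.expect(e == error.NotFound);
    const c = Color.green; // enum field -> enum value
    try std.testing.expect(@enumToInt(c) == 1);
}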
this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. + const mod = sema.mod; + const ip = &mod.intern_pool; const object_ptr_src = src; // TODO better source location const object_ptr_ty = sema.typeOf(object_ptr); - const object_ty = switch (object_ptr_ty.zigTypeTag()) { - .Pointer => object_ptr_ty.elemType(), - else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(sema.mod)}), + const object_ty = switch (object_ptr_ty.zigTypeTag(mod)) { + .Pointer => object_ptr_ty.childType(mod), + else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty.fmt(mod)}), }; // Zig allows dereferencing a single pointer during field lookup. Note that // we don't actually need to generate the dereference some field lookups, like the // length of arrays and other comptime operations. - const is_pointer_to = object_ty.isSinglePointer(); + const is_pointer_to = object_ty.isSinglePointer(mod); const inner_ty = if (is_pointer_to) - object_ty.childType() + object_ty.childType(mod) else object_ty; - switch (inner_ty.zigTypeTag()) { + switch (inner_ty.zigTypeTag(mod)) { .Array => { - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) { var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( Type.usize, - try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen()), + try mod.intValue(Type.usize, inner_ty.arrayLen(mod)), 0, // default alignment )); } else { return sema.fail( block, field_name_src, - "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + "no member named '{}' in '{}'", + .{ field_name.fmt(ip), object_ty.fmt(mod) }, ); } }, - .Pointer => if (inner_ty.isSlice()) { + .Pointer => if (inner_ty.isSlice(mod)) { const inner_ptr = if (is_pointer_to) try sema.analyzeLoad(block, src, object_ptr, object_ptr_src) else @@ -24079,47 +24630,44 @@ fn fieldPtr( const attr_ptr_ty = if (is_pointer_to) object_ty else object_ptr_ty; - if (mem.eql(u8, field_name, "ptr")) { - const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); - const slice_ptr_ty = inner_ty.slicePtrFieldType(buf); + if (ip.stringEqlSlice(field_name, "ptr")) { + const slice_ptr_ty = inner_ty.slicePtrFieldType(mod); - const result_ty = try Type.ptr(sema.arena, sema.mod, .{ + const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = slice_ptr_ty, - .mutable = attr_ptr_ty.ptrIsMutable(), - .@"volatile" = attr_ptr_ty.isVolatilePtr(), - .@"addrspace" = attr_ptr_ty.ptrAddressSpace(), + .mutable = attr_ptr_ty.ptrIsMutable(mod), + .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), + .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return sema.addConstant( - result_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = val, - .container_ty = inner_ty, - .field_index = Value.Payload.Slice.ptr_index, - }), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ + .ty = result_ty.toIntern(), + .addr = .{ .field = .{ + .base = val.toIntern(), + .index = Value.slice_ptr_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr); - } else if (mem.eql(u8, field_name, "len")) { - const result_ty = try Type.ptr(sema.arena, sema.mod, .{ + } else if 
(ip.stringEqlSlice(field_name, "len")) { + const result_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = Type.usize, - .mutable = attr_ptr_ty.ptrIsMutable(), - .@"volatile" = attr_ptr_ty.isVolatilePtr(), - .@"addrspace" = attr_ptr_ty.ptrAddressSpace(), + .mutable = attr_ptr_ty.ptrIsMutable(mod), + .@"volatile" = attr_ptr_ty.isVolatilePtr(mod), + .@"addrspace" = attr_ptr_ty.ptrAddressSpace(mod), }); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - return sema.addConstant( - result_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = val, - .container_ty = inner_ty, - .field_index = Value.Payload.Slice.len_index, - }), - ); + return sema.addConstant(result_ty, (try mod.intern(.{ .ptr = .{ + .ty = result_ty.toIntern(), + .addr = .{ .field = .{ + .base = val.toIntern(), + .index = Value.slice_len_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24128,8 +24676,8 @@ fn fieldPtr( return sema.fail( block, field_name_src, - "no member named '{s}' in '{}'", - .{ field_name, object_ty.fmt(sema.mod) }, + "no member named '{}' in '{}'", + .{ field_name.fmt(ip), object_ty.fmt(mod) }, ); } }, @@ -24142,47 +24690,59 @@ fn fieldPtr( result; const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?; - var to_type_buffer: Value.ToTypeBuffer = undefined; - const child_type = val.toType(&to_type_buffer); + const child_type = val.toType(); - switch (child_type.zigTypeTag()) { + switch (child_type.zigTypeTag(mod)) { .ErrorSet => { - // TODO resolve inferred error sets - const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: { - if (payload.data.names.getEntry(field_name)) |entry| { - break :blk entry.key_ptr.*; - } - return sema.fail(block, src, "no error named '{s}' in '{}'", .{ - field_name, child_type.fmt(sema.mod), - }); - } else (try sema.mod.getErrorValue(field_name)).key; + switch (ip.indexToKey(child_type.toIntern())) { + .error_set_type => |error_set_type| blk: { + if (error_set_type.nameIndex(ip, field_name) != null) { + break :blk; + } + return sema.fail(block, src, "no error named '{}' in '{}'", .{ + field_name.fmt(ip), child_type.fmt(mod), + }); + }, + .inferred_error_set_type => { + return sema.fail(block, src, "TODO handle inferred error sets here", .{}); + }, + .simple_type => |t| { + assert(t == .anyerror); + _ = try mod.getErrorValue(field_name); + }, + else => unreachable, + } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); + const error_set_type = if (!child_type.isAnyError(mod)) + child_type + else + try mod.singleErrorSetType(field_name); return sema.analyzeDeclRef(try anon_decl.finish( - if (!child_type.isAnyError()) - try child_type.copy(anon_decl.arena()) - else - try Type.Tag.error_set_single.create(anon_decl.arena(), name), - try Value.Tag.@"error".create(anon_decl.arena(), .{ .name = name }), + error_set_type, + (try mod.intern(.{ .err = .{ + .ty = error_set_type.toIntern(), + .name = field_name, + } })).toValue(), 0, // default alignment )); }, .Union => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } const union_ty = try sema.resolveTypeFields(child_type); - if (union_ty.unionTagType()) |enum_ty| { - if (enum_ty.enumFieldIndex(field_name)) |field_index| { + if (union_ty.unionTagType(mod)) |enum_ty| { + if (enum_ty.enumFieldIndex(field_name, mod)) |field_index| { const 
field_index_u32 = @intCast(u32, field_index); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try enum_ty.copy(anon_decl.arena()), - try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), + enum_ty, + try mod.enumValueFieldIndex(enum_ty, field_index_u32), 0, // default alignment )); } @@ -24190,32 +24750,32 @@ fn fieldPtr( return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, .Enum => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } - const field_index = child_type.enumFieldIndex(field_name) orelse { + const field_index = child_type.enumFieldIndex(field_name, mod) orelse { return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }; const field_index_u32 = @intCast(u32, field_index); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try child_type.copy(anon_decl.arena()), - try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32), + child_type, + try mod.enumValueFieldIndex(child_type, field_index_u32), 0, // default alignment )); }, .Struct, .Opaque => { - if (child_type.getNamespace()) |namespace| { + if (child_type.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| { return inst; } } return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }, - else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(sema.mod)}), + else => return sema.fail(block, src, "type '{}' has no members", .{child_type.fmt(mod)}), } }, .Struct => { @@ -24252,22 +24812,24 @@ fn fieldCallBind( block: *Block, src: LazySrcLoc, raw_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!ResolvedFieldCallee { // When editing this function, note that there is corresponding logic to be edited // in `fieldVal`. This function takes a pointer and returns a pointer. + const mod = sema.mod; + const ip = &mod.intern_pool; const raw_ptr_src = src; // TODO better source location const raw_ptr_ty = sema.typeOf(raw_ptr); - const inner_ty = if (raw_ptr_ty.zigTypeTag() == .Pointer and (raw_ptr_ty.ptrSize() == .One or raw_ptr_ty.ptrSize() == .C)) - raw_ptr_ty.childType() + const inner_ty = if (raw_ptr_ty.zigTypeTag(mod) == .Pointer and (raw_ptr_ty.ptrSize(mod) == .One or raw_ptr_ty.ptrSize(mod) == .C)) + raw_ptr_ty.childType(mod) else - return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(sema.mod)}); + return sema.fail(block, raw_ptr_src, "expected single pointer, found '{}'", .{raw_ptr_ty.fmt(mod)}); // Optionally dereference a second pointer to get the concrete type. 
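The hunks above migrate member access on types (`fieldVal`/`fieldPtr` for error sets, unions, and enums) and the special `len`/`ptr` members of arrays and slices onto the intern pool. A minimal standalone sketch of the user-facing semantics being implemented, with illustrative names, assuming a recent `zig test`:

const std = @import("std");

const Set = error{ A, B };
const Color = enum {
    red,
    green,
    pub fn isRed(c: Color) bool {
        return c == .red;
    }
};

test "member access on types and sequences" {
    // Error set member access yields an error value (of a single-error set).
    try std.testing.expect(Set.A == error.A);
    // Enum member access yields the tag; decls resolve through the namespace.
    try std.testing.expect(Color.isRed(Color.red));
    // Arrays expose `len`, reachable through one level of pointer dereference.
    var arr = [_]u8{ 1, 2, 3 };
    try std.testing.expect((&arr).len == 3);
    // Slices expose `ptr` and `len` members.
    const s: []u8 = &arr;
    try std.testing.expect(s.len == 3 and s.ptr[0] == 1);
}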
- const is_double_ptr = inner_ty.zigTypeTag() == .Pointer and inner_ty.ptrSize() == .One; - const concrete_ty = if (is_double_ptr) inner_ty.childType() else inner_ty; + const is_double_ptr = inner_ty.zigTypeTag(mod) == .Pointer and inner_ty.ptrSize(mod) == .One; + const concrete_ty = if (is_double_ptr) inner_ty.childType(mod) else inner_ty; const ptr_ty = if (is_double_ptr) inner_ty else raw_ptr_ty; const object_ptr = if (is_double_ptr) try sema.analyzeLoad(block, src, raw_ptr, src) @@ -24275,37 +24837,37 @@ fn fieldCallBind( raw_ptr; find_field: { - switch (concrete_ty.zigTypeTag()) { + switch (concrete_ty.zigTypeTag(mod)) { .Struct => { const struct_ty = try sema.resolveTypeFields(concrete_ty); - if (struct_ty.castTag(.@"struct")) |struct_obj| { - const field_index_usize = struct_obj.data.fields.getIndex(field_name) orelse + if (mod.typeToStruct(struct_ty)) |struct_obj| { + const field_index_usize = struct_obj.fields.getIndex(field_name) orelse break :find_field; const field_index = @intCast(u32, field_index_usize); - const field = struct_obj.data.fields.values()[field_index]; + const field = struct_obj.fields.values()[field_index]; return sema.finishFieldCallBind(block, src, ptr_ty, field.ty, field_index, object_ptr); - } else if (struct_ty.isTuple()) { - if (mem.eql(u8, field_name, "len")) { - return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount()) }; + } else if (struct_ty.isTuple(mod)) { + if (ip.stringEqlSlice(field_name, "len")) { + return .{ .direct = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)) }; + } + if (field_name.toUnsigned(ip)) |field_index| { + if (field_index >= struct_ty.structFieldCount(mod)) break :find_field; + return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index, mod), field_index, object_ptr); } - if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { - if (field_index >= struct_ty.structFieldCount()) break :find_field; - return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(field_index), field_index, object_ptr); - } else |_| {} } else { - const max = struct_ty.structFieldCount(); - var i: u32 = 0; - while (i < max) : (i += 1) { - if (mem.eql(u8, struct_ty.structFieldName(i), field_name)) { - return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i), i, object_ptr); + const max = struct_ty.structFieldCount(mod); + for (0..max) |i_usize| { + const i = @intCast(u32, i_usize); + if (field_name == struct_ty.structFieldName(i, mod)) { + return sema.finishFieldCallBind(block, src, ptr_ty, struct_ty.structFieldType(i, mod), i, object_ptr); } } } }, .Union => { const union_ty = try sema.resolveTypeFields(concrete_ty); - const fields = union_ty.unionFields(); + const fields = union_ty.unionFields(mod); const field_index_usize = fields.getIndex(field_name) orelse break :find_field; const field_index = @intCast(u32, field_index_usize); const field = fields.values()[field_index]; @@ -24321,24 +24883,23 @@ fn fieldCallBind( } // If we get here, we need to look for a decl in the struct type instead. 
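`fieldCallBind` above resolves `a.b(...)`: it first searches container fields, then falls back to a decl in the type's namespace, where the decl's first parameter type decides how `a` is bound (by pointer, by value via a load, or through an optional or error-union payload). A minimal sketch of the two common bindings, with illustrative names:

const std = @import("std");

const Counter = struct {
    n: u32 = 0,
    pub fn bump(self: *Counter) void {
        self.n += 1;
    }
    pub fn get(self: Counter) u32 {
        return self.n;
    }
};

test "method call binding" {
    var c = Counter{};
    c.bump(); // first param is *Counter: bound as Counter.bump(&c)
    try std.testing.expect(c.get() == 1); // first param is Counter: the pointer is loaded
}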
- const found_decl = switch (concrete_ty.zigTypeTag()) { + const found_decl = switch (concrete_ty.zigTypeTag(mod)) { .Struct, .Opaque, .Union, .Enum => found_decl: { - if (concrete_ty.getNamespace()) |namespace| { + if (concrete_ty.getNamespaceIndex(mod).unwrap()) |namespace| { if (try sema.namespaceLookup(block, src, namespace, field_name)) |decl_idx| { try sema.addReferencedBy(block, src, decl_idx); const decl_val = try sema.analyzeDeclVal(block, src, decl_idx); const decl_type = sema.typeOf(decl_val); - if (decl_type.zigTypeTag() == .Fn and - decl_type.fnParamLen() >= 1) - { - const first_param_type = decl_type.fnParamType(0); - const first_param_tag = first_param_type.tag(); + if (mod.typeToFunc(decl_type)) |func_type| f: { + if (func_type.param_types.len == 0) break :f; + + const first_param_type = func_type.param_types[0].toType(); // zig fmt: off - if (first_param_tag == .generic_poison or ( - first_param_type.zigTypeTag() == .Pointer and - (first_param_type.ptrSize() == .One or - first_param_type.ptrSize() == .C) and - first_param_type.childType().eql(concrete_ty, sema.mod))) + if (first_param_type.isGenericPoison() or ( + first_param_type.zigTypeTag(mod) == .Pointer and + (first_param_type.ptrSize(mod) == .One or + first_param_type.ptrSize(mod) == .C) and + first_param_type.childType(mod).eql(concrete_ty, mod))) { // zig fmt: on // Note that if the param type is generic poison, we know that it must @@ -24350,32 +24911,31 @@ fn fieldCallBind( .func_inst = decl_val, .arg0_inst = object_ptr, } }; - } else if (first_param_type.eql(concrete_ty, sema.mod)) { + } else if (first_param_type.eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, .arg0_inst = deref, } }; - } else if (first_param_type.zigTypeTag() == .Optional) { - var opt_buf: Type.Payload.ElemType = undefined; - const child = first_param_type.optionalChild(&opt_buf); - if (child.eql(concrete_ty, sema.mod)) { + } else if (first_param_type.zigTypeTag(mod) == .Optional) { + const child = first_param_type.optionalChild(mod); + if (child.eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ .func_inst = decl_val, .arg0_inst = deref, } }; - } else if (child.zigTypeTag() == .Pointer and - child.ptrSize() == .One and - child.childType().eql(concrete_ty, sema.mod)) + } else if (child.zigTypeTag(mod) == .Pointer and + child.ptrSize(mod) == .One and + child.childType(mod).eql(concrete_ty, mod)) { return .{ .method = .{ .func_inst = decl_val, .arg0_inst = object_ptr, } }; } - } else if (first_param_type.zigTypeTag() == .ErrorUnion and - first_param_type.errorUnionPayload().eql(concrete_ty, sema.mod)) + } else if (first_param_type.zigTypeTag(mod) == .ErrorUnion and + first_param_type.errorUnionPayload(mod).eql(concrete_ty, mod)) { const deref = try sema.analyzeLoad(block, src, object_ptr, src); return .{ .method = .{ @@ -24393,12 +24953,15 @@ fn fieldCallBind( }; const msg = msg: { - const msg = try sema.errMsg(block, src, "no field or member function named '{s}' in '{}'", .{ field_name, concrete_ty.fmt(sema.mod) }); + const msg = try sema.errMsg(block, src, "no field or member function named '{}' in '{}'", .{ + field_name.fmt(ip), + concrete_ty.fmt(mod), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, concrete_ty); if (found_decl) |decl_idx| { - const decl = sema.mod.declPtr(decl_idx); - try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "'{s}' is not a member function", 
.{field_name}); + const decl = mod.declPtr(decl_idx); + try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "'{}' is not a member function", .{field_name.fmt(ip)}); } break :msg msg; }; @@ -24414,29 +24977,29 @@ fn finishFieldCallBind( field_index: u32, object_ptr: Air.Inst.Ref, ) CompileError!ResolvedFieldCallee { + const mod = sema.mod; const arena = sema.arena; - const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(arena, mod, .{ .pointee_type = field_ty, - .mutable = ptr_ty.ptrIsMutable(), - .@"addrspace" = ptr_ty.ptrAddressSpace(), + .mutable = ptr_ty.ptrIsMutable(mod), + .@"addrspace" = ptr_ty.ptrAddressSpace(mod), }); - const container_ty = ptr_ty.childType(); - if (container_ty.zigTypeTag() == .Struct) { - if (container_ty.structFieldValueComptime(field_index)) |default_val| { + const container_ty = ptr_ty.childType(mod); + if (container_ty.zigTypeTag(mod) == .Struct) { + if (try container_ty.structFieldValueComptime(mod, field_index)) |default_val| { return .{ .direct = try sema.addConstant(field_ty, default_val) }; } } if (try sema.resolveDefinedValue(block, src, object_ptr)) |struct_ptr_val| { - const pointer = try sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(arena, .{ - .container_ptr = struct_ptr_val, - .container_ty = container_ty, - .field_index = field_index, - }), - ); + const pointer = try sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .field = .{ + .base = struct_ptr_val.toIntern(), + .index = field_index, + } }, + } })).toValue()); return .{ .direct = try sema.analyzeLoad(block, src, pointer, src) }; } @@ -24449,19 +25012,20 @@ fn namespaceLookup( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, - decl_name: []const u8, + namespace: Namespace.Index, + decl_name: InternPool.NullTerminatedString, ) CompileError!?Decl.Index { + const mod = sema.mod; const gpa = sema.gpa; if (try sema.lookupInNamespace(block, src, namespace, decl_name, true)) |decl_index| { - const decl = sema.mod.declPtr(decl_index); - if (!decl.is_pub and decl.getFileScope() != block.getFileScope()) { + const decl = mod.declPtr(decl_index); + if (!decl.is_pub and decl.getFileScope(mod) != block.getFileScope(mod)) { const msg = msg: { - const msg = try sema.errMsg(block, src, "'{s}' is not marked 'pub'", .{ - decl_name, + const msg = try sema.errMsg(block, src, "'{}' is not marked 'pub'", .{ + decl_name.fmt(&mod.intern_pool), }); errdefer msg.destroy(gpa); - try sema.mod.errNoteNonLazy(decl.srcLoc(), msg, "declared here", .{}); + try mod.errNoteNonLazy(decl.srcLoc(mod), msg, "declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -24475,8 +25039,8 @@ fn namespaceLookupRef( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, - decl_name: []const u8, + namespace: Namespace.Index, + decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; try sema.addReferencedBy(block, src, decl); @@ -24487,8 +25051,8 @@ fn namespaceLookupVal( sema: *Sema, block: *Block, src: LazySrcLoc, - namespace: *Namespace, - decl_name: []const u8, + namespace: Namespace.Index, + decl_name: InternPool.NullTerminatedString, ) CompileError!?Air.Inst.Ref { const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null; return try sema.analyzeDeclVal(block, src, decl); @@ -24499,29 +25063,30 @@ fn structFieldPtr( block: 
*Block, src: LazySrcLoc, struct_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { - assert(unresolved_struct_ty.zigTypeTag() == .Struct); + const mod = sema.mod; + assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); try sema.resolveStructLayout(struct_ty); - if (struct_ty.isTuple()) { - if (mem.eql(u8, field_name, "len")) { - const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount()); + if (struct_ty.isTuple(mod)) { + if (mod.intern_pool.stringEqlSlice(field_name, "len")) { + const len_inst = try sema.addIntUnsigned(Type.usize, struct_ty.structFieldCount(mod)); return sema.analyzeRef(block, src, len_inst); } const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); - } else if (struct_ty.isAnonStruct()) { + } else if (struct_ty.isAnonStruct(mod)) { const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); } - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const field_index_big = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name); @@ -24540,14 +25105,15 @@ fn structFieldPtrByIndex( struct_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { - if (struct_ty.isAnonStruct()) { + const mod = sema.mod; + if (struct_ty.isAnonStruct(mod)) { return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing); } - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const field = struct_obj.fields.values()[field_index]; const struct_ptr_ty = sema.typeOf(struct_ptr); - const struct_ptr_ty_info = struct_ptr_ty.ptrInfo().data; + const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod); var ptr_ty_data: Type.Payload.Pointer.Data = .{ .pointee_type = field.ty, @@ -24556,7 +25122,7 @@ fn structFieldPtrByIndex( .@"addrspace" = struct_ptr_ty_info.@"addrspace", }; - const target = sema.mod.getTarget(); + const target = mod.getTarget(); if (struct_obj.layout == .Packed) { comptime assert(Type.packed_struct_layout_version == 2); @@ -24568,7 +25134,7 @@ fn structFieldPtrByIndex( if (i == field_index) { ptr_ty_data.bit_offset = running_bits; } - running_bits += @intCast(u16, f.ty.bitSize(target)); + running_bits += @intCast(u16, f.ty.bitSize(mod)); } ptr_ty_data.host_size = (running_bits + 7) / 8; @@ -24582,7 +25148,7 @@ fn structFieldPtrByIndex( const parent_align = if (struct_ptr_ty_info.@"align" != 0) struct_ptr_ty_info.@"align" else - struct_ptr_ty_info.pointee_type.abiAlignment(target); + struct_ptr_ty_info.pointee_type.abiAlignment(mod); ptr_ty_data.@"align" = parent_align; // If the field happens to be byte-aligned, simplify the pointer type. 
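For packed structs, `structFieldPtrByIndex` above computes the field pointer's host size and bit offset from the running bit count, simplifying to a plain byte pointer when the field happens to be byte-aligned. A small sketch of the feature this supports, with illustrative names (the exact pointer attributes depend on target and layout):

const std = @import("std");

test "field pointer into a packed struct" {
    const P = packed struct { a: u4, b: u4 };
    var p = P{ .a = 1, .b = 2 };
    const bp = &p.b; // carries a bit offset, roughly *align(1:4:1) u4
    bp.* = 5;
    try std.testing.expect(p.a == 1 and p.b == 5);
}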
@@ -24596,8 +25162,8 @@ fn structFieldPtrByIndex( if (parent_align != 0 and ptr_ty_data.bit_offset % 8 == 0 and target.cpu.arch.endian() == .Little) { - const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(target); - const elem_size_bits = ptr_ty_data.pointee_type.bitSize(target); + const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(mod); + const elem_size_bits = ptr_ty_data.pointee_type.bitSize(mod); if (elem_size_bytes * 8 == elem_size_bits) { const byte_offset = ptr_ty_data.bit_offset / 8; const new_align = @as(u32, 1) << @intCast(u5, @ctz(byte_offset | parent_align)); @@ -24610,25 +25176,25 @@ fn structFieldPtrByIndex( ptr_ty_data.@"align" = field.abi_align; } - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); + const ptr_field_ty = try Type.ptr(sema.arena, mod, ptr_ty_data); if (field.is_comptime) { - const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ - .field_ty = try field.ty.copy(sema.arena), - .field_val = try field.default_val.copy(sema.arena), - }); - return sema.addConstant(ptr_field_ty, val); + const val = try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .comptime_field = field.default_val }, + } }); + return sema.addConstant(ptr_field_ty, val.toValue()); } if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = struct_ptr_val, - .container_ty = struct_ptr_ty.childType(), - .field_index = field_index, - }), - ); + const val = try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .field = .{ + .base = try struct_ptr_val.intern(struct_ptr_ty, mod), + .index = field_index, + } }, + } }); + return sema.addConstant(ptr_field_ty, val.toValue()); } try sema.requireRuntimeBlock(block, src, null); @@ -24640,21 +25206,17 @@ fn structFieldVal( block: *Block, src: LazySrcLoc, struct_byval: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, ) CompileError!Air.Inst.Ref { - assert(unresolved_struct_ty.zigTypeTag() == .Struct); + const mod = sema.mod; + assert(unresolved_struct_ty.zigTypeTag(mod) == .Struct); const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); - switch (struct_ty.tag()) { - .tuple, .empty_struct_literal => return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty), - .anon_struct => { - const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); - return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); - }, - .@"struct" => { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; if (struct_obj.is_tuple) return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); const field_index_usize = struct_obj.fields.getIndex(field_name) orelse @@ -24663,22 +25225,28 @@ fn structFieldVal( const field = struct_obj.fields.values()[field_index]; if (field.is_comptime) { - return sema.addConstant(field.ty, field.default_val); + return sema.addConstant(field.ty, field.default_val.toValue()); } if (try sema.resolveMaybeUndefVal(struct_byval)) |struct_val| { - if (struct_val.isUndef()) return sema.addConstUndef(field.ty); + if (struct_val.isUndef(mod)) return 
sema.addConstUndef(field.ty); if ((try sema.typeHasOnePossibleValue(field.ty))) |opv| { return sema.addConstant(field.ty, opv); } - - const field_values = struct_val.castTag(.aggregate).?.data; - return sema.addConstant(field.ty, field_values[field_index]); + return sema.addConstant(field.ty, try struct_val.fieldValue(mod, field_index)); } try sema.requireRuntimeBlock(block, src, null); return block.addStructFieldVal(struct_byval, field_index, field.ty); }, + .anon_struct_type => |anon_struct| { + if (anon_struct.names.len == 0) { + return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); + } else { + const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); + return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); + } + }, else => unreachable, } } @@ -24688,12 +25256,13 @@ fn tupleFieldVal( block: *Block, src: LazySrcLoc, tuple_byval: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, tuple_ty: Type, ) CompileError!Air.Inst.Ref { - if (mem.eql(u8, field_name, "len")) { - return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount()); + const mod = sema.mod; + if (mod.intern_pool.stringEqlSlice(field_name, "len")) { + return sema.addIntUnsigned(Type.usize, tuple_ty.structFieldCount(mod)); } const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_name_src); return sema.tupleFieldValByIndex(block, src, tuple_byval, field_index, tuple_ty); @@ -24704,19 +25273,20 @@ fn tupleFieldIndex( sema: *Sema, block: *Block, tuple_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, ) CompileError!u32 { - assert(!std.mem.eql(u8, field_name, "len")); - if (std.fmt.parseUnsigned(u32, field_name, 10)) |field_index| { - if (field_index < tuple_ty.structFieldCount()) return field_index; - return sema.fail(block, field_name_src, "index '{s}' out of bounds of tuple '{}'", .{ - field_name, tuple_ty.fmt(sema.mod), + const mod = sema.mod; + assert(!mod.intern_pool.stringEqlSlice(field_name, "len")); + if (field_name.toUnsigned(&mod.intern_pool)) |field_index| { + if (field_index < tuple_ty.structFieldCount(mod)) return field_index; + return sema.fail(block, field_name_src, "index '{}' out of bounds of tuple '{}'", .{ + field_name.fmt(&mod.intern_pool), tuple_ty.fmt(mod), }); - } else |_| {} + } - return sema.fail(block, field_name_src, "no field named '{s}' in tuple '{}'", .{ - field_name, tuple_ty.fmt(sema.mod), + return sema.fail(block, field_name_src, "no field named '{}' in tuple '{}'", .{ + field_name.fmt(&mod.intern_pool), tuple_ty.fmt(mod), }); } @@ -24728,22 +25298,29 @@ fn tupleFieldValByIndex( field_index: u32, tuple_ty: Type, ) CompileError!Air.Inst.Ref { - const field_ty = tuple_ty.structFieldType(field_index); + const mod = sema.mod; + const field_ty = tuple_ty.structFieldType(field_index, mod); - if (tuple_ty.structFieldValueComptime(field_index)) |default_value| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); } if (try sema.resolveMaybeUndefVal(tuple_byval)) |tuple_val| { - if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); if ((try sema.typeHasOnePossibleValue(field_ty))) |opv| { return sema.addConstant(field_ty, opv); } - const field_values = tuple_val.castTag(.aggregate).?.data; - return sema.addConstant(field_ty, field_values[field_index]); + return 
switch (mod.intern_pool.indexToKey(tuple_val.toIntern())) { + .undef => sema.addConstUndef(field_ty), + .aggregate => |aggregate| sema.addConstant(field_ty, switch (aggregate.storage) { + .bytes => |bytes| try mod.intValue(Type.u8, bytes[field_index]), + .elems => |elems| elems[field_index].toValue(), + .repeated_elem => |elem| elem.toValue(), + }), + else => unreachable, + }; } - if (tuple_ty.structFieldValueComptime(field_index)) |default_val| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { return sema.addConstant(field_ty, default_val); } @@ -24756,33 +25333,38 @@ fn unionFieldPtr( block: *Block, src: LazySrcLoc, union_ptr: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_union_ty: Type, initializing: bool, ) CompileError!Air.Inst.Ref { const arena = sema.arena; - assert(unresolved_union_ty.zigTypeTag() == .Union); + const mod = sema.mod; + const ip = &mod.intern_pool; + + assert(unresolved_union_ty.zigTypeTag(mod) == .Union); const union_ptr_ty = sema.typeOf(union_ptr); const union_ty = try sema.resolveTypeFields(unresolved_union_ty); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; - const ptr_field_ty = try Type.ptr(arena, sema.mod, .{ + const ptr_field_ty = try Type.ptr(arena, mod, .{ .pointee_type = field.ty, - .mutable = union_ptr_ty.ptrIsMutable(), - .@"volatile" = union_ptr_ty.isVolatilePtr(), - .@"addrspace" = union_ptr_ty.ptrAddressSpace(), + .mutable = union_ptr_ty.ptrIsMutable(mod), + .@"volatile" = union_ptr_ty.isVolatilePtr(mod), + .@"addrspace" = union_ptr_ty.ptrAddressSpace(mod), }); - const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); + const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); - if (initializing and field.ty.zigTypeTag() == .NoReturn) { + if (initializing and field.ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name}); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{ + field_name.fmt(ip), + }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; @@ -24794,21 +25376,20 @@ fn unionFieldPtr( .Auto => if (!initializing) { const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse break :ct; - if (union_val.isUndef()) { + if (union_val.isUndef(mod)) { return sema.failWithUseOfUndef(block, src); } - const tag_and_val = union_val.castTag(.@"union").?.data; - var field_tag_buf: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = enum_field_index, - }; - const field_tag = Value.initPayload(&field_tag_buf.base); - const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod); + const un = ip.indexToKey(union_val.toIntern()).un; + const field_tag = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); + const tag_matches = un.tag == field_tag.toIntern(); if (!tag_matches) { const msg = msg: { - const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; - const active_field_name = union_obj.tag_ty.enumFieldName(active_index); - const
msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); + const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?; + const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); + const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{ + field_name.fmt(ip), + active_field_name.fmt(ip), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -24818,28 +25399,27 @@ fn unionFieldPtr( }, .Packed, .Extern => {}, } - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(arena, .{ - .container_ptr = union_ptr_val, - .container_ty = union_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .field = .{ + .base = union_ptr_val.toIntern(), + .index = field_index, + } }, + } })).toValue()); } try sema.requireRuntimeBlock(block, src, null); if (!initializing and union_obj.layout == .Auto and block.wantSafety() and - union_ty.unionTagTypeSafety() != null and union_obj.fields.count() > 1) + union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1) { - const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val); // TODO would it be better if get_union_tag supported pointers to unions? const union_val = try block.addTyOp(.load, union_ty, union_ptr); const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_val); try sema.panicInactiveUnionField(block, active_tag, wanted_tag); } - if (field.ty.zigTypeTag() == .NoReturn) { + if (field.ty.zigTypeTag(mod) == .NoReturn) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -24851,37 +25431,37 @@ fn unionFieldVal( block: *Block, src: LazySrcLoc, union_byval: Air.Inst.Ref, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_name_src: LazySrcLoc, unresolved_union_ty: Type, ) CompileError!Air.Inst.Ref { - assert(unresolved_union_ty.zigTypeTag() == .Union); + const mod = sema.mod; + const ip = &mod.intern_pool; + assert(unresolved_union_ty.zigTypeTag(mod) == .Union); const union_ty = try sema.resolveTypeFields(unresolved_union_ty); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); const field = union_obj.fields.values()[field_index]; - const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?); + const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name, mod).?); if (try sema.resolveMaybeUndefVal(union_byval)) |union_val| { - if (union_val.isUndef()) return sema.addConstUndef(field.ty); + if (union_val.isUndef(mod)) return sema.addConstUndef(field.ty); - const tag_and_val = union_val.castTag(.@"union").?.data; - var field_tag_buf: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = enum_field_index, - }; - const field_tag = Value.initPayload(&field_tag_buf.base); - const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod); + const un = ip.indexToKey(union_val.toIntern()).un; + const field_tag = try 
mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); + const tag_matches = un.tag == field_tag.toIntern(); switch (union_obj.layout) { .Auto => { if (tag_matches) { - return sema.addConstant(field.ty, tag_and_val.val); + return sema.addConstant(field.ty, un.val.toValue()); } else { const msg = msg: { - const active_index = tag_and_val.tag.castTag(.enum_field_index).?.data; - const active_field_name = union_obj.tag_ty.enumFieldName(active_index); - const msg = try sema.errMsg(block, src, "access of union field '{s}' while field '{s}' is active", .{ field_name, active_field_name }); + const active_index = union_obj.tag_ty.enumTagFieldIndex(un.tag.toValue(), mod).?; + const active_field_name = union_obj.tag_ty.enumFieldName(active_index, mod); + const msg = try sema.errMsg(block, src, "access of union field '{}' while field '{}' is active", .{ + field_name.fmt(ip), active_field_name.fmt(ip), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -24891,10 +25471,10 @@ fn unionFieldVal( }, .Packed, .Extern => { if (tag_matches) { - return sema.addConstant(field.ty, tag_and_val.val); + return sema.addConstant(field.ty, un.val.toValue()); } else { - const old_ty = union_ty.unionFieldType(tag_and_val.tag, sema.mod); - if (try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0)) |new_val| { + const old_ty = union_ty.unionFieldType(un.tag.toValue(), mod); + if (try sema.bitCastVal(block, src, un.val.toValue(), old_ty, field.ty, 0)) |new_val| { return sema.addConstant(field.ty, new_val); } } @@ -24904,14 +25484,14 @@ fn unionFieldVal( try sema.requireRuntimeBlock(block, src, null); if (union_obj.layout == .Auto and block.wantSafety() and - union_ty.unionTagTypeSafety() != null and union_obj.fields.count() > 1) + union_ty.unionTagTypeSafety(mod) != null and union_obj.fields.count() > 1) { - const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index); + const wanted_tag_val = try mod.enumValueFieldIndex(union_obj.tag_ty, enum_field_index); const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val); const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval); try sema.panicInactiveUnionField(block, active_tag, wanted_tag); } - if (field.ty.zigTypeTag() == .NoReturn) { + if (field.ty.zigTypeTag(mod) == .NoReturn) { _ = try block.addNoOp(.unreach); return Air.Inst.Ref.unreachable_value; } @@ -24928,22 +25508,22 @@ fn elemPtr( init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const indexable_ptr_src = src; // TODO better source location const indexable_ptr_ty = sema.typeOf(indexable_ptr); - const target = sema.mod.getTarget(); - const indexable_ty = switch (indexable_ptr_ty.zigTypeTag()) { - .Pointer => indexable_ptr_ty.elemType(), - else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(sema.mod)}), + const indexable_ty = switch (indexable_ptr_ty.zigTypeTag(mod)) { + .Pointer => indexable_ptr_ty.childType(mod), + else => return sema.fail(block, indexable_ptr_src, "expected pointer, found '{}'", .{indexable_ptr_ty.fmt(mod)}), }; try checkIndexable(sema, block, src, indexable_ty); - switch (indexable_ty.zigTypeTag()) { + switch (indexable_ty.zigTypeTag(mod)) { .Array, .Vector => return sema.elemPtrArray(block, src, indexable_ptr_src, indexable_ptr, elem_index_src, elem_index, init, oob_safety), .Struct => { // Tuple field access. 
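For `Packed` and `Extern` union layouts, `unionFieldVal` above reads a non-active field by bit-casting the active field's value instead of raising the active-tag safety error used for `Auto` layout. A standalone sketch of that reinterpretation semantics, with illustrative names (the expected byte depends on target endianness):

const std = @import("std");
const builtin = @import("builtin");

test "extern union fields alias the same bytes" {
    const U = extern union { word: u32, bytes: [4]u8 };
    var u = U{ .word = 0x01020304 };
    const first: u8 = if (builtin.cpu.arch.endian() == .Little) 0x04 else 0x01;
    try std.testing.expect(u.bytes[0] == first);
}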
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(target)); + const index = @intCast(u32, index_val.toUnsignedInt(mod)); return sema.tupleFieldPtr(block, src, indexable_ptr, elem_index_src, index, init); }, else => { @@ -24966,11 +25546,11 @@ fn elemPtrOneLayerOnly( ) CompileError!Air.Inst.Ref { const indexable_src = src; // TODO better source location const indexable_ty = sema.typeOf(indexable); - const target = sema.mod.getTarget(); + const mod = sema.mod; try checkIndexable(sema, block, src, indexable_ty); - switch (indexable_ty.ptrSize()) { + switch (indexable_ty.ptrSize(mod)) { .Slice => return sema.elemPtrSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), .Many, .C => { const maybe_ptr_val = try sema.resolveDefinedValue(block, indexable_src, indexable); @@ -24978,9 +25558,9 @@ fn elemPtrOneLayerOnly( const runtime_src = rs: { const ptr_val = maybe_ptr_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index = @intCast(usize, index_val.toUnsignedInt(target)); - const elem_ptr = try ptr_val.elemPtr(indexable_ty, sema.arena, index, sema.mod); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); const result_ty = try sema.elemPtrType(indexable_ty, index); + const elem_ptr = try ptr_val.elemPtr(result_ty, index, mod); return sema.addConstant(result_ty, elem_ptr); }; const result_ty = try sema.elemPtrType(indexable_ty, null); @@ -24989,7 +25569,7 @@ fn elemPtrOneLayerOnly( return block.addPtrElemPtr(indexable, elem_index, result_ty); }, .One => { - assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable + assert(indexable_ty.childType(mod).zigTypeTag(mod) == .Array); // Guaranteed by checkIndexable return sema.elemPtrArray(block, src, indexable_src, indexable, elem_index_src, elem_index, init, oob_safety); }, } @@ -25006,7 +25586,7 @@ fn elemVal( ) CompileError!Air.Inst.Ref { const indexable_src = src; // TODO better source location const indexable_ty = sema.typeOf(indexable); - const target = sema.mod.getTarget(); + const mod = sema.mod; try checkIndexable(sema, block, src, indexable_ty); @@ -25014,8 +25594,8 @@ fn elemVal( // index is a scalar or vector instead of unconditionally casting to usize. 
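Tuple element access above (`elemPtr`/`elemVal` on a `.Struct`) requires a comptime-known index and routes through the tuple field helpers; `tupleFieldIndex` also accepts a numeric field name. A minimal sketch, with illustrative names:

const std = @import("std");

test "tuple access by len, name, and index" {
    const t = .{ @as(u32, 7), true };
    try std.testing.expect(t.len == 2); // `len` is special-cased above
    try std.testing.expect(t.@"0" == 7); // numeric field name parsed as an index
    try std.testing.expect(t[1]); // indexing with a comptime-known index
}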
const elem_index = try sema.coerce(block, Type.usize, elem_index_uncasted, elem_index_src); - switch (indexable_ty.zigTypeTag()) { - .Pointer => switch (indexable_ty.ptrSize()) { + switch (indexable_ty.zigTypeTag(mod)) { + .Pointer => switch (indexable_ty.ptrSize(mod)) { .Slice => return sema.elemValSlice(block, src, indexable_src, indexable, elem_index_src, elem_index, oob_safety), .Many, .C => { const maybe_indexable_val = try sema.resolveDefinedValue(block, indexable_src, indexable); @@ -25024,10 +25604,14 @@ fn elemVal( const runtime_src = rs: { const indexable_val = maybe_indexable_val orelse break :rs indexable_src; const index_val = maybe_index_val orelse break :rs elem_index_src; - const index = @intCast(usize, index_val.toUnsignedInt(target)); - const elem_ptr_val = try indexable_val.elemPtr(indexable_ty, sema.arena, index, sema.mod); - if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, indexable_ty)) |elem_val| { - return sema.addConstant(indexable_ty.elemType2(), elem_val); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const elem_ty = indexable_ty.elemType2(mod); + const many_ptr_ty = try mod.manyConstPtrType(elem_ty); + const many_ptr_val = try mod.getCoerced(indexable_val, many_ptr_ty); + const elem_ptr_ty = try mod.singleConstPtrType(elem_ty); + const elem_ptr_val = try many_ptr_val.elemPtr(elem_ptr_ty, index, mod); + if (try sema.pointerDeref(block, indexable_src, elem_ptr_val, elem_ptr_ty)) |elem_val| { + return sema.addConstant(elem_ty, try mod.getCoerced(elem_val, elem_ty)); } break :rs indexable_src; }; @@ -25036,7 +25620,19 @@ fn elemVal( return block.addBinOp(.ptr_elem_val, indexable, elem_index); }, .One => { - assert(indexable_ty.childType().zigTypeTag() == .Array); // Guaranteed by checkIndexable + const array_ty = indexable_ty.childType(mod); // Guaranteed by checkIndexable + assert(array_ty.zigTypeTag(mod) == .Array); + + if (array_ty.sentinel(mod)) |sentinel| { + // index must be defined since it can access out of bounds + if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| { + const index = @intCast(usize, index_val.toUnsignedInt(mod)); + if (index == array_ty.arrayLen(mod)) { + return sema.addConstant(array_ty.childType(mod), sentinel); + } + } + } + const elem_ptr = try sema.elemPtr(block, indexable_src, indexable, elem_index, elem_index_src, false, oob_safety); return sema.analyzeLoad(block, indexable_src, elem_ptr, elem_index_src); }, @@ -25049,7 +25645,7 @@ fn elemVal( .Struct => { // Tuple field access. 
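The new `.One` branch above returns the array's sentinel when a comptime-known index equals `len`, rather than tripping the out-of-bounds check. Sketch, with illustrative names:

const std = @import("std");

test "indexing a sentinel-terminated array at len" {
    var arr = [_:0]u8{ 1, 2, 3 };
    try std.testing.expect(arr[arr.len] == 0); // yields the sentinel
    try std.testing.expect((&arr)[3] == 0); // same through a single-item pointer
}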
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index, "tuple field access index must be comptime-known"); - const index = @intCast(u32, index_val.toUnsignedInt(target)); + const index = @intCast(u32, index_val.toUnsignedInt(mod)); return sema.tupleField(block, indexable_src, indexable, elem_index_src, index); }, else => unreachable, @@ -25064,6 +25660,7 @@ fn validateRuntimeElemAccess( parent_ty: Type, parent_src: LazySrcLoc, ) CompileError!void { + const mod = sema.mod; const valid_rt = try sema.validateRunTimeType(elem_ty, false); if (!valid_rt) { const msg = msg: { @@ -25071,12 +25668,12 @@ fn validateRuntimeElemAccess( block, elem_index_src, "values of type '{}' must be comptime-known, but index value is runtime-known", - .{parent_ty.fmt(sema.mod)}, + .{parent_ty.fmt(mod)}, ); errdefer msg.destroy(sema.gpa); - const src_decl = sema.mod.declPtr(block.src_decl); - try sema.explainWhyTypeIsComptime(msg, parent_src.toSrcLoc(src_decl), parent_ty); + const src_decl = mod.declPtr(block.src_decl); + try sema.explainWhyTypeIsComptime(msg, parent_src.toSrcLoc(src_decl, mod), parent_ty); break :msg msg; }; @@ -25093,10 +25690,11 @@ fn tupleFieldPtr( field_index: u32, init: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const tuple_ptr_ty = sema.typeOf(tuple_ptr); - const tuple_ty = tuple_ptr_ty.childType(); + const tuple_ty = tuple_ptr_ty.childType(mod); _ = try sema.resolveTypeFields(tuple_ty); - const field_count = tuple_ty.structFieldCount(); + const field_count = tuple_ty.structFieldCount(mod); if (field_count == 0) { return sema.fail(block, tuple_ptr_src, "indexing into empty tuple is not allowed", .{}); @@ -25108,31 +25706,29 @@ fn tupleFieldPtr( }); } - const field_ty = tuple_ty.structFieldType(field_index); - const ptr_field_ty = try Type.ptr(sema.arena, sema.mod, .{ + const field_ty = tuple_ty.structFieldType(field_index, mod); + const ptr_field_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = field_ty, - .mutable = tuple_ptr_ty.ptrIsMutable(), - .@"volatile" = tuple_ptr_ty.isVolatilePtr(), - .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(), + .mutable = tuple_ptr_ty.ptrIsMutable(mod), + .@"volatile" = tuple_ptr_ty.isVolatilePtr(mod), + .@"addrspace" = tuple_ptr_ty.ptrAddressSpace(mod), }); - if (tuple_ty.structFieldValueComptime(field_index)) |default_val| { - const val = try Value.Tag.comptime_field_ptr.create(sema.arena, .{ - .field_ty = field_ty, - .field_val = default_val, - }); - return sema.addConstant(ptr_field_ty, val); + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_val| { + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .comptime_field = default_val.toIntern() }, + } })).toValue()); } if (try sema.resolveMaybeUndefVal(tuple_ptr)) |tuple_ptr_val| { - return sema.addConstant( - ptr_field_ty, - try Value.Tag.field_ptr.create(sema.arena, .{ - .container_ptr = tuple_ptr_val, - .container_ty = tuple_ty, - .field_index = field_index, - }), - ); + return sema.addConstant(ptr_field_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_field_ty.toIntern(), + .addr = .{ .field = .{ + .base = tuple_ptr_val.toIntern(), + .index = field_index, + } }, + } })).toValue()); } if (!init) { @@ -25151,8 +25747,9 @@ fn tupleField( field_index_src: LazySrcLoc, field_index: u32, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const tuple_ty = try sema.resolveTypeFields(sema.typeOf(tuple)); - const field_count = tuple_ty.structFieldCount(); + const field_count = 
tuple_ty.structFieldCount(mod); if (field_count == 0) { return sema.fail(block, tuple_src, "indexing into empty tuple is not allowed", .{}); @@ -25164,15 +25761,15 @@ fn tupleField( }); } - const field_ty = tuple_ty.structFieldType(field_index); + const field_ty = tuple_ty.structFieldType(field_index, mod); - if (tuple_ty.structFieldValueComptime(field_index)) |default_value| { + if (try tuple_ty.structFieldValueComptime(mod, field_index)) |default_value| { return sema.addConstant(field_ty, default_value); // comptime field } if (try sema.resolveMaybeUndefVal(tuple)) |tuple_val| { - if (tuple_val.isUndef()) return sema.addConstUndef(field_ty); - return sema.addConstant(field_ty, tuple_val.fieldValue(tuple_ty, field_index)); + if (tuple_val.isUndef(mod)) return sema.addConstUndef(field_ty); + return sema.addConstant(field_ty, try tuple_val.fieldValue(mod, field_index)); } try sema.validateRuntimeElemAccess(block, field_index_src, field_ty, tuple_ty, tuple_src); @@ -25191,11 +25788,12 @@ fn elemValArray( elem_index: Air.Inst.Ref, oob_safety: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const array_ty = sema.typeOf(array); - const array_sent = array_ty.sentinel(); - const array_len = array_ty.arrayLen(); + const array_sent = array_ty.sentinel(mod); + const array_len = array_ty.arrayLen(mod); const array_len_s = array_len + @boolToInt(array_sent != null); - const elem_ty = array_ty.childType(); + const elem_ty = array_ty.childType(mod); if (array_len_s == 0) { return sema.fail(block, array_src, "indexing into empty array is not allowed", .{}); @@ -25204,10 +25802,9 @@ fn elemValArray( const maybe_undef_array_val = try sema.resolveMaybeUndefVal(array); // index must be defined since it can access out of bounds const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index); - const target = sema.mod.getTarget(); if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(target)); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); if (array_sent) |s| { if (index == array_len) { return sema.addConstant(elem_ty, s); @@ -25219,12 +25816,12 @@ fn elemValArray( } } if (maybe_undef_array_val) |array_val| { - if (array_val.isUndef()) { + if (array_val.isUndef(mod)) { return sema.addConstUndef(elem_ty); } if (maybe_index_val) |index_val| { - const index = @intCast(usize, index_val.toUnsignedInt(target)); - const elem_val = try array_val.elemValue(sema.mod, sema.arena, index); + const index = @intCast(usize, index_val.toUnsignedInt(mod)); + const elem_val = try array_val.elemValue(mod, index); return sema.addConstant(elem_ty, elem_val); } } @@ -25255,11 +25852,11 @@ fn elemPtrArray( init: bool, oob_safety: bool, ) CompileError!Air.Inst.Ref { - const target = sema.mod.getTarget(); + const mod = sema.mod; const array_ptr_ty = sema.typeOf(array_ptr); - const array_ty = array_ptr_ty.childType(); - const array_sent = array_ty.sentinel() != null; - const array_len = array_ty.arrayLen(); + const array_ty = array_ptr_ty.childType(mod); + const array_sent = array_ty.sentinel(mod) != null; + const array_len = array_ty.arrayLen(mod); const array_len_s = array_len + @boolToInt(array_sent); if (array_len_s == 0) { @@ -25269,7 +25866,7 @@ fn elemPtrArray( const maybe_undef_array_ptr_val = try sema.resolveMaybeUndefVal(array_ptr); // The index must not be undefined since it can be out of bounds. 
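`tupleField` above folds comptime-known fields straight to constants via `structFieldValueComptime`, the same mechanism that serves explicit `comptime` struct fields. Sketch, with illustrative names:

const std = @import("std");

test "comptime fields load their comptime-known value" {
    const S = struct { comptime version: u32 = 1, data: u8 };
    var s = S{ .data = 42 };
    try std.testing.expect(s.version == 1); // folded at compile time
    s.data += 1;
    try std.testing.expect(s.data == 43);
}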
     const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
-        const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(target));
+        const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod));
         if (index >= array_len_s) {
             const sentinel_label: []const u8 = if (array_sent) " +1 (sentinel)" else "";
             return sema.fail(block, elem_index_src, "index {d} outside array of length {d}{s}", .{ index, array_len, sentinel_label });
@@ -25280,17 +25877,17 @@ fn elemPtrArray(
     const elem_ptr_ty = try sema.elemPtrType(array_ptr_ty, offset);

     if (maybe_undef_array_ptr_val) |array_ptr_val| {
-        if (array_ptr_val.isUndef()) {
+        if (array_ptr_val.isUndef(mod)) {
             return sema.addConstUndef(elem_ptr_ty);
         }
         if (offset) |index| {
-            const elem_ptr = try array_ptr_val.elemPtr(array_ptr_ty, sema.arena, index, sema.mod);
+            const elem_ptr = try array_ptr_val.elemPtr(elem_ptr_ty, index, mod);
             return sema.addConstant(elem_ptr_ty, elem_ptr);
         }
     }

     if (!init) {
-        try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(), array_ty, array_ptr_src);
+        try sema.validateRuntimeElemAccess(block, elem_index_src, array_ty.elemType2(mod), array_ty, array_ptr_src);
     }

     const runtime_src = if (maybe_undef_array_ptr_val != null) elem_index_src else array_ptr_src;
@@ -25316,32 +25913,33 @@ fn elemValSlice(
     elem_index: Air.Inst.Ref,
     oob_safety: bool,
 ) CompileError!Air.Inst.Ref {
+    const mod = sema.mod;
     const slice_ty = sema.typeOf(slice);
-    const slice_sent = slice_ty.sentinel() != null;
-    const elem_ty = slice_ty.elemType2();
+    const slice_sent = slice_ty.sentinel(mod) != null;
+    const elem_ty = slice_ty.elemType2(mod);
     var runtime_src = slice_src;

     // slice must be defined since it can be dereferenced as null
     const maybe_slice_val = try sema.resolveDefinedValue(block, slice_src, slice);
     // index must be defined since it can index out of bounds
     const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
-    const target = sema.mod.getTarget();

     if (maybe_slice_val) |slice_val| {
         runtime_src = elem_index_src;
-        const slice_len = slice_val.sliceLen(sema.mod);
+        const slice_len = slice_val.sliceLen(mod);
         const slice_len_s = slice_len + @boolToInt(slice_sent);
         if (slice_len_s == 0) {
             return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
         }
         if (maybe_index_val) |index_val| {
-            const index = @intCast(usize, index_val.toUnsignedInt(target));
+            const index = @intCast(usize, index_val.toUnsignedInt(mod));
             if (index >= slice_len_s) {
                 const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
                 return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
             }
-            const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, sema.mod);
-            if (try sema.pointerDeref(block, slice_src, elem_ptr_val, slice_ty)) |elem_val| {
+            const elem_ptr_ty = try sema.elemPtrType(slice_ty, index);
+            const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod);
+            if (try sema.pointerDeref(block, slice_src, elem_ptr_val, elem_ptr_ty)) |elem_val| {
                 return sema.addConstant(elem_ty, elem_val);
             }
             runtime_src = slice_src;
@@ -25353,7 +25951,7 @@ fn elemValSlice(
     try sema.requireRuntimeBlock(block, src, runtime_src);
     if (oob_safety and block.wantSafety()) {
         const len_inst = if (maybe_slice_val) |slice_val|
-            try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod))
+            try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod))
         else
             try block.addTyOp(.slice_len, Type.usize, slice);
         const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
@@ -25373,24 +25971,24 @@ fn elemPtrSlice(
     elem_index: Air.Inst.Ref,
     oob_safety: bool,
 ) CompileError!Air.Inst.Ref {
-    const target = sema.mod.getTarget();
+    const mod = sema.mod;
     const slice_ty = sema.typeOf(slice);
-    const slice_sent = slice_ty.sentinel() != null;
+    const slice_sent = slice_ty.sentinel(mod) != null;
     const maybe_undef_slice_val = try sema.resolveMaybeUndefVal(slice);

     // The index must not be undefined since it can be out of bounds.
     const offset: ?usize = if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| o: {
-        const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(target));
+        const index = try sema.usizeCast(block, elem_index_src, index_val.toUnsignedInt(mod));
         break :o index;
     } else null;

     const elem_ptr_ty = try sema.elemPtrType(slice_ty, offset);

     if (maybe_undef_slice_val) |slice_val| {
-        if (slice_val.isUndef()) {
+        if (slice_val.isUndef(mod)) {
             return sema.addConstUndef(elem_ptr_ty);
         }
-        const slice_len = slice_val.sliceLen(sema.mod);
+        const slice_len = slice_val.sliceLen(mod);
         const slice_len_s = slice_len + @boolToInt(slice_sent);
         if (slice_len_s == 0) {
             return sema.fail(block, slice_src, "indexing into empty slice is not allowed", .{});
@@ -25400,7 +25998,7 @@ fn elemPtrSlice(
             const sentinel_label: []const u8 = if (slice_sent) " +1 (sentinel)" else "";
             return sema.fail(block, elem_index_src, "index {d} outside slice of length {d}{s}", .{ index, slice_len, sentinel_label });
         }
-        const elem_ptr_val = try slice_val.elemPtr(slice_ty, sema.arena, index, sema.mod);
+        const elem_ptr_val = try slice_val.elemPtr(elem_ptr_ty, index, mod);
         return sema.addConstant(elem_ptr_ty, elem_ptr_val);
         }
     }
@@ -25412,8 +26010,8 @@ fn elemPtrSlice(
     if (oob_safety and block.wantSafety()) {
         const len_inst = len: {
             if (maybe_undef_slice_val) |slice_val|
-                if (!slice_val.isUndef())
-                    break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod));
+                if (!slice_val.isUndef(mod))
+                    break :len try sema.addIntUnsigned(Type.usize, slice_val.sliceLen(mod));
             break :len try block.addTyOp(.slice_len, Type.usize, slice);
         };
         const cmp_op: Air.Inst.Tag = if (slice_sent) .cmp_lte else .cmp_lt;
@@ -25455,16 +26053,17 @@ const CoerceOpts = struct {
         fn get(info: @This(), sema: *Sema) !?Module.SrcLoc {
             if (info.func_inst == .none) return null;
+            const mod = sema.mod;
             const fn_decl = (try sema.funcDeclSrc(info.func_inst)) orelse return null;
-            const param_src = Module.paramSrc(0, sema.gpa, fn_decl, info.param_i);
+            const param_src = Module.paramSrc(0, mod, fn_decl, info.param_i);
             if (param_src == .node_offset_param) {
                 return Module.SrcLoc{
-                    .file_scope = fn_decl.getFileScope(),
+                    .file_scope = fn_decl.getFileScope(mod),
                     .parent_decl_node = fn_decl.src_node,
                     .lazy = LazySrcLoc.nodeOffset(param_src.node_offset_param),
                 };
             }
-            return param_src.toSrcLoc(fn_decl);
+            return param_src.toSrcLoc(fn_decl, mod);
         }
     } = .{},
 };
@@ -25477,34 +26076,30 @@ fn coerceExtra(
     inst_src: LazySrcLoc,
     opts: CoerceOpts,
 ) CoersionError!Air.Inst.Ref {
-    switch (dest_ty_unresolved.tag()) {
-        .generic_poison => return inst,
-        else => {},
-    }
+    if (dest_ty_unresolved.isGenericPoison()) return inst;
+    const mod = sema.mod;
     const dest_ty_src = inst_src; // TODO better source location
     const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved);
     const inst_ty = try sema.resolveTypeFields(sema.typeOf(inst));
-    const target = sema.mod.getTarget();
+    const target = mod.getTarget();

     // If the types are the same, we can return the operand.
-    if (dest_ty.eql(inst_ty, sema.mod))
+    if (dest_ty.eql(inst_ty, mod))
         return inst;

-    const arena = sema.arena;
     const maybe_inst_val = try sema.resolveMaybeUndefVal(inst);

     var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src);
     if (in_memory_result == .ok) {
         if (maybe_inst_val) |val| {
-            // Keep the comptime Value representation; take the new type.
-            return sema.addConstant(dest_ty, val);
+            return sema.coerceInMemory(block, val, inst_ty, dest_ty, dest_ty_src);
         }
         try sema.requireRuntimeBlock(block, inst_src, null);
         return block.addBitCast(dest_ty, inst);
     }

-    const is_undef = inst_ty.zigTypeTag() == .Undefined;
+    const is_undef = inst_ty.zigTypeTag(mod) == .Undefined;

-    switch (dest_ty.zigTypeTag()) {
+    switch (dest_ty.zigTypeTag(mod)) {
         .Optional => optional: {
             // undefined sets the optional bit also to undefined.
             if (is_undef) {
@@ -25512,18 +26107,22 @@ fn coerceExtra(
             }

             // null to ?T
-            if (inst_ty.zigTypeTag() == .Null) {
-                return sema.addConstant(dest_ty, Value.null);
+            if (inst_ty.zigTypeTag(mod) == .Null) {
+                return sema.addConstant(dest_ty, (try mod.intern(.{ .opt = .{
+                    .ty = dest_ty.toIntern(),
+                    .val = .none,
+                } })).toValue());
             }

             // cast from ?*T and ?[*]T to ?*anyopaque
             // but don't do it if the source type is a double pointer
-            if (dest_ty.isPtrLikeOptional() and dest_ty.elemType2().tag() == .anyopaque and
-                inst_ty.isPtrAtRuntime())
+            if (dest_ty.isPtrLikeOptional(mod) and
+                dest_ty.elemType2(mod).toIntern() == .anyopaque_type and
+                inst_ty.isPtrAtRuntime(mod))
             anyopaque_check: {
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :optional;
-                const elem_ty = inst_ty.elemType2();
-                if (elem_ty.zigTypeTag() == .Pointer or elem_ty.isPtrLikeOptional()) {
+                const elem_ty = inst_ty.elemType2(mod);
+                if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
                     in_memory_result = .{ .double_ptr_to_anyopaque = .{
                         .actual = inst_ty,
                         .wanted = dest_ty,
@@ -25532,12 +26131,12 @@ fn coerceExtra(
                 }
                 // Let the logic below handle wrapping the optional now that
                 // it has been checked to correctly coerce.
-                if (!inst_ty.isPtrLikeOptional()) break :anyopaque_check;
+                if (!inst_ty.isPtrLikeOptional(mod)) break :anyopaque_check;
                 return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
             }

             // T to ?T
-            const child_type = try dest_ty.optionalChildAlloc(sema.arena);
+            const child_type = dest_ty.optionalChild(mod);
             const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
                 error.NotCoercible => {
                     if (in_memory_result == .no_match) {
@@ -25551,12 +26150,12 @@ fn coerceExtra(
             return try sema.wrapOptional(block, dest_ty, intermediate, inst_src);
         },
         .Pointer => pointer: {
-            const dest_info = dest_ty.ptrInfo().data;
+            const dest_info = dest_ty.ptrInfo(mod);

             // Function body to function pointer.
-            if (inst_ty.zigTypeTag() == .Fn) {
+            if (inst_ty.zigTypeTag(mod) == .Fn) {
                 const fn_val = try sema.resolveConstValue(block, .unneeded, inst, "");
-                const fn_decl = fn_val.pointerDecl().?;
+                const fn_decl = fn_val.pointerDecl(mod).?;
                 const inst_as_ptr = try sema.analyzeDeclRef(fn_decl);
                 return sema.coerce(block, dest_ty, inst_as_ptr, inst_src);
             }

             // *T to *[1]T
             single_item: {
                 if (dest_info.size != .One) break :single_item;
-                if (!inst_ty.isSinglePointer()) break :single_item;
+                if (!inst_ty.isSinglePointer(mod)) break :single_item;
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
-                const ptr_elem_ty = inst_ty.childType();
+                const ptr_elem_ty = inst_ty.childType(mod);
                 const array_ty = dest_info.pointee_type;
-                if (array_ty.zigTypeTag() != .Array) break :single_item;
-                const array_elem_ty = array_ty.childType();
-                if (array_ty.arrayLen() != 1) break :single_item;
+                if (array_ty.zigTypeTag(mod) != .Array) break :single_item;
+                const array_elem_ty = array_ty.childType(mod);
+                if (array_ty.arrayLen(mod) != 1) break :single_item;
                 const dest_is_mut = dest_info.mutable;
                 switch (try sema.coerceInMemoryAllowed(block, array_elem_ty, ptr_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
                     .ok => {},
@@ -25581,11 +26180,11 @@ fn coerceExtra(

             // Coercions where the source is a single pointer to an array.
             src_array_ptr: {
-                if (!inst_ty.isSinglePointer()) break :src_array_ptr;
+                if (!inst_ty.isSinglePointer(mod)) break :src_array_ptr;
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
-                const array_ty = inst_ty.childType();
-                if (array_ty.zigTypeTag() != .Array) break :src_array_ptr;
-                const array_elem_type = array_ty.childType();
+                const array_ty = inst_ty.childType(mod);
+                if (array_ty.zigTypeTag(mod) != .Array) break :src_array_ptr;
+                const array_elem_type = array_ty.childType(mod);
                 const dest_is_mut = dest_info.mutable;

                 const dst_elem_type = dest_info.pointee_type;
@@ -25603,8 +26202,8 @@ fn coerceExtra(
                 }

                 if (dest_info.sentinel) |dest_sent| {
-                    if (array_ty.sentinel()) |inst_sent| {
-                        if (!dest_sent.eql(inst_sent, dst_elem_type, sema.mod)) {
+                    if (array_ty.sentinel(mod)) |inst_sent| {
+                        if (!dest_sent.eql(inst_sent, dst_elem_type, mod)) {
                             in_memory_result = .{ .ptr_sentinel = .{
                                 .actual = inst_sent,
                                 .wanted = dest_sent,
@@ -25614,7 +26213,7 @@ fn coerceExtra(
                         }
                     } else {
                         in_memory_result = .{ .ptr_sentinel = .{
-                            .actual = Value.initTag(.unreachable_value),
+                            .actual = Value.@"unreachable",
                             .wanted = dest_sent,
                             .ty = dst_elem_type,
                         } };
@@ -25640,11 +26239,11 @@ fn coerceExtra(
             }

             // coercion from C pointer
-            if (inst_ty.isCPtr()) src_c_ptr: {
+            if (inst_ty.isCPtr(mod)) src_c_ptr: {
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :src_c_ptr;
                 // In this case we must add a safety check because the C pointer
                 // could be null.
-                const src_elem_ty = inst_ty.childType();
+                const src_elem_ty = inst_ty.childType(mod);
                 const dest_is_mut = dest_info.mutable;
                 const dst_elem_type = dest_info.pointee_type;
                 switch (try sema.coerceInMemoryAllowed(block, dst_elem_type, src_elem_ty, dest_is_mut, target, dest_ty_src, inst_src)) {
@@ -25656,18 +26255,18 @@ fn coerceExtra(

             // cast from *T and [*]T to *anyopaque
             // but don't do it if the source type is a double pointer
-            if (dest_info.pointee_type.tag() == .anyopaque and inst_ty.zigTypeTag() == .Pointer) to_anyopaque: {
+            if (dest_info.pointee_type.toIntern() == .anyopaque_type and inst_ty.zigTypeTag(mod) == .Pointer) to_anyopaque: {
                 if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :pointer;
-                const elem_ty = inst_ty.elemType2();
-                if (elem_ty.zigTypeTag() == .Pointer or elem_ty.isPtrLikeOptional()) {
+                const elem_ty = inst_ty.elemType2(mod);
+                if (elem_ty.zigTypeTag(mod) == .Pointer or elem_ty.isPtrLikeOptional(mod)) {
                     in_memory_result = .{ .double_ptr_to_anyopaque = .{
                         .actual = inst_ty,
                         .wanted = dest_ty,
                     } };
                     break :pointer;
                 }
-                if (dest_ty.isSlice()) break :to_anyopaque;
-                if (inst_ty.isSlice()) {
+                if (dest_ty.isSlice(mod)) break :to_anyopaque;
+                if (inst_ty.isSlice(mod)) {
                     in_memory_result = .{ .slice_to_anyopaque = .{
                         .actual = inst_ty,
                         .wanted = dest_ty,
@@ -25679,9 +26278,9 @@ fn coerceExtra(

             switch (dest_info.size) {
                 // coercion to C pointer
-                .C => switch (inst_ty.zigTypeTag()) {
+                .C => switch (inst_ty.zigTypeTag(mod)) {
                     .Null => {
-                        return sema.addConstant(dest_ty, Value.null);
+                        return sema.addConstant(dest_ty, try mod.getCoerced(Value.null, dest_ty));
                     },
                     .ComptimeInt => {
                         const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
@@ -25691,7 +26290,7 @@ fn coerceExtra(
                         return try sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
                     },
                     .Int => {
-                        const ptr_size_ty = switch (inst_ty.intInfo(target).signedness) {
+                        const ptr_size_ty = switch (inst_ty.intInfo(mod).signedness) {
                             .signed => Type.isize,
                             .unsigned => Type.usize,
                         };
@@ -25707,7 +26306,7 @@ fn coerceExtra(
                     },
                     .Pointer => p: {
                         if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
-                        const inst_info = inst_ty.ptrInfo().data;
+                        const inst_info = inst_ty.ptrInfo(mod);
                         switch (try sema.coerceInMemoryAllowed(
                             block,
                             dest_info.pointee_type,
@@ -25723,7 +26322,7 @@ fn coerceExtra(
                         if (inst_info.size == .Slice) {
                             assert(dest_info.sentinel == null);
                             if (inst_info.sentinel == null or
-                                !inst_info.sentinel.?.eql(Value.zero, dest_info.pointee_type, sema.mod))
+                                !inst_info.sentinel.?.eql(try mod.intValue(dest_info.pointee_type, 0), dest_info.pointee_type, mod))
                                 break :p;

                             const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
@@ -25733,11 +26332,11 @@ fn coerceExtra(
                     },
                     else => {},
                 },
-                .One => switch (dest_info.pointee_type.zigTypeTag()) {
+                .One => switch (dest_info.pointee_type.zigTypeTag(mod)) {
                     .Union => {
                         // pointer to anonymous struct to pointer to union
-                        if (inst_ty.isSinglePointer() and
-                            inst_ty.childType().isAnonStruct() and
+                        if (inst_ty.isSinglePointer(mod) and
+                            inst_ty.childType(mod).isAnonStruct(mod) and
                             sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                         {
                             return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
@@ -25745,8 +26344,8 @@ fn coerceExtra(
                     },
                     .Struct => {
                         // pointer to anonymous struct to pointer to struct
-                        if (inst_ty.isSinglePointer() and
-                            inst_ty.childType().isAnonStruct() and
+                        if (inst_ty.isSinglePointer(mod) and
+                            inst_ty.childType(mod).isAnonStruct(mod) and
                             sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                         {
                             return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) {
@@ -25757,8 +26356,8 @@ fn coerceExtra(
                     },
                     .Array => {
                         // pointer to tuple to pointer to array
-                        if (inst_ty.isSinglePointer() and
-                            inst_ty.childType().isTuple() and
+                        if (inst_ty.isSinglePointer(mod) and
+                            inst_ty.childType(mod).isTuple(mod) and
                             sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result))
                         {
                             return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src);
@@ -25767,38 +26366,38 @@ fn coerceExtra(
                     else => {},
                 },
                 .Slice => to_slice: {
-                    if (inst_ty.zigTypeTag() == .Array) {
+                    if (inst_ty.zigTypeTag(mod) == .Array) {
                         return sema.fail(
                             block,
                             inst_src,
                             "array literal requires address-of operator (&) to coerce to slice type '{}'",
-                            .{dest_ty.fmt(sema.mod)},
+                            .{dest_ty.fmt(mod)},
                         );
                     }

-                    if (!inst_ty.isSinglePointer()) break :to_slice;
-                    const inst_child_ty = inst_ty.childType();
-                    if (!inst_child_ty.isTuple()) break :to_slice;
+                    if (!inst_ty.isSinglePointer(mod)) break :to_slice;
+                    const inst_child_ty = inst_ty.childType(mod);
+                    if (!inst_child_ty.isTuple(mod)) break :to_slice;

                     // empty tuple to zero-length slice
                     // note that this allows coercing to a mutable slice.
-                    if (inst_child_ty.structFieldCount() == 0) {
+                    if (inst_child_ty.structFieldCount(mod) == 0) {
                         // Optional slice is represented with a null pointer so
                         // we use a dummy pointer value with the required alignment.
-                        const slice_val = try Value.Tag.slice.create(sema.arena, .{
-                            .ptr = if (dest_info.@"align" != 0)
-                                try Value.Tag.int_u64.create(sema.arena, dest_info.@"align")
+                        return sema.addConstant(dest_ty, (try mod.intern(.{ .ptr = .{
+                            .ty = dest_ty.toIntern(),
+                            .addr = .{ .int = (if (dest_info.@"align" != 0)
+                                try mod.intValue(Type.usize, dest_info.@"align")
                             else
-                                try dest_info.pointee_type.lazyAbiAlignment(target, sema.arena),
-                            .len = Value.zero,
-                        });
-                        return sema.addConstant(dest_ty, slice_val);
+                                try mod.getCoerced(try dest_info.pointee_type.lazyAbiAlignment(mod), Type.usize)).toIntern() },
+                            .len = (try mod.intValue(Type.usize, 0)).toIntern(),
+                        } })).toValue());
                     }

                     // pointer to tuple to slice
                     if (dest_info.mutable) {
                         const err_msg = err_msg: {
-                            const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(sema.mod)});
+                            const err_msg = try sema.errMsg(block, inst_src, "cannot cast pointer to tuple to '{}'", .{dest_ty.fmt(mod)});
                             errdefer err_msg.deinit(sema.gpa);
                             try sema.errNote(block, dest_ty_src, err_msg, "pointers to tuples can only coerce to constant pointers", .{});
                             break :err_msg err_msg;
@@ -25808,9 +26407,9 @@ fn coerceExtra(
                     return sema.coerceTupleToSlicePtrs(block, dest_ty, dest_ty_src, inst, inst_src);
                 },
                 .Many => p: {
-                    if (!inst_ty.isSlice()) break :p;
+                    if (!inst_ty.isSlice(mod)) break :p;
                     if (!sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) break :p;
-                    const inst_info = inst_ty.ptrInfo().data;
+                    const inst_info = inst_ty.ptrInfo(mod);

                     switch (try sema.coerceInMemoryAllowed(
                         block,
@@ -25826,7 +26425,11 @@ fn coerceExtra(
                     }

                     if (dest_info.sentinel == null or inst_info.sentinel == null or
-                        !dest_info.sentinel.?.eql(inst_info.sentinel.?, dest_info.pointee_type, sema.mod))
+                        !dest_info.sentinel.?.eql(
+                            try mod.getCoerced(inst_info.sentinel.?, dest_info.pointee_type),
+                            dest_info.pointee_type,
+                            mod,
+                        ))
                         break :p;

                     const slice_ptr = try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty);
@@ -25834,25 +26437,25 @@ fn coerceExtra(
                 },
             }
         },
-        .Int, .ComptimeInt => switch (inst_ty.zigTypeTag()) {
+        .Int, .ComptimeInt => switch (inst_ty.zigTypeTag(mod)) {
             .Float, .ComptimeFloat => float: {
                 if (is_undef) {
                     return sema.addConstUndef(dest_ty);
                 }
                 const val = (try sema.resolveMaybeUndefVal(inst)) orelse {
-                    if (dest_ty.zigTypeTag() == .ComptimeInt) {
+                    if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
                         if (!opts.report_err) return error.NotCoercible;
                         return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known");
                     }
                     break :float;
                 };

-                if (val.floatHasFraction()) {
+                if (val.floatHasFraction(mod)) {
                     return sema.fail(
                         block,
                         inst_src,
                         "fractional component prevents float value '{}' from coercion to type '{}'",
-                        .{ val.fmtValue(inst_ty, sema.mod), dest_ty.fmt(sema.mod) },
+                        .{ val.fmtValue(inst_ty, mod), dest_ty.fmt(mod) },
                     );
                 }
                 const result_val = try sema.floatToInt(block, inst_src, val, inst_ty, dest_ty);
@@ -25866,19 +26469,19 @@ fn coerceExtra(
                 // comptime-known integer to other number
                 if (!(try sema.intFitsInType(val, dest_ty, null))) {
                     if (!opts.report_err) return error.NotCoercible;
-                    return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) });
+                    return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) });
                 }
-                return try sema.addConstant(dest_ty, val);
+                return try sema.addConstant(dest_ty, try mod.getCoerced(val, dest_ty));
                 }
-                if (dest_ty.zigTypeTag() == .ComptimeInt) {
+                if (dest_ty.zigTypeTag(mod) == .ComptimeInt) {
                     if (!opts.report_err) return error.NotCoercible;
                     if (opts.no_cast_to_comptime_int) return inst;
                     return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime-known");
                 }

                 // integer widening
-                const dst_info = dest_ty.intInfo(target);
-                const src_info = inst_ty.intInfo(target);
+                const dst_info = dest_ty.intInfo(mod);
+                const src_info = inst_ty.intInfo(mod);
                 if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or
                     // small enough unsigned ints can get casted to large enough signed ints
                     (dst_info.signedness == .signed and dst_info.bits > src_info.bits))
@@ -25892,10 +26495,10 @@ fn coerceExtra(
             },
             else => {},
         },
-        .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag()) {
+        .Float, .ComptimeFloat => switch (inst_ty.zigTypeTag(mod)) {
             .ComptimeFloat => {
                 const val = try sema.resolveConstValue(block, .unneeded, inst, "");
-                const result_val = try val.floatCast(sema.arena, dest_ty, target);
+                const result_val = try val.floatCast(dest_ty, mod);
                 return try sema.addConstant(dest_ty, result_val);
             },
             .Float => {
@@ -25903,17 +26506,17 @@ fn coerceExtra(
                     return sema.addConstUndef(dest_ty);
                 }
                 if (try sema.resolveMaybeUndefVal(inst)) |val| {
-                    const result_val = try val.floatCast(sema.arena, dest_ty, target);
-                    if (!val.eql(result_val, inst_ty, sema.mod)) {
+                    const result_val = try val.floatCast(dest_ty, mod);
+                    if (!val.eql(try result_val.floatCast(inst_ty, mod), inst_ty, mod)) {
                         return sema.fail(
                             block,
                             inst_src,
                             "type '{}' cannot represent float value '{}'",
-                            .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) },
+                            .{ dest_ty.fmt(mod), val.fmtValue(inst_ty, mod) },
                         );
                     }
                     return try sema.addConstant(dest_ty, result_val);
-                } else if (dest_ty.zigTypeTag() == .ComptimeFloat) {
+                } else if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
                     if (!opts.report_err) return error.NotCoercible;
                     return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known");
                 }
@@ -25931,13 +26534,13 @@ fn coerceExtra(
                     return sema.addConstUndef(dest_ty);
                 }
                 const val = (try sema.resolveMaybeUndefVal(inst)) orelse {
-                    if (dest_ty.zigTypeTag() == .ComptimeFloat) {
+                    if (dest_ty.zigTypeTag(mod) == .ComptimeFloat) {
                         if (!opts.report_err) return error.NotCoercible;
                         return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime-known");
                     }
                     break :int;
                 };
-                const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, sema.mod, sema);
+                const result_val = try val.intToFloatAdvanced(sema.arena, inst_ty, dest_ty, mod, sema);
                 // TODO implement this compile error
                 //const int_again_val = try result_val.floatToInt(sema.arena, inst_ty);
                 //if (!int_again_val.eql(val, inst_ty, mod)) {
                 //    return sema.fail(
                 //        block,
                 //        inst_src,
                 //        "type '{}' cannot represent integer value '{}'",
-                //        .{ dest_ty.fmt(sema.mod), val },
+                //        .{ dest_ty.fmt(mod), val },
                 //    );
                 //}
                 return try sema.addConstant(dest_ty, result_val);
@@ -25955,18 +26558,18 @@ fn coerceExtra(
             },
             else => {},
         },
-        .Enum => switch (inst_ty.zigTypeTag()) {
+        .Enum => switch (inst_ty.zigTypeTag(mod)) {
             .EnumLiteral => {
                 // enum literal to enum
                 const val = try sema.resolveConstValue(block, .unneeded, inst, "");
-                const bytes = val.castTag(.enum_literal).?.data;
-                const field_index = dest_ty.enumFieldIndex(bytes) orelse {
+                const string = mod.intern_pool.indexToKey(val.toIntern()).enum_literal;
+                const field_index = dest_ty.enumFieldIndex(string, mod) orelse {
                     const msg = msg: {
                         const msg = try sema.errMsg(
                             block,
                             inst_src,
-                            "no field named '{s}' in enum '{}'",
-                            .{ bytes, dest_ty.fmt(sema.mod) },
+                            "no field named '{}' in enum '{}'",
+                            .{ string.fmt(&mod.intern_pool), dest_ty.fmt(mod) },
                         );
                         errdefer msg.destroy(sema.gpa);
                         try sema.addDeclaredHereNote(msg, dest_ty);
@@ -25976,13 +26579,13 @@ fn coerceExtra(
                 };
                 return sema.addConstant(
                     dest_ty,
-                    try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)),
+                    try mod.enumValueFieldIndex(dest_ty, @intCast(u32, field_index)),
                 );
             },
             .Union => blk: {
                 // union to its own tag type
-                const union_tag_ty = inst_ty.unionTagType() orelse break :blk;
-                if (union_tag_ty.eql(dest_ty, sema.mod)) {
+                const union_tag_ty = inst_ty.unionTagType(mod) orelse break :blk;
+                if (union_tag_ty.eql(dest_ty, mod)) {
                     return sema.unionToTag(block, dest_ty, inst, inst_src);
                 }
             },
@@ -25991,27 +26594,33 @@ fn coerceExtra(
             },
             else => {},
         },
-        .ErrorUnion => switch (inst_ty.zigTypeTag()) {
+        .ErrorUnion => switch (inst_ty.zigTypeTag(mod)) {
             .ErrorUnion => eu: {
                 if (maybe_inst_val) |inst_val| {
-                    switch (inst_val.tag()) {
+                    switch (inst_val.toIntern()) {
                         .undef => return sema.addConstUndef(dest_ty),
-                        .eu_payload => {
-                            const payload = try sema.addConstant(
-                                inst_ty.errorUnionPayload(),
-                                inst_val.castTag(.eu_payload).?.data,
-                            );
-                            return sema.wrapErrorUnionPayload(block, dest_ty, payload, inst_src) catch |err| switch (err) {
-                                error.NotCoercible => break :eu,
-                                else => |e| return e,
-                            };
-                        },
-                        else => {
-                            const error_set = try sema.addConstant(
-                                inst_ty.errorUnionSet(),
-                                inst_val,
-                            );
-                            return sema.wrapErrorUnionSet(block, dest_ty, error_set, inst_src);
+                        else => switch (mod.intern_pool.indexToKey(inst_val.toIntern())) {
+                            .error_union => |error_union| switch (error_union.val) {
+                                .err_name => |err_name| {
+                                    const error_set_ty = inst_ty.errorUnionSet(mod);
+                                    const error_set_val = try sema.addConstant(error_set_ty, (try mod.intern(.{ .err = .{
+                                        .ty = error_set_ty.toIntern(),
+                                        .name = err_name,
+                                    } })).toValue());
+                                    return sema.wrapErrorUnionSet(block, dest_ty, error_set_val, inst_src);
+                                },
+                                .payload => |payload| {
+                                    const payload_val = try sema.addConstant(
+                                        inst_ty.errorUnionPayload(mod),
+                                        payload.toValue(),
+                                    );
+                                    return sema.wrapErrorUnionPayload(block, dest_ty, payload_val, inst_src) catch |err| switch (err) {
+                                        error.NotCoercible => break :eu,
+                                        else => |e| return e,
+                                    };
+                                },
+                            },
+                            else => unreachable,
                         },
                     }
                 }
@@ -26031,10 +26640,10 @@ fn coerceExtra(
                 };
             },
         },
-        .Union => switch (inst_ty.zigTypeTag()) {
+        .Union => switch (inst_ty.zigTypeTag(mod)) {
             .Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
             .Struct => {
-                if (inst_ty.isAnonStruct()) {
+                if (inst_ty.isAnonStruct(mod)) {
                     return sema.coerceAnonStructToUnion(block, dest_ty, dest_ty_src, inst, inst_src);
                 }
             },
@@ -26043,13 +26652,13 @@ fn coerceExtra(
             },
             else => {},
         },
-        .Array => switch (inst_ty.zigTypeTag()) {
+        .Array => switch (inst_ty.zigTypeTag(mod)) {
             .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
             .Struct => {
                 if (inst == .empty_struct) {
                     return sema.arrayInitEmpty(block, inst_src, dest_ty);
                 }
-                if (inst_ty.isTuple()) {
+                if (inst_ty.isTuple(mod)) {
                     return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
                 }
             },
@@ -26058,10 +26667,10 @@ fn coerceExtra(
             },
             else => {},
         },
-        .Vector => switch (inst_ty.zigTypeTag()) {
+        .Vector => switch (inst_ty.zigTypeTag(mod)) {
             .Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
             .Struct => {
-                if (inst_ty.isTuple()) {
+                if (inst_ty.isTuple(mod)) {
                     return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
                 }
             },
@@ -26074,7 +26683,7 @@ fn coerceExtra(
             if (inst == .empty_struct) {
                 return sema.structInitEmpty(block, dest_ty, dest_ty_src, inst_src);
             }
-            if (inst_ty.isTupleOrAnonStruct()) {
+            if (inst_ty.isTupleOrAnonStruct(mod)) {
                 return sema.coerceTupleToStruct(block, dest_ty, inst, inst_src) catch |err| switch (err) {
                     error.NotCoercible => break :blk,
                     else => |e| return e,
@@ -26093,35 +26702,34 @@ fn coerceExtra(

     if (!opts.report_err) return error.NotCoercible;

-    if (opts.is_ret and dest_ty.zigTypeTag() == .NoReturn) {
+    if (opts.is_ret and dest_ty.zigTypeTag(mod) == .NoReturn) {
         const msg = msg: {
             const msg = try sema.errMsg(block, inst_src, "function declared 'noreturn' returns", .{});
             errdefer msg.destroy(sema.gpa);

             const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
-            const src_decl = sema.mod.declPtr(sema.func.?.owner_decl);
-            try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "'noreturn' declared here", .{});
+            const src_decl = mod.declPtr(sema.func.?.owner_decl);
+            try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "'noreturn' declared here", .{});
             break :msg msg;
         };
         return sema.failWithOwnedErrorMsg(msg);
     }

     const msg = msg: {
-        const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod) });
+        const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ dest_ty.fmt(mod), inst_ty.fmt(mod) });
         errdefer msg.destroy(sema.gpa);

         // E!T to T
-        if (inst_ty.zigTypeTag() == .ErrorUnion and
-            (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
+        if (inst_ty.zigTypeTag(mod) == .ErrorUnion and
+            (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
         {
             try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{});
             try sema.errNote(block, inst_src, msg, "consider using 'try', 'catch', or 'if'", .{});
         }

         // ?T to T
-        var buf: Type.Payload.ElemType = undefined;
-        if (inst_ty.zigTypeTag() == .Optional and
-            (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
+        if (inst_ty.zigTypeTag(mod) == .Optional and
+            (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(mod), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
         {
             try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
             try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{});
@@ -26130,18 +26738,18 @@ fn coerceExtra(
         try in_memory_result.report(sema, block, inst_src, msg);

         // Add notes about function return type
-        if (opts.is_ret and sema.mod.test_functions.get(sema.func.?.owner_decl) == null) {
+        if (opts.is_ret and mod.test_functions.get(sema.func.?.owner_decl) == null) {
             const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
-            const src_decl = sema.mod.declPtr(sema.func.?.owner_decl);
-            if (inst_ty.isError() and !dest_ty.isError()) {
-                try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function cannot return an error", .{});
+            const src_decl = mod.declPtr(sema.func.?.owner_decl);
+            if (inst_ty.isError(mod) and !dest_ty.isError(mod)) {
+                try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function cannot return an error", .{});
             } else {
-                try sema.mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl), msg, "function return type declared here", .{});
+                try mod.errNoteNonLazy(ret_ty_src.toSrcLoc(src_decl, mod), msg, "function return type declared here", .{});
             }
         }

         if (try opts.param_src.get(sema)) |param_src| {
-            try sema.mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{});
+            try mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{});
         }

         // TODO maybe add "cannot store an error in type '{}'" note
@@ -26151,6 +26759,84 @@ fn coerceExtra(
     return sema.failWithOwnedErrorMsg(msg);
 }

+fn coerceValueInMemory(
+    sema: *Sema,
+    block: *Block,
+    val: Value,
+    src_ty: Type,
+    dst_ty: Type,
+    dst_ty_src: LazySrcLoc,
+) CompileError!Value {
+    const mod = sema.mod;
+    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+        .aggregate => |aggregate| {
+            const dst_ty_key = mod.intern_pool.indexToKey(dst_ty.toIntern());
+            const dest_len = try sema.usizeCast(
+                block,
+                dst_ty_src,
+                mod.intern_pool.aggregateTypeLen(dst_ty.toIntern()),
+            );
+            direct: {
+                const src_ty_child = switch (mod.intern_pool.indexToKey(src_ty.toIntern())) {
+                    inline .array_type, .vector_type => |seq_type| seq_type.child,
+                    .anon_struct_type, .struct_type => break :direct,
+                    else => unreachable,
+                };
+                const dst_ty_child = switch (dst_ty_key) {
+                    inline .array_type, .vector_type => |seq_type| seq_type.child,
+                    .anon_struct_type, .struct_type => break :direct,
+                    else => unreachable,
+                };
+                if (src_ty_child != dst_ty_child) break :direct;
+                // TODO: write something like getCoercedInts to avoid needing to dupe
+                return (try mod.intern(.{ .aggregate = .{
+                    .ty = dst_ty.toIntern(),
+                    .storage = switch (aggregate.storage) {
+                        .bytes => |bytes| .{ .bytes = try sema.arena.dupe(u8, bytes[0..dest_len]) },
+                        .elems => |elems| .{ .elems = try sema.arena.dupe(InternPool.Index, elems[0..dest_len]) },
+                        .repeated_elem => |elem| .{ .repeated_elem = elem },
+                    },
+                } })).toValue();
+            }
+            const dest_elems = try sema.arena.alloc(InternPool.Index, dest_len);
+            for (dest_elems, 0..) |*dest_elem, i| {
+                const elem_ty = switch (dst_ty_key) {
+                    inline .array_type, .vector_type => |seq_type| seq_type.child,
+                    .anon_struct_type => |anon_struct_type| anon_struct_type.types[i],
+                    .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?
+                        .fields.values()[i].ty.toIntern(),
+                    else => unreachable,
+                };
+                dest_elem.* = try mod.intern_pool.getCoerced(mod.gpa, switch (aggregate.storage) {
+                    .bytes => |bytes| (try mod.intValue(Type.u8, bytes[i])).toIntern(),
+                    .elems => |elems| elems[i],
+                    .repeated_elem => |elem| elem,
+                }, elem_ty);
+            }
+            return (try mod.intern(.{ .aggregate = .{
+                .ty = dst_ty.toIntern(),
+                .storage = .{ .elems = dest_elems },
+            } })).toValue();
+        },
+        .float => |float| (try mod.intern(.{ .float = .{
+            .ty = dst_ty.toIntern(),
+            .storage = float.storage,
+        } })).toValue(),
+        else => try mod.getCoerced(val, dst_ty),
+    };
+}
+
+fn coerceInMemory(
+    sema: *Sema,
+    block: *Block,
+    val: Value,
+    src_ty: Type,
+    dst_ty: Type,
+    dst_ty_src: LazySrcLoc,
+) CompileError!Air.Inst.Ref {
+    return sema.addConstant(dst_ty, try sema.coerceValueInMemory(block, val, src_ty, dst_ty, dst_ty_src));
+}
+
 const InMemoryCoercionResult = union(enum) {
     ok,
     no_match: Pair,
@@ -26164,7 +26850,7 @@ const InMemoryCoercionResult = union(enum) {
     optional_shape: Pair,
     optional_child: PairAndChild,
     from_anyerror,
-    missing_error: []const []const u8,
+    missing_error: []const InternPool.NullTerminatedString,
     /// true if wanted is var args
     fn_var_args: bool,
     /// true if wanted is generic
@@ -26264,6 +26950,7 @@ const InMemoryCoercionResult = union(enum) {
     }

     fn report(res: *const InMemoryCoercionResult, sema: *Sema, block: *Block, src: LazySrcLoc, msg: *Module.ErrorMsg) !void {
+        const mod = sema.mod;
         var cur = res;
         while (true) switch (cur.*) {
             .ok => unreachable,
@@ -26280,7 +26967,7 @@ const InMemoryCoercionResult = union(enum) {
             },
             .error_union_payload => |pair| {
                 try sema.errNote(block, src, msg, "error union payload '{}' cannot cast into error union payload '{}'", .{
-                    pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
+                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                 });
                 cur = pair.child;
             },
@@ -26291,20 +26978,20 @@ const InMemoryCoercionResult = union(enum) {
                 break;
             },
             .array_sentinel => |sentinel| {
-                if (sentinel.actual.tag() != .unreachable_value) {
+                if (sentinel.actual.toIntern() != .unreachable_value) {
                     try sema.errNote(block, src, msg, "array sentinel '{}' cannot cast into array sentinel '{}'", .{
-                        sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod),
+                        sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod),
                     });
                 } else {
                     try sema.errNote(block, src, msg, "destination array requires '{}' sentinel", .{
-                        sentinel.wanted.fmtValue(sentinel.ty, sema.mod),
+                        sentinel.wanted.fmtValue(sentinel.ty, mod),
                     });
                 }
                 break;
             },
             .array_elem => |pair| {
                 try sema.errNote(block, src, msg, "array element type '{}' cannot cast into array element type '{}'", .{
-                    pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
+                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                 });
                 cur = pair.child;
             },
@@ -26316,21 +27003,19 @@ const InMemoryCoercionResult = union(enum) {
             },
             .vector_elem => |pair| {
                 try sema.errNote(block, src, msg, "vector element type '{}' cannot cast into vector element type '{}'", .{
-                    pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
+                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                 });
                 cur = pair.child;
             },
             .optional_shape => |pair| {
-                var buf_actual: Type.Payload.ElemType = undefined;
-                var buf_wanted: Type.Payload.ElemType = undefined;
                 try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
-                    pair.actual.optionalChild(&buf_actual).fmt(sema.mod), pair.wanted.optionalChild(&buf_wanted).fmt(sema.mod),
+                    pair.actual.optionalChild(mod).fmt(mod), pair.wanted.optionalChild(mod).fmt(mod),
                 });
                 break;
             },
             .optional_child => |pair| {
                 try sema.errNote(block, src, msg, "optional type child '{}' cannot cast into optional type child '{}'", .{
-                    pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
+                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                 });
                 cur = pair.child;
             },
@@ -26340,7 +27025,7 @@ const InMemoryCoercionResult = union(enum) {
             },
             .missing_error => |missing_errors| {
                 for (missing_errors) |err| {
-                    try sema.errNote(block, src, msg, "'error.{s}' not a member of destination error set", .{err});
+                    try sema.errNote(block, src, msg, "'error.{}' not a member of destination error set", .{err.fmt(&mod.intern_pool)});
                 }
                 break;
             },
@@ -26394,7 +27079,7 @@ const InMemoryCoercionResult = union(enum) {
             },
             .fn_param => |param| {
                 try sema.errNote(block, src, msg, "parameter {d} '{}' cannot cast into '{}'", .{
-                    param.index, param.actual.fmt(sema.mod), param.wanted.fmt(sema.mod),
+                    param.index, param.actual.fmt(mod), param.wanted.fmt(mod),
                 });
                 cur = param.child;
             },
@@ -26404,13 +27089,13 @@ const InMemoryCoercionResult = union(enum) {
             },
             .fn_return_type => |pair| {
                 try sema.errNote(block, src, msg, "return type '{}' cannot cast into return type '{}'", .{
-                    pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
+                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                 });
                 cur = pair.child;
             },
             .ptr_child => |pair| {
                 try sema.errNote(block, src, msg, "pointer type child '{}' cannot cast into pointer type child '{}'", .{
-                    pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
+                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                 });
                 cur = pair.child;
             },
@@ -26419,13 +27104,13 @@ const InMemoryCoercionResult = union(enum) {
                 break;
             },
             .ptr_sentinel => |sentinel| {
-                if (sentinel.actual.tag() != .unreachable_value) {
+                if (sentinel.actual.toIntern() != .unreachable_value) {
                     try sema.errNote(block, src, msg, "pointer sentinel '{}' cannot cast into pointer sentinel '{}'", .{
-                        sentinel.actual.fmtValue(sentinel.ty, sema.mod), sentinel.wanted.fmtValue(sentinel.ty, sema.mod),
+                        sentinel.actual.fmtValue(sentinel.ty, mod), sentinel.wanted.fmtValue(sentinel.ty, mod),
                     });
                 } else {
                     try sema.errNote(block, src, msg, "destination pointer requires '{}' sentinel", .{
-                        sentinel.wanted.fmtValue(sentinel.ty, sema.mod),
+                        sentinel.wanted.fmtValue(sentinel.ty, mod),
                     });
                 }
                 break;
@@ -26445,15 +27130,15 @@ const InMemoryCoercionResult = union(enum) {
                 break;
             },
             .ptr_allowzero => |pair| {
-                const wanted_allow_zero = pair.wanted.ptrAllowsZero();
-                const actual_allow_zero = pair.actual.ptrAllowsZero();
+                const wanted_allow_zero = pair.wanted.ptrAllowsZero(mod);
+                const actual_allow_zero = pair.actual.ptrAllowsZero(mod);
                 if (actual_allow_zero and !wanted_allow_zero) {
                     try sema.errNote(block, src, msg, "'{}' could have null values which are illegal in type '{}'", .{
-                        pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
+                        pair.actual.fmt(mod), pair.wanted.fmt(mod),
                     });
                 } else {
                     try sema.errNote(block, src, msg, "mutable '{}' allows illegal null values stored to type '{}'", .{
-                        pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
+                        pair.actual.fmt(mod), pair.wanted.fmt(mod),
                     });
                 }
                 break;
@@ -26479,13 +27164,13 @@ const InMemoryCoercionResult = union(enum) {
             },
             .double_ptr_to_anyopaque => |pair| {
                 try sema.errNote(block, src, msg, "cannot implicitly cast double pointer '{}' to anyopaque pointer '{}'", .{
-                    pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
+                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                 });
                 break;
             },
             .slice_to_anyopaque => |pair| {
                 try sema.errNote(block, src, msg, "cannot implicitly cast slice '{}' to anyopaque pointer '{}'", .{
-                    pair.actual.fmt(sema.mod), pair.wanted.fmt(sema.mod),
+                    pair.actual.fmt(mod), pair.wanted.fmt(mod),
                 });
                 try sema.errNote(block, src, msg, "consider using '.ptr'", .{});
                 break;
@@ -26522,13 +27207,18 @@ fn coerceInMemoryAllowed(
     dest_src: LazySrcLoc,
     src_src: LazySrcLoc,
 ) CompileError!InMemoryCoercionResult {
-    if (dest_ty.eql(src_ty, sema.mod))
+    const mod = sema.mod;
+
+    if (dest_ty.eql(src_ty, mod))
         return .ok;

+    const dest_tag = dest_ty.zigTypeTag(mod);
+    const src_tag = src_ty.zigTypeTag(mod);
+
     // Differently-named integers with the same number of bits.
-    if (dest_ty.zigTypeTag() == .Int and src_ty.zigTypeTag() == .Int) {
-        const dest_info = dest_ty.intInfo(target);
-        const src_info = src_ty.intInfo(target);
+    if (dest_tag == .Int and src_tag == .Int) {
+        const dest_info = dest_ty.intInfo(mod);
+        const src_info = src_ty.intInfo(mod);

         if (dest_info.signedness == src_info.signedness and
             dest_info.bits == src_info.bits)
@@ -26551,7 +27241,7 @@ fn coerceInMemoryAllowed(
     }

     // Differently-named floats with the same number of bits.
-    if (dest_ty.zigTypeTag() == .Float and src_ty.zigTypeTag() == .Float) {
+    if (dest_tag == .Float and src_tag == .Float) {
         const dest_bits = dest_ty.floatBits(target);
         const src_bits = src_ty.floatBits(target);
         if (dest_bits == src_bits) {
@@ -26560,10 +27250,8 @@ fn coerceInMemoryAllowed(
     }

     // Pointers / Pointer-like Optionals
-    var dest_buf: Type.Payload.ElemType = undefined;
-    var src_buf: Type.Payload.ElemType = undefined;
-    const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty, &dest_buf);
-    const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty, &src_buf);
+    const maybe_dest_ptr_ty = try sema.typePtrOrOptionalPtrTy(dest_ty);
+    const maybe_src_ptr_ty = try sema.typePtrOrOptionalPtrTy(src_ty);
     if (maybe_dest_ptr_ty) |dest_ptr_ty| {
         if (maybe_src_ptr_ty) |src_ptr_ty| {
             return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ptr_ty, src_ptr_ty, dest_is_mut, target, dest_src, src_src);
@@ -26571,13 +27259,10 @@ fn coerceInMemoryAllowed(
     }

     // Slices
-    if (dest_ty.isSlice() and src_ty.isSlice()) {
+    if (dest_ty.isSlice(mod) and src_ty.isSlice(mod)) {
         return try sema.coerceInMemoryAllowedPtrs(block, dest_ty, src_ty, dest_ty, src_ty, dest_is_mut, target, dest_src, src_src);
     }

-    const dest_tag = dest_ty.zigTypeTag();
-    const src_tag = src_ty.zigTypeTag();
-
     // Functions
     if (dest_tag == .Fn and src_tag == .Fn) {
         return try sema.coerceInMemoryAllowedFns(block, dest_ty, src_ty, target, dest_src, src_src);
@@ -26585,8 +27270,8 @@ fn coerceInMemoryAllowed(

     // Error Unions
     if (dest_tag == .ErrorUnion and src_tag == .ErrorUnion) {
-        const dest_payload = dest_ty.errorUnionPayload();
-        const src_payload = src_ty.errorUnionPayload();
+        const dest_payload = dest_ty.errorUnionPayload(mod);
+        const src_payload = src_ty.errorUnionPayload(mod);
         const child = try sema.coerceInMemoryAllowed(block, dest_payload, src_payload, dest_is_mut, target, dest_src, src_src);
         if (child != .ok) {
             return InMemoryCoercionResult{ .error_union_payload = .{
@@ -26595,7 +27280,7 @@ fn coerceInMemoryAllowed(
             .wanted = dest_payload,
         } };
     }
-        return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(), src_ty.errorUnionSet(), dest_is_mut, target, dest_src, src_src);
+        return try sema.coerceInMemoryAllowed(block, dest_ty.errorUnionSet(mod), src_ty.errorUnionSet(mod), dest_is_mut, target, dest_src, src_src);
     }

     // Error Sets
@@ -26605,8 +27290,8 @@ fn coerceInMemoryAllowed(

     // Arrays
     if (dest_tag == .Array and src_tag == .Array) {
-        const dest_info = dest_ty.arrayInfo();
-        const src_info = src_ty.arrayInfo();
+        const dest_info = dest_ty.arrayInfo(mod);
+        const src_info = src_ty.arrayInfo(mod);
         if (dest_info.len != src_info.len) {
             return InMemoryCoercionResult{ .array_len = .{
                 .actual = src_info.len,
@@ -26624,11 +27309,15 @@ fn coerceInMemoryAllowed(
         }

         const ok_sent = dest_info.sentinel == null or (src_info.sentinel != null and
-            dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.elem_type, sema.mod));
+            dest_info.sentinel.?.eql(
+                try mod.getCoerced(src_info.sentinel.?, dest_info.elem_type),
+                dest_info.elem_type,
+                mod,
+            ));
         if (!ok_sent) {
             return InMemoryCoercionResult{ .array_sentinel = .{
-                .actual = src_info.sentinel orelse Value.initTag(.unreachable_value),
-                .wanted = dest_info.sentinel orelse Value.initTag(.unreachable_value),
+                .actual = src_info.sentinel orelse Value.@"unreachable",
+                .wanted = dest_info.sentinel orelse Value.@"unreachable",
                 .ty = dest_info.elem_type,
             } };
         }
@@ -26637,8 +27326,8 @@ fn coerceInMemoryAllowed(

     // Vectors
     if (dest_tag == .Vector and src_tag == .Vector) {
-        const dest_len = dest_ty.vectorLen();
-        const src_len = src_ty.vectorLen();
+        const dest_len = dest_ty.vectorLen(mod);
+        const src_len = src_ty.vectorLen(mod);
         if (dest_len != src_len) {
             return InMemoryCoercionResult{ .vector_len = .{
                 .actual = src_len,
@@ -26646,8 +27335,8 @@ fn coerceInMemoryAllowed(
             } };
         }

-        const dest_elem_ty = dest_ty.scalarType();
-        const src_elem_ty = src_ty.scalarType();
+        const dest_elem_ty = dest_ty.scalarType(mod);
+        const src_elem_ty = src_ty.scalarType(mod);
         const child = try sema.coerceInMemoryAllowed(block, dest_elem_ty, src_elem_ty, dest_is_mut, target, dest_src, src_src);
         if (child != .ok) {
             return InMemoryCoercionResult{ .vector_elem = .{
@@ -26668,15 +27357,15 @@ fn coerceInMemoryAllowed(
                 .wanted = dest_ty,
             } };
         }
-        const dest_child_type = dest_ty.optionalChild(&dest_buf);
-        const src_child_type = src_ty.optionalChild(&src_buf);
+        const dest_child_type = dest_ty.optionalChild(mod);
+        const src_child_type = src_ty.optionalChild(mod);

         const child = try sema.coerceInMemoryAllowed(block, dest_child_type, src_child_type, dest_is_mut, target, dest_src, src_src);
         if (child != .ok) {
             return InMemoryCoercionResult{ .optional_child = .{
                 .child = try child.dupe(sema.arena),
-                .actual = try src_child_type.copy(sema.arena),
-                .wanted = try dest_child_type.copy(sema.arena),
+                .actual = src_child_type,
+                .wanted = dest_child_type,
             } };
         }

@@ -26697,138 +27386,108 @@ fn coerceInMemoryAllowedErrorSets(
     dest_src: LazySrcLoc,
     src_src: LazySrcLoc,
 ) !InMemoryCoercionResult {
+    const mod = sema.mod;
+    const gpa = sema.gpa;
+    const ip = &mod.intern_pool;
+
     // Coercion to `anyerror`. Note that this check can return false negatives
     // in case the error sets did not get resolved.
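(A brief user-level aside on the subset rule this function enforces; a minimal sketch assuming stock Zig error-set semantics, with illustrative test names that are not part of the patch:)

const Small = error{A};
const Big = error{ A, B };

test "error set widening" {
    const e: Small = error.A;
    const widened: Big = e; // ok: {A} is a subset of {A, B}
    const any: anyerror = widened; // ok: every error set coerces to anyerror
    _ = any;
}

Coercing in the other direction, `Big` to `Small`, would instead produce the "'error.B' not a member of destination error set" note that the `missing_error` result above carries.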
-    if (dest_ty.isAnyError()) {
+    if (dest_ty.isAnyError(mod)) {
         return .ok;
     }

-    if (dest_ty.castTag(.error_set_inferred)) |dst_payload| {
-        const dst_ies = dst_payload.data;
+    if (mod.typeToInferredErrorSetIndex(dest_ty).unwrap()) |dst_ies_index| {
+        const dst_ies = mod.inferredErrorSetPtr(dst_ies_index);
         // We will make an effort to return `ok` without resolving either error set, to
         // avoid unnecessary "unable to resolve error set" dependency loop errors.
-        switch (src_ty.tag()) {
-            .error_set_inferred => {
-                // If both are inferred error sets of functions, and
-                // the dest includes the source function, the coercion is OK.
-                // This check is important because it works without forcing a full resolution
-                // of inferred error sets.
-                const src_ies = src_ty.castTag(.error_set_inferred).?.data;
-
-                if (dst_ies.inferred_error_sets.contains(src_ies)) {
-                    return .ok;
-                }
+        switch (src_ty.toIntern()) {
+            .anyerror_type => {},
+            else => switch (ip.indexToKey(src_ty.toIntern())) {
+                .inferred_error_set_type => |src_index| {
+                    // If both are inferred error sets of functions, and
+                    // the dest includes the source function, the coercion is OK.
+                    // This check is important because it works without forcing a full resolution
+                    // of inferred error sets.
+                    if (dst_ies.inferred_error_sets.contains(src_index)) {
+                        return .ok;
+                    }
+                },
+                .error_set_type => |error_set_type| {
+                    for (error_set_type.names) |name| {
+                        if (!dst_ies.errors.contains(name)) break;
+                    } else return .ok;
+                },
+                else => unreachable,
             },
-            .error_set_single => {
-                const name = src_ty.castTag(.error_set_single).?.data;
-                if (dst_ies.errors.contains(name)) return .ok;
-            },
-            .error_set_merged => {
-                const names = src_ty.castTag(.error_set_merged).?.data.keys();
-                for (names) |name| {
-                    if (!dst_ies.errors.contains(name)) break;
-                } else return .ok;
-            },
-            .error_set => {
-                const names = src_ty.castTag(.error_set).?.data.names.keys();
-                for (names) |name| {
-                    if (!dst_ies.errors.contains(name)) break;
-                } else return .ok;
-            },
-            .anyerror => {},
-            else => unreachable,
         }

-        if (dst_ies.func == sema.owner_func) {
+        if (dst_ies.func == sema.owner_func_index.unwrap()) {
             // We are trying to coerce an error set to the current function's
             // inferred error set.
-            try dst_ies.addErrorSet(sema.gpa, src_ty);
+            try dst_ies.addErrorSet(src_ty, ip, gpa);
             return .ok;
         }

-        try sema.resolveInferredErrorSet(block, dest_src, dst_payload.data);
+        try sema.resolveInferredErrorSet(block, dest_src, dst_ies_index);
         // isAnyError might have changed from a false negative to a true positive after resolution.
-        if (dest_ty.isAnyError()) {
+        if (dest_ty.isAnyError(mod)) {
             return .ok;
         }
     }

-    var missing_error_buf = std.ArrayList([]const u8).init(sema.gpa);
+    var missing_error_buf = std.ArrayList(InternPool.NullTerminatedString).init(gpa);
     defer missing_error_buf.deinit();

-    switch (src_ty.tag()) {
-        .error_set_inferred => {
-            const src_data = src_ty.castTag(.error_set_inferred).?.data;
-
-            try sema.resolveInferredErrorSet(block, src_src, src_data);
-            // src anyerror status might have changed after the resolution.
-            if (src_ty.isAnyError()) {
-                // dest_ty.isAnyError() == true is already checked for at this point.
-                return .from_anyerror;
-            }
-
-            for (src_data.errors.keys()) |key| {
-                if (!dest_ty.errorSetHasField(key)) {
-                    try missing_error_buf.append(key);
-                }
-            }
-
-            if (missing_error_buf.items.len != 0) {
-                return InMemoryCoercionResult{
-                    .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items),
-                };
-            }
-
-            return .ok;
-        },
-        .error_set_single => {
-            const name = src_ty.castTag(.error_set_single).?.data;
-            if (dest_ty.errorSetHasField(name)) {
-                return .ok;
-            }
-            const list = try sema.arena.alloc([]const u8, 1);
-            list[0] = name;
-            return InMemoryCoercionResult{ .missing_error = list };
-        },
-        .error_set_merged => {
-            const names = src_ty.castTag(.error_set_merged).?.data.keys();
-            for (names) |name| {
-                if (!dest_ty.errorSetHasField(name)) {
-                    try missing_error_buf.append(name);
-                }
-            }
-
-            if (missing_error_buf.items.len != 0) {
-                return InMemoryCoercionResult{
-                    .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items),
-                };
-            }
-
-            return .ok;
-        },
-        .error_set => {
-            const names = src_ty.castTag(.error_set).?.data.names.keys();
-            for (names) |name| {
-                if (!dest_ty.errorSetHasField(name)) {
-                    try missing_error_buf.append(name);
-                }
-            }
-
-            if (missing_error_buf.items.len != 0) {
-                return InMemoryCoercionResult{
-                    .missing_error = try sema.arena.dupe([]const u8, missing_error_buf.items),
-                };
-            }
-
-            return .ok;
-        },
-        .anyerror => switch (dest_ty.tag()) {
-            .error_set_inferred => unreachable, // Caught by dest_ty.isAnyError() above.
-            .error_set_single, .error_set_merged, .error_set => return .from_anyerror,
-            .anyerror => unreachable, // Filtered out above.
+    switch (src_ty.toIntern()) {
+        .anyerror_type => switch (ip.indexToKey(dest_ty.toIntern())) {
+            .inferred_error_set_type => unreachable, // Caught by dest_ty.isAnyError(mod) above.
+            .simple_type => unreachable, // filtered out above
+            .error_set_type => return .from_anyerror,
+            else => unreachable,
+        },
+
+        else => switch (ip.indexToKey(src_ty.toIntern())) {
+            .inferred_error_set_type => |src_index| {
+                const src_data = mod.inferredErrorSetPtr(src_index);
+
+                try sema.resolveInferredErrorSet(block, src_src, src_index);
+                // src anyerror status might have changed after the resolution.
+                if (src_ty.isAnyError(mod)) {
+                    // dest_ty.isAnyError(mod) == true is already checked for at this point.
+                    return .from_anyerror;
+                }
+
+                for (src_data.errors.keys()) |key| {
+                    if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), key)) {
+                        try missing_error_buf.append(key);
+                    }
+                }
+
+                if (missing_error_buf.items.len != 0) {
+                    return InMemoryCoercionResult{
+                        .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items),
+                    };
+                }
+
+                return .ok;
+            },
+            .error_set_type => |error_set_type| {
+                for (error_set_type.names) |name| {
+                    if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), name)) {
+                        try missing_error_buf.append(name);
+                    }
+                }
+
+                if (missing_error_buf.items.len != 0) {
+                    return InMemoryCoercionResult{
+                        .missing_error = try sema.arena.dupe(InternPool.NullTerminatedString, missing_error_buf.items),
+                    };
+                }
+
+                return .ok;
+            },
             else => unreachable,
         },
-        else => unreachable,
     }

     unreachable;
@@ -26843,68 +27502,94 @@ fn coerceInMemoryAllowedFns(
     dest_src: LazySrcLoc,
     src_src: LazySrcLoc,
 ) !InMemoryCoercionResult {
-    const dest_info = dest_ty.fnInfo();
-    const src_info = src_ty.fnInfo();
+    const mod = sema.mod;

-    if (dest_info.is_var_args != src_info.is_var_args) {
-        return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args };
-    }
+    {
+        const dest_info = mod.typeToFunc(dest_ty).?;
+        const src_info = mod.typeToFunc(src_ty).?;

-    if (dest_info.is_generic != src_info.is_generic) {
-        return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic };
-    }
+        if (dest_info.is_var_args != src_info.is_var_args) {
+            return InMemoryCoercionResult{ .fn_var_args = dest_info.is_var_args };
+        }

-    if (dest_info.cc != src_info.cc) {
-        return InMemoryCoercionResult{ .fn_cc = .{
-            .actual = src_info.cc,
-            .wanted = dest_info.cc,
-        } };
-    }
+        if (dest_info.is_generic != src_info.is_generic) {
+            return InMemoryCoercionResult{ .fn_generic = dest_info.is_generic };
+        }

-    if (!src_info.return_type.isNoReturn()) {
-        const rt = try sema.coerceInMemoryAllowed(block, dest_info.return_type, src_info.return_type, false, target, dest_src, src_src);
-        if (rt != .ok) {
-            return InMemoryCoercionResult{ .fn_return_type = .{
-                .child = try rt.dupe(sema.arena),
-                .actual = src_info.return_type,
-                .wanted = dest_info.return_type,
+        if (dest_info.cc != src_info.cc) {
+            return InMemoryCoercionResult{ .fn_cc = .{
+                .actual = src_info.cc,
+                .wanted = dest_info.cc,
             } };
         }
+
+        switch (src_info.return_type) {
+            .noreturn_type, .generic_poison_type => {},
+            else => {
+                const dest_return_type = dest_info.return_type.toType();
+                const src_return_type = src_info.return_type.toType();
+                const rt = try sema.coerceInMemoryAllowed(block, dest_return_type, src_return_type, false, target, dest_src, src_src);
+                if (rt != .ok) {
+                    return InMemoryCoercionResult{ .fn_return_type = .{
+                        .child = try rt.dupe(sema.arena),
+                        .actual = src_return_type,
+                        .wanted = dest_return_type,
+                    } };
+                }
+            },
+        }
     }

-    if (dest_info.param_types.len != src_info.param_types.len) {
-        return InMemoryCoercionResult{ .fn_param_count = .{
-            .actual = src_info.param_types.len,
-            .wanted = dest_info.param_types.len,
-        } };
-    }
+    const params_len = params_len: {
+        const dest_info = mod.typeToFunc(dest_ty).?;
+        const src_info = mod.typeToFunc(src_ty).?;

-    if (dest_info.noalias_bits != src_info.noalias_bits) {
-        return InMemoryCoercionResult{ .fn_param_noalias = .{
-            .actual = src_info.noalias_bits,
-            .wanted = dest_info.noalias_bits,
-        } };
-    }
+        if (dest_info.param_types.len != src_info.param_types.len) {
+            return InMemoryCoercionResult{ .fn_param_count = .{
+                .actual = src_info.param_types.len,
+                .wanted = dest_info.param_types.len,
+            } };
+        }

-    for (dest_info.param_types, 0..) |dest_param_ty, i| {
-        const src_param_ty = src_info.param_types[i];
+        if (dest_info.noalias_bits != src_info.noalias_bits) {
+            return InMemoryCoercionResult{ .fn_param_noalias = .{
+                .actual = src_info.noalias_bits,
+                .wanted = dest_info.noalias_bits,
+            } };
+        }

-        if (dest_info.comptime_params[i] != src_info.comptime_params[i]) {
+        break :params_len dest_info.param_types.len;
+    };
+
+    for (0..params_len) |param_i| {
+        const dest_info = mod.typeToFunc(dest_ty).?;
+        const src_info = mod.typeToFunc(src_ty).?;
+
+        const dest_param_ty = dest_info.param_types[param_i].toType();
+        const src_param_ty = src_info.param_types[param_i].toType();
+
+        const param_i_small = @intCast(u5, param_i);
+        if (dest_info.paramIsComptime(param_i_small) != src_info.paramIsComptime(param_i_small)) {
             return InMemoryCoercionResult{ .fn_param_comptime = .{
-                .index = i,
-                .wanted = dest_info.comptime_params[i],
+                .index = param_i,
+                .wanted = dest_info.paramIsComptime(param_i_small),
             } };
         }

-        // Note: Cast direction is reversed here.
-        const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src);
-        if (param != .ok) {
-            return InMemoryCoercionResult{ .fn_param = .{
-                .child = try param.dupe(sema.arena),
-                .actual = src_param_ty,
-                .wanted = dest_param_ty,
-                .index = i,
-            } };
+        switch (src_param_ty.toIntern()) {
+            .generic_poison_type => {},
+            else => {
+                // Note: Cast direction is reversed here.
+                const param = try sema.coerceInMemoryAllowed(block, src_param_ty, dest_param_ty, false, target, dest_src, src_src);
+                if (param != .ok) {
+                    return InMemoryCoercionResult{ .fn_param = .{
+                        .child = try param.dupe(sema.arena),
+                        .actual = src_param_ty,
+                        .wanted = dest_param_ty,
+                        .index = param_i,
+                    } };
+                }
+            },
         }
     }

@@ -26923,8 +27608,9 @@ fn coerceInMemoryAllowedPtrs(
     dest_src: LazySrcLoc,
     src_src: LazySrcLoc,
 ) !InMemoryCoercionResult {
-    const dest_info = dest_ptr_ty.ptrInfo().data;
-    const src_info = src_ptr_ty.ptrInfo().data;
+    const mod = sema.mod;
+    const dest_info = dest_ptr_ty.ptrInfo(mod);
+    const src_info = src_ptr_ty.ptrInfo(mod);

     const ok_ptr_size = src_info.size == dest_info.size or
         src_info.size == .C or dest_info.size == .C;
@@ -26964,8 +27650,8 @@ fn coerceInMemoryAllowedPtrs(
         } };
     }

-    const dest_allow_zero = dest_ty.ptrAllowsZero();
-    const src_allow_zero = src_ty.ptrAllowsZero();
+    const dest_allow_zero = dest_ty.ptrAllowsZero(mod);
+    const src_allow_zero = src_ty.ptrAllowsZero(mod);

     const ok_allows_zero = (dest_allow_zero and
         (src_allow_zero or !dest_is_mut)) or
@@ -26989,12 +27675,15 @@ fn coerceInMemoryAllowedPtrs(
     }

     const ok_sent = dest_info.sentinel == null or src_info.size == .C or
-        (src_info.sentinel != null and
-        dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type, sema.mod));
+        (src_info.sentinel != null and dest_info.sentinel.?.eql(
+            try mod.getCoerced(src_info.sentinel.?, dest_info.pointee_type),
+            dest_info.pointee_type,
+            sema.mod,
+        ));
     if (!ok_sent) {
         return InMemoryCoercionResult{ .ptr_sentinel = .{
-            .actual = src_info.sentinel orelse Value.initTag(.unreachable_value),
-            .wanted = dest_info.sentinel orelse Value.initTag(.unreachable_value),
+            .actual = src_info.sentinel orelse Value.@"unreachable",
+            .wanted = dest_info.sentinel orelse Value.@"unreachable",
             .ty = dest_info.pointee_type,
         } };
     }
@@ -27013,12 +27702,12 @@ fn coerceInMemoryAllowedPtrs(
     const src_align = if (src_info.@"align" != 0)
         src_info.@"align"
     else
-        src_info.pointee_type.abiAlignment(target);
+        src_info.pointee_type.abiAlignment(mod);

     const dest_align = if (dest_info.@"align" != 0)
         dest_info.@"align"
     else
-        dest_info.pointee_type.abiAlignment(target);
+        dest_info.pointee_type.abiAlignment(mod);

     if (dest_align > src_align) {
         return InMemoryCoercionResult{ .ptr_alignment = .{
@@ -27041,8 +27730,9 @@ fn coerceVarArgParam(
 ) !Air.Inst.Ref {
     if (block.is_typeof) return inst;

+    const mod = sema.mod;
     const uncasted_ty = sema.typeOf(inst);
-    const coerced = switch (uncasted_ty.zigTypeTag()) {
+    const coerced = switch (uncasted_ty.zigTypeTag(mod)) {
         // TODO consider casting to c_int/f64 if they fit
         .ComptimeInt, .ComptimeFloat => return sema.fail(
             block,
@@ -27052,7 +27742,7 @@ fn coerceVarArgParam(
         ),
         .Fn => blk: {
             const fn_val = try sema.resolveConstValue(block, .unneeded, inst, "");
-            const fn_decl = fn_val.pointerDecl().?;
+            const fn_decl = fn_val.pointerDecl(mod).?;
             break :blk try sema.analyzeDeclRef(fn_decl);
         },
         .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
@@ -27077,7 +27767,7 @@ fn coerceVarArgParam(
             errdefer msg.destroy(sema.gpa);

             const src_decl = sema.mod.declPtr(block.src_decl);
-            try sema.explainWhyTypeIsNotExtern(msg, inst_src.toSrcLoc(src_decl), coerced_ty, .param_ty);
+            try sema.explainWhyTypeIsNotExtern(msg, inst_src.toSrcLoc(src_decl, mod), coerced_ty, .param_ty);

             try sema.addDeclaredHereNote(msg, coerced_ty);
             break :msg msg;
@@ -27109,11 +27799,12 @@ fn storePtr2(
     operand_src: LazySrcLoc,
     air_tag: Air.Inst.Tag,
 ) CompileError!void {
+    const mod = sema.mod;
     const ptr_ty = sema.typeOf(ptr);
-    if (ptr_ty.isConstPtr())
+    if (ptr_ty.isConstPtr(mod))
         return sema.fail(block, ptr_src, "cannot assign to constant", .{});

-    const elem_ty = ptr_ty.childType();
+    const elem_ty = ptr_ty.childType(mod);

     // To generate better code for tuples, we detect a tuple operand here, and
     // analyze field loads and stores directly. This avoids an extra allocation + memcpy
@@ -27124,8 +27815,8 @@ fn storePtr2(
     // this code does not handle tuple-to-struct coercion which requires dealing with missing
     // fields.
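(The tuple fast path described in the comment above, and implemented just below, is easy to trigger from user code; a minimal sketch assuming stock Zig semantics, with an illustrative test name:)

test "tuple stored through an array pointer" {
    var arr: [3]u32 = undefined;
    const ptr = &arr;
    // The operand is an anonymous tuple; Sema stores each field directly
    // into the corresponding array element instead of building a temporary
    // array and copying it.
    ptr.* = .{ 1, 2, 3 };
    try @import("std").testing.expectEqual(@as(u32, 2), arr[1]);
}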
const operand_ty = sema.typeOf(uncasted_operand); - if (operand_ty.isTuple() and elem_ty.zigTypeTag() == .Array) { - const field_count = operand_ty.structFieldCount(); + if (operand_ty.isTuple(mod) and elem_ty.zigTypeTag(mod) == .Array) { + const field_count = operand_ty.structFieldCount(mod); var i: u32 = 0; while (i < field_count) : (i += 1) { const elem_src = operand_src; // TODO better source location @@ -27149,7 +27840,7 @@ fn storePtr2( // as well as working around an LLVM bug: // https://github.com/ziglang/zig/issues/11154 if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| { - const vector_ty = sema.typeOf(vector_ptr).childType(); + const vector_ty = sema.typeOf(vector_ptr).childType(mod); const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) { error.NotCoercible => unreachable, else => |e| return e, @@ -27169,7 +27860,7 @@ fn storePtr2( try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src); break :rs operand_src; }; - if (ptr_val.isComptimeMutablePtr()) { + if (ptr_val.isComptimeMutablePtr(mod)) { try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty); return; } else break :rs ptr_src; @@ -27190,7 +27881,7 @@ fn storePtr2( try sema.requireRuntimeBlock(block, src, runtime_src); try sema.queueFullTypeResolution(elem_ty); - if (ptr_ty.ptrInfo().data.vector_index == .runtime) { + if (ptr_ty.ptrInfo(mod).vector_index == .runtime) { const ptr_inst = Air.refToIndex(ptr).?; const air_tags = sema.air_instructions.items(.tag); if (air_tags[ptr_inst] == .ptr_elem_ptr) { @@ -27224,30 +27915,27 @@ fn storePtr2( /// pointer. Only if the final element type matches the vector element type, and the /// lengths match. fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref { - const array_ty = sema.typeOf(ptr).childType(); - if (array_ty.zigTypeTag() != .Array) return null; - var ptr_inst = Air.refToIndex(ptr) orelse return null; + const mod = sema.mod; + const array_ty = sema.typeOf(ptr).childType(mod); + if (array_ty.zigTypeTag(mod) != .Array) return null; + var ptr_ref = ptr; + var ptr_inst = Air.refToIndex(ptr_ref) orelse return null; const air_datas = sema.air_instructions.items(.data); const air_tags = sema.air_instructions.items(.tag); - const prev_ptr = while (air_tags[ptr_inst] == .bitcast) { - const prev_ptr = air_datas[ptr_inst].ty_op.operand; - const prev_ptr_ty = sema.typeOf(prev_ptr); - const prev_ptr_child_ty = switch (prev_ptr_ty.tag()) { - .single_mut_pointer => prev_ptr_ty.castTag(.single_mut_pointer).?.data, - .pointer => prev_ptr_ty.castTag(.pointer).?.data.pointee_type, - else => return null, - }; - if (prev_ptr_child_ty.zigTypeTag() == .Vector) break prev_ptr; - ptr_inst = Air.refToIndex(prev_ptr) orelse return null; + const vector_ty = while (air_tags[ptr_inst] == .bitcast) { + ptr_ref = air_datas[ptr_inst].ty_op.operand; + if (!sema.isKnownZigType(ptr_ref, .Pointer)) return null; + const child_ty = sema.typeOf(ptr_ref).childType(mod); + if (child_ty.zigTypeTag(mod) == .Vector) break child_ty; + ptr_inst = Air.refToIndex(ptr_ref) orelse return null; } else return null; // We have a pointer-to-array and a pointer-to-vector. If the elements and // lengths match, return the result. 
- const vector_ty = sema.typeOf(prev_ptr).childType(); - if (array_ty.childType().eql(vector_ty.childType(), sema.mod) and - array_ty.arrayLen() == vector_ty.vectorLen()) + if (array_ty.childType(mod).eql(vector_ty.childType(mod), sema.mod) and + array_ty.arrayLen(mod) == vector_ty.vectorLen(mod)) { - return prev_ptr; + return ptr_ref; } else { return null; } @@ -27263,54 +27951,55 @@ fn storePtrVal( operand_val: Value, operand_ty: Type, ) !void { + const mod = sema.mod; var mut_kit = try sema.beginComptimePtrMutation(block, src, ptr_val, operand_ty); - try sema.checkComptimeVarStore(block, src, mut_kit.decl_ref_mut); + try sema.checkComptimeVarStore(block, src, mut_kit.mut_decl); switch (mut_kit.pointee) { .direct => |val_ptr| { - if (mut_kit.decl_ref_mut.runtime_index == .comptime_field_ptr) { - if (!operand_val.eql(val_ptr.*, operand_ty, sema.mod)) { + if (mut_kit.mut_decl.runtime_index == .comptime_field_ptr) { + if (!operand_val.eql(val_ptr.*, operand_ty, mod)) { // TODO use failWithInvalidComptimeFieldStore return sema.fail(block, src, "value stored in comptime field does not match the default value of the field", .{}); } return; } - const arena = mut_kit.beginArena(sema.mod); - defer mut_kit.finishArena(sema.mod); - - val_ptr.* = try operand_val.copy(arena); + val_ptr.* = (try operand_val.intern(operand_ty, mod)).toValue(); }, .reinterpret => |reinterpret| { - const target = sema.mod.getTarget(); - const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(target)); + const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(mod)); const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); - reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) { + reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, mod, buffer) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}), }; - operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) { + operand_val.writeToMemory(operand_ty, mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => unreachable, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(mod)}), }; - const arena = mut_kit.beginArena(sema.mod); - defer mut_kit.finishArena(sema.mod); - - reinterpret.val_ptr.* = try Value.readFromMemory(mut_kit.ty, sema.mod, buffer, arena); + reinterpret.val_ptr.* = (try (try Value.readFromMemory(mut_kit.ty, mod, buffer, sema.arena)).intern(mut_kit.ty, mod)).toValue(); }, .bad_decl_ty, .bad_ptr_ty => { // TODO show the decl declaration site in a note and explain whether the decl // or the pointer is the problematic type - return sema.fail(block, src, "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory 
layout", .{mut_kit.ty.fmt(sema.mod)}); + return sema.fail( + block, + src, + "comptime mutation of a reinterpreted pointer requires type '{}' to have a well-defined memory layout", + .{mut_kit.ty.fmt(mod)}, + ); }, } } const ComptimePtrMutationKit = struct { - decl_ref_mut: Value.Payload.DeclRefMut.Data, + mut_decl: InternPool.Key.Ptr.Addr.MutDecl, pointee: union(enum) { /// The pointer type matches the actual comptime Value so a direct /// modification is possible. @@ -27333,18 +28022,6 @@ const ComptimePtrMutationKit = struct { bad_ptr_ty, }, ty: Type, - decl_arena: std.heap.ArenaAllocator = undefined, - - fn beginArena(self: *ComptimePtrMutationKit, mod: *Module) Allocator { - const decl = mod.declPtr(self.decl_ref_mut.decl_index); - return decl.value_arena.?.acquire(mod.gpa, &self.decl_arena); - } - - fn finishArena(self: *ComptimePtrMutationKit, mod: *Module) void { - const decl = mod.declPtr(self.decl_ref_mut.decl_index); - decl.value_arena.?.release(&self.decl_arena); - self.decl_arena = undefined; - } }; fn beginComptimePtrMutation( @@ -27354,201 +28031,251 @@ fn beginComptimePtrMutation( ptr_val: Value, ptr_elem_ty: Type, ) CompileError!ComptimePtrMutationKit { - const target = sema.mod.getTarget(); - switch (ptr_val.tag()) { - .decl_ref_mut => { - const decl_ref_mut = ptr_val.castTag(.decl_ref_mut).?.data; - const decl = sema.mod.declPtr(decl_ref_mut.decl_index); - return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, decl_ref_mut); + const mod = sema.mod; + const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr; + switch (ptr.addr) { + .decl, .int => unreachable, // isComptimeMutablePtr has been checked already + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, mut_decl); }, - .comptime_field_ptr => { - const payload = ptr_val.castTag(.comptime_field_ptr).?.data; + .comptime_field => |comptime_field| { const duped = try sema.arena.create(Value); - duped.* = payload.field_val; - return sema.beginComptimePtrMutationInner(block, src, payload.field_ty, duped, ptr_elem_ty, .{ - .decl_index = @intToEnum(Module.Decl.Index, 0), + duped.* = comptime_field.toValue(); + return sema.beginComptimePtrMutationInner(block, src, mod.intern_pool.typeOf(comptime_field).toType(), duped, ptr_elem_ty, .{ + .decl = undefined, .runtime_index = .comptime_field_ptr, }); }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.array_ptr, elem_ptr.elem_ty); + .eu_payload => |eu_ptr| { + const eu_ty = mod.intern_pool.typeOf(eu_ptr).toType().childType(mod); + var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.toValue(), eu_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.errorUnionPayload(mod); + if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, + .ty = payload_ty, + }; + } else { + // An error union has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the error union from `undef` to `opt_payload`. 
+ + const payload = try sema.arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .eu_payload }, + .data = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(), + }; + + val_ptr.* = Value.initPayload(&payload.base); + + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &payload.data }, + .ty = payload_ty, + }; + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = eu_ty, + }, + } + }, + .opt_payload => |opt_ptr| { + const opt_ty = mod.intern_pool.typeOf(opt_ptr).toType().childType(mod); + var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.toValue(), opt_ty); + switch (parent.pointee) { + .direct => |val_ptr| { + const payload_ty = parent.ty.optionalChild(mod); + switch (val_ptr.ip_index) { + .none => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, + .ty = payload_ty, + }, + else => { + const payload_val = switch (mod.intern_pool.indexToKey(val_ptr.ip_index)) { + .undef => try mod.intern(.{ .undef = payload_ty.toIntern() }), + .opt => |opt| switch (opt.val) { + .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), + else => |payload| payload, + }, + else => unreachable, + }; + + // An optional has been initialized to undefined at comptime and now we + // are for the first time setting the payload. We must change the + // representation of the optional from `undef` to `opt_payload`. + + const payload = try sema.arena.create(Value.Payload.SubValue); + payload.* = .{ + .base = .{ .tag = .opt_payload }, + .data = payload_val.toValue(), + }; + + val_ptr.* = Value.initPayload(&payload.base); + + return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .{ .direct = &payload.data }, + .ty = payload_ty, + }; + }, + } + }, + .bad_decl_ty, .bad_ptr_ty => return parent, + // Even though the parent value type has well-defined memory layout, our + // pointer type does not. + .reinterpret => return ComptimePtrMutationKit{ + .mut_decl = parent.mut_decl, + .pointee = .bad_ptr_ty, + .ty = opt_ty, + }, + } + }, + .elem => |elem_ptr| { + const base_elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); + var parent = try sema.beginComptimePtrMutation(block, src, elem_ptr.base.toValue(), base_elem_ty); switch (parent.pointee) { - .direct => |val_ptr| switch (parent.ty.zigTypeTag()) { + .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { .Array, .Vector => { - const check_len = parent.ty.arrayLenIncludingSentinel(); + const check_len = parent.ty.arrayLenIncludingSentinel(mod); if (elem_ptr.index >= check_len) { // TODO have the parent include the decl so we can say "declared here" return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ elem_ptr.index, check_len, }); } - const elem_ty = parent.ty.childType(); + const elem_ty = parent.ty.childType(mod); // We might have a pointer to multiple elements of the array (e.g. a pointer // to a sub-array). In this case, we just have to reinterpret the relevant // bytes of the whole array rather than any single element. 
- const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); + const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); if (elem_abi_size_u64 < try sema.typeAbiSize(ptr_elem_ty)) { const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); return .{ - .decl_ref_mut = parent.decl_ref_mut, + .mut_decl = parent.mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = val_ptr, - .byte_offset = elem_abi_size * elem_ptr.index, + .byte_offset = elem_abi_size * elem_idx, } }, .ty = parent.ty, }; } - switch (val_ptr.tag()) { - .undef => { - // An array has been initialized to undefined at comptime and now we - // are for the first time setting an element. We must change the representation - // of the array from `undef` to `array`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + switch (val_ptr.ip_index) { + .none => switch (val_ptr.tag()) { + .bytes => { + // An array is memory-optimized to store a slice of bytes, but we are about + // to modify an individual field and the representation has to change. + // If we wanted to avoid this, there would need to be special detection + // elsewhere to identify when writing a value to an array element that is stored + // using the `bytes` tag, and handle it without making a call to this function. + const arena = sema.arena; - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel()); - const elems = try arena.alloc(Value, array_len_including_sentinel); - @memset(elems, Value.undef); + const bytes = val_ptr.castTag(.bytes).?.data; + const dest_len = parent.ty.arrayLenIncludingSentinel(mod); + // bytes.len may be one greater than dest_len because of the case when + // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. + assert(bytes.len >= dest_len); + const elems = try arena.alloc(Value, @intCast(usize, dest_len)); + for (elems, 0..) |*elem, i| { + elem.* = try mod.intValue(elem_ty, bytes[i]); + } - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - return beginComptimePtrMutationInner( + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[@intCast(usize, elem_ptr.index)], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .repeated => { + // An array is memory-optimized to store only a single element value, and + // that value is understood to be the same for the entire length of the array. + // However, now we want to modify an individual field and so the + // representation has to change. If we wanted to avoid this, there would + // need to be special detection elsewhere to identify when writing a value to an + // array element that is stored using the `repeated` tag, and handle it + // without making a call to this function. 
+ const arena = sema.arena; + + const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + if (elems.len > 0) elems[0] = repeated_val; + for (elems[1..]) |*elem| { + elem.* = try repeated_val.copy(arena); + } + + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[@intCast(usize, elem_ptr.index)], + ptr_elem_ty, + parent.mut_decl, + ); + }, + + .aggregate => return beginComptimePtrMutationInner( sema, block, src, elem_ty, - &elems[elem_ptr.index], + &val_ptr.castTag(.aggregate).?.data[@intCast(usize, elem_ptr.index)], ptr_elem_ty, - parent.decl_ref_mut, - ); + parent.mut_decl, + ), + + else => unreachable, }, - .bytes => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `bytes` tag, and handle it without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); + else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) { + .undef => { + // An array has been initialized to undefined at comptime and now we + // are for the first time setting an element. We must change the representation + // of the array from `undef` to `array`. + const arena = sema.arena; - const bytes = val_ptr.castTag(.bytes).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(); - // bytes.len may be one greater than dest_len because of the case when - // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. - assert(bytes.len >= dest_len); - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (elems, 0..) |*elem, i| { - elem.* = try Value.Tag.int_u64.create(arena, bytes[i]); - } + const array_len_including_sentinel = + try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); + const elems = try arena.alloc(Value, array_len_including_sentinel); + @memset(elems, (try mod.intern(.{ .undef = elem_ty.toIntern() })).toValue()); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); + return beginComptimePtrMutationInner( + sema, + block, + src, + elem_ty, + &elems[@intCast(usize, elem_ptr.index)], + ptr_elem_ty, + parent.mut_decl, + ); + }, + else => unreachable, }, - .str_lit => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `str_lit` tag, and handle it without making a call to this function. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const str_lit = val_ptr.castTag(.str_lit).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(); - const bytes = sema.mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - const elems = try arena.alloc(Value, @intCast(usize, dest_len)); - for (bytes, 0..) |byte, i| { - elems[i] = try Value.Tag.int_u64.create(arena, byte); - } - if (parent.ty.sentinel()) |sent_val| { - assert(elems.len == bytes.len + 1); - elems[bytes.len] = sent_val; - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .repeated => { - // An array is memory-optimized to store only a single element value, and - // that value is understood to be the same for the entire length of the array. - // However, now we want to modify an individual field and so the - // representation has to change. If we wanted to avoid this, there would - // need to be special detection elsewhere to identify when writing a value to an - // array element that is stored using the `repeated` tag, and handle it - // without making a call to this function. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const repeated_val = try val_ptr.castTag(.repeated).?.data.copy(arena); - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel()); - const elems = try arena.alloc(Value, array_len_including_sentinel); - if (elems.len > 0) elems[0] = repeated_val; - for (elems[1..]) |*elem| { - elem.* = try repeated_val.copy(arena); - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &val_ptr.castTag(.aggregate).?.data[elem_ptr.index], - ptr_elem_ty, - parent.decl_ref_mut, - ), - - .the_only_possible_value => { - const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - duped, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - - else => unreachable, } }, else => { @@ -27565,28 +28292,29 @@ fn beginComptimePtrMutation( parent.ty, val_ptr, ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, }, .reinterpret => |reinterpret| { - if (!elem_ptr.elem_ty.hasWellDefinedLayout()) { + if (!base_elem_ty.hasWellDefinedLayout(mod)) { // Even though the parent value type has well-defined memory layout, our // pointer type does not. 
return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, + .mut_decl = parent.mut_decl, .pointee = .bad_ptr_ty, - .ty = elem_ptr.elem_ty, + .ty = base_elem_ty, }; } - const elem_abi_size_u64 = try sema.typeAbiSize(elem_ptr.elem_ty); + const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); + const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, + .mut_decl = parent.mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_ptr.index, + .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_idx, } }, .ty = parent.ty, }; @@ -27594,162 +28322,184 @@ fn beginComptimePtrMutation( .bad_decl_ty, .bad_ptr_ty => return parent, } }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); + .field => |field_ptr| { + const base_child_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + const field_index = @intCast(u32, field_ptr.index); - var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.container_ptr, field_ptr.container_ty); + var parent = try sema.beginComptimePtrMutation(block, src, field_ptr.base.toValue(), base_child_ty); switch (parent.pointee) { - .direct => |val_ptr| switch (val_ptr.tag()) { - .undef => { - // A struct or union has been initialized to undefined at comptime and now we - // are for the first time setting a field. We must change the representation - // of the struct/union from `undef` to `struct`/`union`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - switch (parent.ty.zigTypeTag()) { - .Struct => { - const fields = try arena.alloc(Value, parent.ty.structFieldCount()); - @memset(fields, Value.undef); - - val_ptr.* = try Value.Tag.aggregate.create(arena, fields); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index), - &fields[field_index], - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .Union => { - const payload = try arena.create(Value.Payload.Union); - payload.* = .{ .data = .{ - .tag = try Value.Tag.enum_field_index.create(arena, field_index), - .val = Value.undef, - } }; - - val_ptr.* = Value.initPayload(&payload.base); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index), - &payload.data.val, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .Pointer => { - assert(parent.ty.isSlice()); - val_ptr.* = try Value.Tag.slice.create(arena, .{ - .ptr = Value.undef, - .len = Value.undef, - }); - - switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), - - else => unreachable, - } - }, - else => unreachable, - } - }, - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index), - &val_ptr.castTag(.aggregate).?.data[field_index], - ptr_elem_ty, - parent.decl_ref_mut, - ), - - .@"union" => { - // 
We need to set the active field of the union. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const payload = &val_ptr.castTag(.@"union").?.data; - payload.tag = try Value.Tag.enum_field_index.create(arena, field_index); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index), - &payload.val, - ptr_elem_ty, - parent.decl_ref_mut, - ); - }, - .slice => switch (field_index) { - Value.Payload.Slice.ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.decl_ref_mut, - ), - - Value.Payload.Slice.len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.decl_ref_mut, - ), - - else => unreachable, - }, - - .empty_struct_value => { + .direct => |val_ptr| switch (val_ptr.ip_index) { + .empty_struct => { const duped = try sema.arena.create(Value); - duped.* = Value.initTag(.the_only_possible_value); + duped.* = val_ptr.*; return beginComptimePtrMutationInner( sema, block, src, - parent.ty.structFieldType(field_index), + parent.ty.structFieldType(field_index, mod), duped, ptr_elem_ty, - parent.decl_ref_mut, + parent.mut_decl, ); }, + .none => switch (val_ptr.tag()) { + .aggregate => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &val_ptr.castTag(.aggregate).?.data[field_index], + ptr_elem_ty, + parent.mut_decl, + ), + .repeated => { + const arena = sema.arena; - else => unreachable, + const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + @memset(elems, val_ptr.castTag(.repeated).?.data); + val_ptr.* = try Value.Tag.aggregate.create(arena, elems); + + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &elems[field_index], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .@"union" => { + // We need to set the active field of the union. + const union_tag_ty = base_child_ty.unionTagTypeHypothetical(mod); + + const payload = &val_ptr.castTag(.@"union").?.data; + payload.tag = try mod.enumValueFieldIndex(union_tag_ty, field_index); + + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &payload.val, + ptr_elem_ty, + parent.mut_decl, + ); + }, + .slice => switch (field_index) { + Value.slice_ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.slicePtrFieldType(mod), + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.mut_decl, + ), + + Value.slice_len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.mut_decl, + ), + + else => unreachable, + }, + else => unreachable, + }, + else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) { + .undef => { + // A struct or union has been initialized to undefined at comptime and now we + // are for the first time setting a field. We must change the representation + // of the struct/union from `undef` to `struct`/`union`. + const arena = sema.arena; + + switch (parent.ty.zigTypeTag(mod)) { + .Struct => { + const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); + for (fields, 0..) 
|*field, i| field.* = (try mod.intern(.{ + .undef = parent.ty.structFieldType(i, mod).toIntern(), + })).toValue(); + + val_ptr.* = try Value.Tag.aggregate.create(arena, fields); + + return beginComptimePtrMutationInner( + sema, + block, + src, + parent.ty.structFieldType(field_index, mod), + &fields[field_index], + ptr_elem_ty, + parent.mut_decl, + ); + }, + .Union => { + const payload = try arena.create(Value.Payload.Union); + const tag_ty = parent.ty.unionTagTypeHypothetical(mod); + const payload_ty = parent.ty.structFieldType(field_index, mod); + payload.* = .{ .data = .{ + .tag = try mod.enumValueFieldIndex(tag_ty, field_index), + .val = (try mod.intern(.{ .undef = payload_ty.toIntern() })).toValue(), + } }; + + val_ptr.* = Value.initPayload(&payload.base); + + return beginComptimePtrMutationInner( + sema, + block, + src, + payload_ty, + &payload.data.val, + ptr_elem_ty, + parent.mut_decl, + ); + }, + .Pointer => { + assert(parent.ty.isSlice(mod)); + const ptr_ty = parent.ty.slicePtrFieldType(mod); + val_ptr.* = try Value.Tag.slice.create(arena, .{ + .ptr = (try mod.intern(.{ .undef = ptr_ty.toIntern() })).toValue(), + .len = (try mod.intern(.{ .undef = .usize_type })).toValue(), + }); + + switch (field_index) { + Value.slice_ptr_index => return beginComptimePtrMutationInner( + sema, + block, + src, + ptr_ty, + &val_ptr.castTag(.slice).?.data.ptr, + ptr_elem_ty, + parent.mut_decl, + ), + Value.slice_len_index => return beginComptimePtrMutationInner( + sema, + block, + src, + Type.usize, + &val_ptr.castTag(.slice).?.data.len, + ptr_elem_ty, + parent.mut_decl, + ), + + else => unreachable, + } + }, + else => unreachable, + } + }, + else => unreachable, + }, }, .reinterpret => |reinterpret| { - const field_offset_u64 = field_ptr.container_ty.structFieldOffset(field_index, target); + const field_offset_u64 = base_child_ty.structFieldOffset(field_index, mod); const field_offset = try sema.usizeCast(block, src, field_offset_u64); return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, + .mut_decl = parent.mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = reinterpret.val_ptr, .byte_offset = reinterpret.byte_offset + field_offset, @@ -27760,106 +28510,6 @@ fn beginComptimePtrMutation( .bad_decl_ty, .bad_ptr_ty => return parent, } }, - .eu_payload_ptr => { - const eu_ptr = ptr_val.castTag(.eu_payload_ptr).?.data; - var parent = try sema.beginComptimePtrMutation(block, src, eu_ptr.container_ptr, eu_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = parent.ty.errorUnionPayload(); - switch (val_ptr.tag()) { - else => { - // An error union has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the error union from `undef` to `opt_payload`. 
- const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .eu_payload }, - .data = Value.undef, - }; - - val_ptr.* = Value.initPayload(&payload.base); - - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - }, - .eu_payload => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, - .ty = payload_ty, - }, - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. - .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = eu_ptr.container_ty, - }, - } - }, - .opt_payload_ptr => { - const opt_ptr = if (ptr_val.castTag(.opt_payload_ptr)) |some| some.data else { - return sema.beginComptimePtrMutation(block, src, ptr_val, try ptr_elem_ty.optionalChildAlloc(sema.arena)); - }; - var parent = try sema.beginComptimePtrMutation(block, src, opt_ptr.container_ptr, opt_ptr.container_ty); - switch (parent.pointee) { - .direct => |val_ptr| { - const payload_ty = try parent.ty.optionalChildAlloc(sema.arena); - switch (val_ptr.tag()) { - .undef, .null_value => { - // An optional has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the optional from `undef` to `opt_payload`. - const arena = parent.beginArena(sema.mod); - defer parent.finishArena(sema.mod); - - const payload = try arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .opt_payload }, - .data = Value.undef, - }; - - val_ptr.* = Value.initPayload(&payload.base); - - return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - }, - .opt_payload => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, - .ty = payload_ty, - }, - - else => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .{ .direct = val_ptr }, - .ty = payload_ty, - }, - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. 
- .reinterpret => return ComptimePtrMutationKit{ - .decl_ref_mut = parent.decl_ref_mut, - .pointee = .bad_ptr_ty, - .ty = opt_ptr.container_ty, - }, - } - }, - .decl_ref => unreachable, // isComptimeMutablePtr() has been checked already - else => unreachable, } } @@ -27870,46 +28520,50 @@ fn beginComptimePtrMutationInner( decl_ty: Type, decl_val: *Value, ptr_elem_ty: Type, - decl_ref_mut: Value.Payload.DeclRefMut.Data, + mut_decl: InternPool.Key.Ptr.Addr.MutDecl, ) CompileError!ComptimePtrMutationKit { - const target = sema.mod.getTarget(); + const mod = sema.mod; + const target = mod.getTarget(); const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok; + + decl_val.* = try decl_val.unintern(sema.arena, mod); + if (coerce_ok) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, + .mut_decl = mut_decl, .pointee = .{ .direct = decl_val }, .ty = decl_ty, }; } // Handle the case that the decl is an array and we're actually trying to point to an element. - if (decl_ty.isArrayOrVector()) { - const decl_elem_ty = decl_ty.childType(); + if (decl_ty.isArrayOrVector(mod)) { + const decl_elem_ty = decl_ty.childType(mod); if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, + .mut_decl = mut_decl, .pointee = .{ .direct = decl_val }, .ty = decl_ty, }; } } - if (!decl_ty.hasWellDefinedLayout()) { + if (!decl_ty.hasWellDefinedLayout(mod)) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, - .pointee = .{ .bad_decl_ty = {} }, + .mut_decl = mut_decl, + .pointee = .bad_decl_ty, .ty = decl_ty, }; } - if (!ptr_elem_ty.hasWellDefinedLayout()) { + if (!ptr_elem_ty.hasWellDefinedLayout(mod)) { return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, - .pointee = .{ .bad_ptr_ty = {} }, + .mut_decl = mut_decl, + .pointee = .bad_ptr_ty, .ty = ptr_elem_ty, }; } return ComptimePtrMutationKit{ - .decl_ref_mut = decl_ref_mut, + .mut_decl = mut_decl, .pointee = .{ .reinterpret = .{ .val_ptr = decl_val, .byte_offset = 0, @@ -27951,237 +28605,227 @@ fn beginComptimePtrLoad( ptr_val: Value, maybe_array_ty: ?Type, ) ComptimePtrLoadError!ComptimePtrLoadKit { - const target = sema.mod.getTarget(); - var deref: ComptimePtrLoadKit = switch (ptr_val.tag()) { - .decl_ref, - .decl_ref_mut, - => blk: { - const decl_index = switch (ptr_val.tag()) { - .decl_ref => ptr_val.castTag(.decl_ref).?.data, - .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, - }; - const is_mutable = ptr_val.tag() == .decl_ref_mut; - const decl = sema.mod.declPtr(decl_index); - const decl_tv = try decl.typedValue(); - if (decl_tv.val.tag() == .variable) return error.RuntimeLoad; + const mod = sema.mod; + const target = mod.getTarget(); - const layout_defined = decl.ty.hasWellDefinedLayout(); - break :blk ComptimePtrLoadKit{ - .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, - .pointee = decl_tv, - .is_mutable = is_mutable, - .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, - }; - }, + var deref: ComptimePtrLoadKit = switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .mut_decl => blk: { + const decl_index = switch (ptr.addr) { + .decl => |decl| decl, + .mut_decl => |mut_decl| mut_decl.decl, + else => unreachable, + }; + const is_mutable = ptr.addr == .mut_decl; + const decl = mod.declPtr(decl_index); + const decl_tv = 
try decl.typedValue(); + if (decl.val.getVariable(mod) != null) return error.RuntimeLoad; - .elem_ptr => blk: { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const elem_ty = elem_ptr.elem_ty; - var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.array_ptr, null); + const layout_defined = decl.ty.hasWellDefinedLayout(mod); + break :blk ComptimePtrLoadKit{ + .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, + .pointee = decl_tv, + .is_mutable = is_mutable, + .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, + }; + }, + .int => return error.RuntimeLoad, + .eu_payload, .opt_payload => |container_ptr| blk: { + const container_ty = mod.intern_pool.typeOf(container_ptr).toType().childType(mod); + const payload_ty = switch (ptr.addr) { + .eu_payload => container_ty.errorUnionPayload(mod), + .opt_payload => container_ty.optionalChild(mod), + else => unreachable, + }; + var deref = try sema.beginComptimePtrLoad(block, src, container_ptr.toValue(), container_ty); - // This code assumes that elem_ptrs have been "flattened" in order for direct dereference - // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. Here we check that - // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" - if (elem_ptr.array_ptr.castTag(.elem_ptr)) |parent_elem_ptr| { - assert(!(parent_elem_ptr.data.elem_ty.eql(elem_ty, sema.mod))); - } + // eu_payload and opt_payload never have a well-defined layout + if (deref.parent != null) { + deref.parent = null; + deref.ty_without_well_defined_layout = container_ty; + } - if (elem_ptr.index != 0) { - if (elem_ty.hasWellDefinedLayout()) { - if (deref.parent) |*parent| { + if (deref.pointee) |*tv| { + const coerce_in_mem_ok = + (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; + if (coerce_in_mem_ok) { + const payload_val = switch (tv.val.ip_index) { + .none => tv.val.cast(Value.Payload.SubValue).?.data, + .null_value => return sema.fail(block, src, "attempt to use null value", .{}), + else => switch (mod.intern_pool.indexToKey(tv.val.toIntern())) { + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| return sema.fail( + block, + src, + "attempt to unwrap error: {}", + .{err_name.fmt(&mod.intern_pool)}, + ), + .payload => |payload| payload, + }, + .opt => |opt| switch (opt.val) { + .none => return sema.fail(block, src, "attempt to use null value", .{}), + else => |payload| payload, + }, + else => unreachable, + }.toValue(), + }; + tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; + break :blk deref; + } + } + deref.pointee = null; + break :blk deref; + }, + .comptime_field => |comptime_field| blk: { + const field_ty = mod.intern_pool.typeOf(comptime_field).toType(); + break :blk ComptimePtrLoadKit{ + .parent = null, + .pointee = .{ .ty = field_ty, .val = comptime_field.toValue() }, + .is_mutable = false, + .ty_without_well_defined_layout = field_ty, + }; + }, + .elem => |elem_ptr| blk: { + const elem_ty = mod.intern_pool.typeOf(elem_ptr.base).toType().elemType2(mod); + var deref = try sema.beginComptimePtrLoad(block, src, elem_ptr.base.toValue(), null); + + // This code assumes that elem_ptrs have been "flattened" in order for direct dereference + // to succeed, meaning that elem ptrs of the same elem_ty are coalesced. 
Here we check that + // our parent is not an elem_ptr with the same elem_ty, since that would be "unflattened" + switch (mod.intern_pool.indexToKey(elem_ptr.base)) { + .ptr => |base_ptr| switch (base_ptr.addr) { + .elem => |base_elem| assert(!mod.intern_pool.typeOf(base_elem.base).toType().elemType2(mod).eql(elem_ty, mod)), + else => {}, + }, + else => {}, + } + + if (elem_ptr.index != 0) { + if (elem_ty.hasWellDefinedLayout(mod)) { + if (deref.parent) |*parent| { + // Update the byte offset (in-place) + const elem_size = try sema.typeAbiSize(elem_ty); + const offset = parent.byte_offset + elem_size * elem_ptr.index; + parent.byte_offset = try sema.usizeCast(block, src, offset); + } + } else { + deref.parent = null; + deref.ty_without_well_defined_layout = elem_ty; + } + } + + // If we're loading an elem that was derived from a different type + // than the true type of the underlying decl, we cannot deref directly + const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector(mod)) x: { + const deref_elem_ty = deref.pointee.?.ty.childType(mod); + break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; + } else false; + if (!ty_matches) { + deref.pointee = null; + break :blk deref; + } + + var array_tv = deref.pointee.?; + const check_len = array_tv.ty.arrayLenIncludingSentinel(mod); + if (maybe_array_ty) |load_ty| { + // It's possible that we're loading a [N]T, in which case we'd like to slice + // the pointee array directly from our parent array. + if (load_ty.isArrayOrVector(mod) and load_ty.childType(mod).eql(elem_ty, mod)) { + const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel(mod)); + const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); + deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ + .ty = try Type.array(sema.arena, N, null, elem_ty, mod), + .val = try array_tv.val.sliceArray(mod, sema.arena, elem_idx, elem_idx + N), + } else null; + break :blk deref; + } + } + + if (elem_ptr.index >= check_len) { + deref.pointee = null; + break :blk deref; + } + if (elem_ptr.index == check_len - 1) { + if (array_tv.ty.sentinel(mod)) |sent| { + deref.pointee = TypedValue{ + .ty = elem_ty, + .val = sent, + }; + break :blk deref; + } + } + deref.pointee = TypedValue{ + .ty = elem_ty, + .val = try array_tv.val.elemValue(mod, @intCast(usize, elem_ptr.index)), + }; + break :blk deref; + }, + .field => |field_ptr| blk: { + const field_index = @intCast(u32, field_ptr.index); + const container_ty = mod.intern_pool.typeOf(field_ptr.base).toType().childType(mod); + var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.base.toValue(), container_ty); + + if (container_ty.hasWellDefinedLayout(mod)) { + const struct_obj = mod.typeToStruct(container_ty); + if (struct_obj != null and struct_obj.?.layout == .Packed) { + // packed structs are not byte addressable + deref.parent = null; + } else if (deref.parent) |*parent| { // Update the byte offset (in-place) - const elem_size = try sema.typeAbiSize(elem_ty); - const offset = parent.byte_offset + elem_size * elem_ptr.index; - parent.byte_offset = try sema.usizeCast(block, src, offset); + try sema.resolveTypeLayout(container_ty); + const field_offset = container_ty.structFieldOffset(field_index, mod); + parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); } } else { deref.parent = null; - 
deref.ty_without_well_defined_layout = elem_ty; + deref.ty_without_well_defined_layout = container_ty; } - } - // If we're loading an elem_ptr that was derived from a different type - // than the true type of the underlying decl, we cannot deref directly - const ty_matches = if (deref.pointee != null and deref.pointee.?.ty.isArrayOrVector()) x: { - const deref_elem_ty = deref.pointee.?.ty.childType(); - break :x (try sema.coerceInMemoryAllowed(block, deref_elem_ty, elem_ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, elem_ty, deref_elem_ty, false, target, src, src)) == .ok; - } else false; - if (!ty_matches) { - deref.pointee = null; - break :blk deref; - } - - var array_tv = deref.pointee.?; - const check_len = array_tv.ty.arrayLenIncludingSentinel(); - if (maybe_array_ty) |load_ty| { - // It's possible that we're loading a [N]T, in which case we'd like to slice - // the pointee array directly from our parent array. - if (load_ty.isArrayOrVector() and load_ty.childType().eql(elem_ty, sema.mod)) { - const N = try sema.usizeCast(block, src, load_ty.arrayLenIncludingSentinel()); - deref.pointee = if (elem_ptr.index + N <= check_len) TypedValue{ - .ty = try Type.array(sema.arena, N, null, elem_ty, sema.mod), - .val = try array_tv.val.sliceArray(sema.mod, sema.arena, elem_ptr.index, elem_ptr.index + N), - } else null; + const tv = deref.pointee orelse { + deref.pointee = null; break :blk deref; - } - } - - if (elem_ptr.index >= check_len) { - deref.pointee = null; - break :blk deref; - } - if (elem_ptr.index == check_len - 1) { - if (array_tv.ty.sentinel()) |sent| { - deref.pointee = TypedValue{ - .ty = elem_ty, - .val = sent, - }; - break :blk deref; - } - } - deref.pointee = TypedValue{ - .ty = elem_ty, - .val = try array_tv.val.elemValue(sema.mod, sema.arena, elem_ptr.index), - }; - break :blk deref; - }, - - .slice => blk: { - const slice = ptr_val.castTag(.slice).?.data; - break :blk try sema.beginComptimePtrLoad(block, src, slice.ptr, null); - }, - - .field_ptr => blk: { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const field_index = @intCast(u32, field_ptr.field_index); - var deref = try sema.beginComptimePtrLoad(block, src, field_ptr.container_ptr, field_ptr.container_ty); - - if (field_ptr.container_ty.hasWellDefinedLayout()) { - const struct_ty = field_ptr.container_ty.castTag(.@"struct"); - if (struct_ty != null and struct_ty.?.data.layout == .Packed) { - // packed structs are not byte addressable - deref.parent = null; - } else if (deref.parent) |*parent| { - // Update the byte offset (in-place) - try sema.resolveTypeLayout(field_ptr.container_ty); - const field_offset = field_ptr.container_ty.structFieldOffset(field_index, target); - parent.byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset); - } - } else { - deref.parent = null; - deref.ty_without_well_defined_layout = field_ptr.container_ty; - } - - const tv = deref.pointee orelse { - deref.pointee = null; - break :blk deref; - }; - const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, field_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, field_ptr.container_ty, false, target, src, src)) == .ok; - if (!coerce_in_mem_ok) { - deref.pointee = null; - break :blk deref; - } - - if (field_ptr.container_ty.isSlice()) { - const slice_val = tv.val.castTag(.slice).?.data; - deref.pointee = switch (field_index) { - Value.Payload.Slice.ptr_index => TypedValue{ - .ty = 
field_ptr.container_ty.slicePtrFieldType(try sema.arena.create(Type.SlicePtrFieldTypeBuffer)), - .val = slice_val.ptr, - }, - Value.Payload.Slice.len_index => TypedValue{ - .ty = Type.usize, - .val = slice_val.len, - }, - else => unreachable, }; - } else { - const field_ty = field_ptr.container_ty.structFieldType(field_index); - deref.pointee = TypedValue{ - .ty = field_ty, - .val = tv.val.fieldValue(tv.ty, field_index), - }; - } - break :blk deref; - }, - - .comptime_field_ptr => blk: { - const comptime_field_ptr = ptr_val.castTag(.comptime_field_ptr).?.data; - break :blk ComptimePtrLoadKit{ - .parent = null, - .pointee = .{ .ty = comptime_field_ptr.field_ty, .val = comptime_field_ptr.field_val }, - .is_mutable = false, - .ty_without_well_defined_layout = comptime_field_ptr.field_ty, - }; - }, - - .opt_payload_ptr, - .eu_payload_ptr, - => blk: { - const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data; - const payload_ty = switch (ptr_val.tag()) { - .eu_payload_ptr => payload_ptr.container_ty.errorUnionPayload(), - .opt_payload_ptr => try payload_ptr.container_ty.optionalChildAlloc(sema.arena), - else => unreachable, - }; - var deref = try sema.beginComptimePtrLoad(block, src, payload_ptr.container_ptr, payload_ptr.container_ty); - - // eu_payload_ptr and opt_payload_ptr never have a well-defined layout - if (deref.parent != null) { - deref.parent = null; - deref.ty_without_well_defined_layout = payload_ptr.container_ty; - } - - if (deref.pointee) |*tv| { const coerce_in_mem_ok = - (try sema.coerceInMemoryAllowed(block, payload_ptr.container_ty, tv.ty, false, target, src, src)) == .ok or - (try sema.coerceInMemoryAllowed(block, tv.ty, payload_ptr.container_ty, false, target, src, src)) == .ok; - if (coerce_in_mem_ok) { - const payload_val = switch (ptr_val.tag()) { - .eu_payload_ptr => if (tv.val.castTag(.eu_payload)) |some| some.data else { - return sema.fail(block, src, "attempt to unwrap error: {s}", .{tv.val.castTag(.@"error").?.data.name}); + (try sema.coerceInMemoryAllowed(block, container_ty, tv.ty, false, target, src, src)) == .ok or + (try sema.coerceInMemoryAllowed(block, tv.ty, container_ty, false, target, src, src)) == .ok; + if (!coerce_in_mem_ok) { + deref.pointee = null; + break :blk deref; + } + + if (container_ty.isSlice(mod)) { + deref.pointee = switch (field_index) { + Value.slice_ptr_index => TypedValue{ + .ty = container_ty.slicePtrFieldType(mod), + .val = tv.val.slicePtr(mod), }, - .opt_payload_ptr => if (tv.val.castTag(.opt_payload)) |some| some.data else opt: { - if (tv.val.isNull()) return sema.fail(block, src, "attempt to use null value", .{}); - break :opt tv.val; + Value.slice_len_index => TypedValue{ + .ty = Type.usize, + .val = mod.intern_pool.indexToKey(tv.val.toIntern()).ptr.len.toValue(), }, else => unreachable, }; - tv.* = TypedValue{ .ty = payload_ty, .val = payload_val }; - break :blk deref; + } else { + const field_ty = container_ty.structFieldType(field_index, mod); + deref.pointee = TypedValue{ + .ty = field_ty, + .val = try tv.val.fieldValue(mod, field_index), + }; } - } - deref.pointee = null; - break :blk deref; + break :blk deref; + }, }, - .null_value => { - return sema.fail(block, src, "attempt to use null value", .{}); + .opt => |opt| switch (opt.val) { + .none => return sema.fail(block, src, "attempt to use null value", .{}), + else => |payload| try sema.beginComptimePtrLoad(block, src, payload.toValue(), null), }, - .opt_payload => blk: { - const opt_payload = ptr_val.castTag(.opt_payload).?.data; - break :blk try 
sema.beginComptimePtrLoad(block, src, opt_payload, null); - }, - - .zero, - .one, - .int_u64, - .int_i64, - .int_big_positive, - .int_big_negative, - .variable, - .extern_fn, - .function, - => return error.RuntimeLoad, - else => unreachable, }; if (deref.pointee) |tv| { - if (deref.parent == null and tv.ty.hasWellDefinedLayout()) { + if (deref.parent == null and tv.ty.hasWellDefinedLayout(mod)) { deref.parent = .{ .tv = tv, .byte_offset = 0 }; } } @@ -28196,21 +28840,21 @@ fn bitCast( inst_src: LazySrcLoc, operand_src: ?LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); try sema.resolveTypeLayout(dest_ty); const old_ty = try sema.resolveTypeFields(sema.typeOf(inst)); try sema.resolveTypeLayout(old_ty); - const target = sema.mod.getTarget(); - const dest_bits = dest_ty.bitSize(target); - const old_bits = old_ty.bitSize(target); + const dest_bits = dest_ty.bitSize(mod); + const old_bits = old_ty.bitSize(mod); if (old_bits != dest_bits) { return sema.fail(block, inst_src, "@bitCast size mismatch: destination type '{}' has {d} bits but source type '{}' has {d} bits", .{ - dest_ty.fmt(sema.mod), + dest_ty.fmt(mod), dest_bits, - old_ty.fmt(sema.mod), + old_ty.fmt(mod), old_bits, }); } @@ -28233,20 +28877,21 @@ fn bitCastVal( new_ty: Type, buffer_offset: usize, ) !?Value { - const target = sema.mod.getTarget(); - if (old_ty.eql(new_ty, sema.mod)) return val; + const mod = sema.mod; + if (old_ty.eql(new_ty, mod)) return val; // For types with well-defined memory layouts, we serialize them a byte buffer, // then deserialize to the new type. - const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target)); + const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(mod)); const buffer = try sema.gpa.alloc(u8, abi_size); defer sema.gpa.free(buffer); - val.writeToMemory(old_ty, sema.mod, buffer) catch |err| switch (err) { + val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, error.ReinterpretDeclRef => return null, error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already - error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(sema.mod)}), + error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(mod)}), }; - return try Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena); + return try Value.readFromMemory(new_ty, mod, buffer[buffer_offset..], sema.arena); } fn coerceArrayPtrToSlice( @@ -28256,25 +28901,32 @@ fn coerceArrayPtrToSlice( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; if (try sema.resolveMaybeUndefVal(inst)) |val| { const ptr_array_ty = sema.typeOf(inst); - const array_ty = ptr_array_ty.childType(); - const slice_val = try Value.Tag.slice.create(sema.arena, .{ - .ptr = val, - .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()), - }); - return sema.addConstant(dest_ty, slice_val); + const array_ty = ptr_array_ty.childType(mod); + const slice_val = try mod.intern(.{ .ptr = .{ + .ty = dest_ty.toIntern(), + .addr = switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => .{ .int = try mod.intern(.{ .undef = .usize_type }) }, + .ptr => |ptr| ptr.addr, + else => unreachable, + }, + .len = (try mod.intValue(Type.usize, array_ty.arrayLen(mod))).toIntern(), + } }); + return 
sema.addConstant(dest_ty, slice_val.toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addTyOp(.array_to_slice, dest_ty, inst); } fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_result: *InMemoryCoercionResult) bool { - const dest_info = dest_ty.ptrInfo().data; - const inst_info = inst_ty.ptrInfo().data; - const len0 = (inst_info.pointee_type.zigTypeTag() == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel() == 0 or - (inst_info.pointee_type.arrayLen() == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or - (inst_info.pointee_type.isTuple() and inst_info.pointee_type.structFieldCount() == 0); + const mod = sema.mod; + const dest_info = dest_ty.ptrInfo(mod); + const inst_info = inst_ty.ptrInfo(mod); + const len0 = (inst_info.pointee_type.zigTypeTag(mod) == .Array and (inst_info.pointee_type.arrayLenIncludingSentinel(mod) == 0 or + (inst_info.pointee_type.arrayLen(mod) == 0 and dest_info.sentinel == null and dest_info.size != .C and dest_info.size != .Many))) or + (inst_info.pointee_type.isTuple(mod) and inst_info.pointee_type.structFieldCount(mod) == 0); const ok_cv_qualifiers = ((inst_info.mutable or !dest_info.mutable) or len0) and @@ -28298,17 +28950,16 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul } if (inst_info.@"align" == 0 and dest_info.@"align" == 0) return true; if (len0) return true; - const target = sema.mod.getTarget(); const inst_align = if (inst_info.@"align" != 0) inst_info.@"align" else - inst_info.pointee_type.abiAlignment(target); + inst_info.pointee_type.abiAlignment(mod); const dest_align = if (dest_info.@"align" != 0) dest_info.@"align" else - dest_info.pointee_type.abiAlignment(target); + dest_info.pointee_type.abiAlignment(mod); if (dest_align > inst_align) { in_memory_result.* = .{ .ptr_alignment = .{ @@ -28327,26 +28978,30 @@ fn coerceCompatiblePtrs( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); if (try sema.resolveMaybeUndefVal(inst)) |val| { - if (!val.isUndef() and val.isNull() and !dest_ty.isAllowzeroPtr()) { + if (!val.isUndef(mod) and val.isNull(mod) and !dest_ty.isAllowzeroPtr(mod)) { return sema.fail(block, inst_src, "null pointer casted to type '{}'", .{dest_ty.fmt(sema.mod)}); } // The comptime Value representation is compatible with both types. 
- return sema.addConstant(dest_ty, val); + return sema.addConstant( + dest_ty, + try mod.getCoerced((try val.intern(inst_ty, mod)).toValue(), dest_ty), + ); } try sema.requireRuntimeBlock(block, inst_src, null); - const inst_allows_zero = inst_ty.zigTypeTag() != .Pointer or inst_ty.ptrAllowsZero(); - if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero() and - (try sema.typeHasRuntimeBits(dest_ty.elemType2()) or dest_ty.elemType2().zigTypeTag() == .Fn)) + const inst_allows_zero = inst_ty.zigTypeTag(mod) != .Pointer or inst_ty.ptrAllowsZero(mod); + if (block.wantSafety() and inst_allows_zero and !dest_ty.ptrAllowsZero(mod) and + (try sema.typeHasRuntimeBits(dest_ty.elemType2(mod)) or dest_ty.elemType2(mod).zigTypeTag(mod) == .Fn)) { - const actual_ptr = if (inst_ty.isSlice()) + const actual_ptr = if (inst_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, inst_src, inst, inst_ty) else inst; const ptr_int = try block.addUnOp(.ptrtoint, actual_ptr); const is_non_zero = try block.addBinOp(.cmp_neq, ptr_int, .zero_usize); - const ok = if (inst_ty.isSlice()) ok: { + const ok = if (inst_ty.isSlice(mod)) ok: { const len = try sema.analyzeSliceLen(block, inst_src, inst); const len_zero = try block.addBinOp(.cmp_eq, len, .zero_usize); break :ok try block.addBinOp(.bit_or, len_zero, is_non_zero); @@ -28364,9 +29019,11 @@ fn coerceEnumToUnion( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; + const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); - const tag_ty = union_ty.unionTagType() orelse { + const tag_ty = union_ty.unionTagType(mod) orelse { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ union_ty.fmt(sema.mod), inst_ty.fmt(sema.mod), @@ -28393,16 +29050,18 @@ fn coerceEnumToUnion( return sema.failWithOwnedErrorMsg(msg); }; - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field = union_obj.fields.values()[field_index]; const field_ty = try sema.resolveTypeFields(field.ty); - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "cannot initialize 'noreturn' field of union", .{}); errdefer msg.destroy(sema.gpa); const field_name = union_obj.fields.keys()[field_index]; - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name}); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{ + field_name.fmt(ip), + }); try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; @@ -28411,27 +29070,27 @@ fn coerceEnumToUnion( const opv = (try sema.typeHasOnePossibleValue(field_ty)) orelse { const msg = msg: { const field_name = union_obj.fields.keys()[field_index]; - const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{s}'", .{ - inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod), field_ty.fmt(sema.mod), field_name, + const msg = try sema.errMsg(block, inst_src, "coercion from enum '{}' to union '{}' must initialize '{}' field '{}'", .{ + inst_ty.fmt(sema.mod), union_ty.fmt(sema.mod), + field_ty.fmt(sema.mod), field_name.fmt(ip), }); errdefer msg.destroy(sema.gpa); - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' declared here", .{field_name}); + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' declared here", .{ + field_name.fmt(ip), + }); try 
sema.addDeclaredHereNote(msg, union_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); }; - return sema.addConstant(union_ty, try Value.Tag.@"union".create(sema.arena, .{ - .tag = val, - .val = opv, - })); + return sema.addConstant(union_ty, try mod.unionValue(union_ty, val, opv)); } try sema.requireRuntimeBlock(block, inst_src, null); - if (tag_ty.isNonexhaustiveEnum()) { + if (tag_ty.isNonexhaustiveEnum(mod)) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "runtime coercion to union '{}' from non-exhaustive enum", .{ union_ty.fmt(sema.mod), @@ -28443,13 +29102,13 @@ fn coerceEnumToUnion( return sema.failWithOwnedErrorMsg(msg); } - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; { var msg: ?*Module.ErrorMsg = null; errdefer if (msg) |some| some.destroy(sema.gpa); for (union_obj.fields.values(), 0..) |field, i| { - if (field.ty.zigTypeTag() == .NoReturn) { + if (field.ty.zigTypeTag(mod) == .NoReturn) { const err_msg = msg orelse try sema.errMsg( block, inst_src, @@ -28469,7 +29128,7 @@ fn coerceEnumToUnion( } // If the union has all fields 0 bits, the union value is just the enum value. - if (union_ty.unionHasAllZeroBitFieldTypes()) { + if (union_ty.unionHasAllZeroBitFieldTypes(mod)) { return block.addBitCast(union_ty, enum_tag); } @@ -28487,8 +29146,11 @@ fn coerceEnumToUnion( while (it.next()) |field| : (field_index += 1) { const field_name = field.key_ptr.*; const field_ty = field.value_ptr.ty; - if (!field_ty.hasRuntimeBits()) continue; - try sema.addFieldErrNote(union_ty, field_index, msg, "field '{s}' has type '{}'", .{ field_name, field_ty.fmt(sema.mod) }); + if (!(try sema.typeHasRuntimeBits(field_ty))) continue; + try sema.addFieldErrNote(union_ty, field_index, msg, "field '{}' has type '{}'", .{ + field_name.fmt(ip), + field_ty.fmt(sema.mod), + }); } try sema.addDeclaredHereNote(msg, union_ty); break :msg msg; @@ -28504,36 +29166,55 @@ fn coerceAnonStructToUnion( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const field_count = inst_ty.structFieldCount(); - if (field_count != 1) { - const msg = msg: { - const msg = if (field_count > 1) try sema.errMsg( - block, - inst_src, - "cannot initialize multiple union fields at once; unions can only have one active field", - .{}, - ) else try sema.errMsg( - block, - inst_src, - "union initializer must initialize one field", - .{}, - ); - errdefer msg.destroy(sema.gpa); + const field_info: union(enum) { + name: InternPool.NullTerminatedString, + count: usize, + } = switch (mod.intern_pool.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 1) + .{ .name = anon_struct_type.names[0] } + else + .{ .count = anon_struct_type.names.len }, + .struct_type => |struct_type| name: { + const field_names = mod.structPtrUnwrap(struct_type.index).?.fields.keys(); + break :name if (field_names.len == 1) + .{ .name = field_names[0] } + else + .{ .count = field_names.len }; + }, + else => unreachable, + }; + switch (field_info) { + .name => |field_name| { + const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); + return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); + }, + .count => |field_count| { + assert(field_count != 1); + const msg = msg: { + const msg = if (field_count > 1) try sema.errMsg( + block, + inst_src, + "cannot initialize multiple union fields at 
once; unions can only have one active field", + .{}, + ) else try sema.errMsg( + block, + inst_src, + "union initializer must initialize one field", + .{}, + ); + errdefer msg.destroy(sema.gpa); - // TODO add notes for where the anon struct was created to point out - // the extra fields. + // TODO add notes for where the anon struct was created to point out + // the extra fields. - try sema.addDeclaredHereNote(msg, union_ty); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(msg); + try sema.addDeclaredHereNote(msg, union_ty); + break :msg msg; + }; + return sema.failWithOwnedErrorMsg(msg); + }, } - - const anon_struct = inst_ty.castTag(.anon_struct).?.data; - const field_name = anon_struct.names[0]; - const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); - return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); } fn coerceAnonStructToUnionPtrs( @@ -28544,7 +29225,8 @@ fn coerceAnonStructToUnionPtrs( ptr_anon_struct: Air.Inst.Ref, anon_struct_src: LazySrcLoc, ) !Air.Inst.Ref { - const union_ty = ptr_union_ty.childType(); + const mod = sema.mod; + const union_ty = ptr_union_ty.childType(mod); const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src); return sema.analyzeRef(block, union_ty_src, union_inst); @@ -28558,7 +29240,8 @@ fn coerceAnonStructToStructPtrs( ptr_anon_struct: Air.Inst.Ref, anon_struct_src: LazySrcLoc, ) !Air.Inst.Ref { - const struct_ty = ptr_struct_ty.childType(); + const mod = sema.mod; + const struct_ty = ptr_struct_ty.childType(mod); const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src); return sema.analyzeRef(block, struct_ty_src, struct_inst); @@ -28573,15 +29256,16 @@ fn coerceArrayLike( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const inst_len = inst_ty.arrayLen(); - const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen()); - const target = sema.mod.getTarget(); + const inst_len = inst_ty.arrayLen(mod); + const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen(mod)); + const target = mod.getTarget(); if (dest_len != inst_len) { const msg = msg: { const msg = try sema.errMsg(block, inst_src, "expected type '{}', found '{}'", .{ - dest_ty.fmt(sema.mod), inst_ty.fmt(sema.mod), + dest_ty.fmt(mod), inst_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len}); @@ -28591,35 +29275,32 @@ fn coerceArrayLike( return sema.failWithOwnedErrorMsg(msg); } - const dest_elem_ty = dest_ty.childType(); - const inst_elem_ty = inst_ty.childType(); + const dest_elem_ty = dest_ty.childType(mod); + const inst_elem_ty = inst_ty.childType(mod); const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_elem_ty, inst_elem_ty, false, target, dest_ty_src, inst_src); if (in_memory_result == .ok) { if (try sema.resolveMaybeUndefVal(inst)) |inst_val| { // These types share the same comptime value representation. 
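// Illustrative sketch, not part of the patch: an example of the array-like
// coercion this branch handles. A vector and an array of identical length
// and element type coerce to one another directly:
//
//   const v: @Vector(2, u32) = .{ 1, 2 };
//   const a: [2]u32 = v; // comptime-known: same representation, re-typed
//
// so a comptime-known operand goes through `coerceInMemory` below, while a
// runtime operand lowers to a single `bitcast` AIR instruction.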
- return sema.addConstant(dest_ty, inst_val); + return sema.coerceInMemory(block, inst_val, inst_ty, dest_ty, dest_ty_src); } try sema.requireRuntimeBlock(block, inst_src, null); return block.addBitCast(dest_ty, inst); } - const element_vals = try sema.arena.alloc(Value, dest_len); + const element_vals = try sema.arena.alloc(InternPool.Index, dest_len); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len); var runtime_src: ?LazySrcLoc = null; - for (element_vals, 0..) |*elem, i| { - const index_ref = try sema.addConstant( - Type.usize, - try Value.Tag.int_u64.create(sema.arena, i), - ); + for (element_vals, element_refs, 0..) |*val, *ref, i| { + const index_ref = try sema.addConstant(Type.usize, try mod.intValue(Type.usize, i)); const src = inst_src; // TODO better source location const elem_src = inst_src; // TODO better source location const elem_ref = try sema.elemValArray(block, src, inst_src, inst, elem_src, index_ref, true); const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src); - element_refs[i] = coerced; + ref.* = coerced; if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| { - elem.* = elem_val; + val.* = try elem_val.intern(dest_elem_ty, mod); } else { runtime_src = elem_src; } @@ -28631,10 +29312,10 @@ fn coerceArrayLike( return block.addAggregateInit(dest_ty, element_refs); } - return sema.addConstant( - dest_ty, - try Value.Tag.aggregate.create(sema.arena, element_vals), - ); + return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } })).toValue()); } /// If the lengths match, coerces element-wise. @@ -28646,9 +29327,10 @@ fn coerceTupleToArray( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const inst_ty = sema.typeOf(inst); - const inst_len = inst_ty.arrayLen(); - const dest_len = dest_ty.arrayLen(); + const inst_len = inst_ty.arrayLen(mod); + const dest_len = dest_ty.arrayLen(mod); if (dest_len != inst_len) { const msg = msg: { @@ -28663,26 +29345,27 @@ fn coerceTupleToArray( return sema.failWithOwnedErrorMsg(msg); } - const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLenIncludingSentinel()); - const element_vals = try sema.arena.alloc(Value, dest_elems); + const dest_elems = try sema.usizeCast(block, dest_ty_src, dest_len); + const element_vals = try sema.arena.alloc(InternPool.Index, dest_elems); const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_elems); - const dest_elem_ty = dest_ty.childType(); + const dest_elem_ty = dest_ty.childType(mod); var runtime_src: ?LazySrcLoc = null; - for (element_vals, 0..) |*elem, i_usize| { + for (element_vals, element_refs, 0..) 
|*val, *ref, i_usize| { const i = @intCast(u32, i_usize); if (i_usize == inst_len) { - elem.* = dest_ty.sentinel().?; - element_refs[i] = try sema.addConstant(dest_elem_ty, elem.*); + const sentinel_val = dest_ty.sentinel(mod).?; + val.* = sentinel_val.toIntern(); + ref.* = try sema.addConstant(dest_elem_ty, sentinel_val); break; } const elem_src = inst_src; // TODO better source location const elem_ref = try sema.tupleField(block, inst_src, inst, elem_src, i); const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src); - element_refs[i] = coerced; + ref.* = coerced; if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |elem_val| { - elem.* = elem_val; + val.* = try elem_val.intern(dest_elem_ty, mod); } else { runtime_src = elem_src; } @@ -28694,10 +29377,10 @@ fn coerceTupleToArray( return block.addAggregateInit(dest_ty, element_refs); } - return sema.addConstant( - dest_ty, - try Value.Tag.aggregate.create(sema.arena, element_vals), - ); + return sema.addConstant(dest_ty, (try mod.intern(.{ .aggregate = .{ + .ty = dest_ty.toIntern(), + .storage = .{ .elems = element_vals }, + } })).toValue()); } /// If the lengths match, coerces element-wise. @@ -28709,10 +29392,11 @@ fn coerceTupleToSlicePtrs( ptr_tuple: Air.Inst.Ref, tuple_src: LazySrcLoc, ) !Air.Inst.Ref { - const tuple_ty = sema.typeOf(ptr_tuple).childType(); + const mod = sema.mod; + const tuple_ty = sema.typeOf(ptr_tuple).childType(mod); const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); - const slice_info = slice_ty.ptrInfo().data; - const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(), slice_info.sentinel, slice_info.pointee_type, sema.mod); + const slice_info = slice_ty.ptrInfo(mod); + const array_ty = try Type.array(sema.arena, tuple_ty.structFieldCount(mod), slice_info.sentinel, slice_info.pointee_type, sema.mod); const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src); if (slice_info.@"align" != 0) { return sema.fail(block, slice_ty_src, "TODO: override the alignment of the array decl we create here", .{}); @@ -28730,8 +29414,9 @@ fn coerceTupleToArrayPtrs( ptr_tuple: Air.Inst.Ref, tuple_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; const tuple = try sema.analyzeLoad(block, tuple_src, ptr_tuple, tuple_src); - const ptr_info = ptr_array_ty.ptrInfo().data; + const ptr_info = ptr_array_ty.ptrInfo(mod); const array_ty = ptr_info.pointee_type; const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src); if (ptr_info.@"align" != 0) { @@ -28750,27 +29435,41 @@ fn coerceTupleToStruct( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; + const ip = &mod.intern_pool; const struct_ty = try sema.resolveTypeFields(dest_ty); - if (struct_ty.isTupleOrAnonStruct()) { + if (struct_ty.isTupleOrAnonStruct(mod)) { return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src); } - const fields = struct_ty.structFields(); - const field_vals = try sema.arena.alloc(Value, fields.count()); + const fields = struct_ty.structFields(mod); + const field_vals = try sema.arena.alloc(InternPool.Index, fields.count()); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); var runtime_src: ?LazySrcLoc = null; - const field_count = inst_ty.structFieldCount(); - var field_i: u32 = 0; - while (field_i < field_count) : (field_i += 1) { - const field_src = inst_src; // TODO better source 
location - const field_name = if (inst_ty.castTag(.anon_struct)) |payload| - payload.data.names[field_i] + const field_count = switch (ip.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| + struct_obj.fields.count() else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}); + 0, + else => unreachable, + }; + for (0..field_count) |field_index_usize| { + const field_i = @intCast(u32, field_index_usize); + const field_src = inst_src; // TODO better source location + // https://github.com/ziglang/zig/issues/15709 + const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) + anon_struct_type.names[field_i] + else + try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], + else => unreachable, + }; const field_index = try sema.structFieldIndex(block, struct_ty, field_name, field_src); const field = fields.values()[field_index]; const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); @@ -28781,13 +29480,13 @@ fn coerceTupleToStruct( return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(field.default_val, field.ty, sema.mod)) { + if (!init_val.eql(field.default_val.toValue(), field.ty, sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - field_vals[field_index] = field_val; + field_vals[field_index] = field_val.toIntern(); } else { runtime_src = field_src; } @@ -28804,9 +29503,9 @@ fn coerceTupleToStruct( const field_name = fields.keys()[i]; const field = fields.values()[i]; const field_src = inst_src; // TODO better source location - if (field.default_val.tag() == .unreachable_value) { - const template = "missing struct field: {s}"; - const args = .{field_name}; + if (field.default_val == .none) { + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, args); } else { @@ -28817,7 +29516,7 @@ fn coerceTupleToStruct( if (runtime_src == null) { field_vals[i] = field.default_val; } else { - field_ref.* = try sema.addConstant(field.ty, field.default_val); + field_ref.* = try sema.addConstant(field.ty, field.default_val.toValue()); } } @@ -28832,10 +29531,14 @@ fn coerceTupleToStruct( return block.addAggregateInit(struct_ty, field_refs); } - return sema.addConstant( - struct_ty, - try Value.Tag.aggregate.create(sema.arena, field_vals), - ); + const struct_val = try mod.intern(.{ .aggregate = .{ + .ty = struct_ty.toIntern(), + .storage = .{ .elems = field_vals }, + } }); + // TODO: figure out InternPool removals for incremental compilation + //errdefer ip.remove(struct_val); + + return sema.addConstant(struct_ty, struct_val.toValue()); } fn coerceTupleToTuple( @@ -28845,47 +29548,76 @@ fn coerceTupleToTuple( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const dest_field_count = tuple_ty.structFieldCount(); - const field_vals = try sema.arena.alloc(Value, dest_field_count); + const mod = sema.mod; + const ip = &mod.intern_pool; + const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) { + 
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| + struct_obj.fields.count() + else + 0, + else => unreachable, + }; + const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count); const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len); @memset(field_refs, .none); const inst_ty = sema.typeOf(inst); - const inst_field_count = inst_ty.structFieldCount(); - if (inst_field_count > dest_field_count) return error.NotCoercible; + const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| + struct_obj.fields.count() + else + 0, + else => unreachable, + }; + if (src_field_count > dest_field_count) return error.NotCoercible; var runtime_src: ?LazySrcLoc = null; - var field_i: u32 = 0; - while (field_i < inst_field_count) : (field_i += 1) { + for (0..dest_field_count) |field_index_usize| { + const field_i = @intCast(u32, field_index_usize); const field_src = inst_src; // TODO better source location - const field_name = if (inst_ty.castTag(.anon_struct)) |payload| - payload.data.names[field_i] - else - try std.fmt.allocPrint(sema.arena, "{d}", .{field_i}); + // https://github.com/ziglang/zig/issues/15709 + const field_name: InternPool.NullTerminatedString = switch (ip.indexToKey(inst_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len > 0) + anon_struct_type.names[field_i] + else + try ip.getOrPutStringFmt(sema.gpa, "{d}", .{field_i}), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.keys()[field_i], + else => unreachable, + }; - if (mem.eql(u8, field_name, "len")) { + if (ip.stringEqlSlice(field_name, "len")) return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{}); - } + + const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types[field_index_usize].toType(), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].ty, + else => unreachable, + }; + const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.values[field_index_usize], + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[field_index_usize].default_val, + else => unreachable, + }; const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src); - const field_ty = tuple_ty.structFieldType(field_i); - const default_val = tuple_ty.structFieldDefaultValue(field_i); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); const coerced = try sema.coerce(block, field_ty, elem_ref, field_src); field_refs[field_index] = coerced; - if (default_val.tag() != .unreachable_value) { + if (default_val != .none) { const init_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { return sema.failWithNeededComptime(block, field_src, "value stored in comptime field must be comptime-known"); }; - if (!init_val.eql(default_val, field_ty, sema.mod)) { + if (!init_val.eql(default_val.toValue(), field_ty, sema.mod)) { return sema.failWithInvalidComptimeFieldStore(block, field_src, inst_ty, field_i); } } if (runtime_src == null) { if (try sema.resolveMaybeUndefVal(coerced)) |field_val| { - 
field_vals[field_index] = field_val; + field_vals[field_index] = field_val.toIntern(); } else { runtime_src = field_src; } @@ -28899,12 +29631,15 @@ fn coerceTupleToTuple( for (field_refs, 0..) |*field_ref, i| { if (field_ref.* != .none) continue; - const default_val = tuple_ty.structFieldDefaultValue(i); - const field_ty = tuple_ty.structFieldType(i); + const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.values[i], + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].default_val, + else => unreachable, + }; const field_src = inst_src; // TODO better source location - if (default_val.tag() == .unreachable_value) { - if (tuple_ty.isTuple()) { + if (default_val == .none) { + if (tuple_ty.isTuple(mod)) { const template = "missing tuple field: {d}"; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, .{i}); @@ -28913,8 +29648,8 @@ fn coerceTupleToTuple( } continue; } - const template = "missing struct field: {s}"; - const args = .{tuple_ty.structFieldName(i)}; + const template = "missing struct field: {}"; + const args = .{tuple_ty.structFieldName(i, mod).fmt(ip)}; if (root_msg) |msg| { try sema.errNote(block, field_src, msg, template, args); } else { @@ -28925,7 +29660,12 @@ fn coerceTupleToTuple( if (runtime_src == null) { field_vals[i] = default_val; } else { - field_ref.* = try sema.addConstant(field_ty, default_val); + const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| anon_struct_type.types[i].toType(), + .struct_type => |struct_type| mod.structPtrUnwrap(struct_type.index).?.fields.values()[i].ty, + else => unreachable, + }; + field_ref.* = try sema.addConstant(field_ty, default_val.toValue()); } } @@ -28942,7 +29682,10 @@ fn coerceTupleToTuple( return sema.addConstant( tuple_ty, - try Value.Tag.aggregate.create(sema.arena, field_vals), + (try mod.intern(.{ .aggregate = .{ + .ty = tuple_ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(), ); } @@ -28959,7 +29702,7 @@ fn analyzeDeclVal( const decl_ref = try sema.analyzeDeclRefInner(decl_index, false); const result = try sema.analyzeLoad(block, src, decl_ref, src); if (Air.refToIndex(result)) |index| { - if (sema.air_instructions.items(.tag)[index] == .constant and !block.is_typeof) { + if (sema.air_instructions.items(.tag)[index] == .interned and !block.is_typeof) { try sema.decl_val_table.put(sema.gpa, decl_index, result); } } @@ -28980,13 +29723,14 @@ fn addReferencedBy( } fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void { - const decl = sema.mod.declPtr(decl_index); + const mod = sema.mod; + const decl = mod.declPtr(decl_index); if (decl.analysis == .in_progress) { - const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(), "dependency loop detected", .{}); + const msg = try Module.ErrorMsg.create(sema.gpa, decl.srcLoc(mod), "dependency loop detected", .{}); return sema.failWithOwnedErrorMsg(msg); } - sema.mod.ensureDeclAnalyzed(decl_index) catch |err| { + mod.ensureDeclAnalyzed(decl_index) catch |err| { if (sema.owner_func) |owner_func| { owner_func.state = .dependency_failure; } else { @@ -28996,7 +29740,7 @@ fn ensureDeclAnalyzed(sema: *Sema, decl_index: Decl.Index) CompileError!void { }; } -fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void { +fn ensureFuncBodyAnalyzed(sema: *Sema, func: Module.Fn.Index) CompileError!void { sema.mod.ensureFuncBodyAnalyzed(func) catch |err| 
{ if (sema.owner_func) |owner_func| { owner_func.state = .dependency_failure; @@ -29008,23 +29752,33 @@ fn ensureFuncBodyAnalyzed(sema: *Sema, func: *Module.Fn) CompileError!void { } fn refValue(sema: *Sema, block: *Block, ty: Type, val: Value) !Value { + const mod = sema.mod; var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); const decl = try anon_decl.finish( - try ty.copy(anon_decl.arena()), - try val.copy(anon_decl.arena()), + ty, + val, 0, // default alignment ); try sema.maybeQueueFuncBodyAnalysis(decl); - try sema.mod.declareDeclDependency(sema.owner_decl_index, decl); - return try Value.Tag.decl_ref.create(sema.arena, decl); + try mod.declareDeclDependency(sema.owner_decl_index, decl); + const result = try mod.intern(.{ .ptr = .{ + .ty = (try mod.singleConstPtrType(ty)).toIntern(), + .addr = .{ .decl = decl }, + } }); + return result.toValue(); } fn optRefValue(sema: *Sema, block: *Block, ty: Type, opt_val: ?Value) !Value { - const val = opt_val orelse return Value.null; - const ptr_val = try sema.refValue(block, ty, val); - const result = try Value.Tag.opt_payload.create(sema.arena, ptr_val); - return result; + const mod = sema.mod; + const ptr_anyopaque_ty = try mod.singleConstPtrType(Type.anyopaque); + return (try mod.intern(.{ .opt = .{ + .ty = (try mod.optionalType(ptr_anyopaque_ty.toIntern())).toIntern(), + .val = if (opt_val) |val| (try mod.getCoerced( + try sema.refValue(block, ty, val), + ptr_anyopaque_ty, + )).toIntern() else .none, + } })).toValue(); } fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref { @@ -29036,42 +29790,37 @@ fn analyzeDeclRef(sema: *Sema, decl_index: Decl.Index) CompileError!Air.Inst.Ref /// decl_ref to end up in runtime code, the function body must be analyzed: `analyzeDeclRef` wraps /// this function with `analyze_fn_body` set to true. 
fn analyzeDeclRefInner(sema: *Sema, decl_index: Decl.Index, analyze_fn_body: bool) CompileError!Air.Inst.Ref { - try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); + const mod = sema.mod; + try mod.declareDeclDependency(sema.owner_decl_index, decl_index); try sema.ensureDeclAnalyzed(decl_index); - const decl = sema.mod.declPtr(decl_index); + const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); - if (decl_tv.val.castTag(.variable)) |payload| { - const variable = payload.data; - const ty = try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = decl_tv.ty, - .mutable = variable.is_mutable, - .@"addrspace" = decl.@"addrspace", - .@"align" = decl.@"align", - }); - return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl_index)); - } + const ptr_ty = try mod.ptrType(.{ + .child = decl_tv.ty.toIntern(), + .flags = .{ + .alignment = InternPool.Alignment.fromByteUnits(decl.@"align"), + .is_const = if (decl.val.getVariable(mod)) |variable| variable.is_const else true, + .address_space = decl.@"addrspace", + }, + }); if (analyze_fn_body) { try sema.maybeQueueFuncBodyAnalysis(decl_index); } - return sema.addConstant( - try Type.ptr(sema.arena, sema.mod, .{ - .pointee_type = decl_tv.ty, - .mutable = false, - .@"addrspace" = decl.@"addrspace", - .@"align" = decl.@"align", - }), - try Value.Tag.decl_ref.create(sema.arena, decl_index), - ); + return sema.addConstant(ptr_ty, (try mod.intern(.{ .ptr = .{ + .ty = ptr_ty.toIntern(), + .addr = .{ .decl = decl_index }, + } })).toValue()); } fn maybeQueueFuncBodyAnalysis(sema: *Sema, decl_index: Decl.Index) !void { - const decl = sema.mod.declPtr(decl_index); + const mod = sema.mod; + const decl = mod.declPtr(decl_index); const tv = try decl.typedValue(); - if (tv.ty.zigTypeTag() != .Fn) return; + if (tv.ty.zigTypeTag(mod) != .Fn) return; if (!try sema.fnHasRuntimeBits(tv.ty)) return; - const func = tv.val.castTag(.function) orelse return; // undef or extern_fn - try sema.mod.ensureFuncBodyAnalysisQueued(func.data); + const func_index = mod.intern_pool.indexToFunc(tv.val.toIntern()).unwrap() orelse return; // undef or extern_fn + try mod.ensureFuncBodyAnalysisQueued(func_index); } fn analyzeRef( @@ -29083,18 +29832,16 @@ fn analyzeRef( const operand_ty = sema.typeOf(operand); if (try sema.resolveMaybeUndefVal(operand)) |val| { - switch (val.tag()) { - .extern_fn, .function => { - const decl_index = val.pointerDecl().?; - return sema.analyzeDeclRef(decl_index); - }, + switch (sema.mod.intern_pool.indexToKey(val.toIntern())) { + .extern_func => |extern_func| return sema.analyzeDeclRef(extern_func.decl), + .func => |func| return sema.analyzeDeclRef(sema.mod.funcPtr(func.index).owner_decl), else => {}, } var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - try operand_ty.copy(anon_decl.arena()), - try val.copy(anon_decl.arena()), + operand_ty, + val, 0, // default alignment )); } @@ -29124,9 +29871,10 @@ fn analyzeLoad( ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const ptr_ty = sema.typeOf(ptr); - const elem_ty = switch (ptr_ty.zigTypeTag()) { - .Pointer => ptr_ty.childType(), + const elem_ty = switch (ptr_ty.zigTypeTag(mod)) { + .Pointer => ptr_ty.childType(mod), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}), }; @@ -29136,11 +29884,11 @@ fn analyzeLoad( if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| { if (try 
sema.pointerDeref(block, src, ptr_val, ptr_ty)) |elem_val| { - return sema.addConstant(elem_ty, elem_val); + return sema.addConstant(elem_ty, try mod.getCoerced(elem_val, elem_ty)); } } - if (ptr_ty.ptrInfo().data.vector_index == .runtime) { + if (ptr_ty.ptrInfo(mod).vector_index == .runtime) { const ptr_inst = Air.refToIndex(ptr).?; const air_tags = sema.air_instructions.items(.tag); if (air_tags[ptr_inst] == .ptr_elem_ptr) { @@ -29163,11 +29911,11 @@ fn analyzeSlicePtr( slice: Air.Inst.Ref, slice_ty: Type, ) CompileError!Air.Inst.Ref { - const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer); - const result_ty = slice_ty.slicePtrFieldType(buf); + const mod = sema.mod; + const result_ty = slice_ty.slicePtrFieldType(mod); if (try sema.resolveMaybeUndefVal(slice)) |val| { - if (val.isUndef()) return sema.addConstUndef(result_ty); - return sema.addConstant(result_ty, val.slicePtr()); + if (val.isUndef(mod)) return sema.addConstUndef(result_ty); + return sema.addConstant(result_ty, val.slicePtr(mod)); } try sema.requireRuntimeBlock(block, slice_src, null); return block.addTyOp(.slice_ptr, result_ty, slice); @@ -29179,8 +29927,9 @@ fn analyzeSliceLen( src: LazySrcLoc, slice_inst: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; if (try sema.resolveMaybeUndefVal(slice_inst)) |slice_val| { - if (slice_val.isUndef()) { + if (slice_val.isUndef(mod)) { return sema.addConstUndef(Type.usize); } return sema.addIntUnsigned(Type.usize, slice_val.sliceLen(sema.mod)); @@ -29196,12 +29945,13 @@ fn analyzeIsNull( operand: Air.Inst.Ref, invert_logic: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const result_ty = Type.bool; if (try sema.resolveMaybeUndefVal(operand)) |opt_val| { - if (opt_val.isUndef()) { + if (opt_val.isUndef(mod)) { return sema.addConstUndef(result_ty); } - const is_null = opt_val.isNull(); + const is_null = opt_val.isNull(mod); const bool_value = if (invert_logic) !is_null else is_null; if (bool_value) { return Air.Inst.Ref.bool_true; @@ -29212,11 +29962,10 @@ fn analyzeIsNull( const inverted_non_null_res = if (invert_logic) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; const operand_ty = sema.typeOf(operand); - var buf: Type.Payload.ElemType = undefined; - if (operand_ty.zigTypeTag() == .Optional and operand_ty.optionalChild(&buf).zigTypeTag() == .NoReturn) { + if (operand_ty.zigTypeTag(mod) == .Optional and operand_ty.optionalChild(mod).zigTypeTag(mod) == .NoReturn) { return inverted_non_null_res; } - if (operand_ty.zigTypeTag() != .Optional and !operand_ty.isPtrLikeOptional()) { + if (operand_ty.zigTypeTag(mod) != .Optional and !operand_ty.isPtrLikeOptional(mod)) { return inverted_non_null_res; } try sema.requireRuntimeBlock(block, src, null); @@ -29230,11 +29979,12 @@ fn analyzePtrIsNonErrComptimeOnly( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const ptr_ty = sema.typeOf(operand); - assert(ptr_ty.zigTypeTag() == .Pointer); - const child_ty = ptr_ty.childType(); + assert(ptr_ty.zigTypeTag(mod) == .Pointer); + const child_ty = ptr_ty.childType(mod); - const child_tag = child_ty.zigTypeTag(); + const child_tag = child_ty.zigTypeTag(mod); if (child_tag != .ErrorSet and child_tag != .ErrorUnion) return Air.Inst.Ref.bool_true; if (child_tag == .ErrorSet) return Air.Inst.Ref.bool_false; assert(child_tag == .ErrorUnion); @@ -29251,14 +30001,15 @@ fn analyzeIsNonErrComptimeOnly( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const operand_ty = 
sema.typeOf(operand); - const ot = operand_ty.zigTypeTag(); + const ot = operand_ty.zigTypeTag(mod); if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; assert(ot == .ErrorUnion); - const payload_ty = operand_ty.errorUnionPayload(); - if (payload_ty.zigTypeTag() == .NoReturn) { + const payload_ty = operand_ty.errorUnionPayload(mod); + if (payload_ty.zigTypeTag(mod) == .NoReturn) { return Air.Inst.Ref.bool_false; } @@ -29279,50 +30030,56 @@ fn analyzeIsNonErrComptimeOnly( // exception if the error union error set is known to be empty, // we allow the comparison but always make it comptime-known. - const set_ty = operand_ty.errorUnionSet(); - switch (set_ty.tag()) { - .anyerror => {}, - .error_set_inferred => blk: { - // If the error set is empty, we must return a comptime true or false. - // However we want to avoid unnecessarily resolving an inferred error set - // in case it is already non-empty. - const ies = set_ty.castTag(.error_set_inferred).?.data; - if (ies.is_anyerror) break :blk; - if (ies.errors.count() != 0) break :blk; - if (maybe_operand_val == null) { - // Try to avoid resolving inferred error set if possible. + const set_ty = operand_ty.errorUnionSet(mod); + switch (set_ty.toIntern()) { + .anyerror_type => {}, + else => switch (mod.intern_pool.indexToKey(set_ty.toIntern())) { + .error_set_type => |error_set_type| { + if (error_set_type.names.len == 0) return Air.Inst.Ref.bool_true; + }, + .inferred_error_set_type => |ies_index| blk: { + // If the error set is empty, we must return a comptime true or false. + // However we want to avoid unnecessarily resolving an inferred error set + // in case it is already non-empty. + const ies = mod.inferredErrorSetPtr(ies_index); + if (ies.is_anyerror) break :blk; if (ies.errors.count() != 0) break :blk; - if (ies.is_anyerror) break :blk; - for (ies.inferred_error_sets.keys()) |other_ies| { - if (ies == other_ies) continue; - try sema.resolveInferredErrorSet(block, src, other_ies); - if (other_ies.is_anyerror) { - ies.is_anyerror = true; - ies.is_resolved = true; - break :blk; - } + if (maybe_operand_val == null) { + // Try to avoid resolving inferred error set if possible. + if (ies.errors.count() != 0) break :blk; + if (ies.is_anyerror) break :blk; + for (ies.inferred_error_sets.keys()) |other_ies_index| { + if (ies_index == other_ies_index) continue; + try sema.resolveInferredErrorSet(block, src, other_ies_index); + const other_ies = mod.inferredErrorSetPtr(other_ies_index); + if (other_ies.is_anyerror) { + ies.is_anyerror = true; + ies.is_resolved = true; + break :blk; + } - if (other_ies.errors.count() != 0) break :blk; + if (other_ies.errors.count() != 0) break :blk; + } + if (ies.func == sema.owner_func_index.unwrap()) { + // We're checking the inferred errorset of the current function and none of + // its child inferred error sets contained any errors meaning that any value + // so far with this type can't contain errors either. + return Air.Inst.Ref.bool_true; + } + try sema.resolveInferredErrorSet(block, src, ies_index); + if (ies.is_anyerror) break :blk; + if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true; } - if (ies.func == sema.owner_func) { - // We're checking the inferred errorset of the current function and none of - // its child inferred error sets contained any errors meaning that any value - // so far with this type can't contain errors either. 
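// Illustrative sketch, not part of the patch: the payoff of the resolution
// logic above. Once an inferred error set is known to be empty, is-non-error
// folds to a comptime `true`, so no runtime error check is emitted:
//
//   fn f() !u32 { return 1; }        // inferred error set stays empty
//   const x = f() catch unreachable; // comptime-known to never trip
//
// Resolution is deferred where possible, as the surrounding comments note,
// in case the set is already known to be non-empty.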
- return Air.Inst.Ref.bool_true; - } - try sema.resolveInferredErrorSet(block, src, ies); - if (ies.is_anyerror) break :blk; - if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true; - } + }, + else => unreachable, }, - else => if (set_ty.errorSetNames().len == 0) return Air.Inst.Ref.bool_true, } if (maybe_operand_val) |err_union| { - if (err_union.isUndef()) { + if (err_union.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - if (err_union.getError() == null) { + if (err_union.getErrorName(mod) == .none) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; @@ -29375,72 +30132,78 @@ fn analyzeSlice( end_src: LazySrcLoc, by_length: bool, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; // Slice expressions can operate on a variable whose type is an array. This requires // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer. const ptr_ptr_ty = sema.typeOf(ptr_ptr); - const target = sema.mod.getTarget(); - const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag()) { - .Pointer => ptr_ptr_ty.elemType(), - else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(sema.mod)}), + const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag(mod)) { + .Pointer => ptr_ptr_ty.childType(mod), + else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty.fmt(mod)}), }; - const mod = sema.mod; var array_ty = ptr_ptr_child_ty; var slice_ty = ptr_ptr_ty; var ptr_or_slice = ptr_ptr; var elem_ty: Type = undefined; var ptr_sentinel: ?Value = null; - switch (ptr_ptr_child_ty.zigTypeTag()) { + switch (ptr_ptr_child_ty.zigTypeTag(mod)) { .Array => { - ptr_sentinel = ptr_ptr_child_ty.sentinel(); - elem_ty = ptr_ptr_child_ty.childType(); + ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); + elem_ty = ptr_ptr_child_ty.childType(mod); }, - .Pointer => switch (ptr_ptr_child_ty.ptrSize()) { + .Pointer => switch (ptr_ptr_child_ty.ptrSize(mod)) { .One => { - const double_child_ty = ptr_ptr_child_ty.childType(); - if (double_child_ty.zigTypeTag() == .Array) { - ptr_sentinel = double_child_ty.sentinel(); + const double_child_ty = ptr_ptr_child_ty.childType(mod); + if (double_child_ty.zigTypeTag(mod) == .Array) { + ptr_sentinel = double_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = double_child_ty; - elem_ty = double_child_ty.childType(); + elem_ty = double_child_ty.childType(mod); } else { return sema.fail(block, src, "slice of single-item pointer", .{}); } }, .Many, .C => { - ptr_sentinel = ptr_ptr_child_ty.sentinel(); + ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = ptr_ptr_child_ty; - elem_ty = ptr_ptr_child_ty.childType(); + elem_ty = ptr_ptr_child_ty.childType(mod); - if (ptr_ptr_child_ty.ptrSize() == .C) { + if (ptr_ptr_child_ty.ptrSize(mod) == .C) { if (try sema.resolveDefinedValue(block, ptr_src, ptr_or_slice)) |ptr_val| { - if (ptr_val.isNull()) { + if (ptr_val.isNull(mod)) { return sema.fail(block, src, "slice of null pointer", .{}); } } } }, .Slice => { - ptr_sentinel = ptr_ptr_child_ty.sentinel(); + ptr_sentinel = ptr_ptr_child_ty.sentinel(mod); ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src); slice_ty = ptr_ptr_child_ty; array_ty = ptr_ptr_child_ty; - elem_ty = ptr_ptr_child_ty.childType(); + elem_ty = ptr_ptr_child_ty.childType(mod); }, }, else => return sema.fail(block, src, 
"slice of non-array type '{}'", .{ptr_ptr_child_ty.fmt(mod)}), } - const ptr = if (slice_ty.isSlice()) + const ptr = if (slice_ty.isSlice(mod)) try sema.analyzeSlicePtr(block, ptr_src, ptr_or_slice, slice_ty) - else - ptr_or_slice; + else if (array_ty.zigTypeTag(mod) == .Array) ptr: { + var manyptr_ty_key = mod.intern_pool.indexToKey(slice_ty.toIntern()).ptr_type; + assert(manyptr_ty_key.child == array_ty.toIntern()); + assert(manyptr_ty_key.flags.size == .One); + manyptr_ty_key.child = elem_ty.toIntern(); + manyptr_ty_key.flags.size = .Many; + break :ptr try sema.coerceCompatiblePtrs(block, try mod.ptrType(manyptr_ty_key), ptr_or_slice, ptr_src); + } else ptr_or_slice; const start = try sema.coerce(block, Type.usize, uncasted_start, start_src); const new_ptr = try sema.analyzePtrArithmetic(block, src, ptr, start, .ptr_add, ptr_src, start_src); + const new_ptr_ty = sema.typeOf(new_ptr); // true if and only if the end index of the slice, implicitly or explicitly, equals // the length of the underlying object being sliced. we might learn the length of the @@ -29448,8 +30211,8 @@ fn analyzeSlice( // we might learn of the length because it is a comptime-known slice value. var end_is_len = uncasted_end_opt == .none; const end = e: { - if (array_ty.zigTypeTag() == .Array) { - const len_val = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()); + if (array_ty.zigTypeTag(mod) == .Array) { + const len_val = try mod.intValue(Type.usize, array_ty.arrayLen(mod)); if (!end_is_len) { const end = if (by_length) end: { @@ -29458,12 +30221,12 @@ fn analyzeSlice( break :end try sema.coerce(block, Type.usize, uncasted_end, end_src); } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); if (try sema.resolveMaybeUndefVal(end)) |end_val| { - const len_s_val = try Value.Tag.int_u64.create( - sema.arena, - array_ty.arrayLenIncludingSentinel(), + const len_s_val = try mod.intValue( + Type.usize, + array_ty.arrayLenIncludingSentinel(mod), ); if (!(try sema.compareAll(end_val, .lte, len_s_val, Type.usize))) { - const sentinel_label: []const u8 = if (array_ty.sentinel() != null) + const sentinel_label: []const u8 = if (array_ty.sentinel(mod) != null) " +1 (sentinel)" else ""; @@ -29491,7 +30254,7 @@ fn analyzeSlice( } break :e try sema.addConstant(Type.usize, len_val); - } else if (slice_ty.isSlice()) { + } else if (slice_ty.isSlice(mod)) { if (!end_is_len) { const end = if (by_length) end: { const len = try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); @@ -29500,16 +30263,14 @@ fn analyzeSlice( } else try sema.coerce(block, Type.usize, uncasted_end_opt, end_src); if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { if (try sema.resolveMaybeUndefVal(ptr_or_slice)) |slice_val| { - if (slice_val.isUndef()) { + if (slice_val.isUndef(mod)) { return sema.fail(block, src, "slice of undefined", .{}); } - const has_sentinel = slice_ty.sentinel() != null; - var int_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = slice_val.sliceLen(mod) + @boolToInt(has_sentinel), - }; - const slice_len_val = Value.initPayload(&int_payload.base); - if (!(try sema.compareAll(end_val, .lte, slice_len_val, Type.usize))) { + const has_sentinel = slice_ty.sentinel(mod) != null; + const slice_len = slice_val.sliceLen(mod); + const len_plus_sent = slice_len + @boolToInt(has_sentinel); + const slice_len_val_with_sentinel = try mod.intValue(Type.usize, len_plus_sent); + if (!(try sema.compareAll(end_val, .lte, slice_len_val_with_sentinel, Type.usize))) { const 
sentinel_label: []const u8 = if (has_sentinel) " +1 (sentinel)" else @@ -29527,13 +30288,10 @@ fn analyzeSlice( ); } - // If the slice has a sentinel, we subtract one so that - // end_is_len is only true if it equals the length WITHOUT - // the sentinel, so we don't add a sentinel type. - if (has_sentinel) { - int_payload.data -= 1; - } - + // If the slice has a sentinel, we consider end_is_len + // to be true only if it equals the length WITHOUT the + // sentinel, so we don't add a sentinel type. + const slice_len_val = try mod.intValue(Type.usize, slice_len); if (end_val.eql(slice_len_val, Type.usize, mod)) { end_is_len = true; } @@ -29569,11 +30327,12 @@ fn analyzeSlice( }; const slice_sentinel = if (sentinel_opt != .none) sentinel else null; + var checked_start_lte_end = by_length; + var runtime_src: ?LazySrcLoc = null; + // requirement: start <= end - var need_start_gt_end_check = true; if (try sema.resolveDefinedValue(block, end_src, end)) |end_val| { if (try sema.resolveDefinedValue(block, start_src, start)) |start_val| { - need_start_gt_end_check = false; if (!by_length and !(try sema.compareAll(start_val, .lte, end_val, Type.usize))) { return sema.fail( block, @@ -29585,14 +30344,18 @@ fn analyzeSlice( }, ); } + checked_start_lte_end = true; if (try sema.resolveMaybeUndefVal(new_ptr)) |ptr_val| sentinel_check: { const expected_sentinel = sentinel orelse break :sentinel_check; - const start_int = start_val.getUnsignedInt(sema.mod.getTarget()).?; - const end_int = end_val.getUnsignedInt(sema.mod.getTarget()).?; + const start_int = start_val.getUnsignedInt(mod).?; + const end_int = end_val.getUnsignedInt(mod).?; const sentinel_index = try sema.usizeCast(block, end_src, end_int - start_int); - const elem_ptr = try ptr_val.elemPtr(sema.typeOf(new_ptr), sema.arena, sentinel_index, sema.mod); - const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty, false); + const many_ptr_ty = try mod.manyConstPtrType(elem_ty); + const many_ptr_val = try mod.getCoerced(ptr_val, many_ptr_ty); + const elem_ptr_ty = try mod.singleConstPtrType(elem_ty); + const elem_ptr = try many_ptr_val.elemPtr(elem_ptr_ty, sentinel_index, mod); + const res = try sema.pointerDerefExtra(block, src, elem_ptr, elem_ty); const actual_sentinel = switch (res) { .runtime_load => break :sentinel_check, .val => |v| v, @@ -29600,36 +30363,49 @@ fn analyzeSlice( block, src, "comptime dereference requires '{}' to have a well-defined layout, but it does not.", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ), .out_of_bounds => |ty| return sema.fail( block, end_src, "slice end index {d} exceeds bounds of containing decl of type '{}'", - .{ end_int, ty.fmt(sema.mod) }, + .{ end_int, ty.fmt(mod) }, ), }; - if (!actual_sentinel.eql(expected_sentinel, elem_ty, sema.mod)) { + if (!actual_sentinel.eql(expected_sentinel, elem_ty, mod)) { const msg = msg: { const msg = try sema.errMsg(block, src, "value in memory does not match slice sentinel", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(block, src, msg, "expected '{}', found '{}'", .{ - expected_sentinel.fmtValue(elem_ty, sema.mod), - actual_sentinel.fmtValue(elem_ty, sema.mod), + expected_sentinel.fmtValue(elem_ty, mod), + actual_sentinel.fmtValue(elem_ty, mod), }); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } + } else { + runtime_src = ptr_src; } + } else { + runtime_src = start_src; } + } else { + runtime_src = end_src; } - if (!by_length and block.wantSafety() and !block.is_comptime and need_start_gt_end_check) { + if (!checked_start_lte_end and 
block.wantSafety() and !block.is_comptime) { // requirement: start <= end - try sema.panicStartGreaterThanEnd(block, start, end); + assert(!block.is_comptime); + try sema.requireRuntimeBlock(block, src, runtime_src.?); + const ok = try block.addBinOp(.cmp_lte, start, end); + if (!sema.mod.comp.formatted_panics) { + try sema.addSafetyCheck(block, ok, .start_index_greater_than_end); + } else { + try sema.safetyCheckFormatted(block, ok, "panicStartGreaterThanEnd", &.{ start, end }); + } } const new_len = if (by_length) try sema.coerce(block, Type.usize, uncasted_end_opt, end_src) @@ -29637,11 +30413,11 @@ fn analyzeSlice( try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src, false); const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len); - const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data; - const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize() != .C; + const new_ptr_ty_info = new_ptr_ty.ptrInfo(mod); + const new_allowzero = new_ptr_ty_info.@"allowzero" and sema.typeOf(ptr).ptrSize(mod) != .C; if (opt_new_len_val) |new_len_val| { - const new_len_int = new_len_val.toUnsignedInt(target); + const new_len_int = new_len_val.toUnsignedInt(mod); const return_ty = try Type.ptr(sema.arena, mod, .{ .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty, mod), @@ -29659,14 +30435,14 @@ fn analyzeSlice( const result = try block.addBitCast(return_ty, new_ptr); if (block.wantSafety()) { // requirement: slicing C ptr is non-null - if (ptr_ptr_child_ty.isCPtr()) { + if (ptr_ptr_child_ty.isCPtr(mod)) { const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } - if (slice_ty.isSlice()) { + if (slice_ty.isSlice(mod)) { const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); - const actual_len = if (slice_ty.sentinel() == null) + const actual_len = if (slice_ty.sentinel(mod) == null) slice_len_inst else try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true); @@ -29685,8 +30461,11 @@ fn analyzeSlice( return result; }; - if (!new_ptr_val.isUndef()) { - return sema.addConstant(return_ty, new_ptr_val); + if (!new_ptr_val.isUndef(mod)) { + return sema.addConstant(return_ty, try mod.getCoerced( + (try new_ptr_val.intern(new_ptr_ty, mod)).toValue(), + return_ty, + )); } // Special case: @as([]i32, undefined)[x..x] @@ -29708,25 +30487,18 @@ fn analyzeSlice( .size = .Slice, }); - const runtime_src = if ((try sema.resolveMaybeUndefVal(ptr_or_slice)) == null) - ptr_src - else if ((try sema.resolveMaybeUndefVal(start)) == null) - start_src - else - end_src; - - try sema.requireRuntimeBlock(block, src, runtime_src); + try sema.requireRuntimeBlock(block, src, runtime_src.?); if (block.wantSafety()) { // requirement: slicing C ptr is non-null - if (ptr_ptr_child_ty.isCPtr()) { + if (ptr_ptr_child_ty.isCPtr(mod)) { const is_non_null = try sema.analyzeIsNull(block, ptr_src, ptr, true); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } // requirement: end <= len - const opt_len_inst = if (array_ty.zigTypeTag() == .Array) - try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel()) - else if (slice_ty.isSlice()) blk: { + const opt_len_inst = if (array_ty.zigTypeTag(mod) == .Array) + try sema.addIntUnsigned(Type.usize, array_ty.arrayLenIncludingSentinel(mod)) + else if (slice_ty.isSlice(mod)) blk: { if (try sema.resolveDefinedValue(block, src, ptr_or_slice)) |slice_val| { // we 
don't need to add one for sentinels because the // underlying value data includes the sentinel @@ -29734,7 +30506,7 @@ fn analyzeSlice( } const slice_len_inst = try block.addTyOp(.slice_len, Type.usize, ptr_or_slice); - if (slice_ty.sentinel() == null) break :blk slice_len_inst; + if (slice_ty.sentinel(mod) == null) break :blk slice_len_inst; // we have to add one because slice lengths don't include the sentinel break :blk try sema.analyzeArithmetic(block, .add, slice_len_inst, .one, src, end_src, end_src, true); @@ -29778,15 +30550,16 @@ fn cmpNumeric( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(uncasted_lhs); const rhs_ty = sema.typeOf(uncasted_rhs); - assert(lhs_ty.isNumeric()); - assert(rhs_ty.isNumeric()); + assert(lhs_ty.isNumeric(mod)); + assert(rhs_ty.isNumeric(mod)); - const lhs_ty_tag = lhs_ty.zigTypeTag(); - const rhs_ty_tag = rhs_ty.zigTypeTag(); - const target = sema.mod.getTarget(); + const lhs_ty_tag = lhs_ty.zigTypeTag(mod); + const rhs_ty_tag = rhs_ty.zigTypeTag(mod); + const target = mod.getTarget(); // One exception to heterogeneous comparison: comptime_float needs to // coerce to fixed-width float. @@ -29805,49 +30578,45 @@ fn cmpNumeric( if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { // Compare ints: const vs. undefined (or vice versa) - if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt() and rhs_val.isUndef()) { - try sema.resolveLazyValue(lhs_val); - if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| { + if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod) and rhs_val.isUndef(mod)) { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } - } else if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt() and lhs_val.isUndef()) { - try sema.resolveLazyValue(rhs_val); - if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| { + } else if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod) and lhs_val.isUndef(mod)) { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { return sema.addConstUndef(Type.bool); } - if (lhs_val.isNan() or rhs_val.isNan()) { + if (lhs_val.isNan(mod) or rhs_val.isNan(mod)) { if (op == std.math.CompareOperator.neq) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } - if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, target, sema)) { + if (try Value.compareHeteroAdvanced(lhs_val, op, rhs_val, mod, sema)) { return Air.Inst.Ref.bool_true; } else { return Air.Inst.Ref.bool_false; } } else { - if (!lhs_val.isUndef() and (lhs_ty.isInt() or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt()) { + if (!lhs_val.isUndef(mod) and (lhs_ty.isInt(mod) or lhs_ty_tag == .ComptimeInt) and rhs_ty.isInt(mod)) { // Compare ints: const vs. 
var - try sema.resolveLazyValue(lhs_val); - if (sema.compareIntsOnlyPossibleResult(target, lhs_val, op, rhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } break :src rhs_src; } } else { - if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { - if (!rhs_val.isUndef() and (rhs_ty.isInt() or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt()) { + if (try sema.resolveMaybeUndefLazyVal(rhs)) |rhs_val| { + if (!rhs_val.isUndef(mod) and (rhs_ty.isInt(mod) or rhs_ty_tag == .ComptimeInt) and lhs_ty.isInt(mod)) { // Compare ints: var vs. const - try sema.resolveLazyValue(rhs_val); - if (sema.compareIntsOnlyPossibleResult(target, rhs_val, op.reverse(), lhs_ty)) |res| { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| { return if (res) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false; } } @@ -29901,32 +30670,31 @@ fn cmpNumeric( const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| !(try lhs_val.compareAllWithZeroAdvanced(.gte, sema)) else - (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt()); + (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(mod)); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| !(try rhs_val.compareAllWithZeroAdvanced(.gte, sema)) else - (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt()); + (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(mod)); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; var dest_float_type: ?Type = null; var lhs_bits: usize = undefined; - if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| { - try sema.resolveLazyValue(lhs_val); - if (lhs_val.isUndef()) + if (try sema.resolveMaybeUndefLazyVal(lhs)) |lhs_val| { + if (lhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); - if (lhs_val.isNan()) switch (op) { + if (lhs_val.isNan(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, else => return Air.Inst.Ref.bool_false, }; - if (lhs_val.isInf()) switch (op) { + if (lhs_val.isInf(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, .eq => return Air.Inst.Ref.bool_false, - .gt, .gte => return if (lhs_val.isNegativeInf()) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, - .lt, .lte => return if (lhs_val.isNegativeInf()) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, + .gt, .gte => return if (lhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, + .lt, .lte => return if (lhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, }; if (!rhs_is_signed) { - switch (lhs_val.orderAgainstZero()) { + switch (lhs_val.orderAgainstZero(mod)) { .gt => {}, .eq => switch (op) { // LHS = 0, RHS is unsigned .lte => return Air.Inst.Ref.bool_true, @@ -29940,7 +30708,7 @@ fn cmpNumeric( } } if (lhs_is_float) { - if (lhs_val.floatHasFraction()) { + if (lhs_val.floatHasFraction(mod)) { switch (op) { .eq => return Air.Inst.Ref.bool_false, .neq => return Air.Inst.Ref.bool_true, @@ -29948,9 +30716,9 @@ fn cmpNumeric( } } - var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128)); + var bigint = try float128IntPartToBigInt(sema.gpa, lhs_val.toFloat(f128, mod)); defer bigint.deinit(); - if (lhs_val.floatHasFraction()) { + if (lhs_val.floatHasFraction(mod)) { if (lhs_is_signed) { try bigint.addScalar(&bigint, -1); } else { @@ -29959,33 +30727,32 @@ fn cmpNumeric( } lhs_bits = bigint.toConst().bitCountTwosComp(); } else { - lhs_bits 
= lhs_val.intBitCountTwosComp(target); + lhs_bits = lhs_val.intBitCountTwosComp(mod); } lhs_bits += @boolToInt(!lhs_is_signed and dest_int_is_signed); } else if (lhs_is_float) { dest_float_type = lhs_ty; } else { - const int_info = lhs_ty.intInfo(target); + const int_info = lhs_ty.intInfo(mod); lhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } var rhs_bits: usize = undefined; - if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| { - try sema.resolveLazyValue(rhs_val); - if (rhs_val.isUndef()) + if (try sema.resolveMaybeUndefLazyVal(rhs)) |rhs_val| { + if (rhs_val.isUndef(mod)) return sema.addConstUndef(Type.bool); - if (rhs_val.isNan()) switch (op) { + if (rhs_val.isNan(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, else => return Air.Inst.Ref.bool_false, }; - if (rhs_val.isInf()) switch (op) { + if (rhs_val.isInf(mod)) switch (op) { .neq => return Air.Inst.Ref.bool_true, .eq => return Air.Inst.Ref.bool_false, - .gt, .gte => return if (rhs_val.isNegativeInf()) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, - .lt, .lte => return if (rhs_val.isNegativeInf()) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, + .gt, .gte => return if (rhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_true else Air.Inst.Ref.bool_false, + .lt, .lte => return if (rhs_val.isNegativeInf(mod)) Air.Inst.Ref.bool_false else Air.Inst.Ref.bool_true, }; if (!lhs_is_signed) { - switch (rhs_val.orderAgainstZero()) { + switch (rhs_val.orderAgainstZero(mod)) { .gt => {}, .eq => switch (op) { // RHS = 0, LHS is unsigned .gte => return Air.Inst.Ref.bool_true, @@ -29999,7 +30766,7 @@ fn cmpNumeric( } } if (rhs_is_float) { - if (rhs_val.floatHasFraction()) { + if (rhs_val.floatHasFraction(mod)) { switch (op) { .eq => return Air.Inst.Ref.bool_false, .neq => return Air.Inst.Ref.bool_true, @@ -30007,9 +30774,9 @@ fn cmpNumeric( } } - var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128)); + var bigint = try float128IntPartToBigInt(sema.gpa, rhs_val.toFloat(f128, mod)); defer bigint.deinit(); - if (rhs_val.floatHasFraction()) { + if (rhs_val.floatHasFraction(mod)) { if (rhs_is_signed) { try bigint.addScalar(&bigint, -1); } else { @@ -30018,13 +30785,13 @@ fn cmpNumeric( } rhs_bits = bigint.toConst().bitCountTwosComp(); } else { - rhs_bits = rhs_val.intBitCountTwosComp(target); + rhs_bits = rhs_val.intBitCountTwosComp(mod); } rhs_bits += @boolToInt(!rhs_is_signed and dest_int_is_signed); } else if (rhs_is_float) { dest_float_type = rhs_ty; } else { - const int_info = rhs_ty.intInfo(target); + const int_info = rhs_ty.intInfo(mod); rhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } @@ -30032,7 +30799,7 @@ fn cmpNumeric( const max_bits = std.math.max(lhs_bits, rhs_bits); const casted_bits = std.math.cast(u16, max_bits) orelse return sema.fail(block, src, "{d} exceeds maximum integer bit count", .{max_bits}); const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; - break :blk try Module.makeIntType(sema.arena, signedness, casted_bits); + break :blk try mod.intType(signedness, casted_bits); }; const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src); @@ -30040,13 +30807,20 @@ fn cmpNumeric( return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs); } -/// Asserts that LHS value is an int or comptime int and not undefined, and that RHS type is an int. 
-/// Given a const LHS and an unknown RHS, attempt to determine whether `op` has a guaranteed result. +/// Asserts that LHS value is an int or comptime int and not undefined, and +/// that RHS type is an int. Given a const LHS and an unknown RHS, attempt to +/// determine whether `op` has a guaranteed result. /// If it cannot be determined, returns null. /// Otherwise returns a bool for the guaranteed comparison operation. -fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value, op: std.math.CompareOperator, rhs_ty: Type) ?bool { - const rhs_info = rhs_ty.intInfo(target); - const vs_zero = lhs_val.orderAgainstZeroAdvanced(sema) catch unreachable; +fn compareIntsOnlyPossibleResult( + sema: *Sema, + lhs_val: Value, + op: std.math.CompareOperator, + rhs_ty: Type, +) Allocator.Error!?bool { + const mod = sema.mod; + const rhs_info = rhs_ty.intInfo(mod); + const vs_zero = lhs_val.orderAgainstZeroAdvanced(mod, sema) catch unreachable; const is_zero = vs_zero == .eq; const is_negative = vs_zero == .lt; const is_positive = vs_zero == .gt; @@ -30078,7 +30852,7 @@ fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value }; const sign_adj = @boolToInt(!is_negative and rhs_info.signedness == .signed); - const req_bits = lhs_val.intBitCountTwosComp(target) + sign_adj; + const req_bits = lhs_val.intBitCountTwosComp(mod) + sign_adj; // No sized type can have more than 65535 bits. // The RHS type operand is either a runtime value or sized (but undefined) constant. @@ -30111,12 +30885,11 @@ fn compareIntsOnlyPossibleResult(sema: *Sema, target: std.Target, lhs_val: Value .max = false, }; - var ty_buffer: Type.Payload.Bits = .{ - .base = .{ .tag = if (is_negative) .int_signed else .int_unsigned }, - .data = @intCast(u16, req_bits), - }; - const ty = Type.initPayload(&ty_buffer.base); - const pop_count = lhs_val.popCount(ty, target); + const ty = try mod.intType( + if (is_negative) .signed else .unsigned, + @intCast(u16, req_bits), + ); + const pop_count = lhs_val.popCount(ty, mod); if (is_negative) { break :edge .{ @@ -30152,22 +30925,26 @@ fn cmpVector( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { + const mod = sema.mod; const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); - assert(lhs_ty.zigTypeTag() == .Vector); - assert(rhs_ty.zigTypeTag() == .Vector); + assert(lhs_ty.zigTypeTag(mod) == .Vector); + assert(rhs_ty.zigTypeTag(mod) == .Vector); try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src); const resolved_ty = try sema.resolvePeerTypes(block, src, &.{ lhs, rhs }, .{ .override = &.{ lhs_src, rhs_src } }); const casted_lhs = try sema.coerce(block, resolved_ty, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src); - const result_ty = try Type.vector(sema.arena, lhs_ty.vectorLen(), Type.bool); + const result_ty = try mod.vectorType(.{ + .len = lhs_ty.vectorLen(mod), + .child = .bool_type, + }); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(casted_lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(casted_rhs)) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { + if (lhs_val.isUndef(mod) or rhs_val.isUndef(mod)) { return sema.addConstUndef(result_ty); } const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty); @@ -30192,7 +30969,10 @@ fn wrapOptional( inst_src: LazySrcLoc, ) !Air.Inst.Ref { if (try sema.resolveMaybeUndefVal(inst)) |val| { - return sema.addConstant(dest_ty, try 
Value.Tag.opt_payload.create(sema.arena, val)); + return sema.addConstant(dest_ty, (try sema.mod.intern(.{ .opt = .{ + .ty = dest_ty.toIntern(), + .val = val.toIntern(), + } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -30206,10 +30986,14 @@ fn wrapErrorUnionPayload( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const dest_payload_ty = dest_ty.errorUnionPayload(); + const mod = sema.mod; + const dest_payload_ty = dest_ty.errorUnionPayload(mod); const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false }); if (try sema.resolveMaybeUndefVal(coerced)) |val| { - return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val)); + return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{ + .ty = dest_ty.toIntern(), + .val = .{ .payload = try val.intern(dest_payload_ty, mod) }, + } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); try sema.queueFullTypeResolution(dest_payload_ty); @@ -30223,48 +31007,41 @@ fn wrapErrorUnionSet( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; + const ip = &mod.intern_pool; const inst_ty = sema.typeOf(inst); - const dest_err_set_ty = dest_ty.errorUnionSet(); + const dest_err_set_ty = dest_ty.errorUnionSet(mod); if (try sema.resolveMaybeUndefVal(inst)) |val| { - switch (dest_err_set_ty.tag()) { - .anyerror => {}, - .error_set_single => ok: { - const expected_name = val.castTag(.@"error").?.data.name; - const n = dest_err_set_ty.castTag(.error_set_single).?.data; - if (mem.eql(u8, expected_name, n)) break :ok; - return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - }, - .error_set => { - const expected_name = val.castTag(.@"error").?.data.name; - const error_set = dest_err_set_ty.castTag(.error_set).?.data; - if (!error_set.names.contains(expected_name)) { + switch (dest_err_set_ty.toIntern()) { + .anyerror_type => {}, + else => switch (ip.indexToKey(dest_err_set_ty.toIntern())) { + .error_set_type => |error_set_type| ok: { + const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; + if (error_set_type.nameIndex(ip, expected_name) != null) break :ok; return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - } - }, - .error_set_inferred => ok: { - const expected_name = val.castTag(.@"error").?.data.name; - const ies = dest_err_set_ty.castTag(.error_set_inferred).?.data; + }, + .inferred_error_set_type => |ies_index| ok: { + const ies = mod.inferredErrorSetPtr(ies_index); + const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name; - // We carefully do this in an order that avoids unnecessarily - // resolving the destination error set type. - if (ies.is_anyerror) break :ok; - if (ies.errors.contains(expected_name)) break :ok; - if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) { - break :ok; - } + // We carefully do this in an order that avoids unnecessarily + // resolving the destination error set type. 
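// Illustrative sketch, not part of this change: the ordering the comment
// above describes, written as a standalone fast path. `InferredErrorSet`
// here is a hypothetical stand-in for the compiler's real type, and the
// expensive fallback (the full coercion query, which may force resolution
// of the destination set) is deliberately left out.
const std = @import("std");

const InferredErrorSet = struct {
    is_anyerror: bool = false,
    errors: std.StringHashMapUnmanaged(void) = .{},

    // Cheapest test first: `anyerror` accepts every error name outright,
    // then the names collected so far are consulted; only after both fail
    // would the real code fall back to the coercion check.
    fn quickContains(ies: *const InferredErrorSet, name: []const u8) bool {
        if (ies.is_anyerror) return true;
        return ies.errors.contains(name);
    }
};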
+ if (ies.is_anyerror) break :ok; + + if (ies.errors.contains(expected_name)) break :ok; + if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) break :ok; - return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - }, - .error_set_merged => { - const expected_name = val.castTag(.@"error").?.data.name; - const error_set = dest_err_set_ty.castTag(.error_set_merged).?.data; - if (!error_set.contains(expected_name)) { return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty); - } + }, + else => unreachable, }, - else => unreachable, } - return sema.addConstant(dest_ty, val); + return sema.addConstant(dest_ty, (try mod.intern(.{ .error_union = .{ + .ty = dest_ty.toIntern(), + .val = .{ + .err_name = mod.intern_pool.indexToKey(try val.intern(dest_err_set_ty, mod)).err.name, + }, + } })).toValue()); } try sema.requireRuntimeBlock(block, inst_src, null); @@ -30279,11 +31056,12 @@ fn unionToTag( un: Air.Inst.Ref, un_src: LazySrcLoc, ) !Air.Inst.Ref { + const mod = sema.mod; if ((try sema.typeHasOnePossibleValue(enum_ty))) |opv| { return sema.addConstant(enum_ty, opv); } if (try sema.resolveMaybeUndefVal(un)) |un_val| { - return sema.addConstant(enum_ty, un_val.unionTag()); + return sema.addConstant(enum_ty, un_val.unionTag(mod)); } try sema.requireRuntimeBlock(block, un_src, null); return block.addTyOp(.get_union_tag, enum_ty, un); @@ -30296,16 +31074,17 @@ fn resolvePeerTypes( instructions: []const Air.Inst.Ref, candidate_srcs: Module.PeerTypeCandidateSrc, ) !Type { + const mod = sema.mod; switch (instructions.len) { - 0 => return Type.initTag(.noreturn), + 0 => return Type.noreturn, 1 => return sema.typeOf(instructions[0]), else => {}, } - const target = sema.mod.getTarget(); + const target = mod.getTarget(); var chosen = instructions[0]; - // If this is non-null then it does the following thing, depending on the chosen zigTypeTag(). + // If this is non-null then it does the following thing, depending on the chosen zigTypeTag(mod). // * ErrorSet: this is an override // * ErrorUnion: this is an override of the error set only // * other: at the end we make an ErrorUnion with the other thing and this @@ -30318,8 +31097,8 @@ fn resolvePeerTypes( const candidate_ty = sema.typeOf(candidate); const chosen_ty = sema.typeOf(chosen); - const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(); - const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(); + const candidate_ty_tag = try candidate_ty.zigTypeTagOrPoison(mod); + const chosen_ty_tag = try chosen_ty.zigTypeTagOrPoison(mod); // If the candidate can coerce into our chosen type, we're done. // If the chosen type can coerce into the candidate, use that. 
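// Illustrative sketch, not part of this change: the shape of the loop the
// two comments above describe. `coercesTo` is a hypothetical stand-in for
// Sema's in-memory coercion check, reduced to a toy rule.
const std = @import("std");

fn coercesTo(from: []const u8, to: []const u8) bool {
    // Toy rule for the sketch: a "type" only coerces to itself.
    return std.mem.eql(u8, from, to);
}

// Keep a running "chosen" peer: a candidate that coerces into it is
// absorbed, a chosen that coerces into the candidate is replaced, and
// anything else is an incompatibility (a compile error in the real code).
fn resolvePeers(types: []const []const u8) ?[]const u8 {
    var chosen = types[0];
    for (types[1..]) |candidate| {
        if (coercesTo(candidate, chosen)) continue;
        if (coercesTo(chosen, candidate)) {
            chosen = candidate;
            continue;
        }
        return null;
    }
    return chosen;
}

test "peer resolution sketch" {
    try std.testing.expectEqualStrings("u32", resolvePeers(&.{ "u32", "u32" }).?);
    try std.testing.expect(resolvePeers(&.{ "u32", "f64" }) == null);
}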
@@ -30347,8 +31126,8 @@ fn resolvePeerTypes( continue; }, .Int => { - const chosen_info = chosen_ty.intInfo(target); - const candidate_info = candidate_ty.intInfo(target); + const chosen_info = chosen_ty.intInfo(mod); + const candidate_info = candidate_ty.intInfo(mod); if (chosen_info.bits < candidate_info.bits) { chosen = candidate; @@ -30356,12 +31135,12 @@ fn resolvePeerTypes( } continue; }, - .Pointer => if (chosen_ty.ptrSize() == .C) continue, + .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue, else => {}, }, .ComptimeInt => switch (chosen_ty_tag) { .Int, .Float, .ComptimeFloat => continue, - .Pointer => if (chosen_ty.ptrSize() == .C) continue, + .Pointer => if (chosen_ty.ptrSize(mod) == .C) continue, else => {}, }, .Float => switch (chosen_ty_tag) { @@ -30426,11 +31205,11 @@ fn resolvePeerTypes( continue; } - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty); continue; }, .ErrorUnion => { - const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); + const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod); if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_ty, src, src)) { continue; @@ -30440,7 +31219,7 @@ fn resolvePeerTypes( continue; } - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty); continue; }, else => { @@ -30453,7 +31232,7 @@ fn resolvePeerTypes( continue; } - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_ty); continue; } else { err_set_ty = candidate_ty; @@ -30464,14 +31243,14 @@ fn resolvePeerTypes( .ErrorUnion => switch (chosen_ty_tag) { .ErrorSet => { const chosen_set_ty = err_set_ty orelse chosen_ty; - const candidate_set_ty = candidate_ty.errorUnionSet(); + const candidate_set_ty = candidate_ty.errorUnionSet(mod); if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) { err_set_ty = chosen_set_ty; } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) { err_set_ty = null; } else { - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty); } chosen = candidate; chosen_i = candidate_i + 1; @@ -30479,8 +31258,8 @@ fn resolvePeerTypes( }, .ErrorUnion => { - const chosen_payload_ty = chosen_ty.errorUnionPayload(); - const candidate_payload_ty = candidate_ty.errorUnionPayload(); + const chosen_payload_ty = chosen_ty.errorUnionPayload(mod); + const candidate_payload_ty = candidate_ty.errorUnionPayload(mod); const coerce_chosen = (try sema.coerceInMemoryAllowed(block, chosen_payload_ty, candidate_payload_ty, false, target, src, src)) == .ok; const coerce_candidate = (try sema.coerceInMemoryAllowed(block, candidate_payload_ty, chosen_payload_ty, false, target, src, src)) == .ok; @@ -30494,15 +31273,15 @@ fn resolvePeerTypes( chosen_i = candidate_i + 1; } - const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); - const candidate_set_ty = candidate_ty.errorUnionSet(); + const chosen_set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod); + const candidate_set_ty = candidate_ty.errorUnionSet(mod); if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) { err_set_ty = chosen_set_ty; } else if (.ok == try 
sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) { err_set_ty = candidate_set_ty; } else { - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty); } continue; } @@ -30510,26 +31289,26 @@ fn resolvePeerTypes( else => { if (err_set_ty) |chosen_set_ty| { - const candidate_set_ty = candidate_ty.errorUnionSet(); + const candidate_set_ty = candidate_ty.errorUnionSet(mod); if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, chosen_set_ty, candidate_set_ty, src, src)) { err_set_ty = chosen_set_ty; } else if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, candidate_set_ty, chosen_set_ty, src, src)) { err_set_ty = null; } else { - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, candidate_set_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, candidate_set_ty); } } - seen_const = seen_const or chosen_ty.isConstPtr(); + seen_const = seen_const or chosen_ty.isConstPtr(mod); chosen = candidate; chosen_i = candidate_i + 1; continue; }, }, .Pointer => { - const cand_info = candidate_ty.ptrInfo().data; + const cand_info = candidate_ty.ptrInfo(mod); switch (chosen_ty_tag) { .Pointer => { - const chosen_info = chosen_ty.ptrInfo().data; + const chosen_info = chosen_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; @@ -30537,7 +31316,7 @@ fn resolvePeerTypes( // *[N]T to []T if ((cand_info.size == .Many or cand_info.size == .Slice) and chosen_info.size == .One and - chosen_info.pointee_type.zigTypeTag() == .Array) + chosen_info.pointee_type.zigTypeTag(mod) == .Array) { // In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T` convert_to_slice = false; @@ -30546,7 +31325,7 @@ fn resolvePeerTypes( continue; } if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { // In case we see i.e.: `*[1]T`, `*[2]T`, `[*]T` @@ -30559,11 +31338,11 @@ fn resolvePeerTypes( // Keep the one whose element type can be coerced into. 
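// Illustrative aside, not part of this change: the conversions this
// pointer logic reasons about are ordinary Zig coercions observable in
// user code, e.g. a single pointer to an array coercing to a slice.
const std = @import("std");

test "*[N]T coerces to []T" {
    var buf: [3]u8 = .{ 1, 2, 3 };
    const ptr: *[3]u8 = &buf;
    const slice: []u8 = ptr; // the *[N]T => []T case handled above
    try std.testing.expectEqual(@as(usize, 3), slice.len);
}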
if (chosen_info.size == .One and cand_info.size == .One and - chosen_info.pointee_type.zigTypeTag() == .Array and - cand_info.pointee_type.zigTypeTag() == .Array) + chosen_info.pointee_type.zigTypeTag(mod) == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array) { - const chosen_elem_ty = chosen_info.pointee_type.childType(); - const cand_elem_ty = cand_info.pointee_type.childType(); + const chosen_elem_ty = chosen_info.pointee_type.childType(mod); + const cand_elem_ty = cand_info.pointee_type.childType(mod); const chosen_ok = .ok == try sema.coerceInMemoryAllowed(block, chosen_elem_ty, cand_elem_ty, chosen_info.mutable, target, src, src); if (chosen_ok) { @@ -30629,17 +31408,16 @@ fn resolvePeerTypes( } }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const chosen_ptr_ty = chosen_ty.optionalChild(&opt_child_buf); - if (chosen_ptr_ty.zigTypeTag() == .Pointer) { - const chosen_info = chosen_ptr_ty.ptrInfo().data; + const chosen_ptr_ty = chosen_ty.optionalChild(mod); + if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { + const chosen_info = chosen_ptr_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; // *[N]T to ?![*]T // *[N]T to ?![]T if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { continue; @@ -30647,16 +31425,16 @@ fn resolvePeerTypes( } }, .ErrorUnion => { - const chosen_ptr_ty = chosen_ty.errorUnionPayload(); - if (chosen_ptr_ty.zigTypeTag() == .Pointer) { - const chosen_info = chosen_ptr_ty.ptrInfo().data; + const chosen_ptr_ty = chosen_ty.errorUnionPayload(mod); + if (chosen_ptr_ty.zigTypeTag(mod) == .Pointer) { + const chosen_info = chosen_ptr_ty.ptrInfo(mod); seen_const = seen_const or !chosen_info.mutable or !cand_info.mutable; // *[N]T to E![*]T // *[N]T to E![]T if (cand_info.size == .One and - cand_info.pointee_type.zigTypeTag() == .Array and + cand_info.pointee_type.zigTypeTag(mod) == .Array and (chosen_info.size == .Many or chosen_info.size == .Slice)) { continue; @@ -30664,7 +31442,7 @@ fn resolvePeerTypes( } }, .Fn => { - if (!cand_info.mutable and cand_info.pointee_type.zigTypeTag() == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) { + if (!cand_info.mutable and cand_info.pointee_type.zigTypeTag(mod) == .Fn and .ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty, cand_info.pointee_type, target, src, src)) { chosen = candidate; chosen_i = candidate_i + 1; continue; @@ -30674,15 +31452,14 @@ fn resolvePeerTypes( } }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf); + const opt_child_ty = candidate_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, chosen_ty, opt_child_ty, false, target, src, src)) == .ok) { - seen_const = seen_const or opt_child_ty.isConstPtr(); + seen_const = seen_const or opt_child_ty.isConstPtr(mod); any_are_null = true; continue; } - seen_const = seen_const or chosen_ty.isConstPtr(); + seen_const = seen_const or chosen_ty.isConstPtr(mod); any_are_null = false; chosen = candidate; chosen_i = candidate_i + 1; @@ -30690,23 +31467,23 @@ fn resolvePeerTypes( }, .Vector => switch (chosen_ty_tag) { .Vector => { - const chosen_len = chosen_ty.vectorLen(); - const candidate_len = candidate_ty.vectorLen(); + const chosen_len = chosen_ty.vectorLen(mod); + const candidate_len = 
candidate_ty.vectorLen(mod); if (chosen_len != candidate_len) continue; - const chosen_child_ty = chosen_ty.childType(); - const candidate_child_ty = candidate_ty.childType(); - if (chosen_child_ty.zigTypeTag() == .Int and candidate_child_ty.zigTypeTag() == .Int) { - const chosen_info = chosen_child_ty.intInfo(target); - const candidate_info = candidate_child_ty.intInfo(target); + const chosen_child_ty = chosen_ty.childType(mod); + const candidate_child_ty = candidate_ty.childType(mod); + if (chosen_child_ty.zigTypeTag(mod) == .Int and candidate_child_ty.zigTypeTag(mod) == .Int) { + const chosen_info = chosen_child_ty.intInfo(mod); + const candidate_info = candidate_child_ty.intInfo(mod); if (chosen_info.bits < candidate_info.bits) { chosen = candidate; chosen_i = candidate_i + 1; } continue; } - if (chosen_child_ty.zigTypeTag() == .Float and candidate_child_ty.zigTypeTag() == .Float) { + if (chosen_child_ty.zigTypeTag(mod) == .Float and candidate_child_ty.zigTypeTag(mod) == .Float) { if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) { chosen = candidate; chosen_i = candidate_i + 1; @@ -30725,8 +31502,8 @@ fn resolvePeerTypes( .Vector => continue, else => {}, }, - .Fn => if (chosen_ty.isSinglePointer() and chosen_ty.isConstPtr() and chosen_ty.childType().zigTypeTag() == .Fn) { - if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(), candidate_ty, target, src, src)) { + .Fn => if (chosen_ty.isSinglePointer(mod) and chosen_ty.isConstPtr(mod) and chosen_ty.childType(mod).zigTypeTag(mod) == .Fn) { + if (.ok == try sema.coerceInMemoryAllowedFns(block, chosen_ty.childType(mod), candidate_ty, target, src, src)) { continue; } }, @@ -30746,8 +31523,7 @@ fn resolvePeerTypes( continue; }, .Optional => { - var opt_child_buf: Type.Payload.ElemType = undefined; - const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf); + const opt_child_ty = chosen_ty.optionalChild(mod); if ((try sema.coerceInMemoryAllowed(block, opt_child_ty, candidate_ty, false, target, src, src)) == .ok) { continue; } @@ -30759,7 +31535,7 @@ fn resolvePeerTypes( } }, .ErrorUnion => { - const payload_ty = chosen_ty.errorUnionPayload(); + const payload_ty = chosen_ty.errorUnionPayload(mod); if ((try sema.coerceInMemoryAllowed(block, payload_ty, candidate_ty, false, target, src, src)) == .ok) { continue; } @@ -30776,7 +31552,7 @@ fn resolvePeerTypes( continue; } - err_set_ty = try chosen_set_ty.errorSetMerge(sema.arena, chosen_ty); + err_set_ty = try sema.errorSetMerge(chosen_set_ty, chosen_ty); continue; } else { err_set_ty = chosen_ty; @@ -30789,28 +31565,28 @@ fn resolvePeerTypes( // At this point, we hit a compile error. We need to recover // the source locations. 
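// Illustrative sketch, not part of this change: the `msg: { ... }` blocks
// below follow a recurring Sema idiom -- allocate the root error message,
// attach notes under `errdefer` protection, then hand ownership out of the
// labeled block. `gpa.dupe` stands in for `sema.errMsg` here.
const std = @import("std");

fn buildDiagnostic(gpa: std.mem.Allocator) ![]u8 {
    const msg = msg: {
        const msg = try gpa.dupe(u8, "incompatible types");
        errdefer gpa.free(msg);
        // ... any `try` here (e.g. attaching notes) frees `msg` on failure,
        // while the successful `break` leaves it alive ...
        break :msg msg;
    };
    return msg; // the caller owns the finished message
}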
const chosen_src = candidate_srcs.resolve( - sema.gpa, - sema.mod.declPtr(block.src_decl), + mod, + mod.declPtr(block.src_decl), chosen_i, ); const candidate_src = candidate_srcs.resolve( - sema.gpa, - sema.mod.declPtr(block.src_decl), + mod, + mod.declPtr(block.src_decl), candidate_i + 1, ); const msg = msg: { const msg = try sema.errMsg(block, src, "incompatible types: '{}' and '{}'", .{ - chosen_ty.fmt(sema.mod), - candidate_ty.fmt(sema.mod), + chosen_ty.fmt(mod), + candidate_ty.fmt(mod), }); errdefer msg.destroy(sema.gpa); if (chosen_src) |src_loc| - try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(sema.mod)}); + try sema.errNote(block, src_loc, msg, "type '{}' here", .{chosen_ty.fmt(mod)}); if (candidate_src) |src_loc| - try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(sema.mod)}); + try sema.errNote(block, src_loc, msg, "type '{}' here", .{candidate_ty.fmt(mod)}); break :msg msg; }; @@ -30821,139 +31597,231 @@ fn resolvePeerTypes( if (convert_to_slice) { // turn *[N]T => []T - const chosen_child_ty = chosen_ty.childType(); - var info = chosen_ty.ptrInfo(); - info.data.sentinel = chosen_child_ty.sentinel(); - info.data.size = .Slice; - info.data.mutable = !(seen_const or chosen_child_ty.isConstPtr()); - info.data.pointee_type = chosen_child_ty.elemType2(); + const chosen_child_ty = chosen_ty.childType(mod); + var info = chosen_ty.ptrInfo(mod); + info.sentinel = chosen_child_ty.sentinel(mod); + info.size = .Slice; + info.mutable = !(seen_const or chosen_child_ty.isConstPtr(mod)); + info.pointee_type = chosen_child_ty.elemType2(mod); - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try mod.errorUnionType(set_ty, opt_ptr_ty); } if (seen_const) { // turn []T => []const T - switch (chosen_ty.zigTypeTag()) { + switch (chosen_ty.zigTypeTag(mod)) { .ErrorUnion => { - const ptr_ty = chosen_ty.errorUnionPayload(); - var info = ptr_ty.ptrInfo(); - info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + const ptr_ty = chosen_ty.errorUnionPayload(mod); + var info = ptr_ty.ptrInfo(mod); + info.mutable = false; + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; - const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(); - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + const set_ty = err_set_ty orelse chosen_ty.errorUnionSet(mod); + return try mod.errorUnionType(set_ty, opt_ptr_ty); }, .Pointer => { - var info = chosen_ty.ptrInfo(); - info.data.mutable = false; - const new_ptr_ty = try Type.ptr(sema.arena, sema.mod, info.data); + var info = chosen_ty.ptrInfo(mod); + info.mutable = false; + const new_ptr_ty = try Type.ptr(sema.arena, mod, info); const opt_ptr_ty = if (any_are_null) - try Type.optional(sema.arena, new_ptr_ty) + try Type.optional(sema.arena, new_ptr_ty, mod) else new_ptr_ty; const set_ty = err_set_ty orelse return opt_ptr_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ptr_ty, sema.mod); + return try mod.errorUnionType(set_ty, opt_ptr_ty); }, else => return 
chosen_ty, } } if (any_are_null) { - const opt_ty = switch (chosen_ty.zigTypeTag()) { + const opt_ty = switch (chosen_ty.zigTypeTag(mod)) { .Null, .Optional => chosen_ty, - else => try Type.optional(sema.arena, chosen_ty), + else => try Type.optional(sema.arena, chosen_ty, mod), }; const set_ty = err_set_ty orelse return opt_ty; - return try Type.errorUnion(sema.arena, set_ty, opt_ty, sema.mod); + return try mod.errorUnionType(set_ty, opt_ty); } - if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag()) { + if (err_set_ty) |ty| switch (chosen_ty.zigTypeTag(mod)) { .ErrorSet => return ty, .ErrorUnion => { - const payload_ty = chosen_ty.errorUnionPayload(); - return try Type.errorUnion(sema.arena, ty, payload_ty, sema.mod); + const payload_ty = chosen_ty.errorUnionPayload(mod); + return try mod.errorUnionType(ty, payload_ty); }, - else => return try Type.errorUnion(sema.arena, ty, chosen_ty, sema.mod), + else => return try mod.errorUnionType(ty, chosen_ty), }; return chosen_ty; } -pub fn resolveFnTypes(sema: *Sema, fn_info: Type.Payload.Function.Data) CompileError!void { - try sema.resolveTypeFully(fn_info.return_type); +pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void { + const mod = sema.mod; + try sema.resolveTypeFully(mod.typeToFunc(fn_ty).?.return_type.toType()); - if (sema.mod.comp.bin_file.options.error_return_tracing and fn_info.return_type.isError()) { + if (mod.comp.bin_file.options.error_return_tracing and mod.typeToFunc(fn_ty).?.return_type.toType().isError(mod)) { // Ensure the type exists so that backends can assume that. _ = try sema.getBuiltinType("StackTrace"); } - for (fn_info.param_types) |param_ty| { - try sema.resolveTypeFully(param_ty); + for (0..mod.typeToFunc(fn_ty).?.param_types.len) |i| { + try sema.resolveTypeFully(mod.typeToFunc(fn_ty).?.param_types[i].toType()); } } /// Make it so that calling hash() and eql() on `val` will not assert due /// to a type not having its layout resolved. -fn resolveLazyValue(sema: *Sema, val: Value) CompileError!void { - switch (val.tag()) { - .lazy_align => { - const ty = val.castTag(.lazy_align).?.data; - return sema.resolveTypeLayout(ty); +fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value { + const mod = sema.mod; + switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => return val, + .lazy_align, .lazy_size => return (try mod.intern(.{ .int = .{ + .ty = int.ty, + .storage = .{ .u64 = (try val.getUnsignedIntAdvanced(mod, sema)).? 
}, + } })).toValue(), }, - .lazy_size => { - const ty = val.castTag(.lazy_size).?.data; - return sema.resolveTypeLayout(ty); - }, - .comptime_field_ptr => { - const field_ptr = val.castTag(.comptime_field_ptr).?.data; - return sema.resolveLazyValue(field_ptr.field_val); - }, - .eu_payload, - .opt_payload, - => { - const sub_val = val.cast(Value.Payload.SubValue).?.data; - return sema.resolveLazyValue(sub_val); - }, - .@"union" => { - const union_val = val.castTag(.@"union").?.data; - return sema.resolveLazyValue(union_val.val); - }, - .aggregate => { - const aggregate = val.castTag(.aggregate).?.data; - for (aggregate) |elem_val| { - try sema.resolveLazyValue(elem_val); + .ptr => |ptr| { + const resolved_len = switch (ptr.len) { + .none => .none, + else => (try sema.resolveLazyValue(ptr.len.toValue())).toIntern(), + }; + switch (ptr.addr) { + .decl, .mut_decl => return if (resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = switch (ptr.addr) { + .decl => |decl| .{ .decl = decl }, + .mut_decl => |mut_decl| .{ .mut_decl = mut_decl }, + else => unreachable, + }, + .len = resolved_len, + } })).toValue(), + .comptime_field => |field_val| { + const resolved_field_val = + (try sema.resolveLazyValue(field_val.toValue())).toIntern(); + return if (resolved_field_val == field_val and resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = .{ .comptime_field = resolved_field_val }, + .len = resolved_len, + } })).toValue(); + }, + .int => |int| { + const resolved_int = (try sema.resolveLazyValue(int.toValue())).toIntern(); + return if (resolved_int == int and resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = .{ .int = resolved_int }, + .len = resolved_len, + } })).toValue(); + }, + .eu_payload, .opt_payload => |base| { + const resolved_base = (try sema.resolveLazyValue(base.toValue())).toIntern(); + return if (resolved_base == base and resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = switch (ptr.addr) { + .eu_payload => .{ .eu_payload = resolved_base }, + .opt_payload => .{ .opt_payload = resolved_base }, + else => unreachable, + }, + .len = ptr.len, + } })).toValue(); + }, + .elem, .field => |base_index| { + const resolved_base = (try sema.resolveLazyValue(base_index.base.toValue())).toIntern(); + return if (resolved_base == base_index.base and resolved_len == ptr.len) + val + else + (try mod.intern(.{ .ptr = .{ + .ty = ptr.ty, + .addr = switch (ptr.addr) { + .elem => .{ .elem = .{ + .base = resolved_base, + .index = base_index.index, + } }, + .field => .{ .field = .{ + .base = resolved_base, + .index = base_index.index, + } }, + else => unreachable, + }, + .len = ptr.len, + } })).toValue(); + }, } }, - .slice => { - const slice = val.castTag(.slice).?.data; - try sema.resolveLazyValue(slice.ptr); - return sema.resolveLazyValue(slice.len); + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => return val, + .elems => |elems| { + var resolved_elems: []InternPool.Index = &.{}; + for (elems, 0..) 
|elem, i| { + const resolved_elem = (try sema.resolveLazyValue(elem.toValue())).toIntern(); + if (resolved_elems.len == 0 and resolved_elem != elem) { + resolved_elems = try sema.arena.alloc(InternPool.Index, elems.len); + @memcpy(resolved_elems[0..i], elems[0..i]); + } + if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem; + } + return if (resolved_elems.len == 0) val else (try mod.intern(.{ .aggregate = .{ + .ty = aggregate.ty, + .storage = .{ .elems = resolved_elems }, + } })).toValue(); + }, + .repeated_elem => |elem| { + const resolved_elem = (try sema.resolveLazyValue(elem.toValue())).toIntern(); + return if (resolved_elem == elem) val else (try mod.intern(.{ .aggregate = .{ + .ty = aggregate.ty, + .storage = .{ .repeated_elem = resolved_elem }, + } })).toValue(); + }, }, - else => return, + .un => |un| { + const resolved_tag = (try sema.resolveLazyValue(un.tag.toValue())).toIntern(); + const resolved_val = (try sema.resolveLazyValue(un.val.toValue())).toIntern(); + return if (resolved_tag == un.tag and resolved_val == un.val) + val + else + (try mod.intern(.{ .un = .{ + .ty = un.ty, + .tag = resolved_tag, + .val = resolved_val, + } })).toValue(); + }, + else => return val, } } pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Struct => return sema.resolveStructLayout(ty), .Union => return sema.resolveUnionLayout(ty), .Array => { - if (ty.arrayLenIncludingSentinel() == 0) return; - const elem_ty = ty.childType(); + if (ty.arrayLenIncludingSentinel(mod) == 0) return; + const elem_ty = ty.childType(mod); return sema.resolveTypeLayout(elem_ty); }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); + const payload_ty = ty.optionalChild(mod); // In case of querying the ABI alignment of this optional, we will ask // for hasRuntimeBits() of the payload type, so we need "requires comptime" // to be known already before this function returns. @@ -30961,37 +31829,37 @@ pub fn resolveTypeLayout(sema: *Sema, ty: Type) CompileError!void { return sema.resolveTypeLayout(payload_ty); }, .ErrorUnion => { - const payload_ty = ty.errorUnionPayload(); + const payload_ty = ty.errorUnionPayload(mod); return sema.resolveTypeLayout(payload_ty); }, .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (info.is_generic) { // Resolving of generic function types is deferred to when // the function is instantiated. 
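// Illustrative aside, not part of this change: a generic function type
// still contains `generic_poison` placeholders, so there is no concrete
// layout to resolve yet; instantiation substitutes real types first. For
// example, given
//
//     fn id(comptime T: type, x: T) T { return x; }
//
// `T` only acquires a layout at a concrete call such as `id(u32, 5)`.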
return; } for (info.param_types) |param_ty| { - try sema.resolveTypeLayout(param_ty); + try sema.resolveTypeLayout(param_ty.toType()); } - try sema.resolveTypeLayout(info.return_type); + try sema.resolveTypeLayout(info.return_type.toType()); }, else => {}, } } fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - if (resolved_ty.castTag(.@"struct")) |payload| { - const struct_obj = payload.data; + if (mod.typeToStruct(resolved_ty)) |struct_obj| { switch (struct_obj.status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { const msg = try Module.ErrorMsg.create( sema.gpa, - struct_obj.srcLoc(sema.mod), + struct_obj.srcLoc(mod), "struct '{}' depends on itself", - .{ty.fmt(sema.mod)}, + .{ty.fmt(mod)}, ); return sema.failWithOwnedErrorMsg(msg); }, @@ -31015,35 +31883,27 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { } if (struct_obj.layout == .Packed) { - try semaBackingIntType(sema.mod, struct_obj); + try semaBackingIntType(mod, struct_obj); } struct_obj.status = .have_layout; _ = try sema.resolveTypeRequiresComptime(resolved_ty); - if (struct_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) { + if (struct_obj.assumed_runtime_bits and !(try sema.typeHasRuntimeBits(resolved_ty))) { const msg = try Module.ErrorMsg.create( sema.gpa, - struct_obj.srcLoc(sema.mod), + struct_obj.srcLoc(mod), "struct layout depends on it having runtime bits", .{}, ); return sema.failWithOwnedErrorMsg(msg); } - if (struct_obj.layout == .Auto and sema.mod.backendSupportsFeature(.field_reordering)) { - const optimized_order = if (struct_obj.owner_decl == sema.owner_decl_index) - try sema.perm_arena.alloc(u32, struct_obj.fields.count()) - else blk: { - const decl = sema.mod.declPtr(struct_obj.owner_decl); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(sema.mod.gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); - break :blk try decl_arena_allocator.alloc(u32, struct_obj.fields.count()); - }; + if (struct_obj.layout == .Auto and mod.backendSupportsFeature(.field_reordering)) { + const optimized_order = try mod.tmp_hack_arena.allocator().alloc(u32, struct_obj.fields.count()); for (struct_obj.fields.values(), 0..) 
|field, i| { - optimized_order[i] = if (field.ty.hasRuntimeBits()) + optimized_order[i] = if (try sema.typeHasRuntimeBits(field.ty)) @intCast(u32, i) else Module.Struct.omitted_field; @@ -31054,11 +31914,11 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { sema: *Sema, fn lessThan(ctx: @This(), a: u32, b: u32) bool { + const m = ctx.sema.mod; if (a == Module.Struct.omitted_field) return false; if (b == Module.Struct.omitted_field) return true; - const target = ctx.sema.mod.getTarget(); - return ctx.struct_obj.fields.values()[a].ty.abiAlignment(target) > - ctx.struct_obj.fields.values()[b].ty.abiAlignment(target); + return ctx.struct_obj.fields.values()[a].ty.abiAlignment(m) > + ctx.struct_obj.fields.values()[b].ty.abiAlignment(m); } }; mem.sort(u32, optimized_order, AlignSortContext{ @@ -31073,20 +31933,16 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void { fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!void { const gpa = mod.gpa; - const target = mod.getTarget(); var fields_bit_sum: u64 = 0; for (struct_obj.fields.values()) |field| { - fields_bit_sum += field.ty.bitSize(target); + fields_bit_sum += field.ty.bitSize(mod); } const decl_index = struct_obj.owner_decl; const decl = mod.declPtr(decl_index); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); - const zir = struct_obj.namespace.file_scope.zir; + const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -31103,28 +31959,33 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var block: Block = .{ .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &struct_obj.namespace, + .namespace = struct_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -31148,21 +32009,27 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi }; try sema.checkBackingIntType(&block, backing_int_src, backing_int_ty, fields_bit_sum); - struct_obj.backing_int_ty = try backing_int_ty.copy(decl_arena_allocator); + struct_obj.backing_int_ty = backing_int_ty; try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } } else { if (fields_bit_sum > std.math.maxInt(u16)) { var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = undefined, - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, 
.owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = undefined, }; defer sema.deinit(); @@ -31170,7 +32037,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &struct_obj.namespace, + .namespace = struct_obj.namespace, .wip_capture_scope = undefined, .instructions = .{}, .inlining = null, @@ -31178,32 +32045,29 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi }; return sema.fail(&block, LazySrcLoc.nodeOffset(0), "size of packed struct '{d}' exceeds maximum bit width of 65535", .{fields_bit_sum}); } - var buf: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, fields_bit_sum), - }; - struct_obj.backing_int_ty = try Type.initPayload(&buf.base).copy(decl_arena_allocator); + struct_obj.backing_int_ty = try mod.intType(.unsigned, @intCast(u16, fields_bit_sum)); } } fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void { - const target = sema.mod.getTarget(); + const mod = sema.mod; - if (!backing_int_ty.isInt()) { + if (!backing_int_ty.isInt(mod)) { return sema.fail(block, src, "expected backing integer type, found '{}'", .{backing_int_ty.fmt(sema.mod)}); } - if (backing_int_ty.bitSize(target) != fields_bit_sum) { + if (backing_int_ty.bitSize(mod) != fields_bit_sum) { return sema.fail( block, src, "backing integer type '{}' has bit size {} but the struct fields have a total bit size of {}", - .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(target), fields_bit_sum }, + .{ backing_int_ty.fmt(sema.mod), backing_int_ty.bitSize(mod), fields_bit_sum }, ); } } fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - if (!ty.isIndexable()) { + const mod = sema.mod; + if (!ty.isIndexable(mod)) { const msg = msg: { const msg = try sema.errMsg(block, src, "type '{}' does not support indexing", .{ty.fmt(sema.mod)}); errdefer msg.destroy(sema.gpa); @@ -31215,12 +32079,13 @@ fn checkIndexable(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { } fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void { - if (ty.zigTypeTag() == .Pointer) { - switch (ty.ptrSize()) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Pointer) { + switch (ty.ptrSize(mod)) { .Slice, .Many, .C => return, .One => { - const elem_ty = ty.childType(); - if (elem_ty.zigTypeTag() == .Array) return; + const elem_ty = ty.childType(mod); + if (elem_ty.zigTypeTag(mod) == .Array) return; // TODO https://github.com/ziglang/zig/issues/15479 // if (elem_ty.isTuple()) return; }, @@ -31236,8 +32101,9 @@ fn checkMemOperand(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) !void } fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(resolved_ty).?; switch (union_obj.status) { .none, .have_field_types => {}, .field_types_wip, .layout_wip => { @@ -31270,7 +32136,7 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { union_obj.status = .have_layout; _ = try sema.resolveTypeRequiresComptime(resolved_ty); - if (union_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) { + if (union_obj.assumed_runtime_bits and 
!(try sema.typeHasRuntimeBits(resolved_ty))) { const msg = try Module.ErrorMsg.create( sema.gpa, union_obj.srcLoc(sema.mod), @@ -31285,188 +32151,154 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void { // for hasRuntimeBits() of each field, so we need "requires comptime" // to be known already before this function returns. pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { - return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, - .anyopaque, - .bool, - .void, - .anyerror, - .noreturn, - .@"anyframe", - .null, - .undefined, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, - .empty_struct_literal, - .empty_struct, - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - .@"opaque", - .generic_poison, - .array_u8, - .array_u8_sentinel_0, - .int_signed, - .int_unsigned, - .enum_simple, - => false, + const mod = sema.mod; - .single_const_pointer_to_comptime_int, - .type, - .comptime_int, - .comptime_float, - .enum_literal, - .type_info, - // These are function bodies, not function pointers. - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - => true, - - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - - .array, - .array_sentinel, - .vector, - => return sema.resolveTypeRequiresComptime(ty.childType()), - - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => { - const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { - return child_ty.fnInfo().is_generic; - } else { - return sema.resolveTypeRequiresComptime(child_ty); - } - }, - - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, - => { - var buf: Type.Payload.ElemType = undefined; - return sema.resolveTypeRequiresComptime(ty.optionalChild(&buf)); - }, - - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) 
|field_ty, i| { - const have_comptime_val = tuple.values[i].tag() != .unreachable_value; - if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty)) { - return true; + return switch (ty.toIntern()) { + .empty_struct_type => false, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => false, + .ptr_type => |ptr_type| { + const child_ty = ptr_type.child.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) { + return mod.typeToFunc(child_ty).?.is_generic; + } else { + return sema.resolveTypeRequiresComptime(child_ty); } - } - return false; - }, + }, + .anyframe_type => |child| { + if (child == .none) return false; + return sema.resolveTypeRequiresComptime(child.toType()); + }, + .array_type => |array_type| return sema.resolveTypeRequiresComptime(array_type.child.toType()), + .vector_type => |vector_type| return sema.resolveTypeRequiresComptime(vector_type.child.toType()), + .opt_type => |child| return sema.resolveTypeRequiresComptime(child.toType()), + .error_union_type => |error_union_type| return sema.resolveTypeRequiresComptime(error_union_type.payload_type.toType()), + .error_set_type, .inferred_error_set_type => false, - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - var requires_comptime = false; - struct_obj.requires_comptime = .wip; - for (struct_obj.fields.values()) |field| { - if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; - } - if (requires_comptime) { - struct_obj.requires_comptime = .yes; - } else { - struct_obj.requires_comptime = .no; - } - return requires_comptime; - }, - } - }, + .func_type => true, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - switch (union_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - var requires_comptime = false; - union_obj.requires_comptime = .wip; - for (union_obj.fields.values()) |field| { - if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; - } - if (requires_comptime) { - union_obj.requires_comptime = .yes; - } else { - union_obj.requires_comptime = .no; - } - return requires_comptime; - }, - } - }, + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, - .error_union => return sema.resolveTypeRequiresComptime(ty.errorUnionPayload()), - .anyframe_T => { - const child_ty = ty.castTag(.anyframe_T).?.data; - return sema.resolveTypeRequiresComptime(child_ty); - }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return sema.resolveTypeRequiresComptime(tag_ty); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return sema.resolveTypeRequiresComptime(tag_ty); + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + switch 
(struct_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + var requires_comptime = false; + struct_obj.requires_comptime = .wip; + for (struct_obj.fields.values()) |field| { + if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; + } + if (requires_comptime) { + struct_obj.requires_comptime = .yes; + } else { + struct_obj.requires_comptime = .no; + } + return requires_comptime; + }, + } + }, + + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, field_val| { + const have_comptime_val = field_val != .none; + if (!have_comptime_val and try sema.resolveTypeRequiresComptime(field_ty.toType())) { + return true; + } + } + return false; + }, + + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + switch (union_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + var requires_comptime = false; + union_obj.requires_comptime = .wip; + for (union_obj.fields.values()) |field| { + if (try sema.resolveTypeRequiresComptime(field.ty)) requires_comptime = true; + } + if (requires_comptime) { + union_obj.requires_comptime = .yes; + } else { + union_obj.requires_comptime = .no; + } + return requires_comptime; + }, + } + }, + + .opaque_type => false, + + .enum_type => |enum_type| try sema.resolveTypeRequiresComptime(enum_type.tag_ty.toType()), + + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, }, }; } @@ -31474,40 +32306,38 @@ pub fn resolveTypeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { /// Returns `error.AnalysisFail` if any of the types (recursively) failed to /// be resolved. pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { - switch (ty.zigTypeTag()) { + const mod = sema.mod; + switch (ty.zigTypeTag(mod)) { .Pointer => { - const child_ty = try sema.resolveTypeFields(ty.childType()); + const child_ty = try sema.resolveTypeFields(ty.childType(mod)); return sema.resolveTypeFully(child_ty); }, - .Struct => switch (ty.tag()) { - .@"struct" => return sema.resolveStructFully(ty), - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - + .Struct => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .struct_type => return sema.resolveStructFully(ty), + .anon_struct_type => |tuple| { for (tuple.types) |field_ty| { - try sema.resolveTypeFully(field_ty); + try sema.resolveTypeFully(field_ty.toType()); } }, else => {}, }, .Union => return sema.resolveUnionFully(ty), - .Array => return sema.resolveTypeFully(ty.childType()), + .Array => return sema.resolveTypeFully(ty.childType(mod)), .Optional => { - var buf: Type.Payload.ElemType = undefined; - return sema.resolveTypeFully(ty.optionalChild(&buf)); + return sema.resolveTypeFully(ty.optionalChild(mod)); }, - .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload()), + .ErrorUnion => return sema.resolveTypeFully(ty.errorUnionPayload(mod)), .Fn => { - const info = ty.fnInfo(); + const info = mod.typeToFunc(ty).?; if (info.is_generic) { // Resolving of generic function types is deferred to when // the function is instantiated. 
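// Illustrative aside, not part of this change: the `requires_comptime`
// queries above run a tiny state machine (.unknown -> .wip -> .yes/.no) so
// that self-referential types terminate: a field whose type leads back to
// the struct or union under analysis observes `.wip`, is answered "no",
// and the cycle is broken instead of recursing forever.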
return; } for (info.param_types) |param_ty| { - try sema.resolveTypeFully(param_ty); + try sema.resolveTypeFully(param_ty.toType()); } - try sema.resolveTypeFully(info.return_type); + try sema.resolveTypeFully(info.return_type.toType()); }, else => {}, } @@ -31516,9 +32346,9 @@ pub fn resolveTypeFully(sema: *Sema, ty: Type) CompileError!void { fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { try sema.resolveStructLayout(ty); + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - const payload = resolved_ty.castTag(.@"struct").?; - const struct_obj = payload.data; + const struct_obj = mod.typeToStruct(resolved_ty).?; switch (struct_obj.status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, @@ -31546,8 +32376,9 @@ fn resolveStructFully(sema: *Sema, ty: Type) CompileError!void { fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { try sema.resolveUnionLayout(ty); + const mod = sema.mod; const resolved_ty = try sema.resolveTypeFields(ty); - const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(resolved_ty).?; switch (union_obj.status) { .none, .have_field_types, .field_types_wip, .layout_wip, .have_layout => {}, .fully_resolved_wip, .fully_resolved => return, @@ -31572,30 +32403,111 @@ fn resolveUnionFully(sema: *Sema, ty: Type) CompileError!void { } pub fn resolveTypeFields(sema: *Sema, ty: Type) CompileError!Type { - switch (ty.tag()) { - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - try sema.resolveTypeFieldsStruct(ty, struct_obj); - return ty; - }, - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - try sema.resolveTypeFieldsUnion(ty, union_obj); - return ty; - }, - .type_info => return sema.getBuiltinType("Type"), - .extern_options => return sema.getBuiltinType("ExternOptions"), - .export_options => return sema.getBuiltinType("ExportOptions"), - .atomic_order => return sema.getBuiltinType("AtomicOrder"), - .atomic_rmw_op => return sema.getBuiltinType("AtomicRmwOp"), - .calling_convention => return sema.getBuiltinType("CallingConvention"), - .address_space => return sema.getBuiltinType("AddressSpace"), - .float_mode => return sema.getBuiltinType("FloatMode"), - .reduce_op => return sema.getBuiltinType("ReduceOp"), - .modifier => return sema.getBuiltinType("CallModifier"), - .prefetch_options => return sema.getBuiltinType("PrefetchOptions"), + const mod = sema.mod; - else => return ty, + switch (ty.toIntern()) { + .var_args_param_type => unreachable, + + .none => unreachable, + + .u1_type, + .u8_type, + .i8_type, + .u16_type, + .i16_type, + .u29_type, + .u32_type, + .i32_type, + .u64_type, + .i64_type, + .u80_type, + .u128_type, + .i128_type, + .usize_type, + .isize_type, + .c_char_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f80_type, + .f128_type, + .anyopaque_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .anyframe_type, + .null_type, + .undefined_type, + .enum_literal_type, + .manyptr_u8_type, + .manyptr_const_u8_type, + .manyptr_const_u8_sentinel_0_type, + .single_const_pointer_to_comptime_int_type, + .slice_const_u8_type, + .slice_const_u8_sentinel_0_type, + .anyerror_void_error_union_type, + .generic_poison_type, + 
.empty_struct_type,
+        => return ty,
+
+        .undef => unreachable,
+        .zero => unreachable,
+        .zero_usize => unreachable,
+        .zero_u8 => unreachable,
+        .one => unreachable,
+        .one_usize => unreachable,
+        .one_u8 => unreachable,
+        .four_u8 => unreachable,
+        .negative_one => unreachable,
+        .calling_convention_c => unreachable,
+        .calling_convention_inline => unreachable,
+        .void_value => unreachable,
+        .unreachable_value => unreachable,
+        .null_value => unreachable,
+        .bool_true => unreachable,
+        .bool_false => unreachable,
+        .empty_struct => unreachable,
+        .generic_poison => unreachable,
+
+        .type_info_type => return sema.getBuiltinType("Type"),
+        .extern_options_type => return sema.getBuiltinType("ExternOptions"),
+        .export_options_type => return sema.getBuiltinType("ExportOptions"),
+        .atomic_order_type => return sema.getBuiltinType("AtomicOrder"),
+        .atomic_rmw_op_type => return sema.getBuiltinType("AtomicRmwOp"),
+        .calling_convention_type => return sema.getBuiltinType("CallingConvention"),
+        .address_space_type => return sema.getBuiltinType("AddressSpace"),
+        .float_mode_type => return sema.getBuiltinType("FloatMode"),
+        .reduce_op_type => return sema.getBuiltinType("ReduceOp"),
+        .call_modifier_type => return sema.getBuiltinType("CallModifier"),
+        .prefetch_options_type => return sema.getBuiltinType("PrefetchOptions"),
+
+        _ => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return ty;
+                try sema.resolveTypeFieldsStruct(ty, struct_obj);
+                return ty;
+            },
+            .union_type => |union_type| {
+                const union_obj = mod.unionPtr(union_type.index);
+                try sema.resolveTypeFieldsUnion(ty, union_obj);
+                return ty;
+            },
+
+            else => return ty,
+        },
     }
 }

@@ -31682,35 +32594,39 @@ fn resolveInferredErrorSet(
     sema: *Sema,
     block: *Block,
     src: LazySrcLoc,
-    ies: *Module.Fn.InferredErrorSet,
+    ies_index: Module.Fn.InferredErrorSet.Index,
 ) CompileError!void {
+    const mod = sema.mod;
+    const ies = mod.inferredErrorSetPtr(ies_index);
+
     if (ies.is_resolved) return;

-    if (ies.func.state == .in_progress) {
+    const func = mod.funcPtr(ies.func);
+    if (func.state == .in_progress) {
         return sema.fail(block, src, "unable to resolve inferred error set", .{});
     }

     // In order to ensure that all dependencies are properly added to the set, we
     // need to ensure that the function body of the inferred error set is analyzed.
     // However, in the case of comptime/inline function calls with inferred error sets,
-    // each call gets a new InferredErrorSet object, which points to the same
-    // `*Module.Fn`. Not only is the function not relevant to the inferred error set
+    // each call gets a new InferredErrorSet object, which contains the same
+    // `Module.Fn.Index`. Not only is the function not relevant to the inferred error set
     // in this case, it may be a generic function which would cause an assertion failure
     // if we called `ensureFuncBodyAnalyzed` on it here.
-    const ies_func_owner_decl = sema.mod.declPtr(ies.func.owner_decl);
-    const ies_func_info = ies_func_owner_decl.ty.fnInfo();
+    const ies_func_owner_decl = mod.declPtr(func.owner_decl);
+    const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?;
     // If the ies is declared by an inline function with a generic return type, the
     // return_type should be generic_poison, because an inline function does not create
     // a new declaration and the ies has already been filled by analyzeCall, so we can
     // simply skip this case here.
- if (ies_func_info.return_type.tag() == .generic_poison) { + if (ies_func_info.return_type == .generic_poison_type) { assert(ies_func_info.cc == .Inline); - } else if (ies_func_info.return_type.errorUnionSet().castTag(.error_set_inferred).?.data == ies) { + } else if (mod.typeToInferredErrorSet(ies_func_info.return_type.toType().errorUnionSet(mod)).? == ies) { if (ies_func_info.is_generic) { const msg = msg: { const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{}); errdefer msg.destroy(sema.gpa); - try sema.mod.errNoteNonLazy(ies_func_owner_decl.srcLoc(), msg, "generic function declared here", .{}); + try sema.mod.errNoteNonLazy(ies_func_owner_decl.srcLoc(mod), msg, "generic function declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); @@ -31722,10 +32638,11 @@ fn resolveInferredErrorSet( ies.is_resolved = true; - for (ies.inferred_error_sets.keys()) |other_ies| { - if (ies == other_ies) continue; - try sema.resolveInferredErrorSet(block, src, other_ies); + for (ies.inferred_error_sets.keys()) |other_ies_index| { + if (ies_index == other_ies_index) continue; + try sema.resolveInferredErrorSet(block, src, other_ies_index); + const other_ies = mod.inferredErrorSetPtr(other_ies_index); for (other_ies.errors.keys()) |key| { try ies.errors.put(sema.gpa, key, {}); } @@ -31740,15 +32657,17 @@ fn resolveInferredErrorSetTy( src: LazySrcLoc, ty: Type, ) CompileError!void { - if (ty.castTag(.error_set_inferred)) |inferred| { - try sema.resolveInferredErrorSet(block, src, inferred.data); + const mod = sema.mod; + if (mod.typeToInferredErrorSetIndex(ty).unwrap()) |ies_index| { + try sema.resolveInferredErrorSet(block, src, ies_index); } } fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void { const gpa = mod.gpa; + const ip = &mod.intern_pool; const decl_index = struct_obj.owner_decl; - const zir = struct_obj.namespace.file_scope.zir; + const zir = mod.namespacePtr(struct_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[struct_obj.zir_index].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -31794,35 +32713,37 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } const decl = mod.declPtr(decl_index); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var block_scope: Block = .{ .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &struct_obj.namespace, + .namespace = struct_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -31834,13 
+32755,13 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } struct_obj.fields = .{}; - try struct_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); + try struct_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); const Field = struct { type_body_len: u32 = 0, align_body_len: u32 = 0, init_body_len: u32 = 0, - type_ref: Air.Inst.Ref = .none, + type_ref: Zir.Inst.Ref = .none, }; const fields = try sema.arena.alloc(Field, fields_len); var any_inits = false; @@ -31885,30 +32806,30 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void extra_index += 1; // This string needs to outlive the ZIR code. - const field_name = if (field_name_zir) |some| - try decl_arena_allocator.dupe(u8, some) + const field_name = try ip.getOrPutString(gpa, if (field_name_zir) |s| + s else - try std.fmt.allocPrint(decl_arena_allocator, "{d}", .{field_i}); + try std.fmt.allocPrint(sema.arena, "{d}", .{field_i})); const gop = struct_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const msg = msg: { - const field_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{s}'", .{field_name}); + const field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i }).lazy; + const msg = try sema.errMsg(&block_scope, field_src, "duplicate struct field: '{}'", .{field_name.fmt(ip)}); errdefer msg.destroy(gpa); const prev_field_index = struct_obj.fields.getIndex(field_name).?; - const prev_field_src = struct_obj.fieldSrcLoc(sema.mod, .{ .index = prev_field_index }); - try sema.mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); + const prev_field_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = prev_field_index }); + try mod.errNoteNonLazy(prev_field_src, msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "struct declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } gop.value_ptr.* = .{ - .ty = Type.initTag(.noreturn), + .ty = Type.noreturn, .abi_align = 0, - .default_val = Value.initTag(.unreachable_value), + .default_val = .none, .is_comptime = is_comptime, .offset = undefined, }; @@ -31934,7 +32855,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void if (zir_field.type_ref != .none) { break :ty sema.resolveType(&block_scope, .unneeded, zir_field.type_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -31950,7 +32871,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const ty_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index); break :ty sema.analyzeAsType(&block_scope, .unneeded, ty_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -31960,16 +32881,16 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void else => |e| return e, }; }; - if (field_ty.tag() == .generic_poison) { + if (field_ty.isGenericPoison()) { return error.GenericPoison; } const field = &struct_obj.fields.values()[field_i]; - field.ty = try field_ty.copy(decl_arena_allocator); + field.ty = field_ty; - if 
(field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -31981,9 +32902,9 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void }; return sema.failWithOwnedErrorMsg(msg); } - if (field_ty.zigTypeTag() == .NoReturn) { + if (field_ty.zigTypeTag(mod) == .NoReturn) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -31997,11 +32918,11 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void } if (struct_obj.layout == .Extern and !try sema.validateExternType(field.ty, .struct_field)) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field.ty, .struct_field); @@ -32010,13 +32931,13 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty))) { + } else if (struct_obj.layout == .Packed and !(validatePackedType(field.ty, mod))) { const msg = msg: { - const ty_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed structs cannot contain fields of type '{}'", .{field.ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, ty_src, field.ty); @@ -32033,7 +32954,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const align_ref = try sema.resolveBody(&block_scope, body, struct_obj.zir_index); field.abi_align = sema.analyzeAsAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const align_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const align_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .alignment, }).lazy; @@ -32061,7 +32982,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void const field = &struct_obj.fields.values()[field_i]; const coerced = sema.coerce(&block_scope, field.ty, init, .unneeded) catch |err| switch (err) { error.NeededSourceLocation => { - const init_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = .value, }).lazy; @@ -32071,17 +32992,21 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void else => |e| return e, }; const default_val = (try sema.resolveMaybeUndefVal(coerced)) orelse { - const init_src = struct_obj.fieldSrcLoc(sema.mod, .{ + const init_src = mod.fieldSrcLoc(struct_obj.owner_decl, .{ .index = field_i, .range = 
.value, }).lazy; return sema.failWithNeededComptime(&block_scope, init_src, "struct field default value must be comptime-known"); }; - field.default_val = try default_val.copy(decl_arena_allocator); + field.default_val = try default_val.intern(field.ty, mod); } } } try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } struct_obj.have_field_inits = true; } @@ -32091,8 +33016,9 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { defer tracy.end(); const gpa = mod.gpa; + const ip = &mod.intern_pool; const decl_index = union_obj.owner_decl; - const zir = union_obj.namespace.file_scope.zir; + const zir = mod.namespacePtr(union_obj.namespace).file_scope.zir; const extended = zir.instructions.items(.data)[union_obj.zir_index].extended; assert(extended.opcode == .union_decl); const small = @bitCast(Zir.Inst.UnionDecl.Small, extended.small); @@ -32134,35 +33060,37 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { extra_index += body.len; const decl = mod.declPtr(decl_index); - var decl_arena: std.heap.ArenaAllocator = undefined; - const decl_arena_allocator = decl.value_arena.?.acquire(gpa, &decl_arena); - defer decl.value_arena.?.release(&decl_arena); var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); + var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); + defer comptime_mutable_decls.deinit(); + var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = analysis_arena.allocator(), - .perm_arena = decl_arena_allocator, .code = zir, .owner_decl = decl, .owner_decl_index = decl_index, .func = null, + .func_index = .none, .fn_ret_ty = Type.void, .owner_func = null, + .owner_func_index = .none, + .comptime_mutable_decls = &comptime_mutable_decls, }; defer sema.deinit(); - var wip_captures = try WipCaptureScope.init(gpa, decl_arena_allocator, decl.src_scope); + var wip_captures = try WipCaptureScope.init(gpa, decl.src_scope); defer wip_captures.deinit(); var block_scope: Block = .{ .parent = null, .sema = &sema, .src_decl = decl_index, - .namespace = &union_obj.namespace, + .namespace = union_obj.namespace, .wip_capture_scope = wip_captures.scope, .instructions = .{}, .inlining = null, @@ -32178,66 +33106,61 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } try wip_captures.finalize(); + for (comptime_mutable_decls.items) |ct_decl_index| { + const ct_decl = mod.declPtr(ct_decl_index); + try ct_decl.intern(mod); + } - try union_obj.fields.ensureTotalCapacity(decl_arena_allocator, fields_len); + try union_obj.fields.ensureTotalCapacity(mod.tmp_hack_arena.allocator(), fields_len); var int_tag_ty: Type = undefined; - var enum_field_names: ?*Module.EnumNumbered.NameMap = null; - var enum_value_map: ?*Module.EnumNumbered.ValueMap = null; - var tag_ty_field_names: ?Module.EnumFull.NameMap = null; + var enum_field_names: []InternPool.NullTerminatedString = &.{}; + var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}; + var explicit_tags_seen: []bool = &.{}; if (tag_type_ref != .none) { const tag_ty_src: LazySrcLoc = .{ .node_offset_container_tag = src.node_offset.x }; const provided_ty = try sema.resolveType(&block_scope, tag_ty_src, tag_type_ref); if (small.auto_enum_tag) { // The provided type is an integer type and we must construct the enum tag type here. 
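This is the `union(enum(T))` case, where the compiler itself must construct the tag enum backed by the provided integer type. A user-level illustration of what the checks below enforce (standard Zig semantics, mirroring the compiler's own behavior tests; names are illustrative):

const std = @import("std");

const MultipleChoice = union(enum(u32)) {
    a: void = 20,
    b: void = 40,
    c: u8 = 60,
};

test "auto-generated tag enum is backed by the provided integer type" {
    const Tag = @typeInfo(MultipleChoice).Union.tag_type.?;
    try std.testing.expect(@typeInfo(Tag).Enum.tag_type == u32);
    try std.testing.expect(@enumToInt(@as(Tag, .b)) == 40);
    // Declaring this union as `union(enum(u1))` would instead trip the
    // "specified integer tag type cannot represent every field" error below.
}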
int_tag_ty = provided_ty; - if (int_tag_ty.zigTypeTag() != .Int and int_tag_ty.zigTypeTag() != .ComptimeInt) { - return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(sema.mod)}); + if (int_tag_ty.zigTypeTag(mod) != .Int and int_tag_ty.zigTypeTag(mod) != .ComptimeInt) { + return sema.fail(&block_scope, tag_ty_src, "expected integer tag type, found '{}'", .{int_tag_ty.fmt(mod)}); } if (fields_len > 0) { - var field_count_val: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = fields_len - 1, - }; - if (!(try sema.intFitsInType(Value.initPayload(&field_count_val.base), int_tag_ty, null))) { + const field_count_val = try mod.intValue(Type.comptime_int, fields_len - 1); + if (!(try sema.intFitsInType(field_count_val, int_tag_ty, null))) { const msg = msg: { const msg = try sema.errMsg(&block_scope, tag_ty_src, "specified integer tag type cannot represent every field", .{}); errdefer msg.destroy(sema.gpa); try sema.errNote(&block_scope, tag_ty_src, msg, "type '{}' cannot fit values in range 0...{d}", .{ - int_tag_ty.fmt(sema.mod), + int_tag_ty.fmt(mod), fields_len - 1, }); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } + enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); + try enum_field_vals.ensureTotalCapacity(sema.arena, fields_len); } - union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, fields_len, provided_ty, union_obj); - const enum_obj = union_obj.tag_ty.castTag(.enum_numbered).?.data; - enum_field_names = &enum_obj.fields; - enum_value_map = &enum_obj.values; } else { // The provided type is the enum tag type. - union_obj.tag_ty = try provided_ty.copy(decl_arena_allocator); - if (union_obj.tag_ty.zigTypeTag() != .Enum) { - return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(sema.mod)}); - } + union_obj.tag_ty = provided_ty; + const enum_type = switch (ip.indexToKey(union_obj.tag_ty.toIntern())) { + .enum_type => |x| x, + else => return sema.fail(&block_scope, tag_ty_src, "expected enum tag type, found '{}'", .{union_obj.tag_ty.fmt(mod)}), + }; // The fields of the union must match the enum exactly. - // Store a copy of the enum field names so we can check for - // missing or extraneous fields later. - tag_ty_field_names = try union_obj.tag_ty.enumFields().clone(sema.arena); + // A flag per field is used to check for missing and extraneous fields. + explicit_tags_seen = try sema.arena.alloc(bool, enum_type.names.len); + @memset(explicit_tags_seen, false); } } else { // If auto_enum_tag is false, this is an untagged union. However, for semantic analysis // purposes, we still auto-generate an enum tag type the same way. That the union is // untagged is represented by the Type tag (union vs union_tagged). 
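The other branch, `union(E)` with an explicit enum, is validated field-by-field further down: every union field must name a field of `E`, and `explicit_tags_seen` later reports enum fields the union never mentions. A hedged sketch under standard Zig semantics:

const std = @import("std");

const Tag = enum { a, b };

const U = union(Tag) {
    a: u32,
    b: void, // omitting `b` would fail with "enum field(s) missing in union"
    // c: f32, // would fail: no field named 'c' in enum 'Tag'
};

comptime {
    // The explicit enum is used as-is; no tag type is generated.
    std.debug.assert(@typeInfo(U).Union.tag_type.? == Tag);
}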
- union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, fields_len, union_obj); - enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields; - } - - if (fields_len == 0) { - return; + enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); } const bits_per_field = 4; @@ -32281,17 +33204,17 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :blk align_ref; } else .none; - const tag_ref: Zir.Inst.Ref = if (has_tag) blk: { + const tag_ref: Air.Inst.Ref = if (has_tag) blk: { const tag_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); extra_index += 1; break :blk try sema.resolveInst(tag_ref); } else .none; - if (enum_value_map) |map| { - const copied_val = if (tag_ref != .none) blk: { + if (enum_field_vals.capacity() > 0) { + const enum_tag_val = if (tag_ref != .none) blk: { const val = sema.semaUnionFieldVal(&block_scope, .unneeded, int_tag_ty, tag_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const val_src = union_obj.fieldSrcLoc(sema.mod, .{ + const val_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .value, }).lazy; @@ -32302,27 +33225,22 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { }; last_tag_val = val; - // This puts the memory into the union arena, not the enum arena, but - // it is OK since they share the same lifetime. - break :blk try val.copy(decl_arena_allocator); + break :blk val; } else blk: { const val = if (last_tag_val) |val| - try sema.intAdd(val, Value.one, int_tag_ty) + try sema.intAdd(val, Value.one_comptime_int, int_tag_ty, undefined) else - Value.zero; + try mod.intValue(int_tag_ty, 0); last_tag_val = val; - break :blk try val.copy(decl_arena_allocator); + break :blk val; }; - const gop = map.getOrPutAssumeCapacityContext(copied_val, .{ - .ty = int_tag_ty, - .mod = mod, - }); + const gop = enum_field_vals.getOrPutAssumeCapacity(enum_tag_val.toIntern()); if (gop.found_existing) { - const field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const other_field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = gop.index }).lazy; + const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; + const other_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = gop.index }).lazy; const msg = msg: { - const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{copied_val.fmtValue(int_tag_ty, sema.mod)}); + const msg = try sema.errMsg(&block_scope, field_src, "enum tag value {} already taken", .{enum_tag_val.fmtValue(int_tag_ty, mod)}); errdefer msg.destroy(gpa); try sema.errNote(&block_scope, other_field_src, msg, "other occurrence here", .{}); break :msg msg; @@ -32332,19 +33250,19 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } // This string needs to outlive the ZIR code. 
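`last_tag_val` above implements the implicit-tag rule: an omitted value is the previous one plus one (zero for the first field), and the `enum_field_vals` deduplication turns a collision into "enum tag value N already taken". Illustrated under standard Zig semantics:

const std = @import("std");

const Numbered = union(enum(u8)) {
    a: void = 1,
    b: void, // implicitly 2: previous tag value + 1
    c: void = 7,
    d: void, // implicitly 8
    // e: void = 2, // would fail: enum tag value 2 already taken
};

test "omitted tag values continue from the previous one" {
    const Tag = @typeInfo(Numbered).Union.tag_type.?;
    try std.testing.expect(@enumToInt(@as(Tag, .b)) == 2);
    try std.testing.expect(@enumToInt(@as(Tag, .d)) == 8);
}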
- const field_name = try decl_arena_allocator.dupe(u8, field_name_zir); - if (enum_field_names) |set| { - set.putAssumeCapacity(field_name, {}); + const field_name = try ip.getOrPutString(gpa, field_name_zir); + if (enum_field_names.len != 0) { + enum_field_names[field_i] = field_name; } const field_ty: Type = if (!has_type) Type.void else if (field_type_ref == .none) - Type.initTag(.noreturn) + Type.noreturn else sema.resolveType(&block_scope, .unneeded, field_type_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32354,46 +33272,54 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { else => |e| return e, }; - if (field_ty.tag() == .generic_poison) { + if (field_ty.isGenericPoison()) { return error.GenericPoison; } const gop = union_obj.fields.getOrPutAssumeCapacity(field_name); if (gop.found_existing) { const msg = msg: { - const field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = field_i }).lazy; - const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{s}'", .{field_name}); + const field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i }).lazy; + const msg = try sema.errMsg(&block_scope, field_src, "duplicate union field: '{}'", .{ + field_name.fmt(ip), + }); errdefer msg.destroy(gpa); const prev_field_index = union_obj.fields.getIndex(field_name).?; - const prev_field_src = union_obj.fieldSrcLoc(sema.mod, .{ .index = prev_field_index }).lazy; - try sema.mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl), msg, "other field here", .{}); + const prev_field_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = prev_field_index }).lazy; + try mod.errNoteNonLazy(prev_field_src.toSrcLoc(decl, mod), msg, "other field here", .{}); try sema.errNote(&block_scope, src, msg, "union declared here", .{}); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } - if (tag_ty_field_names) |*names| { - const enum_has_field = names.orderedRemove(field_name); - if (!enum_has_field) { + if (explicit_tags_seen.len > 0) { + const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type; + const enum_index = tag_info.nameIndex(ip, field_name) orelse { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; - const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{s}' in enum '{}'", .{ field_name, union_obj.tag_ty.fmt(sema.mod) }); + const msg = try sema.errMsg(&block_scope, ty_src, "no field named '{}' in enum '{}'", .{ + field_name.fmt(ip), union_obj.tag_ty.fmt(mod), + }); errdefer msg.destroy(sema.gpa); try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } + }; + // No check for duplicate because the check already happened in order + // to create the enum type in the first place. 
+ assert(!explicit_tags_seen[enum_index]); + explicit_tags_seen[enum_index] = true; } - if (field_ty.zigTypeTag() == .Opaque) { + if (field_ty.zigTypeTag(mod) == .Opaque) { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }).lazy; @@ -32407,11 +33333,11 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } if (union_obj.layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotExtern(msg, ty_src, field_ty, .union_field); @@ -32420,13 +33346,13 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); - } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty))) { + } else if (union_obj.layout == .Packed and !(validatePackedType(field_ty, mod))) { const msg = msg: { - const ty_src = union_obj.fieldSrcLoc(sema.mod, .{ + const ty_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .type, }); - const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)}); + const msg = try sema.errMsg(&block_scope, ty_src.lazy, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)}); errdefer msg.destroy(sema.gpa); try sema.explainWhyTypeIsNotPacked(msg, ty_src, field_ty); @@ -32438,14 +33364,14 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } gop.value_ptr.* = .{ - .ty = try field_ty.copy(decl_arena_allocator), + .ty = field_ty, .abi_align = 0, }; if (align_ref != .none) { gop.value_ptr.abi_align = sema.resolveAlign(&block_scope, .unneeded, align_ref) catch |err| switch (err) { error.NeededSourceLocation => { - const align_src = union_obj.fieldSrcLoc(sema.mod, .{ + const align_src = mod.fieldSrcLoc(union_obj.owner_decl, .{ .index = field_i, .range = .alignment, }).lazy; @@ -32459,22 +33385,29 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void { } } - if (tag_ty_field_names) |names| { - if (names.count() > 0) { + if (explicit_tags_seen.len > 0) { + const tag_info = ip.indexToKey(union_obj.tag_ty.toIntern()).enum_type; + if (tag_info.names.len > fields_len) { const msg = msg: { const msg = try sema.errMsg(&block_scope, src, "enum field(s) missing in union", .{}); errdefer msg.destroy(sema.gpa); const enum_ty = union_obj.tag_ty; - for (names.keys()) |field_name| { - const field_index = enum_ty.enumFieldIndex(field_name).?; - try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{s}' missing, declared here", .{field_name}); + for (tag_info.names, 0..) 
|field_name, field_index| { + if (explicit_tags_seen[field_index]) continue; + try sema.addFieldErrNote(enum_ty, field_index, msg, "field '{}' missing, declared here", .{ + field_name.fmt(ip), + }); } try sema.addDeclaredHereNote(msg, union_obj.tag_ty); break :msg msg; }; return sema.failWithOwnedErrorMsg(msg); } + } else if (enum_field_vals.count() > 0) { + union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(&block_scope, enum_field_names, enum_field_vals.keys(), union_obj); + } else { + union_obj.tag_ty = try sema.generateUnionTagTypeSimple(&block_scope, enum_field_names, union_obj); } } @@ -32486,116 +33419,103 @@ fn semaUnionFieldVal(sema: *Sema, block: *Block, src: LazySrcLoc, int_tag_ty: Ty fn generateUnionTagTypeNumbered( sema: *Sema, block: *Block, - fields_len: u32, - int_ty: Type, + enum_field_names: []const InternPool.NullTerminatedString, + enum_field_vals: []const InternPool.Index, union_obj: *Module.Union, ) !Type { const mod = sema.mod; - - var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_decl_arena.deinit(); - const new_decl_arena_allocator = new_decl_arena.allocator(); - - const enum_obj = try new_decl_arena_allocator.create(Module.EnumNumbered); - const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumNumbered); - enum_ty_payload.* = .{ - .base = .{ .tag = .enum_numbered }, - .data = enum_obj, - }; - const enum_ty = Type.initPayload(&enum_ty_payload.base); - const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty); + const gpa = sema.gpa; const src_decl = mod.declPtr(block.src_decl); const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope); errdefer mod.destroyDecl(new_decl_index); - const name = name: { - const fqn = try union_obj.getFullyQualifiedName(mod); - defer sema.gpa.free(fqn); - break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn}); - }; + const fqn = try union_obj.getFullyQualifiedName(mod); + const name = try mod.intern_pool.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(&mod.intern_pool)}); try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{ - .ty = Type.type, - .val = enum_val, + .ty = Type.noreturn, + .val = Value.@"unreachable", }, name); - sema.mod.declPtr(new_decl_index).name_fully_qualified = true; - - const new_decl = mod.declPtr(new_decl_index); - new_decl.owns_tv = true; - new_decl.name_fully_qualified = true; errdefer mod.abortAnonDecl(new_decl_index); - const copied_int_ty = try int_ty.copy(new_decl_arena_allocator); - enum_obj.* = .{ - .owner_decl = new_decl_index, - .tag_ty = copied_int_ty, - .fields = .{}, - .values = .{}, - }; - // Here we pre-allocate the maps using the decl arena. 
-        try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len);
-        try enum_obj.values.ensureTotalCapacityContext(new_decl_arena_allocator, fields_len, .{
-            .ty = copied_int_ty,
-            .mod = mod,
-        });
-        try new_decl.finalizeNewArena(&new_decl_arena);
-        return enum_ty;
+    const new_decl = mod.declPtr(new_decl_index);
+    new_decl.name_fully_qualified = true;
+    new_decl.owns_tv = true;
+
+    const enum_ty = try mod.intern(.{ .enum_type = .{
+        .decl = new_decl_index,
+        .namespace = .none,
+        .tag_ty = if (enum_field_vals.len == 0)
+            (try mod.intType(.unsigned, 0)).toIntern()
+        else
+            mod.intern_pool.typeOf(enum_field_vals[0]),
+        .names = enum_field_names,
+        .values = enum_field_vals,
+        .tag_mode = .explicit,
+    } });
+
+    new_decl.ty = Type.type;
+    new_decl.val = enum_ty.toValue();
+
+    try mod.finalizeAnonDecl(new_decl_index);
+    return enum_ty.toType();
 }

-fn generateUnionTagTypeSimple(sema: *Sema, block: *Block, fields_len: usize, maybe_union_obj: ?*Module.Union) !Type {
+fn generateUnionTagTypeSimple(
+    sema: *Sema,
+    block: *Block,
+    enum_field_names: []const InternPool.NullTerminatedString,
+    maybe_union_obj: ?*Module.Union,
+) !Type {
     const mod = sema.mod;
-
-    var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa);
-    errdefer new_decl_arena.deinit();
-    const new_decl_arena_allocator = new_decl_arena.allocator();
-
-    const enum_obj = try new_decl_arena_allocator.create(Module.EnumSimple);
-    const enum_ty_payload = try new_decl_arena_allocator.create(Type.Payload.EnumSimple);
-    enum_ty_payload.* = .{
-        .base = .{ .tag = .enum_simple },
-        .data = enum_obj,
-    };
-    const enum_ty = Type.initPayload(&enum_ty_payload.base);
-    const enum_val = try Value.Tag.ty.create(new_decl_arena_allocator, enum_ty);
+    const gpa = sema.gpa;

     const new_decl_index = new_decl_index: {
         const union_obj = maybe_union_obj orelse {
             break :new_decl_index try mod.createAnonymousDecl(block, .{
-                .ty = Type.type,
-                .val = enum_val,
+                .ty = Type.noreturn,
+                .val = Value.@"unreachable",
             });
         };
         const src_decl = mod.declPtr(block.src_decl);
         const new_decl_index = try mod.allocateNewDecl(block.namespace, src_decl.src_node, block.wip_capture_scope);
         errdefer mod.destroyDecl(new_decl_index);
-        const name = name: {
-            const fqn = try union_obj.getFullyQualifiedName(mod);
-            defer sema.gpa.free(fqn);
-            break :name try std.fmt.allocPrintZ(mod.gpa, "@typeInfo({s}).Union.tag_type.?", .{fqn});
-        };
+        const fqn = try union_obj.getFullyQualifiedName(mod);
+        const name = try mod.intern_pool.getOrPutStringFmt(gpa, "@typeInfo({}).Union.tag_type.?", .{fqn.fmt(&mod.intern_pool)});
         try mod.initNewAnonDecl(new_decl_index, src_decl.src_line, block.namespace, .{
-            .ty = Type.type,
-            .val = enum_val,
+            .ty = Type.noreturn,
+            .val = Value.@"unreachable",
         }, name);
-        sema.mod.declPtr(new_decl_index).name_fully_qualified = true;
+        mod.declPtr(new_decl_index).name_fully_qualified = true;
         break :new_decl_index new_decl_index;
     };
+    errdefer mod.abortAnonDecl(new_decl_index);
+
+    const enum_ty = try mod.intern(.{ .enum_type = .{
+        .decl = new_decl_index,
+        .namespace = .none,
+        .tag_ty = if (enum_field_names.len == 0)
+            (try mod.intType(.unsigned, 0)).toIntern()
+        else
+            (try mod.smallestUnsignedInt(enum_field_names.len - 1)).toIntern(),
+        .names = enum_field_names,
+        .values = &.{},
+        .tag_mode = .auto,
+    } });

     const new_decl = mod.declPtr(new_decl_index);
     new_decl.owns_tv = true;
-    errdefer mod.abortAnonDecl(new_decl_index);
+    new_decl.ty = Type.type;
+    new_decl.val = enum_ty.toValue();

-    enum_obj.* = .{
-
.owner_decl = new_decl_index, - .fields = .{}, - }; - // Here we pre-allocate the maps using the decl arena. - try enum_obj.fields.ensureTotalCapacity(new_decl_arena_allocator, fields_len); - try new_decl.finalizeNewArena(&new_decl_arena); - return enum_ty; + try mod.finalizeAnonDecl(new_decl_index); + return enum_ty.toType(); } fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { - var wip_captures = try WipCaptureScope.init(sema.gpa, sema.perm_arena, sema.owner_decl.src_scope); + const gpa = sema.gpa; + + var wip_captures = try WipCaptureScope.init(gpa, sema.owner_decl.src_scope); defer wip_captures.deinit(); var block: Block = .{ @@ -32609,19 +33529,20 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { .is_comptime = true, }; defer { - block.instructions.deinit(sema.gpa); - block.params.deinit(sema.gpa); + block.instructions.deinit(gpa); + block.params.deinit(gpa); } const src = LazySrcLoc.nodeOffset(0); const mod = sema.mod; + const ip = &mod.intern_pool; const std_pkg = mod.main_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; const opt_builtin_inst = (try sema.namespaceLookupRef( &block, src, mod.declPtr(std_file.root_decl.unwrap().?).src_namespace, - "builtin", + try ip.getOrPutString(gpa, "builtin"), )) orelse @panic("lib/std.zig is corrupt and missing 'builtin'"); const builtin_inst = try sema.analyzeLoad(&block, src, opt_builtin_inst, src); const builtin_ty = sema.analyzeAsType(&block, src, builtin_inst) catch |err| switch (err) { @@ -32631,8 +33552,8 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { const opt_ty_decl = (try sema.namespaceLookup( &block, src, - builtin_ty.getNamespace().?, - name, + builtin_ty.getNamespaceIndex(mod).unwrap().?, + try ip.getOrPutString(gpa, name), )) orelse std.debug.panic("lib/std/builtin.zig is corrupt and missing '{s}'", .{name}); return sema.analyzeDeclVal(&block, src, opt_ty_decl); } @@ -32640,7 +33561,7 @@ fn getBuiltin(sema: *Sema, name: []const u8) CompileError!Air.Inst.Ref { fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { const ty_inst = try sema.getBuiltin(name); - var wip_captures = try WipCaptureScope.init(sema.gpa, sema.perm_arena, sema.owner_decl.src_scope); + var wip_captures = try WipCaptureScope.init(sema.gpa, sema.owner_decl.src_scope); defer wip_captures.deinit(); var block: Block = .{ @@ -32673,341 +33594,287 @@ fn getBuiltinType(sema: *Sema, name: []const u8) CompileError!Type { /// that the types are already resolved. 
/// TODO assert the return value matches `ty.onePossibleValue` pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { - switch (ty.tag()) { - .f16, - .f32, - .f64, - .f80, - .f128, - .c_longdouble, - .comptime_int, - .comptime_float, - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .bool, - .type, - .anyerror, - .error_set_single, - .error_set, - .error_set_merged, - .error_union, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - .single_const_pointer_to_comptime_int, - .array_sentinel, - .array_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .const_slice, - .mut_slice, - .anyopaque, - .optional_single_mut_pointer, - .optional_single_const_pointer, - .enum_literal, - .anyerror_void_error_union, - .error_set_inferred, - .@"opaque", - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .type_info, - .@"anyframe", - .anyframe_T, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .single_const_pointer, - .single_mut_pointer, - .pointer, - => return null, - - .optional => { - var buf: Type.Payload.ElemType = undefined; - const child_ty = ty.optionalChild(&buf); - if (child_ty.isNoReturn()) { - return Value.null; - } else { - return null; - } - }, - - .@"struct" => { - const resolved_ty = try sema.resolveTypeFields(ty); - const s = resolved_ty.castTag(.@"struct").?.data; - for (s.fields.values(), 0..) |field, i| { - if (field.is_comptime) continue; - if (field.ty.eql(resolved_ty, sema.mod)) { - const msg = try Module.ErrorMsg.create( - sema.gpa, - s.srcLoc(sema.mod), - "struct '{}' depends on itself", - .{ty.fmt(sema.mod)}, - ); - try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(msg); - } - if ((try sema.typeHasOnePossibleValue(field.ty)) == null) { + const mod = sema.mod; + return switch (ty.toIntern()) { + .empty_struct_type => Value.empty_struct, + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => |int_type| { + if (int_type.bits == 0) { + return try mod.intValue(ty, 0); + } else { return null; } - } - return Value.initTag(.empty_struct_value); - }, + }, - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.values, 0..) |val, i| { - const is_comptime = val.tag() != .unreachable_value; - if (is_comptime) continue; - if ((try sema.typeHasOnePossibleValue(tuple.types[i])) != null) continue; - return null; - } - return Value.initTag(.empty_struct_value); - }, + .ptr_type, + .error_union_type, + .func_type, + .anyframe_type, + .error_set_type, + .inferred_error_set_type, + => null, - .enum_numbered => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_obj = resolved_ty.castTag(.enum_numbered).?.data; - // An explicit tag type is always provided for enum_numbered. 
- if (enum_obj.tag_ty.hasRuntimeBits()) { - return null; - } - if (enum_obj.fields.count() == 1) { - if (enum_obj.values.count() == 0) { - return Value.zero; // auto-numbered - } else { - return enum_obj.values.keys()[0]; + inline .array_type, .vector_type => |seq_type, seq_tag| { + const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none; + if (seq_type.len + @boolToInt(has_sentinel) == 0) return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = &.{} }, + } })).toValue(); + + if (try sema.typeHasOnePossibleValue(seq_type.child.toType())) |opv| { + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = opv.toIntern() }, + } })).toValue(); } - } else { return null; - } - }, - .enum_full => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_obj = resolved_ty.castTag(.enum_full).?.data; - if (enum_obj.tag_ty.hasRuntimeBits()) { - return null; - } - switch (enum_obj.fields.count()) { - 0 => return Value.initTag(.unreachable_value), - 1 => if (enum_obj.values.count() == 0) { - return Value.zero; // auto-numbered + }, + .opt_type => |child| { + if (child == .noreturn_type) { + return try mod.nullValue(ty); } else { - return enum_obj.values.keys()[0]; + return null; + } + }, + + .simple_type => |t| switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .enum_literal, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + .type_info, + => null, + + .void => Value.void, + .noreturn => Value.@"unreachable", + .null => Value.null, + .undefined => Value.undef, + + .generic_poison => return error.GenericPoison, + }, + .struct_type => |struct_type| { + const resolved_ty = try sema.resolveTypeFields(ty); + if (mod.structPtrUnwrap(struct_type.index)) |s| { + const field_vals = try sema.arena.alloc(InternPool.Index, s.fields.count()); + for (field_vals, s.fields.values(), 0..) |*field_val, field, i| { + if (field.is_comptime) { + field_val.* = field.default_val; + continue; + } + if (field.ty.eql(resolved_ty, sema.mod)) { + const msg = try Module.ErrorMsg.create( + sema.gpa, + s.srcLoc(sema.mod), + "struct '{}' depends on itself", + .{ty.fmt(sema.mod)}, + ); + try sema.addFieldErrNote(resolved_ty, i, msg, "while checking this field", .{}); + return sema.failWithOwnedErrorMsg(msg); + } + if (try sema.typeHasOnePossibleValue(field.ty)) |field_opv| { + field_val.* = try field_opv.intern(field.ty, mod); + } else return null; + } + + // In this case the struct has no runtime-known fields and + // therefore has one possible value. + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } })).toValue(); + } + + // In this case the struct has no fields at all and + // therefore has one possible value. + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = &.{} }, + } })).toValue(); + }, + + .anon_struct_type => |tuple| { + for (tuple.values) |val| { + if (val == .none) return null; + } + // In this case the struct has all comptime-known fields and + // therefore has one possible value. 
+ // TODO: write something like getCoercedInts to avoid needing to dupe + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = try sema.arena.dupe(InternPool.Index, tuple.values) }, + } })).toValue(); + }, + + .union_type => |union_type| { + const resolved_ty = try sema.resolveTypeFields(ty); + const union_obj = mod.unionPtr(union_type.index); + const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse + return null; + const fields = union_obj.fields.values(); + if (fields.len == 0) { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + } + const only_field = fields[0]; + if (only_field.ty.eql(resolved_ty, sema.mod)) { + const msg = try Module.ErrorMsg.create( + sema.gpa, + union_obj.srcLoc(sema.mod), + "union '{}' depends on itself", + .{ty.fmt(sema.mod)}, + ); + try sema.addFieldErrNote(resolved_ty, 0, msg, "while checking this field", .{}); + return sema.failWithOwnedErrorMsg(msg); + } + const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse + return null; + const only = try mod.intern(.{ .un = .{ + .ty = resolved_ty.toIntern(), + .tag = tag_val.toIntern(), + .val = val_val.toIntern(), + } }); + return only.toValue(); + }, + .opaque_type => null, + .enum_type => |enum_type| switch (enum_type.tag_mode) { + .nonexhaustive => { + if (enum_type.tag_ty == .comptime_int_type) return null; + + if (try sema.typeHasOnePossibleValue(enum_type.tag_ty.toType())) |int_opv| { + const only = try mod.intern(.{ .enum_tag = .{ + .ty = ty.toIntern(), + .int = int_opv.toIntern(), + } }); + return only.toValue(); + } + + return null; }, - else => return null, - } - }, - .enum_simple => { - const resolved_ty = try sema.resolveTypeFields(ty); - const enum_simple = resolved_ty.castTag(.enum_simple).?.data; - switch (enum_simple.fields.count()) { - 0 => return Value.initTag(.unreachable_value), - 1 => return Value.zero, - else => return null, - } - }, - .enum_nonexhaustive => { - const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (tag_ty.zigTypeTag() != .ComptimeInt and !(try sema.typeHasRuntimeBits(tag_ty))) { - return Value.zero; - } else { - return null; - } - }, - .@"union", .union_safety_tagged, .union_tagged => { - const resolved_ty = try sema.resolveTypeFields(ty); - const union_obj = resolved_ty.cast(Type.Payload.Union).?.data; - const tag_val = (try sema.typeHasOnePossibleValue(union_obj.tag_ty)) orelse - return null; - const fields = union_obj.fields.values(); - if (fields.len == 0) return Value.initTag(.unreachable_value); - const only_field = fields[0]; - if (only_field.ty.eql(resolved_ty, sema.mod)) { - const msg = try Module.ErrorMsg.create( - sema.gpa, - union_obj.srcLoc(sema.mod), - "union '{}' depends on itself", - .{ty.fmt(sema.mod)}, - ); - try sema.addFieldErrNote(resolved_ty, 0, msg, "while checking this field", .{}); - return sema.failWithOwnedErrorMsg(msg); - } - const val_val = (try sema.typeHasOnePossibleValue(only_field.ty)) orelse - return null; - // TODO make this not allocate. The function in `Type.onePossibleValue` - // currently returns `empty_struct_value` and we should do that here too. 
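What `typeHasOnePossibleValue` computes has a direct user-visible counterpart: a type with exactly one possible value occupies no runtime bits, so loads and stores of it are no-ops. A small check (standard Zig semantics):

const std = @import("std");

test "one-possible-value types have no runtime bits" {
    try std.testing.expect(@sizeOf(u0) == 0);
    try std.testing.expect(@sizeOf(void) == 0);
    try std.testing.expect(@sizeOf(enum { lone }) == 0);
    // A struct whose fields all have one possible value does too.
    try std.testing.expect(@sizeOf(struct { a: u0, b: void }) == 0);
}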
- return try Value.Tag.@"union".create(sema.arena, .{ - .tag = tag_val, - .val = val_val, - }); - }, + .auto, .explicit => { + if (enum_type.tag_ty.toType().hasRuntimeBits(mod)) return null; - .empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value), - .void => return Value.void, - .noreturn => return Value.initTag(.unreachable_value), - .null => return Value.null, - .undefined => return Value.initTag(.undef), + switch (enum_type.names.len) { + 0 => { + const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() }); + return only.toValue(); + }, + 1 => return try mod.getCoerced((if (enum_type.values.len == 0) + try mod.intern(.{ .int = .{ + .ty = enum_type.tag_ty, + .storage = .{ .u64 = 0 }, + } }) + else + enum_type.values[0]).toValue(), ty), + else => return null, + } + }, + }, - .int_unsigned, .int_signed => { - if (ty.cast(Type.Payload.Bits).?.data == 0) { - return Value.zero; - } else { - return null; - } + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, }, - .vector, .array, .array_u8 => { - if (ty.arrayLen() == 0) - return Value.initTag(.empty_array); - if ((try sema.typeHasOnePossibleValue(ty.elemType())) != null) { - return Value.initTag(.the_only_possible_value); - } - return null; - }, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - .generic_poison => return error.GenericPoison, - } + }; } /// Returns the type of the AIR instruction. fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { - return sema.getTmpAir().typeOf(inst); + return sema.getTmpAir().typeOf(inst, &sema.mod.intern_pool); } pub fn getTmpAir(sema: Sema) Air { return .{ .instructions = sema.air_instructions.slice(), .extra = sema.air_extra.items, - .values = sema.air_values.items, }; } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { - switch (ty.tag()) { - .u1 => return .u1_type, - .u8 => return .u8_type, - .i8 => return .i8_type, - .u16 => return .u16_type, - .u29 => return .u29_type, - .i16 => return .i16_type, - .u32 => return .u32_type, - .i32 => return .i32_type, - .u64 => return .u64_type, - .i64 => return .i64_type, - .u128 => return .u128_type, - .i128 => return .i128_type, - .usize => return .usize_type, - .isize => return .isize_type, - .c_short => return .c_short_type, - .c_ushort => return .c_ushort_type, - .c_int => return .c_int_type, - .c_uint => return .c_uint_type, - .c_long => return .c_long_type, - .c_ulong => return .c_ulong_type, - .c_longlong => return .c_longlong_type, - .c_ulonglong => return .c_ulonglong_type, - .c_longdouble => return .c_longdouble_type, - .f16 => return .f16_type, - .f32 => return .f32_type, - .f64 => return .f64_type, - .f80 => return .f80_type, - .f128 => return .f128_type, - .anyopaque => return .anyopaque_type, - .bool => return .bool_type, - .void => return .void_type, - .type => return .type_type, - .anyerror => return .anyerror_type, - .comptime_int => return .comptime_int_type, - .comptime_float => return .comptime_float_type, - .noreturn => return .noreturn_type, - .@"anyframe" => return .anyframe_type, - .null => return .null_type, - .undefined => return .undefined_type, - .enum_literal => return .enum_literal_type, - .atomic_order => return .atomic_order_type, - .atomic_rmw_op => return .atomic_rmw_op_type, - .calling_convention => return 
.calling_convention_type, - .address_space => return .address_space_type, - .float_mode => return .float_mode_type, - .reduce_op => return .reduce_op_type, - .modifier => return .modifier_type, - .prefetch_options => return .prefetch_options_type, - .export_options => return .export_options_type, - .extern_options => return .extern_options_type, - .type_info => return .type_info_type, - .manyptr_u8 => return .manyptr_u8_type, - .manyptr_const_u8 => return .manyptr_const_u8_type, - .fn_noreturn_no_args => return .fn_noreturn_no_args_type, - .fn_void_no_args => return .fn_void_no_args_type, - .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type, - .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type, - .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, - .const_slice_u8 => return .const_slice_u8_type, - .anyerror_void_error_union => return .anyerror_void_error_union_type, - .generic_poison => return .generic_poison_type, - else => {}, - } + if (@enumToInt(ty.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(ty.toIntern())); try sema.air_instructions.append(sema.gpa, .{ - .tag = .const_ty, - .data = .{ .ty = ty }, + .tag = .interned, + .data = .{ .interned = ty.toIntern() }, }); return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { - return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int)); + const mod = sema.mod; + return sema.addConstant(ty, try mod.intValue(ty, int)); } fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { - return sema.addConstant(ty, Value.undef); + return sema.addConstant(ty, (try sema.mod.intern(.{ .undef = ty.toIntern() })).toValue()); } pub fn addConstant(sema: *Sema, ty: Type, val: Value) SemaError!Air.Inst.Ref { + const mod = sema.mod; const gpa = sema.gpa; - const ty_inst = try sema.addType(ty); - try sema.air_values.append(gpa, val); + + // This assertion can be removed when the `ty` parameter is removed from + // this function thanks to the InternPool transition being complete. + if (std.debug.runtime_safety) { + const val_ty = mod.intern_pool.typeOf(val.toIntern()); + if (ty.toIntern() != val_ty) { + std.debug.panic("addConstant type mismatch: '{}' vs '{}'\n", .{ + ty.fmt(mod), val_ty.toType().fmt(mod), + }); + } + } + if (@enumToInt(val.toIntern()) < Air.ref_start_index) + return @intToEnum(Air.Inst.Ref, @enumToInt(val.toIntern())); try sema.air_instructions.append(gpa, .{ - .tag = .constant, - .data = .{ .ty_pl = .{ - .ty = ty_inst, - .payload = @intCast(u32, sema.air_values.items.len - 1), - } }, + .tag = .interned, + .data = .{ .interned = val.toIntern() }, }); return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } @@ -33026,7 +33893,8 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { u32 => @field(extra, field.name), Air.Inst.Ref => @enumToInt(@field(extra, field.name)), i32 => @bitCast(u32, @field(extra, field.name)), - else => @compileError("bad field type"), + InternPool.Index => @enumToInt(@field(extra, field.name)), + else => @compileError("bad field type: " ++ @typeName(field.type)), }); } return result; @@ -33072,21 +33940,25 @@ fn analyzeComptimeAlloc( defer anon_decl.deinit(); const decl_index = try anon_decl.finish( - try var_type.copy(anon_decl.arena()), + var_type, // There will be stores before the first load, but they may be to sub-elements or // sub-fields. 
So we need to initialize with undef to allow the mechanism to expand // into fields/elements and have those overridden with stored values. - Value.undef, + (try sema.mod.intern(.{ .undef = var_type.toIntern() })).toValue(), alignment, ); const decl = sema.mod.declPtr(decl_index); decl.@"align" = alignment; + try sema.comptime_mutable_decls.append(decl_index); try sema.mod.declareDeclDependency(sema.owner_decl_index, decl_index); - return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{ - .runtime_index = block.runtime_index, - .decl_index = decl_index, - })); + return sema.addConstant(ptr_type, (try sema.mod.intern(.{ .ptr = .{ + .ty = ptr_type.toIntern(), + .addr = .{ .mut_decl = .{ + .decl = decl_index, + .runtime_index = block.runtime_index, + } }, + } })).toValue()); } /// The places where a user can specify an address space attribute @@ -33114,8 +33986,9 @@ pub fn analyzeAddressSpace( zir_ref: Zir.Inst.Ref, ctx: AddressSpaceContext, ) !std.builtin.AddressSpace { + const mod = sema.mod; const addrspace_tv = try sema.resolveInstConst(block, src, zir_ref, "addresspace must be comptime-known"); - const address_space = addrspace_tv.val.toEnum(std.builtin.AddressSpace); + const address_space = mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val); const target = sema.mod.getTarget(); const arch = target.cpu.arch; @@ -33158,8 +34031,9 @@ pub fn analyzeAddressSpace( /// Asserts the value is a pointer and dereferences it. /// Returns `null` if the pointer contents cannot be loaded at comptime. fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value { - const load_ty = ptr_ty.childType(); - const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty, true); + const mod = sema.mod; + const load_ty = ptr_ty.childType(mod); + const res = try sema.pointerDerefExtra(block, src, ptr_val, load_ty); switch (res) { .runtime_load => return null, .val => |v| return v, @@ -33185,8 +34059,9 @@ const DerefResult = union(enum) { out_of_bounds: Type, }; -fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type, want_mutable: bool) CompileError!DerefResult { - const target = sema.mod.getTarget(); +fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, load_ty: Type) CompileError!DerefResult { + const mod = sema.mod; + const target = mod.getTarget(); const deref = sema.beginComptimePtrLoad(block, src, ptr_val, load_ty) catch |err| switch (err) { error.RuntimeLoad => return DerefResult{ .runtime_load = {} }, else => |e| return e, @@ -33199,19 +34074,17 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value if (coerce_in_mem_ok) { // We have a Value that lines up in virtual memory exactly with what we want to load, // and it is in-memory coercible to load_ty. It may be returned without modifications. - if (deref.is_mutable and want_mutable) { - // The decl whose value we are obtaining here may be overwritten with - // a different value upon further semantic analysis, which would - // invalidate this memory. So we must copy here. - return DerefResult{ .val = try tv.val.copy(sema.arena) }; - } - return DerefResult{ .val = tv.val }; + // Move mutable decl values to the InternPool and assert other decls are already in + // the InternPool. 
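`analyzeComptimeAlloc` (just above) and the comptime pointer loads here are two halves of the same language feature: a `comptime var` lives in a comptime-mutable decl, its address is a `mut_decl` pointer, and dereferences of it are resolved entirely at compile time. A user-level sketch (standard Zig semantics):

const std = @import("std");

test "comptime var allocation and dereference" {
    comptime {
        var x: u32 = 1; // the comptime alloc: a mutable decl
        const p = &x; // comptime-known `mut_decl` pointer
        p.* += 1; // store through the pointer
        std.debug.assert(p.* == 2); // load folded at compile time
    }
}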
+ const uncoerced_val = if (deref.is_mutable) try tv.val.intern(tv.ty, mod) else tv.val.toIntern(); + const coerced_val = try sema.coerceValueInMemory(block, uncoerced_val.toValue(), tv.ty, load_ty, src); + return .{ .val = coerced_val }; } } // The type is not in-memory coercible or the direct dereference failed, so it must // be bitcast according to the pointer type we are performing the load through. - if (!load_ty.hasWellDefinedLayout()) { + if (!load_ty.hasWellDefinedLayout(mod)) { return DerefResult{ .needed_well_defined = load_ty }; } @@ -33248,59 +34121,32 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError /// This can return `error.AnalysisFail` because it sometimes requires resolving whether /// a type has zero bits, which can cause a "foo depends on itself" compile error. /// This logic must be kept in sync with `Type.isPtrLikeOptional`. -fn typePtrOrOptionalPtrTy( - sema: *Sema, - ty: Type, - buf: *Type.Payload.ElemType, -) !?Type { - switch (ty.tag()) { - .optional_single_const_pointer, - .optional_single_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - => return ty.optionalChild(buf), - - .single_const_pointer_to_comptime_int, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - => return ty, - - .pointer => switch (ty.ptrSize()) { - .Slice => return null, - .C => return ty.optionalChild(buf), - else => return ty, +fn typePtrOrOptionalPtrTy(sema: *Sema, ty: Type) !?Type { + const mod = sema.mod; + return switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .One, .Many, .C => ty, + .Slice => null, }, - - .inferred_alloc_const => unreachable, - .inferred_alloc_mut => unreachable, - - .optional => { - const child_type = ty.optionalChild(buf); - if (child_type.zigTypeTag() != .Pointer) return null; - - const info = child_type.ptrInfo().data; - switch (info.size) { - .Slice, .C => return null, + .opt_type => |opt_child| switch (mod.intern_pool.indexToKey(opt_child)) { + .ptr_type => |ptr_type| switch (ptr_type.flags.size) { + .Slice, .C => null, .Many, .One => { - if (info.@"allowzero") return null; + if (ptr_type.flags.is_allowzero) return null; // optionals of zero sized types behave like bools, not pointers - if ((try sema.typeHasOnePossibleValue(child_type)) != null) { + const payload_ty = opt_child.toType(); + if ((try sema.typeHasOnePossibleValue(payload_ty)) != null) { return null; } - return child_type; + return payload_ty; }, - } + }, + else => null, }, - - else => return null, - } + else => null, + }; } /// `generic_poison` will return false. 
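The rewritten typePtrOrOptionalPtrTy above captures the recurring shape of this patch: instead of switching on per-instance Type tags, the code decodes an InternPool index into a key and switches on that. Below is a minimal, self-contained sketch of the same classification; Key and PtrSize are toy stand-ins for the real InternPool.Key, and the zero-bit-payload check on optional pointers is omitted for brevity.

const std = @import("std");

// Toy stand-ins for the real InternPool machinery; illustrative only.
const PtrSize = enum { One, Many, Slice, C };
const Key = union(enum) {
    ptr_type: struct { size: PtrSize, is_allowzero: bool },
    opt_type: *const Key, // key of the optional's payload type
    int_type: struct { bits: u16 },
};

// Mirrors the new logic: bare pointers (except slices) classify as pointers;
// optionals of non-allowzero One/Many pointers unwrap to the payload key.
fn ptrOrOptionalPtr(key: Key) ?Key {
    return switch (key) {
        .ptr_type => |p| switch (p.size) {
            .One, .Many, .C => key,
            .Slice => null,
        },
        .opt_type => |child| switch (child.*) {
            .ptr_type => |p| switch (p.size) {
                .Slice, .C => null,
                .Many, .One => if (p.is_allowzero) null else child.*,
            },
            else => null,
        },
        .int_type => null,
    };
}

test "optional pointer classification" {
    const one_ptr: Key = .{ .ptr_type = .{ .size = .One, .is_allowzero = false } };
    try std.testing.expect(ptrOrOptionalPtr(one_ptr) != null);
    const opt_ptr: Key = .{ .opt_type = &one_ptr };
    try std.testing.expect(ptrOrOptionalPtr(opt_ptr) != null);
    const az: Key = .{ .ptr_type = .{ .size = .One, .is_allowzero = true } };
    try std.testing.expect(ptrOrOptionalPtr(.{ .opt_type = &az }) == null);
}
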
@@ -33310,201 +34156,170 @@ fn typePtrOrOptionalPtrTy( /// TODO merge these implementations together with the "advanced"/opt_sema pattern seen /// elsewhere in value.zig pub fn typeRequiresComptime(sema: *Sema, ty: Type) CompileError!bool { - return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u29, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_char, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f80, - .f128, - .anyopaque, - .bool, - .void, - .anyerror, - .noreturn, - .@"anyframe", - .null, - .undefined, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, - .empty_struct_literal, - .empty_struct, - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - .@"opaque", - .generic_poison, - .array_u8, - .array_u8_sentinel_0, - .int_signed, - .int_unsigned, - .enum_simple, - => false, + const mod = sema.mod; + return switch (ty.toIntern()) { + .empty_struct_type => false, - .single_const_pointer_to_comptime_int, - .type, - .comptime_int, - .comptime_float, - .enum_literal, - .type_info, - // These are function bodies, not function pointers. - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .function, - => true, - - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - - .array, - .array_sentinel, - .vector, - => return sema.typeRequiresComptime(ty.childType()), - - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => { - const child_ty = ty.childType(); - if (child_ty.zigTypeTag() == .Fn) { - return child_ty.fnInfo().is_generic; - } else { - return sema.typeRequiresComptime(child_ty); - } - }, - - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, - => { - var buf: Type.Payload.ElemType = undefined; - return sema.typeRequiresComptime(ty.optionalChild(&buf)); - }, - - .tuple, .anon_struct => { - const tuple = ty.tupleFields(); - for (tuple.types, 0..) 
|field_ty, i| { - const have_comptime_val = tuple.values[i].tag() != .unreachable_value; - if (!have_comptime_val and try sema.typeRequiresComptime(field_ty)) { - return true; + else => switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .int_type => return false, + .ptr_type => |ptr_type| { + const child_ty = ptr_type.child.toType(); + if (child_ty.zigTypeTag(mod) == .Fn) { + return mod.typeToFunc(child_ty).?.is_generic; + } else { + return sema.typeRequiresComptime(child_ty); } - } - return false; - }, + }, + .anyframe_type => |child| { + if (child == .none) return false; + return sema.typeRequiresComptime(child.toType()); + }, + .array_type => |array_type| return sema.typeRequiresComptime(array_type.child.toType()), + .vector_type => |vector_type| return sema.typeRequiresComptime(vector_type.child.toType()), + .opt_type => |child| return sema.typeRequiresComptime(child.toType()), - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - if (struct_obj.status == .field_types_wip) - return false; + .error_union_type => |error_union_type| { + return sema.typeRequiresComptime(error_union_type.payload_type.toType()); + }, - try sema.resolveTypeFieldsStruct(ty, struct_obj); + .error_set_type, .inferred_error_set_type => false, - struct_obj.requires_comptime = .wip; - for (struct_obj.fields.values()) |field| { - if (field.is_comptime) continue; - if (try sema.typeRequiresComptime(field.ty)) { - struct_obj.requires_comptime = .yes; - return true; + .func_type => true, + + .simple_type => |t| return switch (t) { + .f16, + .f32, + .f64, + .f80, + .f128, + .usize, + .isize, + .c_char, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .anyopaque, + .bool, + .void, + .anyerror, + .noreturn, + .generic_poison, + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_modifier, + .prefetch_options, + .export_options, + .extern_options, + => false, + + .type, + .comptime_int, + .comptime_float, + .null, + .undefined, + .enum_literal, + .type_info, + => true, + }, + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse return false; + switch (struct_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + if (struct_obj.status == .field_types_wip) + return false; + + try sema.resolveTypeFieldsStruct(ty, struct_obj); + + struct_obj.requires_comptime = .wip; + for (struct_obj.fields.values()) |field| { + if (field.is_comptime) continue; + if (try sema.typeRequiresComptime(field.ty)) { + struct_obj.requires_comptime = .yes; + return true; + } } - } - struct_obj.requires_comptime = .no; - return false; - }, - } - }, - - .@"union", .union_safety_tagged, .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - switch (union_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - if (union_obj.status == .field_types_wip) + struct_obj.requires_comptime = .no; return false; - - try sema.resolveTypeFieldsUnion(ty, union_obj); - - union_obj.requires_comptime = .wip; - for (union_obj.fields.values()) |field| { - if (try sema.typeRequiresComptime(field.ty)) { - union_obj.requires_comptime = .yes; - return true; - } + }, + } + }, + .anon_struct_type => |tuple| { + for (tuple.types, tuple.values) |field_ty, val| { + const 
have_comptime_val = val != .none; + if (!have_comptime_val and try sema.typeRequiresComptime(field_ty.toType())) { + return true; } - union_obj.requires_comptime = .no; - return false; - }, - } - }, + } + return false; + }, - .error_union => return sema.typeRequiresComptime(ty.errorUnionPayload()), - .anyframe_T => { - const child_ty = ty.castTag(.anyframe_T).?.data; - return sema.typeRequiresComptime(child_ty); - }, - .enum_numbered => { - const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; - return sema.typeRequiresComptime(tag_ty); - }, - .enum_full, .enum_nonexhaustive => { - const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; - return sema.typeRequiresComptime(tag_ty); + .union_type => |union_type| { + const union_obj = mod.unionPtr(union_type.index); + switch (union_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + if (union_obj.status == .field_types_wip) + return false; + + try sema.resolveTypeFieldsUnion(ty, union_obj); + + union_obj.requires_comptime = .wip; + for (union_obj.fields.values()) |field| { + if (try sema.typeRequiresComptime(field.ty)) { + union_obj.requires_comptime = .yes; + return true; + } + } + union_obj.requires_comptime = .no; + return false; + }, + } + }, + + .opaque_type => false, + .enum_type => |enum_type| try sema.typeRequiresComptime(enum_type.tag_ty.toType()), + + // values, not types + .undef, + .runtime_value, + .simple_value, + .variable, + .extern_func, + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .opt, + .aggregate, + .un, + // memoization, not types + .memoized_call, + => unreachable, }, }; } pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - return ty.hasRuntimeBitsAdvanced(false, .{ .sema = sema }) catch |err| switch (err) { + const mod = sema.mod; + return ty.hasRuntimeBitsAdvanced(mod, false, .{ .sema = sema }) catch |err| switch (err) { error.NeedLazy => unreachable, else => |e| return e, }; @@ -33512,19 +34327,18 @@ pub fn typeHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { fn typeAbiSize(sema: *Sema, ty: Type) !u64 { try sema.resolveTypeLayout(ty); - const target = sema.mod.getTarget(); - return ty.abiSize(target); + return ty.abiSize(sema.mod); } fn typeAbiAlignment(sema: *Sema, ty: Type) CompileError!u32 { - const target = sema.mod.getTarget(); - return (try ty.abiAlignmentAdvanced(target, .{ .sema = sema })).scalar; + return (try ty.abiAlignmentAdvanced(sema.mod, .{ .sema = sema })).scalar; } /// Not valid to call for packed unions. /// Keep implementation in sync with `Module.Union.Field.normalAlignment`. fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 { - if (field.ty.zigTypeTag() == .NoReturn) { + const mod = sema.mod; + if (field.ty.zigTypeTag(mod) == .NoReturn) { return @as(u32, 0); } else if (field.abi_align == 0) { return sema.typeAbiAlignment(field.ty); @@ -33535,7 +34349,8 @@ fn unionFieldAlignment(sema: *Sema, field: Module.Union.Field) !u32 { /// Synchronize logic with `Type.isFnOrHasRuntimeBits`. 
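The struct_type and union_type arms of typeRequiresComptime above depend on the tri-state requires_comptime memo (unknown/wip/yes/no): a container being visited answers false for the duration of the visit, so self-referential types terminate, and the final answer is cached afterwards. A runnable sketch of that fixed-point scheme follows; the types Ty and Struct here are illustrative toys, not the compiler's, and the function-pointer and zero-bit special cases are left out.

const std = @import("std");

const RequiresComptime = enum { unknown, wip, no, yes };

const Ty = union(enum) {
    int,
    comptime_int,
    ptr: *Ty,
    strct: *Struct,
};

const Struct = struct {
    fields: []const *Ty,
    requires_comptime: RequiresComptime = .unknown,
};

// Mark a struct `wip` while visiting its fields so a self-referential
// struct answers `false` instead of recursing forever, then cache the result.
fn requiresComptime(ty: *Ty) bool {
    return switch (ty.*) {
        .int => false,
        .comptime_int => true,
        .ptr => |child| requiresComptime(child),
        .strct => |s| switch (s.requires_comptime) {
            .no, .wip => false,
            .yes => true,
            .unknown => blk: {
                s.requires_comptime = .wip;
                for (s.fields) |field_ty| {
                    if (requiresComptime(field_ty)) {
                        s.requires_comptime = .yes;
                        break :blk true;
                    }
                }
                s.requires_comptime = .no;
                break :blk false;
            },
        },
    };
}

test "cycles terminate" {
    var node_ty: Ty = undefined;
    var ptr_ty: Ty = .{ .ptr = &node_ty };
    var node = Struct{ .fields = &.{&ptr_ty} };
    node_ty = .{ .strct = &node };
    try std.testing.expect(!requiresComptime(&node_ty));
}
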
pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { - const fn_info = ty.fnInfo(); + const mod = sema.mod; + const fn_info = mod.typeToFunc(ty).?; if (fn_info.is_generic) return false; if (fn_info.is_var_args) return true; switch (fn_info.cc) { @@ -33543,7 +34358,7 @@ pub fn fnHasRuntimeBits(sema: *Sema, ty: Type) CompileError!bool { .Inline => return false, else => {}, } - if (try sema.typeRequiresComptime(fn_info.return_type)) { + if (try sema.typeRequiresComptime(fn_info.return_type.toType())) { return false; } return true; @@ -33553,11 +34368,12 @@ fn unionFieldIndex( sema: *Sema, block: *Block, unresolved_union_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { + const mod = sema.mod; const union_ty = try sema.resolveTypeFields(unresolved_union_ty); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field_index_usize = union_obj.fields.getIndex(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name); return @intCast(u32, field_index_usize); @@ -33567,14 +34383,15 @@ fn structFieldIndex( sema: *Sema, block: *Block, unresolved_struct_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { + const mod = sema.mod; const struct_ty = try sema.resolveTypeFields(unresolved_struct_ty); - if (struct_ty.isAnonStruct()) { + if (struct_ty.isAnonStruct(mod)) { return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); } else { - const struct_obj = struct_ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(struct_ty).?; const field_index_usize = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name); return @intCast(u32, field_index_usize); @@ -33585,55 +34402,98 @@ fn anonStructFieldIndex( sema: *Sema, block: *Block, struct_ty: Type, - field_name: []const u8, + field_name: InternPool.NullTerminatedString, field_src: LazySrcLoc, ) !u32 { - const anon_struct = struct_ty.castTag(.anon_struct).?.data; - for (anon_struct.names, 0..) |name, i| { - if (mem.eql(u8, name, field_name)) { - return @intCast(u32, i); - } + const mod = sema.mod; + switch (mod.intern_pool.indexToKey(struct_ty.toIntern())) { + .anon_struct_type => |anon_struct_type| for (anon_struct_type.names, 0..) |name, i| { + if (name == field_name) return @intCast(u32, i); + }, + .struct_type => |struct_type| if (mod.structPtrUnwrap(struct_type.index)) |struct_obj| { + for (struct_obj.fields.keys(), 0..) 
|name, i| { + if (name == field_name) { + return @intCast(u32, i); + } + } + }, + else => unreachable, } - return sema.fail(block, field_src, "no field named '{s}' in anonymous struct '{}'", .{ - field_name, struct_ty.fmt(sema.mod), + return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{ + field_name.fmt(&mod.intern_pool), struct_ty.fmt(sema.mod), }); } fn queueFullTypeResolution(sema: *Sema, ty: Type) !void { - const inst_ref = try sema.addType(ty); - try sema.types_to_resolve.append(sema.gpa, inst_ref); + try sema.types_to_resolve.put(sema.gpa, ty.toIntern(), {}); } -fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value { - if (ty.zigTypeTag() == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); +/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting +/// overflow_idx to the vector index the overflow was at (or 0 for a scalar). +fn intAdd(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value { + var overflow: usize = undefined; + return sema.intAddInner(lhs, rhs, ty, &overflow) catch |err| switch (err) { + error.Overflow => { + const is_vec = ty.isVector(sema.mod); + overflow_idx.* = if (is_vec) overflow else 0; + const safe_ty = if (is_vec) try sema.mod.vectorType(.{ + .len = ty.vectorLen(sema.mod), + .child = .comptime_int_type, + }) else Type.comptime_int; + return sema.intAddInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) { + error.Overflow => unreachable, + else => |e| return e, + }; + }, + else => |e| return e, + }; +} + +fn intAddInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.intAddScalar(lhs_elem, rhs_elem); + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + const val = sema.intAddScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) { + error.Overflow => { + overflow_idx.* = i; + return error.Overflow; + }, + else => |e| return e, + }; + scalar.* = try val.intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } - return sema.intAddScalar(lhs, rhs); + return sema.intAddScalar(lhs, rhs, ty); } -fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { +fn intAddScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { + const mod = sema.mod; + if (scalar_ty.toIntern() != .comptime_int_type) { + const res = try sema.intAddWithOverflowScalar(lhs, rhs, scalar_ty); + if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow; + return res.wrapped_result; + } // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. 
var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const target = sema.mod.getTarget(); - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.add(lhs_bigint, rhs_bigint); - return Value.fromBigInt(sema.arena, result_bigint.toConst()); + return mod.intValue_big(scalar_ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. @@ -33643,55 +34503,87 @@ fn numberAddWrapScalar( rhs: Value, ty: Type, ) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + const mod = sema.mod; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; - if (ty.zigTypeTag() == .ComptimeInt) { - return sema.intAdd(lhs, rhs, ty); + if (ty.zigTypeTag(mod) == .ComptimeInt) { + return sema.intAdd(lhs, rhs, ty, undefined); } if (ty.isAnyFloat()) { - return sema.floatAdd(lhs, rhs, ty); + return Value.floatAdd(lhs, rhs, ty, sema.arena, mod); } const overflow_result = try sema.intAddWithOverflow(lhs, rhs, ty); return overflow_result.wrapped_result; } -fn intSub( - sema: *Sema, - lhs: Value, - rhs: Value, - ty: Type, -) !Value { - if (ty.zigTypeTag() == .Vector) { - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); - for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.intSubScalar(lhs_elem, rhs_elem); - } - return Value.Tag.aggregate.create(sema.arena, result_data); - } - return sema.intSubScalar(lhs, rhs); +/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting +/// overflow_idx to the vector index the overflow was at (or 0 for a scalar). +fn intSub(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize) !Value { + var overflow: usize = undefined; + return sema.intSubInner(lhs, rhs, ty, &overflow) catch |err| switch (err) { + error.Overflow => { + const is_vec = ty.isVector(sema.mod); + overflow_idx.* = if (is_vec) overflow else 0; + const safe_ty = if (is_vec) try sema.mod.vectorType(.{ + .len = ty.vectorLen(sema.mod), + .child = .comptime_int_type, + }) else Type.comptime_int; + return sema.intSubInner(lhs, rhs, safe_ty, undefined) catch |err1| switch (err1) { + error.Overflow => unreachable, + else => |e| return e, + }; + }, + else => |e| return e, + }; } -fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value) !Value { +fn intSubInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize) !Value { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); + const val = sema.intSubScalar(lhs_elem, rhs_elem, scalar_ty) catch |err| switch (err) { + error.Overflow => { + overflow_idx.* = i; + return error.Overflow; + }, + else => |e| return e, + }; + scalar.* = try val.intern(scalar_ty, mod); + } + return (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); + } + return sema.intSubScalar(lhs, rhs, ty); +} + +fn intSubScalar(sema: *Sema, lhs: Value, rhs: Value, scalar_ty: Type) !Value { + const mod = sema.mod; + if (scalar_ty.toIntern() != .comptime_int_type) { + const res = try sema.intSubWithOverflowScalar(lhs, rhs, scalar_ty); + if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow; + return res.wrapped_result; + } // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const target = sema.mod.getTarget(); - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.sub(lhs_bigint, rhs_bigint); - return Value.fromBigInt(sema.arena, result_bigint.toConst()); + return mod.intValue_big(scalar_ty, result_bigint.toConst()); } /// Supports both floats and ints; handles undefined. @@ -33701,155 +34593,49 @@ fn numberSubWrapScalar( rhs: Value, ty: Type, ) !Value { - if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + const mod = sema.mod; + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; - if (ty.zigTypeTag() == .ComptimeInt) { - return sema.intSub(lhs, rhs, ty); + if (ty.zigTypeTag(mod) == .ComptimeInt) { + return sema.intSub(lhs, rhs, ty, undefined); } if (ty.isAnyFloat()) { - return sema.floatSub(lhs, rhs, ty); + return Value.floatSub(lhs, rhs, ty, sema.arena, mod); } const overflow_result = try sema.intSubWithOverflow(lhs, rhs, ty); return overflow_result.wrapped_result; } -fn floatAdd( - sema: *Sema, - lhs: Value, - rhs: Value, - float_type: Type, -) !Value { - if (float_type.zigTypeTag() == .Vector) { - const result_data = try sema.arena.alloc(Value, float_type.vectorLen()); - for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.floatAddScalar(lhs_elem, rhs_elem, float_type.scalarType()); - } - return Value.Tag.aggregate.create(sema.arena, result_data); - } - return sema.floatAddScalar(lhs, rhs, float_type); -} - -fn floatAddScalar( - sema: *Sema, - lhs: Value, - rhs: Value, - float_type: Type, -) !Value { - const target = sema.mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); - return Value.Tag.float_16.create(sema.arena, lhs_val + rhs_val); - }, - 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); - return Value.Tag.float_32.create(sema.arena, lhs_val + rhs_val); - }, - 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); - return Value.Tag.float_64.create(sema.arena, lhs_val + rhs_val); - }, - 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); - return Value.Tag.float_80.create(sema.arena, lhs_val + rhs_val); - }, - 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); - return Value.Tag.float_128.create(sema.arena, lhs_val + rhs_val); - }, - else => unreachable, - } -} - -fn floatSub( - sema: *Sema, - lhs: Value, - rhs: Value, - float_type: Type, -) !Value { - if (float_type.zigTypeTag() == .Vector) { - const result_data = try sema.arena.alloc(Value, float_type.vectorLen()); - for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - scalar.* = try sema.floatSubScalar(lhs_elem, rhs_elem, float_type.scalarType()); - } - return Value.Tag.aggregate.create(sema.arena, result_data); - } - return sema.floatSubScalar(lhs, rhs, float_type); -} - -fn floatSubScalar( - sema: *Sema, - lhs: Value, - rhs: Value, - float_type: Type, -) !Value { - const target = sema.mod.getTarget(); - switch (float_type.floatBits(target)) { - 16 => { - const lhs_val = lhs.toFloat(f16); - const rhs_val = rhs.toFloat(f16); - return Value.Tag.float_16.create(sema.arena, lhs_val - rhs_val); - }, - 32 => { - const lhs_val = lhs.toFloat(f32); - const rhs_val = rhs.toFloat(f32); - return Value.Tag.float_32.create(sema.arena, lhs_val - rhs_val); - }, - 64 => { - const lhs_val = lhs.toFloat(f64); - const rhs_val = rhs.toFloat(f64); - return Value.Tag.float_64.create(sema.arena, lhs_val - rhs_val); - }, - 80 => { - const lhs_val = lhs.toFloat(f80); - const rhs_val = rhs.toFloat(f80); - return Value.Tag.float_80.create(sema.arena, lhs_val - rhs_val); - }, - 128 => { - const lhs_val = lhs.toFloat(f128); - const rhs_val = rhs.toFloat(f128); - return Value.Tag.float_128.create(sema.arena, lhs_val - rhs_val); - }, - else => unreachable, - } -} - fn intSubWithOverflow( sema: *Sema, lhs: Value, rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - if (ty.zigTypeTag() == .Vector) { - const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen()); - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); - for (result_data, 0..) 
|*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType()); - overflowed_data[i] = of_math_result.overflow_bit; - scalar.* = of_math_result.wrapped_result; + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { + const vec_len = ty.vectorLen(mod); + const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len); + const result_data = try sema.arena.alloc(InternPool.Index, vec_len); + const scalar_ty = ty.scalarType(mod); + for (overflowed_data, result_data, 0..) |*of, *scalar, i| { + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); + const of_math_result = try sema.intSubWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty); + of.* = try of_math_result.overflow_bit.intern(Type.u1, mod); + scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); } return Value.OverflowArithmeticResult{ - .overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data), - .wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data), + .overflow_bit = (try mod.intern(.{ .aggregate = .{ + .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .storage = .{ .elems = overflowed_data }, + } })).toValue(), + .wrapped_result = (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(), }; } return sema.intSubWithOverflowScalar(lhs, rhs, ty); @@ -33861,22 +34647,22 @@ fn intSubWithOverflowScalar( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - const target = sema.mod.getTarget(); - const info = ty.intInfo(target); + const mod = sema.mod; + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); - const wrapped_result = try Value.fromBigInt(sema.arena, result_bigint.toConst()); + const wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()); return Value.OverflowArithmeticResult{ - .overflow_bit = Value.boolToInt(overflowed), + .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)), .wrapped_result = wrapped_result, }; } @@ -33889,15 +34675,19 @@ fn floatToInt( float_ty: Type, int_ty: Type, ) CompileError!Value { - if (float_ty.zigTypeTag() == .Vector) { - const elem_ty = float_ty.childType(); - const result_data = try sema.arena.alloc(Value, float_ty.vectorLen()); + const mod = sema.mod; + if (float_ty.zigTypeTag(mod) == .Vector) { + const elem_ty = float_ty.scalarType(mod); + const result_data = try sema.arena.alloc(InternPool.Index, float_ty.vectorLen(mod)); + const scalar_ty = int_ty.scalarType(mod); for (result_data, 0..) 
|*scalar, i| { - var buf: Value.ElemValueBuffer = undefined; - const elem_val = val.elemValueBuffer(sema.mod, i, &buf); - scalar.* = try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType()); + const elem_val = try val.elemValue(sema.mod, i); + scalar.* = try (try sema.floatToIntScalar(block, src, elem_val, elem_ty, int_ty.scalarType(mod))).intern(scalar_ty, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = int_ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } return sema.floatToIntScalar(block, src, val, float_ty, int_ty); } @@ -33935,9 +34725,9 @@ fn floatToIntScalar( float_ty: Type, int_ty: Type, ) CompileError!Value { - const Limb = std.math.big.Limb; + const mod = sema.mod; - const float = val.toFloat(f128); + const float = val.toFloat(f128, mod); if (std.math.isNan(float)) { return sema.fail(block, src, "float value NaN cannot be stored in integer type '{}'", .{ int_ty.fmt(sema.mod), @@ -33952,18 +34742,14 @@ fn floatToIntScalar( var big_int = try float128IntPartToBigInt(sema.arena, float); defer big_int.deinit(); - const result_limbs = try sema.arena.dupe(Limb, big_int.toConst().limbs); - const result = if (!big_int.isPositive()) - try Value.Tag.int_big_negative.create(sema.arena, result_limbs) - else - try Value.Tag.int_big_positive.create(sema.arena, result_limbs); + const cti_result = try mod.intValue_big(Type.comptime_int, big_int.toConst()); - if (!(try sema.intFitsInType(result, int_ty, null))) { + if (!(try sema.intFitsInType(cti_result, int_ty, null))) { return sema.fail(block, src, "float value '{}' cannot be stored in integer type '{}'", .{ val.fmtValue(float_ty, sema.mod), int_ty.fmt(sema.mod), }); } - return result; + return mod.getCoerced(cti_result, int_ty); } /// Asserts the value is an integer, and the destination type is ComptimeInt or Int. @@ -33976,208 +34762,91 @@ fn intFitsInType( ty: Type, vector_index: ?*usize, ) CompileError!bool { - const target = sema.mod.getTarget(); - switch (val.tag()) { - .zero, - .undef, - .bool_false, - => return true, - - .one, - .bool_true, - => switch (ty.zigTypeTag()) { - .Int => { - const info = ty.intInfo(target); - return switch (info.signedness) { - .signed => info.bits >= 2, - .unsigned => info.bits >= 1, - }; - }, - .ComptimeInt => return true, - else => unreachable, - }, - - .lazy_align => switch (ty.zigTypeTag()) { - .Int => { - const info = ty.intInfo(target); - const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); - // If it is u16 or bigger we know the alignment fits without resolving it. - if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiAlignment(val.castTag(.lazy_align).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; - }, - .ComptimeInt => return true, - else => unreachable, - }, - .lazy_size => switch (ty.zigTypeTag()) { - .Int => { - const info = ty.intInfo(target); - const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); - // If it is u64 or bigger we know the size fits without resolving it. 
- if (info.bits >= max_needed_bits) return true; - const x = try sema.typeAbiSize(val.castTag(.lazy_size).?.data); - if (x == 0) return true; - const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= actual_needed_bits; - }, - .ComptimeInt => return true, - else => unreachable, - }, - - .int_u64 => switch (ty.zigTypeTag()) { - .Int => { - const x = val.castTag(.int_u64).?.data; - if (x == 0) return true; - const info = ty.intInfo(target); - const needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); - return info.bits >= needed_bits; - }, - .ComptimeInt => return true, - else => unreachable, - }, - .int_i64 => switch (ty.zigTypeTag()) { - .Int => { - const x = val.castTag(.int_i64).?.data; - if (x == 0) return true; - const info = ty.intInfo(target); - if (info.signedness == .unsigned and x < 0) - return false; - var buffer: Value.BigIntSpace = undefined; - return (try val.toBigIntAdvanced(&buffer, target, sema)).fitsInTwosComp(info.signedness, info.bits); - }, - .ComptimeInt => return true, - else => unreachable, - }, - .int_big_positive => switch (ty.zigTypeTag()) { - .Int => { - const info = ty.intInfo(target); - return val.castTag(.int_big_positive).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); - }, - .ComptimeInt => return true, - else => unreachable, - }, - .int_big_negative => switch (ty.zigTypeTag()) { - .Int => { - const info = ty.intInfo(target); - return val.castTag(.int_big_negative).?.asBigInt().fitsInTwosComp(info.signedness, info.bits); - }, - .ComptimeInt => return true, - else => unreachable, - }, - - .the_only_possible_value => { - assert(ty.intInfo(target).bits == 0); - return true; - }, - - .decl_ref_mut, - .extern_fn, - .decl_ref, - .function, - .variable, - => switch (ty.zigTypeTag()) { - .Int => { - const info = ty.intInfo(target); + const mod = sema.mod; + if (ty.toIntern() == .comptime_int_type) return true; + const info = ty.intInfo(mod); + switch (val.toIntern()) { + .zero_usize, .zero_u8 => return true, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => return true, + .variable, .extern_func, .func, .ptr => { + const target = mod.getTarget(); const ptr_bits = target.ptrBitWidth(); return switch (info.signedness) { .signed => info.bits > ptr_bits, .unsigned => info.bits >= ptr_bits, }; }, - .ComptimeInt => return true, + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => { + var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + return big_int.fitsInTwosComp(info.signedness, info.bits); + }, + .lazy_align => |lazy_ty| { + const max_needed_bits = @as(u16, 16) + @boolToInt(info.signedness == .signed); + // If it is u16 or bigger we know the alignment fits without resolving it. + if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiAlignment(lazy_ty.toType()); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; + }, + .lazy_size => |lazy_ty| { + const max_needed_bits = @as(u16, 64) + @boolToInt(info.signedness == .signed); + // If it is u64 or bigger we know the size fits without resolving it. 
+ if (info.bits >= max_needed_bits) return true; + const x = try sema.typeAbiSize(lazy_ty.toType()); + if (x == 0) return true; + const actual_needed_bits = std.math.log2(x) + 1 + @boolToInt(info.signedness == .signed); + return info.bits >= actual_needed_bits; + }, + }, + .aggregate => |aggregate| { + assert(ty.zigTypeTag(mod) == .Vector); + return switch (aggregate.storage) { + .bytes => |bytes| for (bytes, 0..) |byte, i| { + if (byte == 0) continue; + const actual_needed_bits = std.math.log2(byte) + 1 + @boolToInt(info.signedness == .signed); + if (info.bits >= actual_needed_bits) continue; + if (vector_index) |vi| vi.* = i; + break false; + } else true, + .elems, .repeated_elem => for (switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems, + .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem), + }, 0..) |elem, i| { + if (try sema.intFitsInType(elem.toValue(), ty.scalarType(mod), null)) continue; + if (vector_index) |vi| vi.* = i; + break false; + } else true, + }; + }, else => unreachable, }, - - .aggregate => { - assert(ty.zigTypeTag() == .Vector); - for (val.castTag(.aggregate).?.data, 0..) |elem, i| { - if (!(try sema.intFitsInType(elem, ty.scalarType(), null))) { - if (vector_index) |some| some.* = i; - return false; - } - } - return true; - }, - - else => unreachable, } } -fn intInRange( - sema: *Sema, - tag_ty: Type, - int_val: Value, - end: usize, -) !bool { +fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { + const mod = sema.mod; if (!(try int_val.compareAllWithZeroAdvanced(.gte, sema))) return false; - var end_payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = end, - }; - const end_val = Value.initPayload(&end_payload.base); + const end_val = try mod.intValue(tag_ty, end); if (!(try sema.compareAll(int_val, .lt, end_val, tag_ty))) return false; return true; } /// Asserts the type is an enum. 
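The lazy_align and lazy_size branches of intFitsInType above first check whether the destination is trivially wide enough (u16 or wider for alignments, u64 or wider for sizes) before resolving the lazy value, and otherwise fall back to the exact rule: a positive x needs log2(x) + 1 bits, plus one more for a sign bit when the destination is signed. A small standalone check of that arithmetic (neededBits is a hypothetical helper, not part of the patch):

const std = @import("std");

// A positive x needs log2(x) + 1 bits unsigned, plus a sign bit if the
// destination type is signed; zero fits in any integer type.
fn neededBits(x: u64, signed: bool) u16 {
    if (x == 0) return 0;
    return @intCast(u16, std.math.log2(x) + 1 + @boolToInt(signed));
}

test "needed bits" {
    // 255 fits in u8 (8 bits) but needs i9 once a sign bit is required.
    try std.testing.expectEqual(@as(u16, 8), neededBits(255, false));
    try std.testing.expectEqual(@as(u16, 9), neededBits(255, true));
    // Alignments are powers of two: an alignment of 16 fits in u5 / i6.
    try std.testing.expectEqual(@as(u16, 5), neededBits(16, false));
    try std.testing.expectEqual(@as(u16, 6), neededBits(16, true));
}
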
-fn enumHasInt( - sema: *Sema, - ty: Type, - int: Value, -) CompileError!bool { - switch (ty.tag()) { - .enum_nonexhaustive => unreachable, - .enum_full => { - const enum_full = ty.castTag(.enum_full).?.data; - const tag_ty = enum_full.tag_ty; - if (enum_full.values.count() == 0) { - return sema.intInRange(tag_ty, int, enum_full.fields.count()); - } else { - return enum_full.values.containsContext(int, .{ - .ty = tag_ty, - .mod = sema.mod, - }); - } - }, - .enum_numbered => { - const enum_obj = ty.castTag(.enum_numbered).?.data; - const tag_ty = enum_obj.tag_ty; - if (enum_obj.values.count() == 0) { - return sema.intInRange(tag_ty, int, enum_obj.fields.count()); - } else { - return enum_obj.values.containsContext(int, .{ - .ty = tag_ty, - .mod = sema.mod, - }); - } - }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - const fields_len = enum_simple.fields.count(); - const bits = std.math.log2_int_ceil(usize, fields_len); - var buffer: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = bits, - }; - const tag_ty = Type.initPayload(&buffer.base); - return sema.intInRange(tag_ty, int, fields_len); - }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .modifier, - .prefetch_options, - .export_options, - .extern_options, - => unreachable, +fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { + const mod = sema.mod; + const enum_type = mod.intern_pool.indexToKey(ty.toIntern()).enum_type; + assert(enum_type.tag_mode != .nonexhaustive); + // The `tagValueIndex` function call below relies on the type being the integer tag type. + // `getCoerced` assumes the value will fit the new type. + if (!(try sema.intFitsInType(int, enum_type.tag_ty.toType(), null))) return false; + const int_coerced = try mod.getCoerced(int, enum_type.tag_ty.toType()); - else => unreachable, - } + return enum_type.tagValueIndex(&mod.intern_pool, int_coerced.toIntern()) != null; } fn intAddWithOverflow( @@ -34186,21 +34855,28 @@ fn intAddWithOverflow( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - if (ty.zigTypeTag() == .Vector) { - const overflowed_data = try sema.arena.alloc(Value, ty.vectorLen()); - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); - for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, ty.scalarType()); - overflowed_data[i] = of_math_result.overflow_bit; - scalar.* = of_math_result.wrapped_result; + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { + const vec_len = ty.vectorLen(mod); + const overflowed_data = try sema.arena.alloc(InternPool.Index, vec_len); + const result_data = try sema.arena.alloc(InternPool.Index, vec_len); + const scalar_ty = ty.scalarType(mod); + for (overflowed_data, result_data, 0..) 
|*of, *scalar, i| { + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); + const of_math_result = try sema.intAddWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty); + of.* = try of_math_result.overflow_bit.intern(Type.u1, mod); + scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); } return Value.OverflowArithmeticResult{ - .overflow_bit = try Value.Tag.aggregate.create(sema.arena, overflowed_data), - .wrapped_result = try Value.Tag.aggregate.create(sema.arena, result_data), + .overflow_bit = (try mod.intern(.{ .aggregate = .{ + .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .storage = .{ .elems = overflowed_data }, + } })).toValue(), + .wrapped_result = (try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(), }; } return sema.intAddWithOverflowScalar(lhs, rhs, ty); @@ -34212,22 +34888,22 @@ fn intAddWithOverflowScalar( rhs: Value, ty: Type, ) !Value.OverflowArithmeticResult { - const target = sema.mod.getTarget(); - const info = ty.intInfo(target); + const mod = sema.mod; + const info = ty.intInfo(mod); var lhs_space: Value.BigIntSpace = undefined; var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, target, sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, target, sema); + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_space, mod, sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_space, mod, sema); const limbs = try sema.arena.alloc( std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(info.bits), ); var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, info.signedness, info.bits); - const result = try Value.fromBigInt(sema.arena, result_bigint.toConst()); + const result = try mod.intValue_big(ty, result_bigint.toConst()); return Value.OverflowArithmeticResult{ - .overflow_bit = Value.boolToInt(overflowed), + .overflow_bit = try mod.intValue(Type.u1, @boolToInt(overflowed)), .wrapped_result = result, }; } @@ -34243,14 +34919,13 @@ fn compareAll( rhs: Value, ty: Type, ) CompileError!bool { - if (ty.zigTypeTag() == .Vector) { + const mod = sema.mod; + if (ty.zigTypeTag(mod) == .Vector) { var i: usize = 0; - while (i < ty.vectorLen()) : (i += 1) { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()))) { + while (i < ty.vectorLen(mod)) : (i += 1) { + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); + if (!(try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)))) { return false; } } @@ -34267,10 +34942,13 @@ fn compareScalar( rhs: Value, ty: Type, ) CompileError!bool { + const mod = sema.mod; + const coerced_lhs = try mod.getCoerced(lhs, ty); + const coerced_rhs = try mod.getCoerced(rhs, ty); switch (op) { - .eq => return sema.valuesEqual(lhs, rhs, ty), - .neq => return !(try sema.valuesEqual(lhs, rhs, ty)), - else => return Value.compareHeteroAdvanced(lhs, op, rhs, sema.mod.getTarget(), sema), + .eq => return sema.valuesEqual(coerced_lhs, coerced_rhs, ty), + .neq => return !(try sema.valuesEqual(coerced_lhs, coerced_rhs, ty)), + else => 
return Value.compareHeteroAdvanced(coerced_lhs, op, coerced_rhs, mod, sema), } } @@ -34291,17 +34969,19 @@ fn compareVector( rhs: Value, ty: Type, ) !Value { - assert(ty.zigTypeTag() == .Vector); - const result_data = try sema.arena.alloc(Value, ty.vectorLen()); + const mod = sema.mod; + assert(ty.zigTypeTag(mod) == .Vector); + const result_data = try sema.arena.alloc(InternPool.Index, ty.vectorLen(mod)); for (result_data, 0..) |*scalar, i| { - var lhs_buf: Value.ElemValueBuffer = undefined; - var rhs_buf: Value.ElemValueBuffer = undefined; - const lhs_elem = lhs.elemValueBuffer(sema.mod, i, &lhs_buf); - const rhs_elem = rhs.elemValueBuffer(sema.mod, i, &rhs_buf); - const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType()); - scalar.* = Value.makeBool(res_bool); + const lhs_elem = try lhs.elemValue(sema.mod, i); + const rhs_elem = try rhs.elemValue(sema.mod, i); + const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(mod)); + scalar.* = try Value.makeBool(res_bool).intern(Type.bool, mod); } - return Value.Tag.aggregate.create(sema.arena, result_data); + return (try mod.intern(.{ .aggregate = .{ + .ty = (try mod.vectorType(.{ .len = ty.vectorLen(mod), .child = .bool_type })).toIntern(), + .storage = .{ .elems = result_data }, + } })).toValue(); } /// Returns the type of a pointer to an element. @@ -34312,11 +34992,11 @@ fn compareVector( /// Handles const-ness and address spaces in particular. /// This code is duplicated in `analyzePtrArithmetic`. fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { - const ptr_info = ptr_ty.ptrInfo().data; - const elem_ty = ptr_ty.elemType2(); + const mod = sema.mod; + const ptr_info = ptr_ty.ptrInfo(mod); + const elem_ty = ptr_ty.elemType2(mod); const allow_zero = ptr_info.@"allowzero" and (offset orelse 0) == 0; - const target = sema.mod.getTarget(); - const parent_ty = ptr_ty.childType(); + const parent_ty = ptr_ty.childType(mod); const VI = Type.Payload.Pointer.Data.VectorIndex; @@ -34324,15 +35004,15 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { host_size: u16 = 0, alignment: u32 = 0, vector_index: VI = .none, - } = if (parent_ty.tag() == .vector and ptr_info.size == .One) blk: { - const elem_bits = elem_ty.bitSize(target); + } = if (parent_ty.isVector(mod) and ptr_info.size == .One) blk: { + const elem_bits = elem_ty.bitSize(mod); if (elem_bits == 0) break :blk .{}; const is_packed = elem_bits < 8 or !std.math.isPowerOfTwo(elem_bits); if (!is_packed) break :blk .{}; break :blk .{ - .host_size = @intCast(u16, parent_ty.arrayLen()), - .alignment = @intCast(u16, parent_ty.abiAlignment(target)), + .host_size = @intCast(u16, parent_ty.arrayLen(mod)), + .alignment = @intCast(u16, parent_ty.abiAlignment(mod)), .vector_index = if (offset) |some| @intToEnum(VI, some) else .runtime, }; } else .{}; @@ -34366,3 +35046,42 @@ fn elemPtrType(sema: *Sema, ptr_ty: Type, offset: ?usize) !Type { .vector_index = vector_info.vector_index, }); } + +/// Merge lhs with rhs. +/// Asserts that lhs and rhs are both error sets and are resolved. 
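As context for the errorSetMerge helper introduced just below: it unions two resolved error sets by seeding a name map with lhs and then inserting rhs, so duplicates collapse and lhs ordering wins, before handing the merged names back to the module. The same strategy over plain strings, as a standalone sketch (mergeNames is an illustrative name; the real code keys on interned NullTerminatedStrings and produces a Type):

const std = @import("std");

// Arena-backed union of two string sets, preserving lhs-first ordering.
fn mergeNames(
    arena: std.mem.Allocator,
    lhs: []const []const u8,
    rhs: []const []const u8,
) ![]const []const u8 {
    var names: std.StringArrayHashMapUnmanaged(void) = .{};
    try names.ensureUnusedCapacity(arena, lhs.len);
    for (lhs) |name| names.putAssumeCapacityNoClobber(name, {});
    for (rhs) |name| try names.put(arena, name, {});
    return names.keys(); // storage is arena-owned; no deinit needed
}

test "error set union" {
    var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena_state.deinit();
    const merged = try mergeNames(
        arena_state.allocator(),
        &.{ "A", "B" },
        &.{ "B", "C" },
    );
    try std.testing.expectEqual(@as(usize, 3), merged.len);
    try std.testing.expectEqualStrings("A", merged[0]);
    try std.testing.expectEqualStrings("C", merged[2]);
}
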
+fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type { + const mod = sema.mod; + const arena = sema.arena; + const lhs_names = lhs.errorSetNames(mod); + const rhs_names = rhs.errorSetNames(mod); + var names: Module.Fn.InferredErrorSet.NameMap = .{}; + try names.ensureUnusedCapacity(arena, lhs_names.len); + + for (lhs_names) |name| { + names.putAssumeCapacityNoClobber(name, {}); + } + for (rhs_names) |name| { + try names.put(arena, name, {}); + } + + return mod.errorSetFromUnsortedNames(names.keys()); +} + +/// Avoids crashing the compiler when asking if inferred allocations are noreturn. +fn isNoReturn(sema: *Sema, ref: Air.Inst.Ref) bool { + if (ref == .unreachable_value) return true; + if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) { + .inferred_alloc, .inferred_alloc_comptime => return false, + else => {}, + }; + return sema.typeOf(ref).isNoReturn(sema.mod); +} + +/// Avoids crashing the compiler when asking if inferred allocations are known to be a certain zig type. +fn isKnownZigType(sema: *Sema, ref: Air.Inst.Ref, tag: std.builtin.TypeId) bool { + if (Air.refToIndex(ref)) |inst| switch (sema.air_instructions.items(.tag)[inst]) { + .inferred_alloc, .inferred_alloc_comptime => return false, + else => {}, + }; + return sema.typeOf(ref).zigTypeTag(sema.mod) == tag; +} diff --git a/src/TypedValue.zig b/src/TypedValue.zig index d74fbda93e..ec76b52d20 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -27,13 +27,13 @@ pub const Managed = struct { /// Assumes arena allocation. Does a recursive copy. pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue { return TypedValue{ - .ty = try self.ty.copy(arena), + .ty = self.ty, .val = try self.val.copy(arena), }; } pub fn eql(a: TypedValue, b: TypedValue, mod: *Module) bool { - if (!a.ty.eql(b.ty, mod)) return false; + if (a.ty.toIntern() != b.ty.toIntern()) return false; return a.val.eql(b.val, a.ty, mod); } @@ -41,8 +41,8 @@ pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash, mod: *Module) void { return tv.val.hash(tv.ty, hasher, mod); } -pub fn enumToInt(tv: TypedValue, buffer: *Value.Payload.U64) Value { - return tv.val.enumToInt(tv.ty, buffer); +pub fn enumToInt(tv: TypedValue, mod: *Module) Allocator.Error!Value { + return tv.val.enumToInt(tv.ty, mod); } const max_aggregate_items = 100; @@ -61,7 +61,10 @@ pub fn format( ) !void { _ = options; comptime std.debug.assert(fmt.len == 0); - return ctx.tv.print(writer, 3, ctx.mod); + return ctx.tv.print(writer, 3, ctx.mod) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function + else => |e| return e, + }; } /// Prints the Value according to the Type, not according to the Value Tag. @@ -70,106 +73,61 @@ pub fn print( writer: anytype, level: u8, mod: *Module, -) @TypeOf(writer).Error!void { - const target = mod.getTarget(); +) (@TypeOf(writer).Error || Allocator.Error)!void { var val = tv.val; var ty = tv.ty; - if (val.isVariable(mod)) - return writer.writeAll("(variable)"); + const ip = &mod.intern_pool; + while (true) switch (val.ip_index) { + .none => switch (val.tag()) { + .aggregate => return printAggregate(ty, val, writer, level, mod), + .@"union" => { + if (level == 0) { + return writer.writeAll(".{ ... 
}"); + } + const union_val = val.castTag(.@"union").?.data; + try writer.writeAll(".{ "); - while (true) switch (val.tag()) { - .u1_type => return writer.writeAll("u1"), - .u8_type => return writer.writeAll("u8"), - .i8_type => return writer.writeAll("i8"), - .u16_type => return writer.writeAll("u16"), - .i16_type => return writer.writeAll("i16"), - .u29_type => return writer.writeAll("u29"), - .u32_type => return writer.writeAll("u32"), - .i32_type => return writer.writeAll("i32"), - .u64_type => return writer.writeAll("u64"), - .i64_type => return writer.writeAll("i64"), - .u128_type => return writer.writeAll("u128"), - .i128_type => return writer.writeAll("i128"), - .isize_type => return writer.writeAll("isize"), - .usize_type => return writer.writeAll("usize"), - .c_char_type => return writer.writeAll("c_char"), - .c_short_type => return writer.writeAll("c_short"), - .c_ushort_type => return writer.writeAll("c_ushort"), - .c_int_type => return writer.writeAll("c_int"), - .c_uint_type => return writer.writeAll("c_uint"), - .c_long_type => return writer.writeAll("c_long"), - .c_ulong_type => return writer.writeAll("c_ulong"), - .c_longlong_type => return writer.writeAll("c_longlong"), - .c_ulonglong_type => return writer.writeAll("c_ulonglong"), - .c_longdouble_type => return writer.writeAll("c_longdouble"), - .f16_type => return writer.writeAll("f16"), - .f32_type => return writer.writeAll("f32"), - .f64_type => return writer.writeAll("f64"), - .f80_type => return writer.writeAll("f80"), - .f128_type => return writer.writeAll("f128"), - .anyopaque_type => return writer.writeAll("anyopaque"), - .bool_type => return writer.writeAll("bool"), - .void_type => return writer.writeAll("void"), - .type_type => return writer.writeAll("type"), - .anyerror_type => return writer.writeAll("anyerror"), - .comptime_int_type => return writer.writeAll("comptime_int"), - .comptime_float_type => return writer.writeAll("comptime_float"), - .noreturn_type => return writer.writeAll("noreturn"), - .null_type => return writer.writeAll("@Type(.Null)"), - .undefined_type => return writer.writeAll("@Type(.Undefined)"), - .fn_noreturn_no_args_type => return writer.writeAll("fn() noreturn"), - .fn_void_no_args_type => return writer.writeAll("fn() void"), - .fn_naked_noreturn_no_args_type => return writer.writeAll("fn() callconv(.Naked) noreturn"), - .fn_ccc_void_no_args_type => return writer.writeAll("fn() callconv(.C) void"), - .single_const_pointer_to_comptime_int_type => return writer.writeAll("*const comptime_int"), - .anyframe_type => return writer.writeAll("anyframe"), - .const_slice_u8_type => return writer.writeAll("[]const u8"), - .const_slice_u8_sentinel_0_type => return writer.writeAll("[:0]const u8"), - .anyerror_void_error_union_type => return writer.writeAll("anyerror!void"), - - .enum_literal_type => return writer.writeAll("@Type(.EnumLiteral)"), - .manyptr_u8_type => return writer.writeAll("[*]u8"), - .manyptr_const_u8_type => return writer.writeAll("[*]const u8"), - .manyptr_const_u8_sentinel_0_type => return writer.writeAll("[*:0]const u8"), - .atomic_order_type => return writer.writeAll("std.builtin.AtomicOrder"), - .atomic_rmw_op_type => return writer.writeAll("std.builtin.AtomicRmwOp"), - .calling_convention_type => return writer.writeAll("std.builtin.CallingConvention"), - .address_space_type => return writer.writeAll("std.builtin.AddressSpace"), - .float_mode_type => return writer.writeAll("std.builtin.FloatMode"), - .reduce_op_type => return writer.writeAll("std.builtin.ReduceOp"), - 
.modifier_type => return writer.writeAll("std.builtin.CallModifier"), - .prefetch_options_type => return writer.writeAll("std.builtin.PrefetchOptions"), - .export_options_type => return writer.writeAll("std.builtin.ExportOptions"), - .extern_options_type => return writer.writeAll("std.builtin.ExternOptions"), - .type_info_type => return writer.writeAll("std.builtin.Type"), - - .empty_struct_value, .aggregate => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - if (ty.zigTypeTag() == .Struct) { - try writer.writeAll(".{"); - const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items); + try print(.{ + .ty = mod.unionPtr(ip.indexToKey(ty.toIntern()).union_type.index).tag_ty, + .val = union_val.tag, + }, writer, level - 1, mod); + try writer.writeAll(" = "); + try print(.{ + .ty = ty.unionFieldType(union_val.tag, mod), + .val = union_val.val, + }, writer, level - 1, mod); + return writer.writeAll(" }"); + }, + .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), + .repeated => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } var i: u32 = 0; + try writer.writeAll(".{ "); + const elem_tv = TypedValue{ + .ty = ty.elemType2(mod), + .val = val.castTag(.repeated).?.data, + }; + const len = ty.arrayLen(mod); + const max_len = std.math.min(len, max_aggregate_items); while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); - switch (ty.tag()) { - .anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}), - else => {}, - } - try print(.{ - .ty = ty.structFieldType(i), - .val = val.fieldValue(ty, i), - }, writer, level - 1, mod); + try print(elem_tv, writer, level - 1, mod); } - if (ty.structFieldCount() > max_aggregate_items) { + if (len > max_aggregate_items) { try writer.writeAll(", ..."); } - return writer.writeAll("}"); - } else { - const elem_ty = ty.elemType2(); - const len = ty.arrayLen(); + return writer.writeAll(" }"); + }, + .slice => { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + const payload = val.castTag(.slice).?.data; + const elem_ty = ty.elemType2(mod); + const len = payload.len.toUnsignedInt(mod); if (elem_ty.eql(Type.u8, mod)) str: { const max_len = @intCast(usize, std.math.min(len, max_string_len)); @@ -177,11 +135,14 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { - const elem = val.fieldValue(ty, i); - if (elem.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem.toUnsignedInt(target)) orelse break :str; + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; + if (elem_val.isUndef(mod)) break :str; + buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str; } + // TODO would be nice if this had a bit of unicode awareness. 
const truncated = if (len > max_string_len) " (truncated)" else ""; return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); } @@ -192,315 +153,334 @@ pub fn print( var i: u32 = 0; while (i < max_len) : (i += 1) { if (i != 0) try writer.writeAll(", "); + const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) { + error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic + }; try print(.{ .ty = elem_ty, - .val = val.fieldValue(ty, i), + .val = elem_val, }, writer, level - 1, mod); } if (len > max_aggregate_items) { try writer.writeAll(", ..."); } return writer.writeAll(" }"); - } + }, + .eu_payload => { + val = val.castTag(.eu_payload).?.data; + ty = ty.errorUnionPayload(mod); + }, + .opt_payload => { + val = val.castTag(.opt_payload).?.data; + ty = ty.optionalChild(mod); + }, }, - .@"union" => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - const union_val = val.castTag(.@"union").?.data; - try writer.writeAll(".{ "); - - try print(.{ - .ty = ty.cast(Type.Payload.Union).?.data.tag_ty, - .val = union_val.tag, - }, writer, level - 1, mod); - try writer.writeAll(" = "); - try print(.{ - .ty = ty.unionFieldType(union_val.tag, mod), - .val = union_val.val, - }, writer, level - 1, mod); - - return writer.writeAll(" }"); - }, - .null_value => return writer.writeAll("null"), - .undef => return writer.writeAll("undefined"), - .zero => return writer.writeAll("0"), - .one => return writer.writeAll("1"), - .void_value => return writer.writeAll("{}"), - .unreachable_value => return writer.writeAll("unreachable"), - .the_only_possible_value => return writer.writeAll("0"), - .bool_true => return writer.writeAll("true"), - .bool_false => return writer.writeAll("false"), - .ty => return val.castTag(.ty).?.data.print(writer, mod), - .int_type => { - const int_type = val.castTag(.int_type).?.data; - return writer.print("{s}{d}", .{ - if (int_type.signed) "s" else "u", - int_type.bits, - }); - }, - .int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer), - .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer), - .int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), - .int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), - .lazy_align => { - const sub_ty = val.castTag(.lazy_align).?.data; - const x = sub_ty.abiAlignment(target); - return writer.print("{d}", .{x}); - }, - .lazy_size => { - const sub_ty = val.castTag(.lazy_size).?.data; - const x = sub_ty.abiSize(target); - return writer.print("{d}", .{x}); - }, - .function => return writer.print("(function '{s}')", .{ - mod.declPtr(val.castTag(.function).?.data.owner_decl).name, - }), - .extern_fn => return writer.writeAll("(extern function)"), - .variable => unreachable, - .decl_ref_mut => { - const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref mut '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .decl_ref => { - const decl_index = val.castTag(.decl_ref).?.data; - const decl = mod.declPtr(decl_index); - if (level == 0) { - return writer.print("(decl ref '{s}')", .{decl.name}); - } - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, - .comptime_field_ptr => { - const payload = val.castTag(.comptime_field_ptr).?.data; - if (level == 
0) { - return writer.writeAll("(comptime field ptr)"); - } - return print(.{ - .ty = payload.field_ty, - .val = payload.field_val, - }, writer, level - 1, mod); - }, - .elem_ptr => { - const elem_ptr = val.castTag(.elem_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { + else => switch (ip.indexToKey(val.toIntern())) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => return Type.print(val.toType(), writer, mod), + .undef => return writer.writeAll("undefined"), + .runtime_value => return writer.writeAll("(runtime value)"), + .simple_value => |simple_value| switch (simple_value) { + .empty_struct => return printAggregate(ty, val, writer, level, mod), + .generic_poison => return writer.writeAll("(generic poison)"), + else => return writer.writeAll(@tagName(simple_value)), + }, + .variable => return writer.writeAll("(variable)"), + .extern_func => |extern_func| return writer.print("(extern function '{}')", .{ + mod.declPtr(extern_func.decl).name.fmt(ip), + }), + .func => |func| return writer.print("(function '{}')", .{ + mod.declPtr(mod.funcPtr(func.index).owner_decl).name.fmt(ip), + }), + .int => |int| switch (int.storage) { + inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}), + .lazy_align => |lazy_ty| return writer.print("{d}", .{ + lazy_ty.toType().abiAlignment(mod), + }), + .lazy_size => |lazy_ty| return writer.print("{d}", .{ + lazy_ty.toType().abiSize(mod), + }), + }, + .err => |err| return writer.print("error.{}", .{ + err.name.fmt(ip), + }), + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| return writer.print("error.{}", .{ + err_name.fmt(ip), + }), + .payload => |payload| { + val = payload.toValue(); + ty = ty.errorUnionPayload(mod); + }, + }, + .enum_literal => |enum_literal| return writer.print(".{}", .{ + enum_literal.fmt(ip), + }), + .enum_tag => |enum_tag| { + if (level == 0) { + return writer.writeAll("(enum)"); + } + const enum_type = ip.indexToKey(ty.toIntern()).enum_type; + if (enum_type.tagValueIndex(ip, val.toIntern())) |tag_index| { + try writer.print(".{i}", .{enum_type.names[tag_index].fmt(ip)}); + return; + } + try writer.writeAll("@intToEnum("); try print(.{ - .ty = elem_ptr.elem_ty, - .val = elem_ptr.array_ptr, + .ty = Type.type, + .val = enum_tag.ty.toValue(), }, writer, level - 1, mod); - } - return writer.print("[{}]", .{elem_ptr.index}); - }, - .field_ptr => { - const field_ptr = val.castTag(.field_ptr).?.data; - try writer.writeAll("&"); - if (level == 0) { - try writer.writeAll("(ptr)"); - } else { + try writer.writeAll(", "); try print(.{ - .ty = field_ptr.container_ty, - .val = field_ptr.container_ptr, + .ty = ip.typeOf(enum_tag.int).toType(), + .val = enum_tag.int.toValue(), }, writer, level - 1, mod); - } + try writer.writeAll(")"); + return; + }, + .empty_enum_value => return writer.writeAll("(empty enum value)"), + .float => |float| switch (float.storage) { + inline else => |x| return writer.print("{d}", .{@floatCast(f64, x)}), + }, + .ptr => |ptr| { + if (ptr.addr == .int) { + const i = ip.indexToKey(ptr.addr.int).int; + switch (i.storage) { + inline else => |addr| return writer.print("{x:0>8}", .{addr}), + } + } - if (field_ptr.container_ty.zigTypeTag() == .Struct) { - switch (field_ptr.container_ty.tag()) { - .tuple => return 
writer.print(".@\"{d}\"", .{field_ptr.field_index}), - else => { - const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index); - return writer.print(".{s}", .{field_name}); + const ptr_ty = ip.indexToKey(ty.toIntern()).ptr_type; + if (ptr_ty.flags.size == .Slice) { + if (level == 0) { + return writer.writeAll(".{ ... }"); + } + const elem_ty = ptr_ty.child.toType(); + const len = ptr.len.toValue().toUnsignedInt(mod); + if (elem_ty.eql(Type.u8, mod)) str: { + const max_len = @min(len, max_string_len); + var buf: [max_string_len]u8 = undefined; + for (buf[0..max_len], 0..) |*c, i| { + const elem = try val.elemValue(mod, i); + if (elem.isUndef(mod)) break :str; + c.* = @intCast(u8, elem.toUnsignedInt(mod)); + } + const truncated = if (len > max_string_len) " (truncated)" else ""; + return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); + } + try writer.writeAll(".{ "); + const max_len = @min(len, max_aggregate_items); + for (0..max_len) |i| { + if (i != 0) try writer.writeAll(", "); + try print(.{ + .ty = elem_ty, + .val = try val.elemValue(mod, i), + }, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); + } + + switch (ptr.addr) { + .decl => |decl_index| { + const decl = mod.declPtr(decl_index); + if (level == 0) return writer.print("(decl '{}')", .{decl.name.fmt(ip)}); + return print(.{ + .ty = decl.ty, + .val = decl.val, + }, writer, level - 1, mod); + }, + .mut_decl => |mut_decl| { + const decl = mod.declPtr(mut_decl.decl); + if (level == 0) return writer.print("(mut decl '{}')", .{decl.name.fmt(ip)}); + return print(.{ + .ty = decl.ty, + .val = decl.val, + }, writer, level - 1, mod); + }, + .comptime_field => |field_val_ip| { + return print(.{ + .ty = ip.typeOf(field_val_ip).toType(), + .val = field_val_ip.toValue(), + }, writer, level - 1, mod); + }, + .int => unreachable, + .eu_payload => |eu_ip| { + try writer.writeAll("(payload of "); + try print(.{ + .ty = ip.typeOf(eu_ip).toType(), + .val = eu_ip.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(")"); + }, + .opt_payload => |opt_ip| { + try print(.{ + .ty = ip.typeOf(opt_ip).toType(), + .val = opt_ip.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(".?"); + }, + .elem => |elem| { + try print(.{ + .ty = ip.typeOf(elem.base).toType(), + .val = elem.base.toValue(), + }, writer, level - 1, mod); + try writer.print("[{}]", .{elem.index}); + }, + .field => |field| { + const container_ty = ip.typeOf(field.base).toType(); + try print(.{ + .ty = container_ty, + .val = field.base.toValue(), + }, writer, level - 1, mod); + + switch (container_ty.zigTypeTag(mod)) { + .Struct => { + if (container_ty.isTuple(mod)) { + try writer.print("[{d}]", .{field.index}); + } + const field_name = container_ty.structFieldName(@intCast(usize, field.index), mod); + try writer.print(".{i}", .{field_name.fmt(ip)}); + }, + .Union => { + const field_name = container_ty.unionFields(mod).keys()[@intCast(usize, field.index)]; + try writer.print(".{i}", .{field_name.fmt(ip)}); + }, + .Pointer => { + std.debug.assert(container_ty.isSlice(mod)); + try writer.writeAll(switch (field.index) { + Value.slice_ptr_index => ".ptr", + Value.slice_len_index => ".len", + else => unreachable, + }); + }, + else => unreachable, + } }, } - } else if (field_ptr.container_ty.zigTypeTag() == .Union) { - const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index]; - return writer.print(".{s}", 
.{field_name}); - } else if (field_ptr.container_ty.isSlice()) { - switch (field_ptr.field_index) { - Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"), - Value.Payload.Slice.len_index => return writer.writeAll(".len"), - else => unreachable, - } - } + }, + .opt => |opt| switch (opt.val) { + .none => return writer.writeAll("null"), + else => |payload| { + val = payload.toValue(); + ty = ty.optionalChild(mod); + }, + }, + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| { + // Strip the 0 sentinel off of strings before printing + const zero_sent = blk: { + const sent = ty.sentinel(mod) orelse break :blk false; + break :blk sent.eql(Value.zero_u8, Type.u8, mod); + }; + const str = if (zero_sent) bytes[0 .. bytes.len - 1] else bytes; + return writer.print("\"{}\"", .{std.zig.fmtEscapes(str)}); + }, + .elems, .repeated_elem => return printAggregate(ty, val, writer, level, mod), + }, + .un => |un| { + try writer.writeAll(".{ "); + if (level > 0) { + try print(.{ + .ty = ty.unionTagTypeHypothetical(mod), + .val = un.tag.toValue(), + }, writer, level - 1, mod); + try writer.writeAll(" = "); + try print(.{ + .ty = ty.unionFieldType(un.tag.toValue(), mod), + .val = un.val.toValue(), + }, writer, level - 1, mod); + } else try writer.writeAll("..."); + return writer.writeAll(" }"); + }, + .memoized_call => unreachable, }, - .empty_array => return writer.writeAll(".{}"), - .enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), - .enum_field_index => { - return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data)}); - }, - .bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), - .str_lit => { - const str_lit = val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)}); - }, - .repeated => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - var i: u32 = 0; - try writer.writeAll(".{ "); - const elem_tv = TypedValue{ - .ty = ty.elemType2(), - .val = val.castTag(.repeated).?.data, - }; - const len = ty.arrayLen(); - const max_len = std.math.min(len, max_aggregate_items); - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - try print(elem_tv, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .empty_array_sentinel => { - if (level == 0) { - return writer.writeAll(".{ (sentinel) }"); - } - try writer.writeAll(".{ "); - try print(.{ - .ty = ty.elemType2(), - .val = ty.sentinel().?, - }, writer, level - 1, mod); - return writer.writeAll(" }"); - }, - .slice => { - if (level == 0) { - return writer.writeAll(".{ ... }"); - } - const payload = val.castTag(.slice).?.data; - const elem_ty = ty.elemType2(); - const len = payload.len.toUnsignedInt(target); - - if (elem_ty.eql(Type.u8, mod)) str: { - const max_len = @intCast(usize, std.math.min(len, max_string_len)); - var buf: [max_string_len]u8 = undefined; - - var i: u32 = 0; - while (i < max_len) : (i += 1) { - var elem_buf: Value.ElemValueBuffer = undefined; - const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf); - if (elem_val.isUndef()) break :str; - buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(target)) orelse break :str; - } - - // TODO would be nice if this had a bit of unicode awareness. 
- const truncated = if (len > max_string_len) " (truncated)" else ""; - return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); - } - - try writer.writeAll(".{ "); - - const max_len = std.math.min(len, max_aggregate_items); - var i: u32 = 0; - while (i < max_len) : (i += 1) { - if (i != 0) try writer.writeAll(", "); - var buf: Value.ElemValueBuffer = undefined; - try print(.{ - .ty = elem_ty, - .val = payload.ptr.elemValueBuffer(mod, i, &buf), - }, writer, level - 1, mod); - } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); - }, - .float_16 => return writer.print("{d}", .{val.castTag(.float_16).?.data}), - .float_32 => return writer.print("{d}", .{val.castTag(.float_32).?.data}), - .float_64 => return writer.print("{d}", .{val.castTag(.float_64).?.data}), - .float_80 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_80).?.data)}), - .float_128 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_128).?.data)}), - .@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}), - .eu_payload => { - val = val.castTag(.eu_payload).?.data; - ty = ty.errorUnionPayload(); - }, - .opt_payload => { - val = val.castTag(.opt_payload).?.data; - var buf: Type.Payload.ElemType = undefined; - ty = ty.optionalChild(&buf); - return print(.{ .ty = ty, .val = val }, writer, level, mod); - }, - .eu_payload_ptr => { - try writer.writeAll("&"); - - const data = val.castTag(.eu_payload_ptr).?.data; - - var ty_val: Value.Payload.Ty = .{ - .base = .{ .tag = .ty }, - .data = ty, - }; - - try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = Value.initPayload(&ty_val.base), - }, writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); - - var ptr_ty: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = data.container_ty, - }; - - try print(.{ - .ty = Type.initPayload(&ptr_ty.base), - .val = data.container_ptr, - }, writer, level - 1, mod); - - try writer.writeAll("))"); - return; - }, - .opt_payload_ptr => { - const data = val.castTag(.opt_payload_ptr).?.data; - - var ty_val: Value.Payload.Ty = .{ - .base = .{ .tag = .ty }, - .data = ty, - }; - - try writer.writeAll("@as("); - try print(.{ - .ty = Type.type, - .val = Value.initPayload(&ty_val.base), - }, writer, level - 1, mod); - - try writer.writeAll(", &(payload of "); - - var ptr_ty: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = data.container_ty, - }; - - try print(.{ - .ty = Type.initPayload(&ptr_ty.base), - .val = data.container_ptr, - }, writer, level - 1, mod); - - try writer.writeAll("))"); - return; - }, - - // TODO these should not appear in this function - .inferred_alloc => return writer.writeAll("(inferred allocation value)"), - .inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"), - .generic_poison_type => return writer.writeAll("(generic poison type)"), - .generic_poison => return writer.writeAll("(generic poison)"), - .runtime_value => return writer.writeAll("[runtime value]"), }; } + +fn printAggregate( + ty: Type, + val: Value, + writer: anytype, + level: u8, + mod: *Module, +) (@TypeOf(writer).Error || Allocator.Error)!void { + if (level == 0) { + return writer.writeAll(".{ ... 
}"); + } + if (ty.zigTypeTag(mod) == .Struct) { + try writer.writeAll(".{"); + const max_len = @min(ty.structFieldCount(mod), max_aggregate_items); + + for (0..max_len) |i| { + if (i != 0) try writer.writeAll(", "); + + const field_name = switch (mod.intern_pool.indexToKey(ty.toIntern())) { + .struct_type => |x| mod.structPtrUnwrap(x.index).?.fields.keys()[i].toOptional(), + .anon_struct_type => |x| if (x.isTuple()) .none else x.names[i].toOptional(), + else => unreachable, + }; + + if (field_name.unwrap()) |name| try writer.print(".{} = ", .{name.fmt(&mod.intern_pool)}); + try print(.{ + .ty = ty.structFieldType(i, mod), + .val = try val.fieldValue(mod, i), + }, writer, level - 1, mod); + } + if (ty.structFieldCount(mod) > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll("}"); + } else { + const elem_ty = ty.elemType2(mod); + const len = ty.arrayLen(mod); + + if (elem_ty.eql(Type.u8, mod)) str: { + const max_len = @intCast(usize, std.math.min(len, max_string_len)); + var buf: [max_string_len]u8 = undefined; + + var i: u32 = 0; + while (i < max_len) : (i += 1) { + const elem = try val.fieldValue(mod, i); + if (elem.isUndef(mod)) break :str; + buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str; + } + + const truncated = if (len > max_string_len) " (truncated)" else ""; + return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated }); + } + + try writer.writeAll(".{ "); + + const max_len = std.math.min(len, max_aggregate_items); + var i: u32 = 0; + while (i < max_len) : (i += 1) { + if (i != 0) try writer.writeAll(", "); + try print(.{ + .ty = elem_ty, + .val = try val.fieldValue(mod, i), + }, writer, level - 1, mod); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + return writer.writeAll(" }"); + } +} diff --git a/src/Zir.zig b/src/Zir.zig index 2bd5b21f79..c3a5f8e09b 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -19,6 +19,7 @@ const BigIntConst = std.math.big.int.Const; const BigIntMutable = std.math.big.int.Mutable; const Ast = std.zig.Ast; +const InternPool = @import("InternPool.zig"); const Zir = @This(); const Type = @import("type.zig").Type; const Value = @import("value.zig").Value; @@ -2041,448 +2042,103 @@ pub const Inst = struct { /// The position of a ZIR instruction within the `Zir` instructions array. pub const Index = u32; - /// A reference to a TypedValue or ZIR instruction. + /// A reference to ZIR instruction, or to an InternPool index, or neither. /// - /// If the Ref has a tag in this enum, it refers to a TypedValue. - /// - /// If the value of a Ref does not have a tag, it refers to a ZIR instruction. - /// - /// The first values after the the last tag refer to ZIR instructions which may - /// be derived by subtracting `typed_value_map.len`. - /// - /// When adding a tag to this enum, consider adding a corresponding entry to - /// `primitives` in astgen. + /// If the integer tag value is < InternPool.static_len, then it + /// corresponds to an InternPool index. Otherwise, this refers to a ZIR + /// instruction. /// /// The tag type is specified so that it is safe to bitcast between `[]u32` /// and `[]Ref`. 
    pub const Ref = enum(u32) {
+        u1_type = @enumToInt(InternPool.Index.u1_type),
+        u8_type = @enumToInt(InternPool.Index.u8_type),
+        i8_type = @enumToInt(InternPool.Index.i8_type),
+        u16_type = @enumToInt(InternPool.Index.u16_type),
+        i16_type = @enumToInt(InternPool.Index.i16_type),
+        u29_type = @enumToInt(InternPool.Index.u29_type),
+        u32_type = @enumToInt(InternPool.Index.u32_type),
+        i32_type = @enumToInt(InternPool.Index.i32_type),
+        u64_type = @enumToInt(InternPool.Index.u64_type),
+        i64_type = @enumToInt(InternPool.Index.i64_type),
+        u80_type = @enumToInt(InternPool.Index.u80_type),
+        u128_type = @enumToInt(InternPool.Index.u128_type),
+        i128_type = @enumToInt(InternPool.Index.i128_type),
+        usize_type = @enumToInt(InternPool.Index.usize_type),
+        isize_type = @enumToInt(InternPool.Index.isize_type),
+        c_char_type = @enumToInt(InternPool.Index.c_char_type),
+        c_short_type = @enumToInt(InternPool.Index.c_short_type),
+        c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type),
+        c_int_type = @enumToInt(InternPool.Index.c_int_type),
+        c_uint_type = @enumToInt(InternPool.Index.c_uint_type),
+        c_long_type = @enumToInt(InternPool.Index.c_long_type),
+        c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type),
+        c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type),
+        c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type),
+        c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type),
+        f16_type = @enumToInt(InternPool.Index.f16_type),
+        f32_type = @enumToInt(InternPool.Index.f32_type),
+        f64_type = @enumToInt(InternPool.Index.f64_type),
+        f80_type = @enumToInt(InternPool.Index.f80_type),
+        f128_type = @enumToInt(InternPool.Index.f128_type),
+        anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type),
+        bool_type = @enumToInt(InternPool.Index.bool_type),
+        void_type = @enumToInt(InternPool.Index.void_type),
+        type_type = @enumToInt(InternPool.Index.type_type),
+        anyerror_type = @enumToInt(InternPool.Index.anyerror_type),
+        comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type),
+        comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type),
+        noreturn_type = @enumToInt(InternPool.Index.noreturn_type),
+        anyframe_type = @enumToInt(InternPool.Index.anyframe_type),
+        null_type = @enumToInt(InternPool.Index.null_type),
+        undefined_type = @enumToInt(InternPool.Index.undefined_type),
+        enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type),
+        atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type),
+        atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type),
+        calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type),
+        address_space_type = @enumToInt(InternPool.Index.address_space_type),
+        float_mode_type = @enumToInt(InternPool.Index.float_mode_type),
+        reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type),
+        call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type),
+        prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type),
+        export_options_type = @enumToInt(InternPool.Index.export_options_type),
+        extern_options_type = @enumToInt(InternPool.Index.extern_options_type),
+        type_info_type = @enumToInt(InternPool.Index.type_info_type),
+        manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type),
+        manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type),
+        manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type),
+        single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type),
+        slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type),
+        slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type),
+        anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type),
+        generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type),
+        empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type),
+        undef = @enumToInt(InternPool.Index.undef),
+        zero = @enumToInt(InternPool.Index.zero),
+        zero_usize = @enumToInt(InternPool.Index.zero_usize),
+        zero_u8 = @enumToInt(InternPool.Index.zero_u8),
+        one = @enumToInt(InternPool.Index.one),
+        one_usize = @enumToInt(InternPool.Index.one_usize),
+        one_u8 = @enumToInt(InternPool.Index.one_u8),
+        four_u8 = @enumToInt(InternPool.Index.four_u8),
+        negative_one = @enumToInt(InternPool.Index.negative_one),
+        calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
+        calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
+        void_value = @enumToInt(InternPool.Index.void_value),
+        unreachable_value = @enumToInt(InternPool.Index.unreachable_value),
+        null_value = @enumToInt(InternPool.Index.null_value),
+        bool_true = @enumToInt(InternPool.Index.bool_true),
+        bool_false = @enumToInt(InternPool.Index.bool_false),
+        empty_struct = @enumToInt(InternPool.Index.empty_struct),
+        generic_poison = @enumToInt(InternPool.Index.generic_poison),
+
+        /// This tag is here to match Air and InternPool, however it is unused
+        /// for ZIR purposes.
+        var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type),

        /// This Ref does not correspond to any ZIR instruction or constant
        /// value and may instead be used as a sentinel to indicate null.
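// Editor's note: an illustrative sketch, not part of the patch. Under this
// encoding, any `Ref` whose integer value is below `InternPool.static_len`
// names one of the statically interned values mapped above, and everything at
// or beyond that threshold is a ZIR instruction index shifted by
// `ref_start_index` (see `indexToRef`/`refToIndex` later in this file):
//
//     const ref = Zir.indexToRef(5);             // encodes instruction index 5
//     assert(Zir.refToIndex(ref).? == 5);        // round-trips back to 5
//     assert(Zir.refToIndex(.u8_type) == null);  // static values are not instructions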
-        none,
-
-        u1_type,
-        u8_type,
-        i8_type,
-        u16_type,
-        i16_type,
-        u29_type,
-        u32_type,
-        i32_type,
-        u64_type,
-        i64_type,
-        u128_type,
-        i128_type,
-        usize_type,
-        isize_type,
-        c_char_type,
-        c_short_type,
-        c_ushort_type,
-        c_int_type,
-        c_uint_type,
-        c_long_type,
-        c_ulong_type,
-        c_longlong_type,
-        c_ulonglong_type,
-        c_longdouble_type,
-        f16_type,
-        f32_type,
-        f64_type,
-        f80_type,
-        f128_type,
-        anyopaque_type,
-        bool_type,
-        void_type,
-        type_type,
-        anyerror_type,
-        comptime_int_type,
-        comptime_float_type,
-        noreturn_type,
-        anyframe_type,
-        null_type,
-        undefined_type,
-        enum_literal_type,
-        atomic_order_type,
-        atomic_rmw_op_type,
-        calling_convention_type,
-        address_space_type,
-        float_mode_type,
-        reduce_op_type,
-        modifier_type,
-        prefetch_options_type,
-        export_options_type,
-        extern_options_type,
-        type_info_type,
-        manyptr_u8_type,
-        manyptr_const_u8_type,
-        fn_noreturn_no_args_type,
-        fn_void_no_args_type,
-        fn_naked_noreturn_no_args_type,
-        fn_ccc_void_no_args_type,
-        single_const_pointer_to_comptime_int_type,
-        const_slice_u8_type,
-        anyerror_void_error_union_type,
-        generic_poison_type,
-
-        /// `undefined` (untyped)
-        undef,
-        /// `0` (comptime_int)
-        zero,
-        /// `1` (comptime_int)
-        one,
-        /// `{}`
-        void_value,
-        /// `unreachable` (noreturn type)
-        unreachable_value,
-        /// `null` (untyped)
-        null_value,
-        /// `true`
-        bool_true,
-        /// `false`
-        bool_false,
-        /// `.{}` (untyped)
-        empty_struct,
-        /// `0` (usize)
-        zero_usize,
-        /// `1` (usize)
-        one_usize,
-        /// `std.builtin.CallingConvention.C`
-        calling_convention_c,
-        /// `std.builtin.CallingConvention.Inline`
-        calling_convention_inline,
-        /// Used for generic parameters where the type and value
-        /// is not known until generic function instantiation.
-        generic_poison,
-        /// This is a special type for variadic parameters of a function call.
-        /// Casts to it will validate that the type can be passed to a c
-        /// calling convention function.
- var_args_param, - + none = @enumToInt(InternPool.Index.none), _, - - pub const typed_value_map = std.enums.directEnumArray(Ref, TypedValue, 0, .{ - .none = undefined, - - .u1_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u1_type), - }, - .u8_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u8_type), - }, - .i8_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.i8_type), - }, - .u16_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u16_type), - }, - .i16_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.i16_type), - }, - .u29_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u29_type), - }, - .u32_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u32_type), - }, - .i32_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.i32_type), - }, - .u64_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u64_type), - }, - .i64_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.i64_type), - }, - .u128_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.u128_type), - }, - .i128_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.i128_type), - }, - .usize_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.usize_type), - }, - .isize_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.isize_type), - }, - .c_char_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_char_type), - }, - .c_short_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_short_type), - }, - .c_ushort_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_ushort_type), - }, - .c_int_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_int_type), - }, - .c_uint_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_uint_type), - }, - .c_long_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_long_type), - }, - .c_ulong_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_ulong_type), - }, - .c_longlong_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_longlong_type), - }, - .c_ulonglong_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_ulonglong_type), - }, - .c_longdouble_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.c_longdouble_type), - }, - .f16_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.f16_type), - }, - .f32_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.f32_type), - }, - .f64_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.f64_type), - }, - .f80_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.f80_type), - }, - .f128_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.f128_type), - }, - .anyopaque_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.anyopaque_type), - }, - .bool_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.bool_type), - }, - .void_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.void_type), - }, - .type_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.type_type), - }, - .anyerror_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.anyerror_type), - }, - .comptime_int_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.comptime_int_type), - }, - .comptime_float_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.comptime_float_type), - }, - .noreturn_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.noreturn_type), - }, - .anyframe_type = 
.{ - .ty = Type.initTag(.type), - .val = Value.initTag(.anyframe_type), - }, - .null_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.null_type), - }, - .undefined_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.undefined_type), - }, - .fn_noreturn_no_args_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.fn_noreturn_no_args_type), - }, - .fn_void_no_args_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.fn_void_no_args_type), - }, - .fn_naked_noreturn_no_args_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.fn_naked_noreturn_no_args_type), - }, - .fn_ccc_void_no_args_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.fn_ccc_void_no_args_type), - }, - .single_const_pointer_to_comptime_int_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.single_const_pointer_to_comptime_int_type), - }, - .const_slice_u8_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.const_slice_u8_type), - }, - .anyerror_void_error_union_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.anyerror_void_error_union_type), - }, - .generic_poison_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.generic_poison_type), - }, - .enum_literal_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.enum_literal_type), - }, - .manyptr_u8_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.manyptr_u8_type), - }, - .manyptr_const_u8_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.manyptr_const_u8_type), - }, - .atomic_order_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.atomic_order_type), - }, - .atomic_rmw_op_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.atomic_rmw_op_type), - }, - .calling_convention_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.calling_convention_type), - }, - .address_space_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.address_space_type), - }, - .float_mode_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.float_mode_type), - }, - .reduce_op_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.reduce_op_type), - }, - .modifier_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.modifier_type), - }, - .prefetch_options_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.prefetch_options_type), - }, - .export_options_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.export_options_type), - }, - .extern_options_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.extern_options_type), - }, - .type_info_type = .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.type_info_type), - }, - - .undef = .{ - .ty = Type.initTag(.undefined), - .val = Value.initTag(.undef), - }, - .zero = .{ - .ty = Type.initTag(.comptime_int), - .val = Value.initTag(.zero), - }, - .zero_usize = .{ - .ty = Type.initTag(.usize), - .val = Value.initTag(.zero), - }, - .one = .{ - .ty = Type.initTag(.comptime_int), - .val = Value.initTag(.one), - }, - .one_usize = .{ - .ty = Type.initTag(.usize), - .val = Value.initTag(.one), - }, - .void_value = .{ - .ty = Type.initTag(.void), - .val = Value.initTag(.void_value), - }, - .unreachable_value = .{ - .ty = Type.initTag(.noreturn), - .val = Value.initTag(.unreachable_value), - }, - .null_value = .{ - .ty = Type.initTag(.null), - .val = Value.initTag(.null_value), - }, - .bool_true = .{ - .ty = Type.initTag(.bool), - .val = Value.initTag(.bool_true), - }, - .bool_false = .{ - .ty = Type.initTag(.bool), 
-                .val = Value.initTag(.bool_false),
-            },
-            .empty_struct = .{
-                .ty = Type.initTag(.empty_struct_literal),
-                .val = Value.initTag(.empty_struct_value),
-            },
-            .calling_convention_c = .{
-                .ty = Type.initTag(.calling_convention),
-                .val = .{ .ptr_otherwise = &calling_convention_c_payload.base },
-            },
-            .calling_convention_inline = .{
-                .ty = Type.initTag(.calling_convention),
-                .val = .{ .ptr_otherwise = &calling_convention_inline_payload.base },
-            },
-            .generic_poison = .{
-                .ty = Type.initTag(.generic_poison),
-                .val = Value.initTag(.generic_poison),
-            },
-            .var_args_param = undefined,
-        });
-    };
-
-    /// We would like this to be const but `Value` wants a mutable pointer for
-    /// its payload field. Nothing should mutate this though.
-    var calling_convention_c_payload: Value.Payload.U32 = .{
-        .base = .{ .tag = .enum_field_index },
-        .data = @enumToInt(std.builtin.CallingConvention.C),
-    };
-
-    /// We would like this to be const but `Value` wants a mutable pointer for
-    /// its payload field. Nothing should mutate this though.
-    var calling_convention_inline_payload: Value.Payload.U32 = .{
-        .base = .{ .tag = .enum_field_index },
-        .data = @enumToInt(std.builtin.CallingConvention.Inline),
    };

    /// All instructions have an 8-byte payload, which is contained within
@@ -4163,13 +3819,14 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
    };
}

-const ref_start_index: u32 = Inst.Ref.typed_value_map.len;
+pub const ref_start_index: u32 = InternPool.static_len;

 pub fn indexToRef(inst: Inst.Index) Inst.Ref {
     return @intToEnum(Inst.Ref, ref_start_index + inst);
 }

 pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
+    assert(inst != .none);
     const ref_int = @enumToInt(inst);
     if (ref_int >= ref_start_index) {
         return ref_int - ref_start_index;
@@ -4177,3 +3834,8 @@ pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
         return null;
     }
 }
+
+pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index {
+    if (inst == .none) return null;
+    return refToIndex(inst);
+}
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 971ed4749d..bf945e6983 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -328,7 +328,7 @@ const Self = @This();
 pub fn generate(
     bin_file: *link.File,
     src_loc: Module.SrcLoc,
-    module_fn: *Module.Fn,
+    module_fn_index: Module.Fn.Index,
     air: Air,
     liveness: Liveness,
     code: *std.ArrayList(u8),
@@ -339,6 +339,7 @@ pub fn generate(
     }

     const mod = bin_file.options.module.?;
+    const module_fn = mod.funcPtr(module_fn_index);
     const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
     assert(fn_owner_decl.has_tv);
     const fn_type = fn_owner_decl.ty;
@@ -471,7 +472,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
 }

 fn gen(self: *Self) !void {
-    const cc = self.fn_type.fnCallingConvention();
+    const mod = self.bin_file.options.module.?;
+    const cc = self.fn_type.fnCallingConvention(mod);
     if (cc != .Naked) {
         // stp fp, lr, [sp, #-16]!
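// Editor's note: an illustrative sketch, not part of the patch. `generate`
// now takes a `Module.Fn.Index` handle instead of a `*Module.Fn`, and the
// pointer is recovered on demand:
//
//     const module_fn = mod.funcPtr(module_fn_index); // handle -> *Module.Fn
//     const decl = mod.declPtr(module_fn.owner_decl); // then the owning Decl
//
// My assumption about the motivation: an index stays valid even if the
// module's backing storage for functions is reallocated, where a raw pointer
// would not.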
_ = try self.addInst(.{ @@ -520,10 +522,10 @@ fn gen(self: *Self) !void { const inst = self.air.getMainBody()[arg_index]; assert(self.air.instructions.items(.tag)[inst] == .arg); - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); - const abi_align = ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, ty.abiSize(mod)); + const abi_align = ty.abiAlignment(mod); const stack_offset = try self.allocMem(abi_size, abi_align, inst); try self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); @@ -652,13 +654,14 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; - } const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); @@ -842,8 +845,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -916,8 +918,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. + assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -951,8 +952,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; @@ -1026,31 +1027,31 @@ fn allocMem( /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const elem_ty = self.air.typeOfIndex(inst).elemType(); + const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst).childType(mod); - if (!elem_ty.hasRuntimeBits()) { + if (!elem_ty.hasRuntimeBits(mod)) { // return the stack offset 0. Stack offset 0 will be where all // zero-sized stack allocations live as non-zero-sized // allocations will always have an offset > 0. 
return @as(u32, 0); } - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); return self.allocMem(abi_size, abi_align, inst); } fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue { - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); if (reg_ok) { // Make sure the type can fit in a register before we try to allocate one. @@ -1066,7 +1067,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst } pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { - const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst); + const stack_mcv = try self.allocRegOrMem(self.typeOfIndex(inst), false, inst); log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); @@ -1078,14 +1079,14 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } /// Save the current instruction stored in the compare flags if /// occupied fn spillCompareFlagsIfOccupied(self: *Self) !void { if (self.compare_flags_inst) |inst_to_save| { - const ty = self.air.typeOfIndex(inst_to_save); + const ty = self.typeOfIndex(inst_to_save); const mcv = self.getResolvedInstValue(inst_to_save); const new_mcv = switch (mcv) { .compare_flags => try self.allocRegOrMem(ty, true, inst_to_save), @@ -1093,7 +1094,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void { else => unreachable, // mcv doesn't occupy the compare flags }; - try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); + try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv); log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1125,9 +1126,9 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { const raw_reg = try self.register_manager.allocReg(reg_owner, gp); - const ty = self.air.typeOfIndex(reg_owner); + const ty = self.typeOfIndex(reg_owner); const reg = self.registerAlias(raw_reg, ty); - try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv); + try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } @@ -1137,17 +1138,14 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { } fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const result: MCValue = switch (self.ret_mcv) { .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) }, .stack_offset => blk: { // self.ret_mcv is an address to where this function // should store its result into - const ret_ty = self.fn_type.fnReturnType(); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ret_ty = self.fn_type.fnReturnType(mod); + const ptr_ty = try mod.singleMutPtrType(ret_ty); // addr_reg will contain the address of where to store the // result into @@ -1177,13 +1175,14 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); + const mod = self.bin_file.options.module.?; const operand = ty_op.operand; const operand_mcv = try self.resolveInst(operand); - const operand_ty = self.air.typeOf(operand); - const operand_info = operand_ty.intInfo(self.target.*); + const operand_ty = self.typeOf(operand); + const operand_info = operand_ty.intInfo(mod); - const dest_ty = self.air.typeOfIndex(inst); - const dest_info = dest_ty.intInfo(self.target.*); + const dest_ty = self.typeOfIndex(inst); + const dest_info = dest_ty.intInfo(mod); const result: MCValue = result: { const operand_lock: ?RegisterLock = switch (operand_mcv) { @@ -1199,14 +1198,14 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (dest_info.bits > operand_info.bits) { const dest_mcv = try self.allocRegOrMem(dest_ty, true, inst); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest_mcv, truncated); + try self.setRegOrMem(self.typeOfIndex(inst), dest_mcv, truncated); break :result dest_mcv; } else { if (self.reuseOperand(inst, operand, 0, truncated)) { break :result truncated; } else { const dest_mcv = try self.allocRegOrMem(dest_ty, true, inst); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest_mcv, truncated); + try self.setRegOrMem(self.typeOfIndex(inst), dest_mcv, truncated); break :result dest_mcv; } } @@ -1257,8 +1256,9 @@ fn trunc( operand_ty: Type, dest_ty: Type, ) !MCValue { - const info_a = operand_ty.intInfo(self.target.*); - const info_b = dest_ty.intInfo(self.target.*); + const mod = self.bin_file.options.module.?; + const info_a = operand_ty.intInfo(mod); + const info_b = dest_ty.intInfo(mod); if (info_b.bits <= 64) { const operand_reg = switch (operand) { @@ -1300,8 +1300,8 @@ fn trunc( fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: 
{ break :blk try self.trunc(inst, operand, operand_ty, dest_ty); @@ -1319,15 +1319,16 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); switch (operand) { .dead => unreachable, .unreach => unreachable, .compare_flags => |cond| break :result MCValue{ .compare_flags = cond.negate() }, else => { - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Bool => { // TODO convert this to mvn + and const op_reg = switch (operand) { @@ -1361,7 +1362,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }, .Vector => return self.fail("TODO bitwise not for vectors", .{}), .Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits <= 64) { const op_reg = switch (operand) { .register => |r| r, @@ -1413,13 +1414,13 @@ fn minMax( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) !MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO ARM min/max on floats", .{}), .Vector => return self.fail("TODO ARM min/max on vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { var lhs_reg: Register = undefined; var rhs_reg: Register = undefined; @@ -1488,8 +1489,8 @@ fn minMax( fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -1508,9 +1509,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const len_ty = self.typeOf(bin_op.rhs); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -1907,12 +1908,12 @@ fn addSub( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const lhs_immediate = try lhs_bind.resolveToImmediate(self); const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -1968,11 +1969,11 @@ fn mul( 
maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // TODO add optimisations for multiplication // with immediates, for example a * 2 can be @@ -1999,7 +2000,8 @@ fn divFloat( _ = rhs_ty; _ = maybe_inst; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO div_float", .{}), .Vector => return self.fail("TODO div_float on vectors", .{}), else => unreachable, @@ -2015,12 +2017,12 @@ fn divTrunc( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO div on floats", .{}), .Vector => return self.fail("TODO div on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { switch (int_info.signedness) { .signed => { @@ -2049,12 +2051,12 @@ fn divFloor( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO div on floats", .{}), .Vector => return self.fail("TODO div on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { switch (int_info.signedness) { .signed => { @@ -2082,12 +2084,12 @@ fn divExact( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO div on floats", .{}), .Vector => return self.fail("TODO div on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { switch (int_info.signedness) { .signed => { @@ -2118,12 +2120,12 @@ fn rem( _ = maybe_inst; const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO rem/mod on floats", .{}), .Vector => return self.fail("TODO rem/mod on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { var lhs_reg: Register = undefined; var rhs_reg: Register = undefined; @@ -2188,7 +2190,8 @@ fn modulo( _ = rhs_ty; _ = maybe_inst; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO mod on floats", .{}), .Vector => return self.fail("TODO mod on vectors", .{}), .Int => return self.fail("TODO mod on ints", .{}), @@ -2205,10 +2208,11 @@ fn wrappingArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = 
lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // Generate an add/sub/mul const result: MCValue = switch (tag) { @@ -2240,11 +2244,11 @@ fn bitwise( maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { const mod = self.bin_file.options.module.?; - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // TODO implement bitwise operations with immediates const mir_tag: Mir.Inst.Tag = switch (tag) { @@ -2274,10 +2278,11 @@ fn shiftExact( ) InnerError!MCValue { _ = rhs_ty; - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const rhs_immediate = try rhs_bind.resolveToImmediate(self); @@ -2323,10 +2328,11 @@ fn shiftNormal( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // Generate a shl_exact/shr_exact const result: MCValue = switch (tag) { @@ -2362,7 +2368,8 @@ fn booleanOp( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Bool => { assert((try lhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema assert((try rhs_bind.resolveToImmediate(self)) == null); // should have been handled by Sema @@ -2388,17 +2395,17 @@ fn ptrArithmetic( rhs_ty: Type, maybe_inst: ?Air.Inst.Index, ) InnerError!MCValue { - switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { - const mod = self.bin_file.options.module.?; assert(rhs_ty.eql(Type.usize, mod)); const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), + const elem_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); const base_tag: Air.Inst.Tag = switch (tag) { .ptr_add => .add, @@ -2426,8 +2433,8 @@ fn ptrArithmetic( fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; @@ -2477,8 +2484,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { 
@@ -2477,8 +2484,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {

 fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);

     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
@@ -2511,23 +2518,23 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const tag = self.air.instructions.items(.tag)[inst];
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);

-        const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_ty = self.typeOfIndex(inst);
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));

-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
             .Int => {
-                const mod = self.bin_file.options.module.?;
                 assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 switch (int_info.bits) {
                     1...31, 33...63 => {
                         const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -2565,7 +2572,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
                         });

                         try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
-                        try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });
+                        try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });

                         break :result MCValue{ .stack_offset = stack_offset };
                     },
@@ -2639,24 +2646,23 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = result: {
-        const mod = self.bin_file.options.module.?;
-
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);

-        const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_ty = self.typeOfIndex(inst);
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));

-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
             .Int => {
                 assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 if (int_info.bits <= 32) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -2709,7 +2715,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     }

                     try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
-                    try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });
+                    try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });

                     break :result MCValue{ .stack_offset = stack_offset };
                 } else if (int_info.bits <= 64) {
@@ -2849,7 +2855,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits);

                     try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
-                    try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });
+                    try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });

                     break :result MCValue{ .stack_offset = stack_offset };
                 } else return self.fail("TODO implement mul_with_overflow for integers > u64/i64", .{});
@@ -2864,21 +2870,22 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);

-        const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_ty = self.typeOfIndex(inst);
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));

-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
             .Int => {
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 if (int_info.bits <= 64) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -2981,7 +2988,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     });

                     try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg });
-                    try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });
+                    try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .compare_flags = .ne });

                     break :result MCValue{ .stack_offset = stack_offset };
                 } else {
@@ -3003,7 +3010,7 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {

 fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const optional_ty = self.air.typeOf(ty_op.operand);
+        const optional_ty = self.typeOf(ty_op.operand);
         const mcv = try self.resolveInst(ty_op.operand);
         break :result try self.optionalPayload(inst, mcv, optional_ty);
     };
@@ -3011,10 +3018,10 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty: Type) !MCValue {
-    var opt_buf: Type.Payload.ElemType = undefined;
-    const payload_ty = optional_ty.optionalChild(&opt_buf);
-    if (!payload_ty.hasRuntimeBits()) return MCValue.none;
-    if (optional_ty.isPtrLikeOptional()) {
+    const mod = self.bin_file.options.module.?;
+    const payload_ty = optional_ty.optionalChild(mod);
+    if (!payload_ty.hasRuntimeBits(mod)) return MCValue.none;
+    if (optional_ty.isPtrLikeOptional(mod)) {
         // TODO should we reuse the operand here?
         const raw_reg = try self.register_manager.allocReg(inst, gp);
         const reg = self.registerAlias(raw_reg, payload_ty);
@@ -3055,16 +3062,17 @@ fn errUnionErr(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    const err_ty = error_union_ty.errorUnionSet();
-    const payload_ty = error_union_ty.errorUnionPayload();
-    if (err_ty.errorSetIsEmpty()) {
+    const mod = self.bin_file.options.module.?;
+    const err_ty = error_union_ty.errorUnionSet(mod);
+    const payload_ty = error_union_ty.errorUnionPayload(mod);
+    if (err_ty.errorSetIsEmpty(mod)) {
         return MCValue{ .immediate = 0 };
     }

-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return try error_union_bind.resolveToMcv(self);
     }

-    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
+    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -3086,7 +3094,7 @@ fn errUnionErr(
             );

             const err_bit_offset = err_offset * 8;
-            const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8;
+            const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;

             _ = try self.addInst(.{
                 .tag = .ubfx, // errors are unsigned integers
@@ -3120,7 +3128,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
-        const error_union_ty = self.air.typeOf(ty_op.operand);
+        const error_union_ty = self.typeOf(ty_op.operand);

         break :result try self.errUnionErr(error_union_bind, error_union_ty, inst);
     };
@@ -3134,16 +3142,17 @@ fn errUnionPayload(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    const err_ty = error_union_ty.errorUnionSet();
-    const payload_ty = error_union_ty.errorUnionPayload();
-    if (err_ty.errorSetIsEmpty()) {
+    const mod = self.bin_file.options.module.?;
+    const err_ty = error_union_ty.errorUnionSet(mod);
+    const payload_ty = error_union_ty.errorUnionPayload(mod);
+    if (err_ty.errorSetIsEmpty(mod)) {
         return try error_union_bind.resolveToMcv(self);
     }

-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return MCValue.none;
     }

-    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*));
+    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -3165,10 +3174,10 @@ fn errUnionPayload(
             );

             const payload_bit_offset = payload_offset * 8;
-            const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8;
+            const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8;

             _ = try self.addInst(.{
-                .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx,
+                .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
                 .data = .{
                     .rr_lsb_width = .{
                         // Set both registers to the X variant to get the full width
@@ -3199,7 +3208,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
-        const error_union_ty = self.air.typeOf(ty_op.operand);
+        const error_union_ty = self.typeOf(ty_op.operand);

         break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst);
     };
@@ -3245,6 +3254,7 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;

     if (self.liveness.isUnused(inst)) {
@@ -3252,12 +3262,12 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
     }

     const result: MCValue = result: {
-        const payload_ty = self.air.typeOf(ty_op.operand);
-        if (!payload_ty.hasRuntimeBits()) {
+        const payload_ty = self.typeOf(ty_op.operand);
+        if (!payload_ty.hasRuntimeBits(mod)) {
             break :result MCValue{ .immediate = 1 };
         }

-        const optional_ty = self.air.typeOfIndex(inst);
+        const optional_ty = self.typeOfIndex(inst);
         const operand = try self.resolveInst(ty_op.operand);
         const operand_lock: ?RegisterLock = switch (operand) {
             .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -3265,7 +3275,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
         };
         defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);

-        if (optional_ty.isPtrLikeOptional()) {
+        if (optional_ty.isPtrLikeOptional(mod)) {
             // TODO should we check if we can reuse the operand?
             const raw_reg = try self.register_manager.allocReg(inst, gp);
             const reg = self.registerAlias(raw_reg, payload_ty);
@@ -3273,9 +3283,9 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
             break :result MCValue{ .register = reg };
         }

-        const optional_abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
-        const optional_abi_align = optional_ty.abiAlignment(self.target.*);
-        const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
+        const optional_abi_size = @intCast(u32, optional_ty.abiSize(mod));
+        const optional_abi_align = optional_ty.abiAlignment(mod);
+        const offset = @intCast(u32, payload_ty.abiSize(mod));

         const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
         try self.genSetStack(payload_ty, stack_offset, operand);
@@ -3289,19 +3299,20 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {

 /// T to E!T
 fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_ty = self.air.getRefType(ty_op.ty);
-        const error_ty = error_union_ty.errorUnionSet();
-        const payload_ty = error_union_ty.errorUnionPayload();
+        const error_ty = error_union_ty.errorUnionSet(mod);
+        const payload_ty = error_union_ty.errorUnionPayload(mod);
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;

-        const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const abi_align = error_union_ty.abiAlignment(self.target.*);
+        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_align = error_union_ty.abiAlignment(mod);
         const stack_offset = try self.allocMem(abi_size, abi_align, inst);
-        const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
-        const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+        const payload_off = errUnionPayloadOffset(payload_ty, mod);
+        const err_off = errUnionErrorOffset(payload_ty, mod);
         try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
         try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });

@@ -3314,17 +3325,18 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
 fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
         const error_union_ty = self.air.getRefType(ty_op.ty);
-        const error_ty = error_union_ty.errorUnionSet();
-        const payload_ty = error_union_ty.errorUnionPayload();
+        const error_ty = error_union_ty.errorUnionSet(mod);
+        const payload_ty = error_union_ty.errorUnionPayload(mod);
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;

-        const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const abi_align = error_union_ty.abiAlignment(self.target.*);
+        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_align = error_union_ty.abiAlignment(mod);
         const stack_offset = try self.allocMem(abi_size, abi_align, inst);
-        const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
-        const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+        const payload_off = errUnionPayloadOffset(payload_ty, mod);
+        const err_off = errUnionErrorOffset(payload_ty, mod);
         try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand);
         try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);

@@ -3416,11 +3428,11 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const slice_ty = self.air.typeOf(bin_op.lhs);
-    const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
-        var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-        const ptr_ty = slice_ty.slicePtrFieldType(&buf);
+    const slice_ty = self.typeOf(bin_op.lhs);
+    const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
+        const ptr_ty = slice_ty.slicePtrFieldType(mod);

         const slice_mcv = try self.resolveInst(bin_op.lhs);
         const base_mcv = slicePtr(slice_mcv);
@@ -3440,8 +3452,9 @@ fn ptrElemVal(
     ptr_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    const elem_ty = ptr_ty.childType();
-    const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = ptr_ty.childType(mod);
+    const elem_size = @intCast(u32, elem_ty.abiSize(mod));

     // TODO optimize for elem_sizes of 1, 2, 4, 8
     switch (elem_size) {
@@ -3465,8 +3478,8 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
         const base_bind: ReadArg.Bind = .{ .mcv = base_mcv };
         const index_bind: ReadArg.Bind = .{ .inst = extra.rhs };

-        const slice_ty = self.air.typeOf(extra.lhs);
-        const index_ty = self.air.typeOf(extra.rhs);
+        const slice_ty = self.typeOf(extra.lhs);
+        const index_ty = self.typeOf(extra.rhs);

         const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ty, index_ty, null);
         break :result addr;
@@ -3481,9 +3494,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
-    const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
+    const ptr_ty = self.typeOf(bin_op.lhs);
+    const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
         const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
         const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };

@@ -3499,8 +3513,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
         const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const index_bind: ReadArg.Bind = .{ .inst = extra.rhs };

-        const ptr_ty = self.air.typeOf(extra.lhs);
-        const index_ty = self.air.typeOf(extra.rhs);
+        const ptr_ty = self.typeOf(extra.lhs);
+        const index_ty = self.typeOf(extra.rhs);

         const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, index_ty, null);
         break :result addr;
@@ -3597,8 +3611,9 @@ fn reuseOperand(
 }

 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
-    const elem_ty = ptr_ty.elemType();
-    const elem_size = elem_ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = ptr_ty.childType(mod);
+    const elem_size = elem_ty.abiSize(mod);

     switch (ptr) {
         .none => unreachable,
@@ -3753,14 +3768,14 @@ fn genInlineMemset(
 ) !void {
     const dst_reg = switch (dst) {
         .register => |r| r,
-        else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst),
+        else => try self.copyToTmpRegister(Type.manyptr_u8, dst),
     };
     const dst_reg_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);

     const val_reg = switch (val) {
         .register => |r| r,
-        else => try self.copyToTmpRegister(Type.initTag(.u8), val),
+        else => try self.copyToTmpRegister(Type.u8, val),
     };
     const val_reg_lock = self.register_manager.lockReg(val_reg);
     defer if (val_reg_lock) |lock| self.register_manager.unlockReg(lock);
@@ -3844,15 +3859,16 @@ fn genInlineMemsetCode(
 }

 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const elem_ty = self.air.typeOfIndex(inst);
-    const elem_size = elem_ty.abiSize(self.target.*);
+    const elem_ty = self.typeOfIndex(inst);
+    const elem_size = elem_ty.abiSize(mod);
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBits())
+        if (!elem_ty.hasRuntimeBits(mod))
             break :result MCValue.none;

         const ptr = try self.resolveInst(ty_op.operand);
-        const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
+        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod);
         if (self.liveness.isUnused(inst) and !is_volatile)
             break :result MCValue.dead;

@@ -3867,18 +3883,19 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(elem_ty, true, inst);
             }
         };
-        try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand));
+        try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand));
         break :result dst_mcv;
     };
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
 }

 fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);

     const tag: Mir.Inst.Tag = switch (abi_size) {
-        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
-        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
+        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_immediate else .ldrb_immediate,
+        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_immediate else .ldrh_immediate,
         4 => .ldr_immediate,
         8 => .ldr_immediate,
         3, 5, 6, 7 => return self.fail("TODO: genLdrRegister for more abi_sizes", .{}),
@@ -3896,7 +3913,8 @@ fn genLdrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
 }

 fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type) !void {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);

     const tag: Mir.Inst.Tag = switch (abi_size) {
         1 => .strb_immediate,
@@ -3917,8 +3935,9 @@ fn genStrRegister(self: *Self, value_reg: Register, addr_reg: Register, ty: Type
 }

 fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     log.debug("store: storing {} to {}", .{ value, ptr });
-    const abi_size = value_ty.abiSize(self.target.*);
+    const abi_size = value_ty.abiSize(mod);

     switch (ptr) {
         .none => unreachable,
@@ -4046,8 +4065,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ptr = try self.resolveInst(bin_op.lhs);
     const value = try self.resolveInst(bin_op.rhs);
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
-    const value_ty = self.air.typeOf(bin_op.rhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
+    const value_ty = self.typeOf(bin_op.rhs);

     try self.store(ptr, value, ptr_ty, value_ty);

@@ -4069,10 +4088,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {

 fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
     return if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
         const mcv = try self.resolveInst(operand);
-        const ptr_ty = self.air.typeOf(operand);
-        const struct_ty = ptr_ty.childType();
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+        const ptr_ty = self.typeOf(operand);
+        const struct_ty = ptr_ty.childType(mod);
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
         switch (mcv) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -4093,10 +4113,11 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const operand = extra.struct_operand;
     const index = extra.field_index;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
         const mcv = try self.resolveInst(operand);
-        const struct_ty = self.air.typeOf(operand);
-        const struct_field_ty = struct_ty.structFieldType(index);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+        const struct_ty = self.typeOf(operand);
+        const struct_field_ty = struct_ty.structFieldType(index, mod);
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));

         switch (mcv) {
             .dead, .unreach => unreachable,
@@ -4142,12 +4163,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const field_ptr = try self.resolveInst(extra.field_ptr);
-        const struct_ty = self.air.getRefType(ty_pl.ty).childType();
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*));
+        const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod);
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
         switch (field_ptr) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -4169,7 +4191,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
     while (self.args[arg_index] == .none) arg_index += 1;
     self.arg_index = arg_index + 1;

-    const ty = self.air.typeOfIndex(inst);
+    const ty = self.typeOfIndex(inst);
     const tag = self.air.instructions.items(.tag)[inst];
     const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
     const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
@@ -4222,11 +4244,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
     const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
-    const ty = self.air.typeOf(callee);
+    const ty = self.typeOf(callee);
+    const mod = self.bin_file.options.module.?;

-    const fn_ty = switch (ty.zigTypeTag()) {
+    const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
-        .Pointer => ty.childType(),
+        .Pointer => ty.childType(mod),
         else => unreachable,
     };

@@ -4245,18 +4268,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     if (info.return_value == .stack_offset) {
         log.debug("airCall: return by reference", .{});
-        const ret_ty = fn_ty.fnReturnType();
-        const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
-        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
+        const ret_ty = fn_ty.fnReturnType(mod);
+        const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
+        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
         const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);

         const ret_ptr_reg = self.registerAlias(.x0, Type.usize);

-        var ptr_ty_payload: Type.Payload.ElemType = .{
-            .base = .{ .tag = .single_mut_pointer },
-            .data = ret_ty,
-        };
-        const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+        const ptr_ty = try mod.singleMutPtrType(ret_ty);
         try self.register_manager.getReg(ret_ptr_reg, null);
         try self.genSetReg(ptr_ty, ret_ptr_reg, .{ .ptr_stack_offset = stack_offset });
@@ -4268,7 +4287,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier

     for (info.args, 0..) |mc_arg, arg_i| {
         const arg = args[arg_i];
-        const arg_ty = self.air.typeOf(arg);
+        const arg_ty = self.typeOf(arg);
         const arg_mcv = try self.resolveInst(args[arg_i]);

         switch (mc_arg) {
@@ -4289,21 +4308,18 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier

     // Due to incremental compilation, how function calls are generated depends
     // on linking.
-    const mod = self.bin_file.options.module.?;
-    if (self.air.value(callee)) |func_value| {
-        if (func_value.castTag(.function)) |func_payload| {
-            const func = func_payload.data;
-
+    if (try self.air.value(callee, mod)) |func_value| {
+        if (func_value.getFunction(mod)) |func| {
             if (self.bin_file.cast(link.File.Elf)) |elf_file| {
                 const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
                 const atom = elf_file.getAtom(atom_index);
                 _ = try atom.getOrCreateOffsetTableEntry(elf_file);
                 const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
-                try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
+                try self.genSetReg(Type.usize, .x30, .{ .memory = got_addr });
             } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                 const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
                 const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
-                try self.genSetReg(Type.initTag(.u64), .x30, .{
+                try self.genSetReg(Type.u64, .x30, .{
                     .linker_load = .{
                         .type = .got,
                         .sym_index = sym_index,
@@ -4312,7 +4328,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
             } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
                 const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
                 const sym_index = coff_file.getAtom(atom).getSymbolIndex().?;
-                try self.genSetReg(Type.initTag(.u64), .x30, .{
+                try self.genSetReg(Type.u64, .x30, .{
                     .linker_load = .{
                         .type = .got,
                         .sym_index = sym_index,
@@ -4326,17 +4342,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                 const got_addr = p9.bases.data;
                 const got_index = decl_block.got_index.?;
                 const fn_got_addr = got_addr + got_index * ptr_bytes;
-                try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
+                try self.genSetReg(Type.usize, .x30, .{ .memory = fn_got_addr });
             } else unreachable;

             _ = try self.addInst(.{
                 .tag = .blr,
                 .data = .{ .reg = .x30 },
             });
-        } else if (func_value.castTag(.extern_fn)) |func_payload| {
-            const extern_fn = func_payload.data;
-            const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0);
-            const lib_name = mem.sliceTo(extern_fn.lib_name, 0);
+        } else if (func_value.getExternFunc(mod)) |extern_func| {
+            const decl_name = mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name);
+            const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name);
             if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                 const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name);
                 const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
@@ -4352,7 +4367,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
                 });
             } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
                 const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name);
-                try self.genSetReg(Type.initTag(.u64), .x30, .{
+                try self.genSetReg(Type.u64, .x30, .{
                     .linker_load = .{
                         .type = .import,
                         .sym_index = sym_index,
@@ -4369,7 +4384,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
             return self.fail("TODO implement calling bitcasted functions", .{});
         }
     } else {
-        assert(ty.zigTypeTag() == .Pointer);
+        assert(ty.zigTypeTag(mod) == .Pointer);
         const mcv = try self.resolveInst(callee);
         try self.genSetReg(ty, .x30, mcv);
@@ -4407,14 +4422,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 }

 fn airRet(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const operand = try self.resolveInst(un_op);
-    const ret_ty = self.fn_type.fnReturnType();
+    const ret_ty = self.fn_type.fnReturnType(mod);

     switch (self.ret_mcv) {
         .none => {},
         .immediate => {
-            assert(ret_ty.isError());
+            assert(ret_ty.isError(mod));
         },
         .register => |reg| {
             // Return result by value
@@ -4425,11 +4441,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
             //
             // self.ret_mcv is an address to where this function
             // should store its result into
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);
             try self.store(self.ret_mcv, operand, ptr_ty, ret_ty);
         },
         else => unreachable,
@@ -4442,10 +4454,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const ptr = try self.resolveInst(un_op);
-    const ptr_ty = self.air.typeOf(un_op);
-    const ret_ty = self.fn_type.fnReturnType();
+    const ptr_ty = self.typeOf(un_op);
+    const ret_ty = self.fn_type.fnReturnType(mod);

     switch (self.ret_mcv) {
         .none => {},
@@ -4465,8 +4478,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
             // location.
             const op_inst = Air.refToIndex(un_op).?;
             if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) {
-                const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
-                const abi_align = ret_ty.abiAlignment(self.target.*);
+                const abi_size = @intCast(u32, ret_ty.abiSize(mod));
+                const abi_align = ret_ty.abiAlignment(mod);

                 const offset = try self.allocMem(abi_size, abi_align, null);
@@ -4485,7 +4498,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {

 fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);

     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
         break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op);
@@ -4501,29 +4514,28 @@ fn cmp(
     lhs_ty: Type,
     op: math.CompareOperator,
 ) !MCValue {
-    var int_buffer: Type.Payload.Bits = undefined;
-    const int_ty = switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    const int_ty = switch (lhs_ty.zigTypeTag(mod)) {
         .Optional => blk: {
-            var opt_buffer: Type.Payload.ElemType = undefined;
-            const payload_ty = lhs_ty.optionalChild(&opt_buffer);
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
-                break :blk Type.initTag(.u1);
-            } else if (lhs_ty.isPtrLikeOptional()) {
+            const payload_ty = lhs_ty.optionalChild(mod);
+            if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+                break :blk Type.u1;
+            } else if (lhs_ty.isPtrLikeOptional(mod)) {
                 break :blk Type.usize;
             } else {
                 return self.fail("TODO ARM cmp non-pointer optionals", .{});
             }
         },
         .Float => return self.fail("TODO ARM cmp floats", .{}),
-        .Enum => lhs_ty.intTagType(&int_buffer),
+        .Enum => lhs_ty.intTagType(mod),
         .Int => lhs_ty,
-        .Bool => Type.initTag(.u1),
+        .Bool => Type.u1,
         .Pointer => Type.usize,
-        .ErrorSet => Type.initTag(.u16),
+        .ErrorSet => Type.u16,
         else => unreachable,
     };

-    const int_info = int_ty.intInfo(self.target.*);
+    const int_info = int_ty.intInfo(mod);
     if (int_info.bits <= 64) {
         try self.spillCompareFlagsIfOccupied();
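Both `airCall` and `airRet` above drop the old trick of stack-allocating a `Type.Payload.ElemType` and wrapping it with `Type.initPayload`, in favor of a single call to `mod.singleMutPtrType`, which interns the `*T` type in the Module. The two shapes side by side, as a sketch; `ret_ty` stands for any pointee type, and the `try` reflects that interning can allocate and therefore fail:

    // Before: a temporary payload struct on the stack backed the pointer type.
    var ptr_ty_payload: Type.Payload.ElemType = .{
        .base = .{ .tag = .single_mut_pointer },
        .data = ret_ty,
    };
    const ptr_ty = Type.initPayload(&ptr_ty_payload.base);

    // After: the pointer type is interned, so it no longer dangles once
    // this stack frame is gone.
    const ptr_ty = try mod.singleMutPtrType(ret_ty);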
@@ -4609,8 +4621,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
+    const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
+    const mod = self.bin_file.options.module.?;
+    const function = mod.funcPtr(ty_fn.func);
     // TODO emit debug info for function change
     _ = function;
     return self.finishAir(inst, .dead, .{ .none, .none, .none });
@@ -4625,7 +4638,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const operand = pl_op.operand;
     const tag = self.air.instructions.items(.tag)[inst];
-    const ty = self.air.typeOf(operand);
+    const ty = self.typeOf(operand);
     const mcv = try self.resolveInst(operand);
     const name = self.air.nullTerminatedString(pl_op.payload);

@@ -4687,8 +4700,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
     // whether it needs to be spilled in the branches
     if (self.liveness.operandDies(inst, 0)) {
         const op_int = @enumToInt(pl_op.operand);
-        if (op_int >= Air.Inst.Ref.typed_value_map.len) {
-            const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int >= Air.ref_start_index) {
+            const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
             self.processDeath(op_index);
         }
     }
@@ -4777,7 +4790,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
             log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv });
             // TODO make sure the destination stack offset / register does not already have something
             // going on there.
-            try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value);
+            try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value);
             // TODO track the new register / stack allocation
         }
         try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count());
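The `airCondBr` hunk above tracks the new `Air.Inst.Ref` encoding: the block of reserved refs at the start of the index space is now delimited by `Air.ref_start_index` instead of the length of the removed `typed_value_map`. Assuming `Air.refToIndex` keeps the contract it has elsewhere in this diff (returning null for refs that denote constants rather than instructions), the manual arithmetic is equivalent to this sketch:

    // Only instructions have deaths to process; constant refs map to null.
    if (Air.refToIndex(pl_op.operand)) |op_index| {
        self.processDeath(op_index);
    }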
@@ -4804,7 +4817,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
             log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value });
             // TODO make sure the destination stack offset / register does not already have something
             // going on there.
-            try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value);
+            try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value);
             // TODO track the new register / stack allocation
         }

@@ -4819,13 +4832,13 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
-    const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional()) blk: {
-        var buf: Type.Payload.ElemType = undefined;
-        const payload_ty = operand_ty.optionalChild(&buf);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+    const mod = self.bin_file.options.module.?;
+    const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional(mod)) blk: {
+        const payload_ty = operand_ty.optionalChild(mod);
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
             break :blk .{ .ty = operand_ty, .bind = operand_bind };

-        const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
+        const offset = @intCast(u32, payload_ty.abiSize(mod));
         const operand_mcv = try operand_bind.resolveToMcv(self);
         const new_mcv: MCValue = switch (operand_mcv) {
             .register => |source_reg| new: {
@@ -4838,7 +4851,7 @@ fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
                     try self.genSetReg(payload_ty, dest_reg, operand_mcv);
                 } else {
                     _ = try self.addInst(.{
-                        .tag = if (payload_ty.isSignedInt())
+                        .tag = if (payload_ty.isSignedInt(mod))
                             Mir.Inst.Tag.asr_immediate
                         else
                             Mir.Inst.Tag.lsr_immediate,
@@ -4875,9 +4888,10 @@ fn isErr(
     error_union_bind: ReadArg.Bind,
     error_union_ty: Type,
 ) !MCValue {
-    const error_type = error_union_ty.errorUnionSet();
+    const mod = self.bin_file.options.module.?;
+    const error_type = error_union_ty.errorUnionSet(mod);

-    if (error_type.errorSetIsEmpty()) {
+    if (error_type.errorSetIsEmpty(mod)) {
         return MCValue{ .immediate = 0 }; // always false
     }

@@ -4908,7 +4922,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(un_op);
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
         break :result try self.isNull(.{ .mcv = operand }, operand_ty);
     };

@@ -4916,11 +4930,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const ptr_ty = self.typeOf(un_op);
+        const elem_ty = ptr_ty.childType(mod);

         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4934,7 +4949,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand = try self.resolveInst(un_op);
-        const operand_ty = self.air.typeOf(un_op);
+        const operand_ty = self.typeOf(un_op);
         break :result try self.isNonNull(.{ .mcv = operand }, operand_ty);
     };

@@ -4942,11 +4957,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const ptr_ty = self.typeOf(un_op);
+        const elem_ty = ptr_ty.childType(mod);

         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4960,7 +4976,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
-        const error_union_ty = self.air.typeOf(un_op);
+        const error_union_ty = self.typeOf(un_op);

         break :result try self.isErr(error_union_bind, error_union_ty);
     };
@@ -4968,11 +4984,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const ptr_ty = self.typeOf(un_op);
+        const elem_ty = ptr_ty.childType(mod);

         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -4986,7 +5003,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = un_op };
-        const error_union_ty = self.air.typeOf(un_op);
+        const error_union_ty = self.typeOf(un_op);

         break :result try self.isNonErr(error_union_bind, error_union_ty);
     };
@@ -4994,11 +5011,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_ptr = try self.resolveInst(un_op);
-        const ptr_ty = self.air.typeOf(un_op);
-        const elem_ty = ptr_ty.elemType();
+        const ptr_ty = self.typeOf(un_op);
+        const elem_ty = ptr_ty.childType(mod);

         const operand = try self.allocRegOrMem(elem_ty, true, null);
         try self.load(operand, operand_ptr, ptr_ty);
@@ -5065,7 +5083,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {

 fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const condition_ty = self.air.typeOf(pl_op.operand);
+    const condition_ty = self.typeOf(pl_op.operand);
     const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
     const liveness = try self.liveness.getSwitchBr(
         self.gpa,
@@ -5210,9 +5228,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
+    const mod = self.bin_file.options.module.?;
     const block_data = self.blocks.getPtr(block).?;

-    if (self.air.typeOf(operand).hasRuntimeBits()) {
+    if (self.typeOf(operand).hasRuntimeBits(mod)) {
         const operand_mcv = try self.resolveInst(operand);
         const block_mcv = block_data.mcv;
         if (block_mcv == .none) {
@@ -5220,14 +5239,14 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
                 .none, .dead, .unreach => unreachable,
                 .register, .stack_offset, .memory => operand_mcv,
                 .immediate, .stack_argument_offset, .compare_flags => blk: {
-                    const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block);
-                    try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
+                    const new_mcv = try self.allocRegOrMem(self.typeOfIndex(block), true, block);
+                    try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv);
                     break :blk new_mcv;
                 },
                 else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}),
             };
         } else {
-            try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
+            try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv);
         }
     }
     return self.brVoid(block);
@@ -5293,7 +5312,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
             const arg_mcv = try self.resolveInst(input);
             try self.register_manager.getReg(reg, null);
-            try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
+            try self.genSetReg(self.typeOf(input), reg, arg_mcv);
         }

         {
@@ -5386,7 +5405,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
 }

 fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
@@ -5441,11 +5461,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             const reg_lock = self.register_manager.lockReg(rwo.reg);
             defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);

-            const wrapped_ty = ty.structFieldType(0);
+            const wrapped_ty = ty.structFieldType(0, mod);
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg });

-            const overflow_bit_ty = ty.structFieldType(1);
-            const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
+            const overflow_bit_ty = ty.structFieldType(1, mod);
+            const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod));
             const raw_cond_reg = try self.register_manager.allocReg(null, gp);
             const cond_reg = self.registerAlias(raw_cond_reg, overflow_bit_ty);
@@ -5478,11 +5498,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                 const reg = try self.copyToTmpRegister(ty, mcv);
                 return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
             } else {
-                var ptr_ty_payload: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .single_mut_pointer },
-                    .data = ty,
-                };
-                const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+                const ptr_ty = try mod.singleMutPtrType(ty);

                 // TODO call extern memcpy
                 const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5559,6 +5575,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 }

 fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
+    const mod = self.bin_file.options.module.?;
     switch (mcv) {
         .dead => unreachable,
         .unreach, .none => return, // Nothing to do.
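The `register_with_overflow` case of `genSetStack` above treats `ty` as the tuple that overflow arithmetic produces, spilling field 0 (the wrapped result) and field 1 (the overflow bit) separately via the Module-aware `structFieldType`/`structFieldOffset`. That tuple shape is observable from user code; a tiny standalone program for orientation:

    const std = @import("std");

    pub fn main() void {
        // Overflow builtins return a (result, overflow-bit) tuple: field 0 is
        // the wrapped value, field 1 is a u1 -- the same two fields the
        // backend stores separately in genSetStack.
        const r = @mulWithOverflow(@as(u8, 200), 2);
        std.debug.print("wrapped={d} overflowed={d}\n", .{ r[0], r[1] });
    }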
@@ -5669,13 +5686,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             try self.genLdrRegister(reg, reg.toX(), ty);
         },
         .stack_offset => |off| {
-            const abi_size = ty.abiSize(self.target.*);
+            const abi_size = ty.abiSize(mod);

             switch (abi_size) {
                 1, 2, 4, 8 => {
                     const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
-                        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
+                        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack else .ldrb_stack,
+                        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack else .ldrh_stack,
                         4, 8 => .ldr_stack,
                         else => unreachable, // unexpected abi size
                     };
@@ -5693,13 +5710,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             }
         },
         .stack_argument_offset => |off| {
-            const abi_size = ty.abiSize(self.target.*);
+            const abi_size = ty.abiSize(mod);

             switch (abi_size) {
                 1, 2, 4, 8 => {
                     const tag: Mir.Inst.Tag = switch (abi_size) {
-                        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
-                        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
+                        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument,
+                        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument,
                         4, 8 => .ldr_stack_argument,
                         else => unreachable, // unexpected abi size
                     };
@@ -5720,7 +5737,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
 }

 fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     switch (mcv) {
         .dead => unreachable,
         .none, .unreach => return,
@@ -5728,7 +5746,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
             if (!self.wantSafety())
                 return; // The already existing value will do just fine.
             // TODO Upgrade this to a memset call when we have that available.
-            switch (ty.abiSize(self.target.*)) {
+            switch (ty.abiSize(mod)) {
                 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }),
                 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }),
                 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }),
@@ -5798,11 +5816,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
                 const reg = try self.copyToTmpRegister(ty, mcv);
                 return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg });
             } else {
-                var ptr_ty_payload: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .single_mut_pointer },
-                    .data = ty,
-                };
-                const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+                const ptr_ty = try mod.singleMutPtrType(ty);

                 // TODO call extern memcpy
                 const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
@@ -5913,7 +5927,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
         };
         defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);

-        const dest_ty = self.air.typeOfIndex(inst);
+        const dest_ty = self.typeOfIndex(inst);
         const dest = try self.allocRegOrMem(dest_ty, true, inst);
         try self.setRegOrMem(dest_ty, dest, operand);
         break :result dest;
@@ -5922,19 +5936,20 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_ty = self.air.typeOf(ty_op.operand);
+        const ptr_ty = self.typeOf(ty_op.operand);
         const ptr = try self.resolveInst(ty_op.operand);
-        const array_ty = ptr_ty.childType();
-        const array_len = @intCast(u32, array_ty.arrayLen());
+        const array_ty = ptr_ty.childType(mod);
+        const array_len = @intCast(u32, array_ty.arrayLen(mod));

         const ptr_bits = self.target.ptrBitWidth();
         const ptr_bytes = @divExact(ptr_bits, 8);

         const stack_offset = try self.allocMem(ptr_bytes * 2, ptr_bytes * 2, inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
-        try self.genSetStack(Type.initTag(.usize), stack_offset - ptr_bytes, .{ .immediate = array_len });
+        try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len });
         break :result MCValue{ .stack_offset = stack_offset };
     };
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -6044,8 +6059,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
-    const vector_ty = self.air.typeOfIndex(inst);
-    const len = vector_ty.vectorLen();
+    const mod = self.bin_file.options.module.?;
+    const vector_ty = self.typeOfIndex(inst);
+    const len = vector_ty.vectorLen(mod);
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
     const result: MCValue = res: {
@@ -6087,14 +6103,15 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airTry(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const extra = self.air.extraData(Air.Try, pl_op.payload);
     const body = self.air.extra[extra.end..][0..extra.data.body_len];
     const result: MCValue = result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand };
-        const error_union_ty = self.air.typeOf(pl_op.operand);
-        const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const error_union_align = error_union_ty.abiAlignment(self.target.*);
+        const error_union_ty = self.typeOf(pl_op.operand);
+        const error_union_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const error_union_align = error_union_ty.abiAlignment(mod);

         // The error union will die in the body. However, we need the
         // error union after the body in order to extract the payload
@@ -6123,37 +6140,32 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
-    // First section of indexes correspond to a set number of constant values.
-    const ref_int = @enumToInt(inst);
-    if (ref_int < Air.Inst.Ref.typed_value_map.len) {
-        const tv = Air.Inst.Ref.typed_value_map[ref_int];
-        if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) {
-            return MCValue{ .none = {} };
-        }
-        return self.genTypedValue(tv);
-    }
+    const mod = self.bin_file.options.module.?;

     // If the type has no codegen bits, no need to store it.
-    const inst_ty = self.air.typeOf(inst);
-    if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError())
+    const inst_ty = self.typeOf(inst);
+    if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod))
         return MCValue{ .none = {} };

-    const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
+    const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{
+        .ty = inst_ty,
+        .val = (try self.air.value(inst, mod)).?,
+    });
+
     switch (self.air.instructions.items(.tag)[inst_index]) {
-        .constant => {
+        .interned => {
             // Constants have static lifetimes, so they are always memoized in the outer most table.
             const branch = &self.branch_stack.items[0];
             const gop = try branch.inst_table.getOrPut(self.gpa, inst_index);
             if (!gop.found_existing) {
-                const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
+                const interned = self.air.instructions.items(.data)[inst_index].interned;
                 gop.value_ptr.* = try self.genTypedValue(.{
                     .ty = inst_ty,
-                    .val = self.air.values[ty_pl.payload],
+                    .val = interned.toValue(),
                 });
             }
             return gop.value_ptr.*;
         },
-        .const_ty => unreachable,
         else => return self.getResolvedInstValue(inst_index),
     }
 }
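`resolveInst` above is the heart of the constant-handling change: constants are no longer a reserved prefix of the `Ref` space backed by `typed_value_map`. A ref either maps to no instruction at all, in which case `self.air.value(inst, mod)` materializes the interned value, or it maps to an `.interned` instruction whose payload is an InternPool index converted back to a `Value` with `toValue()` and memoized in the outermost branch table. A condensed sketch of just that dispatch, using names from the hunk except where noted:

    const inst_index = Air.refToIndex(inst) orelse {
        // Constant ref: the value is interned, not computed by an instruction.
        return self.genTypedValue(.{
            .ty = inst_ty,
            .val = (try self.air.value(inst, mod)).?,
        });
    };
    switch (self.air.instructions.items(.tag)[inst_index]) {
        // `resolveInterned` is a hypothetical helper standing in for the
        // memoization block shown in the hunk above.
        .interned => return try self.resolveInterned(inst_index, inst_ty),
        else => return self.getResolvedInstValue(inst_index),
    }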
         .return_value = undefined,
         .stack_byte_count = undefined,
@@ -6221,7 +6232,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
     };
     errdefer self.gpa.free(result.args);

-    const ret_ty = fn_ty.fnReturnType();
+    const ret_ty = fn_ty.fnReturnType(mod);

     switch (cc) {
         .Naked => {
@@ -6236,14 +6247,14 @@
             var ncrn: usize = 0; // Next Core Register Number
             var nsaa: u32 = 0; // Next stacked argument address

-            if (ret_ty.zigTypeTag() == .NoReturn) {
+            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
                 if (ret_ty_size == 0) {
-                    assert(ret_ty.isError());
+                    assert(ret_ty.isError(mod));
                     result.return_value = .{ .immediate = 0 };
                 } else if (ret_ty_size <= 8) {
                     result.return_value = .{ .register = self.registerAlias(c_abi_int_return_regs[0], ret_ty) };
@@ -6252,8 +6263,8 @@
                 }
             }

-            for (param_types, 0..) |ty, i| {
-                const param_size = @intCast(u32, ty.abiSize(self.target.*));
+            for (fn_info.param_types, 0..) |ty, i| {
+                const param_size = @intCast(u32, ty.toType().abiSize(mod));
                 if (param_size == 0) {
                     result.args[i] = .{ .none = {} };
                     continue;
@@ -6261,14 +6272,14 @@
                 // We round up NCRN only for non-Apple platforms which allow the 16-byte aligned
                 // values to spread across odd-numbered registers.
-                if (ty.abiAlignment(self.target.*) == 16 and !self.target.isDarwin()) {
+                if (ty.toType().abiAlignment(mod) == 16 and !self.target.isDarwin()) {
                     // Round up NCRN to the next even number
                     ncrn += ncrn % 2;
                 }

                 if (std.math.divCeil(u32, param_size, 8) catch unreachable <= 8 - ncrn) {
                     if (param_size <= 8) {
-                        result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty) };
+                        result.args[i] = .{ .register = self.registerAlias(c_abi_int_param_regs[ncrn], ty.toType()) };
                         ncrn += 1;
                     } else {
                         return self.fail("TODO MCValues with multiple registers", .{});
@@ -6279,7 +6290,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
                     ncrn = 8;
                     // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided
                     // that the entire stack space consumed by the arguments is 8-byte aligned.
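Aside (not part of the patch): the C calling-convention pass above tracks NCRN (Next Core Register Number, x0..x7) and NSAA (Next Stacked Argument Address). A simplified, hypothetical standalone model of that assignment — sizes and alignments are plain integers here, whereas the real code derives them from `Type.abiSize(mod)` and `Type.abiAlignment(mod)`:

```zig
const std = @import("std");

const Loc = union(enum) { register: u8, stack_offset: u32 };

fn classify(sizes: []const u32, aligns: []const u32, locs: []Loc, is_darwin: bool) void {
    var ncrn: u32 = 0; // Next Core Register Number
    var nsaa: u32 = 0; // Next Stacked Argument Address
    for (sizes, aligns, 0..) |size, alignment, i| {
        // Non-Apple: round NCRN up to even for 16-byte aligned values.
        if (alignment == 16 and !is_darwin) ncrn += ncrn % 2;
        if (size <= 8 and ncrn < 8) {
            locs[i] = .{ .register = @intCast(u8, ncrn) };
            ncrn += 1;
        } else {
            ncrn = 8; // once an argument spills, later ones are stacked too
            nsaa = std.mem.alignForwardGeneric(u32, nsaa, alignment);
            locs[i] = .{ .stack_offset = nsaa };
            nsaa += size;
        }
    }
}

pub fn main() void {
    var locs: [4]Loc = undefined;
    classify(&.{ 4, 8, 16, 4 }, &.{ 4, 8, 16, 4 }, &locs, false);
    for (locs, 0..) |loc, i| std.debug.print("arg {d}: {any}\n", .{ i, loc });
}
```

The multi-register case (sizes over 8 bytes) is deliberately elided, matching the `TODO MCValues with multiple registers` failure in the real code.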
-                    if (ty.abiAlignment(self.target.*) == 8) {
+                    if (ty.toType().abiAlignment(mod) == 8) {
                         if (nsaa % 8 != 0) {
                             nsaa += 8 - (nsaa % 8);
                         }
@@ -6294,14 +6305,14 @@
             result.stack_align = 16;
         },
         .Unspecified => {
-            if (ret_ty.zigTypeTag() == .NoReturn) {
+            if (ret_ty.zigTypeTag(mod) == .NoReturn) {
                 result.return_value = .{ .unreach = {} };
-            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) {
+            } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) {
                 result.return_value = .{ .none = {} };
             } else {
-                const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+                const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
                 if (ret_ty_size == 0) {
-                    assert(ret_ty.isError());
+                    assert(ret_ty.isError(mod));
                     result.return_value = .{ .immediate = 0 };
                 } else if (ret_ty_size <= 8) {
                     result.return_value = .{ .register = self.registerAlias(.x0, ret_ty) };
@@ -6317,10 +6328,10 @@
             var stack_offset: u32 = 0;

-            for (param_types, 0..) |ty, i| {
-                if (ty.abiSize(self.target.*) > 0) {
-                    const param_size = @intCast(u32, ty.abiSize(self.target.*));
-                    const param_alignment = ty.abiAlignment(self.target.*);
+            for (fn_info.param_types, 0..) |ty, i| {
+                if (ty.toType().abiSize(mod) > 0) {
+                    const param_size = @intCast(u32, ty.toType().abiSize(mod));
+                    const param_alignment = ty.toType().abiAlignment(mod);

                     stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment);
                     result.args[i] = .{ .stack_argument_offset = stack_offset };
@@ -6371,7 +6382,8 @@ fn parseRegName(name: []const u8) ?Register {
 }

 fn registerAlias(self: *Self, reg: Register, ty: Type) Register {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);

     switch (reg.class()) {
         .general_purpose => {
@@ -6397,3 +6409,13 @@
         },
     }
 }
+
+fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
+    const mod = self.bin_file.options.module.?;
+    return self.air.typeOf(inst, &mod.intern_pool);
+}
+
+fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
+    const mod = self.bin_file.options.module.?;
+    return self.air.typeOfIndex(inst, &mod.intern_pool);
+}
diff --git a/src/arch/aarch64/abi.zig b/src/arch/aarch64/abi.zig
index 0c48f33ea1..72a6172895 100644
--- a/src/arch/aarch64/abi.zig
+++ b/src/arch/aarch64/abi.zig
@@ -4,6 +4,7 @@ const bits = @import("bits.zig");
 const Register = bits.Register;
 const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 const Type = @import("../../type.zig").Type;
+const Module = @import("../../Module.zig");

 pub const Class = union(enum) {
     memory,
@@ -14,44 +15,44 @@
 };

 /// For `float_array` the second element will be the amount of floats.
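Aside (not part of the patch): the `.Unspecified` branch above places every parameter in the stack frame by the usual align-then-advance walk: round the running offset up to the parameter's alignment, record that as `stack_argument_offset`, then advance by the parameter's size. A hypothetical standalone model with made-up sizes; the real values come from `Type.abiSize(mod)`/`Type.abiAlignment(mod)`:

```zig
const std = @import("std");

// Align-then-advance layout of a parameter area, as in the .Unspecified
// calling convention: each offset is aligned up before being recorded.
pub fn main() void {
    const sizes = [_]u32{ 1, 4, 2, 8 };
    const aligns = [_]u32{ 1, 4, 2, 8 };
    var stack_offset: u32 = 0;
    for (sizes, aligns, 0..) |size, alignment, i| {
        stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, alignment);
        std.debug.print("arg {d}: stack_argument_offset = {d}\n", .{ i, stack_offset });
        stack_offset += size;
    }
    std.debug.print("parameter area: {d} bytes\n", .{stack_offset});
}
```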
-pub fn classifyType(ty: Type, target: std.Target) Class {
-    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
+pub fn classifyType(ty: Type, mod: *Module) Class {
+    std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));

     var maybe_float_bits: ?u16 = null;
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Struct => {
-            if (ty.containerLayout() == .Packed) return .byval;
-            const float_count = countFloats(ty, target, &maybe_float_bits);
+            if (ty.containerLayout(mod) == .Packed) return .byval;
+            const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= sret_float_count) return .{ .float_array = float_count };

-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
             if (bit_size > 128) return .memory;
             if (bit_size > 64) return .double_integer;
             return .integer;
         },
         .Union => {
-            if (ty.containerLayout() == .Packed) return .byval;
-            const float_count = countFloats(ty, target, &maybe_float_bits);
+            if (ty.containerLayout(mod) == .Packed) return .byval;
+            const float_count = countFloats(ty, mod, &maybe_float_bits);
             if (float_count <= sret_float_count) return .{ .float_array = float_count };

-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
             if (bit_size > 128) return .memory;
             if (bit_size > 64) return .double_integer;
             return .integer;
         },
         .Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
         .Vector => {
-            const bit_size = ty.bitSize(target);
+            const bit_size = ty.bitSize(mod);
             // TODO is this controlled by a cpu feature?
             if (bit_size > 128) return .memory;
             return .byval;
         },
         .Optional => {
-            std.debug.assert(ty.isPtrLikeOptional());
+            std.debug.assert(ty.isPtrLikeOptional(mod));
             return .byval;
         },
         .Pointer => {
-            std.debug.assert(!ty.isSlice());
+            std.debug.assert(!ty.isSlice(mod));
             return .byval;
         },
         .ErrorUnion,
@@ -73,14 +74,15 @@ pub fn classifyType(ty: Type, target: std.Target) Class {
 }

 const sret_float_count = 4;
-fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
+fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
+    const target = mod.getTarget();
     const invalid = std.math.maxInt(u8);
-    switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag(mod)) {
         .Union => {
-            const fields = ty.unionFields();
+            const fields = ty.unionFields(mod);
             var max_count: u8 = 0;
             for (fields.values()) |field| {
-                const field_count = countFloats(field.ty, target, maybe_float_bits);
+                const field_count = countFloats(field.ty, mod, maybe_float_bits);
                 if (field_count == invalid) return invalid;
                 if (field_count > max_count) max_count = field_count;
                 if (max_count > sret_float_count) return invalid;
@@ -88,12 +90,12 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
             return max_count;
         },
         .Struct => {
-            const fields_len = ty.structFieldCount();
+            const fields_len = ty.structFieldCount(mod);
             var count: u8 = 0;
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
-                const field_ty = ty.structFieldType(i);
-                const field_count = countFloats(field_ty, target, maybe_float_bits);
+                const field_ty = ty.structFieldType(i, mod);
+                const field_count = countFloats(field_ty, mod, maybe_float_bits);
                 if (field_count == invalid) return invalid;
                 count += field_count;
                 if (count > sret_float_count) return invalid;
@@ -113,21 +115,21 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
     }
 }

-pub fn getFloatArrayType(ty: Type) ?Type {
-    switch (ty.zigTypeTag()) {
+pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type {
+    switch (ty.zigTypeTag(mod)) {
         .Union => {
-            const fields = ty.unionFields();
+            const fields = ty.unionFields(mod);
             for (fields.values()) |field| {
-                if (getFloatArrayType(field.ty)) |some| return some;
+                if (getFloatArrayType(field.ty, mod)) |some| return some;
             }
             return null;
         },
         .Struct => {
-            const fields_len = ty.structFieldCount();
+            const fields_len = ty.structFieldCount(mod);
             var i: u32 = 0;
             while (i < fields_len) : (i += 1) {
-                const field_ty = ty.structFieldType(i);
-                if (getFloatArrayType(field_ty)) |some| return some;
+                const field_ty = ty.structFieldType(i, mod);
+                if (getFloatArrayType(field_ty, mod)) |some| return some;
             }
             return null;
         },
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index bdc1627bd6..69a156999b 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -334,7 +334,7 @@ const Self = @This();
 pub fn generate(
     bin_file: *link.File,
     src_loc: Module.SrcLoc,
-    module_fn: *Module.Fn,
+    module_fn_index: Module.Fn.Index,
     air: Air,
     liveness: Liveness,
     code: *std.ArrayList(u8),
@@ -345,6 +345,7 @@ pub fn generate(
     }

     const mod = bin_file.options.module.?;
+    const module_fn = mod.funcPtr(module_fn_index);
     const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
     assert(fn_owner_decl.has_tv);
     const fn_type = fn_owner_decl.ty;
@@ -477,7 +478,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
 }

 fn gen(self: *Self) !void {
-    const cc = self.fn_type.fnCallingConvention();
+    const mod = self.bin_file.options.module.?;
+    const cc = self.fn_type.fnCallingConvention(mod);
     if (cc != .Naked) {
         // push {fp, lr}
         const push_reloc = try self.addNop();
@@ -518,10 +520,10 @@ fn gen(self: *Self) !void {
                     const inst = self.air.getMainBody()[arg_index];
                     assert(self.air.instructions.items(.tag)[inst] == .arg);

-                    const ty = self.air.typeOfIndex(inst);
+                    const ty = self.typeOfIndex(inst);

-                    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
-                    const abi_align = ty.abiAlignment(self.target.*);
+                    const abi_size = @intCast(u32, ty.abiSize(mod));
+                    const abi_align = ty.abiAlignment(mod);
                     const stack_offset = try self.allocMem(abi_size, abi_align, inst);
                     try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
@@ -636,13 +638,14 @@ fn gen(self: *Self) !void {
 }

 fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
+    const mod = self.bin_file.options.module.?;
+    const ip = &mod.intern_pool;
     const air_tags = self.air.instructions.items(.tag);

     for (body) |inst| {
         // TODO: remove now-redundant isUnused calls from AIR handler functions
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
             continue;
-        }

         const old_air_bookkeeping = self.air_bookkeeping;
         try self.ensureProcessDeathCapacity(Liveness.bpi);
@@ -826,8 +829,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             .ptr_elem_val => try self.airPtrElemVal(inst),
             .ptr_elem_ptr => try self.airPtrElemPtr(inst),

-            .constant => unreachable, // excluded from function bodies
-            .const_ty => unreachable, // excluded from function bodies
+            .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,

             .unreach => self.finishAirBookkeeping(),

             .optional_payload => try self.airOptionalPayload(inst),
@@ -900,8 +902,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {

 /// Asserts there is already capacity to insert into top branch inst_table.
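Aside (not part of the patch): `classifyType` above treats a struct or union whose leaves are all floats of one width — at most `sret_float_count` (4) of them — as a homogeneous float aggregate and returns `.float_array` with the count. A hypothetical standalone model over a toy type description rather than the compiler's `Type`; it only models the struct (sum) case, while the real code also takes the max over union fields:

```zig
const std = @import("std");

// Toy stand-in for a type: either a float of some bit width, or a struct
// of nested toys. Returns null where the real code returns `invalid`.
const Toy = union(enum) {
    float: u16, // float bit width
    @"struct": []const Toy,
};

fn countFloats(t: Toy, bits: *?u16) ?u8 {
    switch (t) {
        .float => |b| {
            if (bits.*) |prev| {
                if (prev != b) return null; // mixed widths: not homogeneous
            }
            bits.* = b;
            return 1;
        },
        .@"struct" => |fields| {
            var n: u8 = 0;
            for (fields) |f| n += countFloats(f, bits) orelse return null;
            return if (n <= 4) n else null; // sret_float_count = 4
        },
    }
}

pub fn main() void {
    const vec3 = Toy{ .@"struct" = &.{ .{ .float = 32 }, .{ .float = 32 }, .{ .float = 32 } } };
    var bits: ?u16 = null;
    std.debug.print("float_array count: {?d}\n", .{countFloats(vec3, &bits)});
}
```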
 fn processDeath(self: *Self, inst: Air.Inst.Index) void {
-    const air_tags = self.air.instructions.items(.tag);
-    if (air_tags[inst] == .constant) return; // Constants are immortal.
+    assert(self.air.instructions.items(.tag)[inst] != .interned);
     // When editing this function, note that the logic must synchronize with `reuseOperand`.
     const prev_value = self.getResolvedInstValue(inst);
     const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -937,8 +938,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
         tomb_bits >>= 1;
         if (!dies) continue;
         const op_int = @enumToInt(op);
-        if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
-        const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
+        if (op_int < Air.ref_start_index) continue;
+        const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
         self.processDeath(op_index);
     }
     const is_used = @truncate(u1, tomb_bits) == 0;
@@ -1006,9 +1007,10 @@ fn allocMem(

 /// Use a pointer instruction as the basis for allocating stack memory.
 fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
-    const elem_ty = self.air.typeOfIndex(inst).elemType();
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = self.typeOfIndex(inst).childType(mod);

-    if (!elem_ty.hasRuntimeBits()) {
+    if (!elem_ty.hasRuntimeBits(mod)) {
         // As this stack item will never be dereferenced at runtime,
         // return the stack offset 0. Stack offset 0 will be where all
         // zero-sized stack allocations live as non-zero-sized
@@ -1016,22 +1018,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
         return @as(u32, 0);
     }

-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
         return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
     // TODO swap this for inst.ty.ptrAlign
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);
     return self.allocMem(abi_size, abi_align, inst);
 }

 fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue {
-    const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
-        const mod = self.bin_file.options.module.?;
+    const mod = self.bin_file.options.module.?;
+    const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
        return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
     };
-    const abi_align = elem_ty.abiAlignment(self.target.*);
+    const abi_align = elem_ty.abiAlignment(mod);

     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
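Aside (not part of the patch): `finishAir` above now decodes operand deaths against `Air.ref_start_index` — refs below it name interned constants (immortal, so death processing skips them), while refs at or above it encode an instruction index plus that offset. A toy model of the mapping; `ref_start_index` here is a hypothetical value, the real constant lives in src/Air.zig:

```zig
const std = @import("std");

const ref_start_index: u32 = 16; // hypothetical; real value is in Air.zig

// Refs below ref_start_index are interned values; the rest map to
// instruction indices by subtracting the offset.
fn refToIndex(ref: u32) ?u32 {
    return if (ref >= ref_start_index) ref - ref_start_index else null;
}

fn indexToRef(index: u32) u32 {
    return index + ref_start_index;
}

pub fn main() void {
    std.debug.assert(refToIndex(indexToRef(5)).? == 5);
    std.debug.print("ref 3  -> {?d} (interned, immortal)\n", .{refToIndex(3)});
    std.debug.print("ref 21 -> {?d} (instruction index)\n", .{refToIndex(21)});
}
```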
@@ -1049,7 +1050,7 @@ fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst
 }

 pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
-    const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst);
+    const stack_mcv = try self.allocRegOrMem(self.typeOfIndex(inst), false, inst);
     log.debug("spilling {} (%{d}) to stack mcv {any}", .{ reg, inst, stack_mcv });

     const reg_mcv = self.getResolvedInstValue(inst);
@@ -1063,14 +1064,14 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
     const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
     try branch.inst_table.put(self.gpa, inst, stack_mcv);
-    try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
+    try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
 }

 /// Save the current instruction stored in the compare flags if
 /// occupied
 fn spillCompareFlagsIfOccupied(self: *Self) !void {
     if (self.cpsr_flags_inst) |inst_to_save| {
-        const ty = self.air.typeOfIndex(inst_to_save);
+        const ty = self.typeOfIndex(inst_to_save);
         const mcv = self.getResolvedInstValue(inst_to_save);
         const new_mcv = switch (mcv) {
             .cpsr_flags => try self.allocRegOrMem(ty, true, inst_to_save),
@@ -1080,7 +1081,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void {
             else => unreachable, // mcv doesn't occupy the compare flags
         };

-        try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
+        try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv);
         log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv });

         const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@@ -1114,17 +1115,14 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = switch (self.ret_mcv) {
         .none, .register => .{ .ptr_stack_offset = try self.allocMemPtr(inst) },
         .stack_offset => blk: {
             // self.ret_mcv is an address to where this function
             // should store its result into
-            const ret_ty = self.fn_type.fnReturnType();
-            var ptr_ty_payload: Type.Payload.ElemType = .{
-                .base = .{ .tag = .single_mut_pointer },
-                .data = ret_ty,
-            };
-            const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+            const ret_ty = self.fn_type.fnReturnType(mod);
+            const ptr_ty = try mod.singleMutPtrType(ret_ty);

             // addr_reg will contain the address of where to store the
             // result into
@@ -1150,18 +1148,19 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     if (self.liveness.isUnused(inst))
         return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });

     const operand = try self.resolveInst(ty_op.operand);
-    const operand_ty = self.air.typeOf(ty_op.operand);
-    const dest_ty = self.air.typeOfIndex(inst);
+    const operand_ty = self.typeOf(ty_op.operand);
+    const dest_ty = self.typeOfIndex(inst);

-    const operand_abi_size = operand_ty.abiSize(self.target.*);
-    const dest_abi_size = dest_ty.abiSize(self.target.*);
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = dest_ty.intInfo(self.target.*);
+    const operand_abi_size = operand_ty.abiSize(mod);
+    const dest_abi_size = dest_ty.abiSize(mod);
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = dest_ty.intInfo(mod);

     const dst_mcv: MCValue = blk: {
         if (info_a.bits == info_b.bits) {
@@ -1215,8 +1214,9 @@ fn trunc(
     operand_ty: Type,
     dest_ty: Type,
 ) !MCValue {
-    const info_a = operand_ty.intInfo(self.target.*);
-    const info_b = dest_ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const info_a = operand_ty.intInfo(mod);
+    const info_b = dest_ty.intInfo(mod);

     if (info_b.bits <= 32) {
         if (info_a.bits > 32) {
@@ -1259,8 +1259,8 @@ fn trunc(
 fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
-    const operand_ty = self.air.typeOf(ty_op.operand);
-    const dest_ty = self.air.typeOfIndex(inst);
+    const operand_ty = self.typeOf(ty_op.operand);
+    const dest_ty = self.typeOfIndex(inst);

     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
         break :blk try self.trunc(inst, operand_bind, operand_ty, dest_ty);
@@ -1278,15 +1278,16 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void {

 fn airNot(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
-        const operand_ty = self.air.typeOf(ty_op.operand);
+        const operand_ty = self.typeOf(ty_op.operand);

         switch (try operand_bind.resolveToMcv(self)) {
             .dead => unreachable,
             .unreach => unreachable,
             .cpsr_flags => |cond| break :result MCValue{ .cpsr_flags = cond.negate() },
             else => {
-                switch (operand_ty.zigTypeTag()) {
+                switch (operand_ty.zigTypeTag(mod)) {
                     .Bool => {
                         var op_reg: Register = undefined;
                         var dest_reg: Register = undefined;
@@ -1319,7 +1320,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
                     },
                     .Vector => return self.fail("TODO bitwise not for vectors", .{}),
                     .Int => {
-                        const int_info = operand_ty.intInfo(self.target.*);
+                        const int_info = operand_ty.intInfo(mod);
                         if (int_info.bits <= 32) {
                             var op_reg: Register = undefined;
                             var dest_reg: Register = undefined;
@@ -1373,13 +1374,13 @@ fn minMax(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM min/max on floats", .{}),
         .Vector => return self.fail("TODO ARM min/max on vectors", .{}),
         .Int => {
-            const mod = self.bin_file.options.module.?;
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 32) {
                 var lhs_reg: Register = undefined;
                 var rhs_reg: Register = undefined;
@@ -1463,8 +1464,8 @@ fn minMax(
 fn airMinMax(self: *Self, inst: Air.Inst.Index) !void {
     const tag = self.air.instructions.items(.tag)[inst];
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);

     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
@@ -1483,9 +1484,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const ptr = try self.resolveInst(bin_op.lhs);
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
+        const ptr_ty = self.typeOf(bin_op.lhs);
         const len = try self.resolveInst(bin_op.rhs);
-        const len_ty = self.air.typeOf(bin_op.rhs);
+        const len_ty = self.typeOf(bin_op.rhs);

         const stack_offset = try self.allocMem(8, 4, inst);
         try self.genSetStack(ptr_ty, stack_offset, ptr);
@@ -1497,8 +1498,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {

 fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);

     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
@@ -1548,8 +1549,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
 fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);

     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
@@ -1582,23 +1583,23 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const tag = self.air.instructions.items(.tag)[inst];
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+    const mod = self.bin_file.options.module.?;

     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);

-        const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_ty = self.typeOfIndex(inst);
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));

-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
             .Int => {
-                const mod = self.bin_file.options.module.?;
                 assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 if (int_info.bits < 32) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -1631,7 +1632,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     });

                     try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
-                    try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
+                    try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });

                     break :result MCValue{ .stack_offset = stack_offset };
                 } else if (int_info.bits == 32) {
@@ -1695,23 +1696,23 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;

     const result: MCValue = result: {
         const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs };
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);

-        const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_ty = self.typeOfIndex(inst);
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));

-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
             .Int => {
-                const mod = self.bin_file.options.module.?;
                 assert(lhs_ty.eql(rhs_ty, mod));
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 if (int_info.bits <= 16) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -1744,7 +1745,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     });

                     try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg });
-                    try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
+                    try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });

                     break :result MCValue{ .stack_offset = stack_offset };
                 } else if (int_info.bits <= 32) {
@@ -1842,7 +1843,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     });

                     // strb rdlo, [...]
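Aside (not part of the patch): the overflow handlers above lower `a op b` into a stack tuple of the wrapped result plus an overflow bit, storing the bit at `stack_offset - structFieldOffset(1, mod)`. Plain Zig exposes the same shape; the named struct below is only a stand-in for the AIR tuple type:

```zig
const std = @import("std");

// A stand-in for the { result, overflow_bit } tuple that airOverflow
// materializes on the stack.
const Overflow = struct { result: u32, bit: u1 };

pub fn main() void {
    // @addWithOverflow returns the wrapped result and the carry bit.
    const r = @addWithOverflow(@as(u32, 0xffffffff), 1);
    const tuple = Overflow{ .result = r[0], .bit = r[1] };
    std.debug.print("result={d} overflow={d}\n", .{ tuple.result, tuple.bit });
    std.debug.print("size={d} bit field offset={d}\n", .{
        @sizeOf(Overflow), @offsetOf(Overflow, "bit"),
    });
}
```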
-                    try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .register = rdlo });
+                    try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .register = rdlo });

                     break :result MCValue{ .stack_offset = stack_offset };
                 } else {
@@ -1859,19 +1860,20 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
     if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    const mod = self.bin_file.options.module.?;

     const result: MCValue = result: {
-        const lhs_ty = self.air.typeOf(extra.lhs);
-        const rhs_ty = self.air.typeOf(extra.rhs);
+        const lhs_ty = self.typeOf(extra.lhs);
+        const rhs_ty = self.typeOf(extra.rhs);

-        const tuple_ty = self.air.typeOfIndex(inst);
-        const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
-        const tuple_align = tuple_ty.abiAlignment(self.target.*);
-        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, self.target.*));
+        const tuple_ty = self.typeOfIndex(inst);
+        const tuple_size = @intCast(u32, tuple_ty.abiSize(mod));
+        const tuple_align = tuple_ty.abiAlignment(mod);
+        const overflow_bit_offset = @intCast(u32, tuple_ty.structFieldOffset(1, mod));

-        switch (lhs_ty.zigTypeTag()) {
+        switch (lhs_ty.zigTypeTag(mod)) {
             .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}),
             .Int => {
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 if (int_info.bits <= 32) {
                     const stack_offset = try self.allocMem(tuple_size, tuple_align, inst);
@@ -1976,7 +1978,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                     });

                     try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg });
-                    try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });
+                    try self.genSetStack(Type.u1, stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne });

                     break :result MCValue{ .stack_offset = stack_offset };
                 } else {
@@ -2014,10 +2016,11 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const optional_ty = self.air.typeOfIndex(inst);
-        const abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
+        const optional_ty = self.typeOfIndex(inst);
+        const abi_size = @intCast(u32, optional_ty.abiSize(mod));

         // Optional with a zero-bit payload type is just a boolean true
         if (abi_size == 1) {
@@ -2036,16 +2039,17 @@ fn errUnionErr(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    const err_ty = error_union_ty.errorUnionSet();
-    const payload_ty = error_union_ty.errorUnionPayload();
-    if (err_ty.errorSetIsEmpty()) {
+    const mod = self.bin_file.options.module.?;
+    const err_ty = error_union_ty.errorUnionSet(mod);
+    const payload_ty = error_union_ty.errorUnionPayload(mod);
+    if (err_ty.errorSetIsEmpty(mod)) {
         return MCValue{ .immediate = 0 };
     }

-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return try error_union_bind.resolveToMcv(self);
     }

-    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*));
+    const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, mod));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -2067,7 +2071,7 @@ fn errUnionErr(
             );

             const err_bit_offset = err_offset * 8;
-            const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8;
+            const err_bit_size = @intCast(u32, err_ty.abiSize(mod)) * 8;

             _ = try self.addInst(.{
                 .tag = .ubfx, // errors are unsigned integers
                 .data = .{ .rr_lsb_width = .{
                     .rd = dest_reg,
                     .rn = operand_reg,
@@ -2098,7 +2102,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
-        const error_union_ty = self.air.typeOf(ty_op.operand);
+        const error_union_ty = self.typeOf(ty_op.operand);

         break :result try self.errUnionErr(error_union_bind, error_union_ty, inst);
     };
@@ -2112,16 +2116,17 @@ fn errUnionPayload(
     error_union_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    const err_ty = error_union_ty.errorUnionSet();
-    const payload_ty = error_union_ty.errorUnionPayload();
-    if (err_ty.errorSetIsEmpty()) {
+    const mod = self.bin_file.options.module.?;
+    const err_ty = error_union_ty.errorUnionSet(mod);
+    const payload_ty = error_union_ty.errorUnionPayload(mod);
+    if (err_ty.errorSetIsEmpty(mod)) {
         return try error_union_bind.resolveToMcv(self);
     }

-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return MCValue.none;
     }

-    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*));
+    const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod));
     switch (try error_union_bind.resolveToMcv(self)) {
         .register => {
             var operand_reg: Register = undefined;
@@ -2143,10 +2148,10 @@ fn errUnionPayload(
             );

             const payload_bit_offset = payload_offset * 8;
-            const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8;
+            const payload_bit_size = @intCast(u32, payload_ty.abiSize(mod)) * 8;

             _ = try self.addInst(.{
-                .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx,
+                .tag = if (payload_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
                 .data = .{ .rr_lsb_width = .{
                     .rd = dest_reg,
                     .rn = operand_reg,
@@ -2174,7 +2179,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand };
-        const error_union_ty = self.air.typeOf(ty_op.operand);
+        const error_union_ty = self.typeOf(ty_op.operand);

         break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst);
     };
@@ -2221,19 +2226,20 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {

 /// T to E!T
 fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_ty = self.air.getRefType(ty_op.ty);
-        const error_ty = error_union_ty.errorUnionSet();
-        const payload_ty = error_union_ty.errorUnionPayload();
+        const error_ty = error_union_ty.errorUnionSet(mod);
+        const payload_ty = error_union_ty.errorUnionPayload(mod);
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;

-        const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const abi_align = error_union_ty.abiAlignment(self.target.*);
+        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_align = error_union_ty.abiAlignment(mod);
         const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst));
-        const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
-        const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+        const payload_off = errUnionPayloadOffset(payload_ty, mod);
+        const err_off = errUnionErrorOffset(payload_ty, mod);
         try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand);
         try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 });

@@ -2244,19 +2250,20 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {

 /// E to E!T
 fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const error_union_ty = self.air.getRefType(ty_op.ty);
-        const error_ty = error_union_ty.errorUnionSet();
-        const payload_ty = error_union_ty.errorUnionPayload();
+        const error_ty = error_union_ty.errorUnionSet(mod);
+        const payload_ty = error_union_ty.errorUnionPayload(mod);
         const operand = try self.resolveInst(ty_op.operand);
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result operand;

-        const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*));
-        const abi_align = error_union_ty.abiAlignment(self.target.*);
+        const abi_size = @intCast(u32, error_union_ty.abiSize(mod));
+        const abi_align = error_union_ty.abiAlignment(mod);
         const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst));
-        const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
-        const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+        const payload_off = errUnionPayloadOffset(payload_ty, mod);
+        const err_off = errUnionErrorOffset(payload_ty, mod);
         try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand);
         try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef);
@@ -2360,8 +2367,9 @@ fn ptrElemVal(
     ptr_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) !MCValue {
-    const elem_ty = ptr_ty.childType();
-    const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = ptr_ty.childType(mod);
+    const elem_size = @intCast(u32, elem_ty.abiSize(mod));

     switch (elem_size) {
         1, 4 => {
@@ -2418,11 +2426,11 @@ fn ptrElemVal(
 }

 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const slice_ty = self.air.typeOf(bin_op.lhs);
-    const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
-        var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-        const ptr_ty = slice_ty.slicePtrFieldType(&buf);
+    const slice_ty = self.typeOf(bin_op.lhs);
+    const result: MCValue = if (!slice_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
+        const ptr_ty = slice_ty.slicePtrFieldType(mod);

         const slice_mcv = try self.resolveInst(bin_op.lhs);
         const base_mcv = slicePtr(slice_mcv);
@@ -2445,8 +2453,8 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
         const base_bind: ReadArg.Bind = .{ .mcv = base_mcv };
         const index_bind: ReadArg.Bind = .{ .inst = extra.rhs };

-        const slice_ty = self.air.typeOf(extra.lhs);
-        const index_ty = self.air.typeOf(extra.rhs);
+        const slice_ty = self.typeOf(extra.lhs);
+        const index_ty = self.typeOf(extra.rhs);

         const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ty, index_ty, null);
         break :result addr;
@@ -2461,7 +2469,8 @@ fn arrayElemVal(
     array_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    const elem_ty = array_ty.childType();
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = array_ty.childType(mod);

     const mcv = try array_bind.resolveToMcv(self);
     switch (mcv) {
@@ -2495,11 +2504,7 @@ fn arrayElemVal(

                 const base_bind: ReadArg.Bind = .{ .mcv = ptr_to_mcv };

-                var ptr_ty_payload: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .single_mut_pointer },
-                    .data = elem_ty,
-                };
-                const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+                const ptr_ty = try mod.singleMutPtrType(elem_ty);

                 return try self.ptrElemVal(base_bind, index_bind, ptr_ty, maybe_inst);
             },
@@ -2512,7 +2517,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const array_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
         const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
-        const array_ty = self.air.typeOf(bin_op.lhs);
+        const array_ty = self.typeOf(bin_op.lhs);

         break :result try self.arrayElemVal(array_bind, index_bind, array_ty, inst);
     };
@@ -2520,9 +2525,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
-    const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: {
+    const ptr_ty = self.typeOf(bin_op.lhs);
+    const result: MCValue = if (!ptr_ty.isVolatilePtr(mod) and self.liveness.isUnused(inst)) .dead else result: {
         const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs };
         const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs };
@@ -2538,8 +2544,8 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
         const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs };
         const index_bind: ReadArg.Bind = .{ .inst = extra.rhs };

-        const ptr_ty = self.air.typeOf(extra.lhs);
-        const index_ty = self.air.typeOf(extra.rhs);
+        const ptr_ty = self.typeOf(extra.lhs);
+        const index_ty = self.typeOf(extra.rhs);

         const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, index_ty, null);
         break :result addr;
@@ -2646,8 +2652,9 @@ fn reuseOperand(
 }

 fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
-    const elem_ty = ptr_ty.elemType();
-    const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const elem_ty = ptr_ty.childType(mod);
+    const elem_size = @intCast(u32, elem_ty.abiSize(mod));

     switch (ptr) {
         .none => unreachable,
@@ -2722,19 +2729,20 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 }

 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const elem_ty = self.air.typeOfIndex(inst);
+    const elem_ty = self.typeOfIndex(inst);
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBits())
+        if (!elem_ty.hasRuntimeBits(mod))
             break :result MCValue.none;

         const ptr = try self.resolveInst(ty_op.operand);
-        const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
+        const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod);
         if (self.liveness.isUnused(inst) and !is_volatile)
             break :result MCValue.dead;

         const dest_mcv: MCValue = blk: {
-            const ptr_fits_dest = elem_ty.abiSize(self.target.*) <= 4;
+            const ptr_fits_dest = elem_ty.abiSize(mod) <= 4;
             if (ptr_fits_dest and self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
                 // The MCValue that holds the pointer can be re-used as the value.
                 break :blk ptr;
@@ -2742,7 +2750,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
                 break :blk try self.allocRegOrMem(elem_ty, true, inst);
             }
         };
-        try self.load(dest_mcv, ptr, self.air.typeOf(ty_op.operand));
+        try self.load(dest_mcv, ptr, self.typeOf(ty_op.operand));

         break :result dest_mcv;
     };
@@ -2750,7 +2758,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void {
-    const elem_size = @intCast(u32, value_ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const elem_size = @intCast(u32, value_ty.abiSize(mod));

     switch (ptr) {
         .none => unreachable,
@@ -2846,8 +2855,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ptr = try self.resolveInst(bin_op.lhs);
     const value = try self.resolveInst(bin_op.rhs);
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
-    const value_ty = self.air.typeOf(bin_op.rhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
+    const value_ty = self.typeOf(bin_op.rhs);

     try self.store(ptr, value, ptr_ty, value_ty);
@@ -2869,10 +2878,11 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {

 fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
     return if (self.liveness.isUnused(inst)) .dead else result: {
+        const mod = self.bin_file.options.module.?;
         const mcv = try self.resolveInst(operand);
-        const ptr_ty = self.air.typeOf(operand);
-        const struct_ty = ptr_ty.childType();
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
+        const ptr_ty = self.typeOf(operand);
+        const struct_ty = ptr_ty.childType(mod);
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
         switch (mcv) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off - struct_field_offset };
@@ -2892,11 +2902,12 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
     const operand = extra.struct_operand;
     const index = extra.field_index;
+    const mod = self.bin_file.options.module.?;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const mcv = try self.resolveInst(operand);
-        const struct_ty = self.air.typeOf(operand);
-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*));
-        const struct_field_ty = struct_ty.structFieldType(index);
+        const struct_ty = self.typeOf(operand);
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod));
+        const struct_field_ty = struct_ty.structFieldType(index, mod);

         switch (mcv) {
             .dead, .unreach => unreachable,
@@ -2959,10 +2970,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
             );

             const field_bit_offset = struct_field_offset * 8;
-            const field_bit_size = @intCast(u32, struct_field_ty.abiSize(self.target.*)) * 8;
+            const field_bit_size = @intCast(u32, struct_field_ty.abiSize(mod)) * 8;

             _ = try self.addInst(.{
-                .tag = if (struct_field_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx,
+                .tag = if (struct_field_ty.isSignedInt(mod)) Mir.Inst.Tag.sbfx else .ubfx,
                 .data = .{ .rr_lsb_width = .{
                     .rd = dest_reg,
                     .rn = operand_reg,
@@ -2981,17 +2992,18 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
         const field_ptr = try self.resolveInst(extra.field_ptr);
-        const struct_ty = self.air.getRefType(ty_pl.ty).childType();
+        const struct_ty = self.air.getRefType(ty_pl.ty).childType(mod);

-        if (struct_ty.zigTypeTag() == .Union) {
+        if (struct_ty.zigTypeTag(mod) == .Union) {
             return self.fail("TODO implement @fieldParentPtr codegen for unions", .{});
         }

-        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*));
+        const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, mod));
         switch (field_ptr) {
             .ptr_stack_offset => |off| {
                 break :result MCValue{ .ptr_stack_offset = off + struct_field_offset };
@@ -3375,12 +3387,12 @@ fn addSub(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 32) {
                 const lhs_immediate = try lhs_bind.resolveToImmediate(self);
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -3431,12 +3443,12 @@ fn mul(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 32) {
                 // TODO add optimisations for multiplication
                 // with immediates, for example a * 2 can be
@@ -3463,7 +3475,8 @@ fn divFloat(
     _ = rhs_ty;
     _ = maybe_inst;

-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         else => unreachable,
@@ -3479,12 +3492,12 @@ fn divTrunc(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 32) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -3522,12 +3535,12 @@ fn divFloor(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 32) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -3569,7 +3582,8 @@ fn divExact(
     _ = rhs_ty;
     _ = maybe_inst;

-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => return self.fail("TODO ARM div_exact", .{}),
@@ -3586,12 +3600,12 @@ fn rem(
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
     const mod = self.bin_file.options.module.?;
-    switch (lhs_ty.zigTypeTag()) {
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 32) {
                 switch (int_info.signedness) {
                     .signed => {
@@ -3654,7 +3668,8 @@ fn modulo(
     _ = rhs_ty;
     _ = maybe_inst;

-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Float => return self.fail("TODO ARM binary operations on floats", .{}),
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => return self.fail("TODO ARM mod", .{}),
@@ -3671,10 +3686,11 @@ fn wrappingArithmetic(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 32) {
                 // Generate an add/sub/mul
                 const result: MCValue = switch (tag) {
@@ -3708,12 +3724,12 @@ fn bitwise(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            const mod = self.bin_file.options.module.?;
             assert(lhs_ty.eql(rhs_ty, mod));
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 32) {
                 const lhs_immediate = try lhs_bind.resolveToImmediate(self);
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -3753,16 +3769,17 @@ fn shiftExact(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 32) {
                 const rhs_immediate = try rhs_bind.resolveToImmediate(self);

                 const mir_tag: Mir.Inst.Tag = switch (tag) {
                     .shl_exact => .lsl,
-                    .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) {
+                    .shr_exact => switch (lhs_ty.intInfo(mod).signedness) {
                         .signed => Mir.Inst.Tag.asr,
                         .unsigned => Mir.Inst.Tag.lsr,
                     },
@@ -3791,10 +3808,11 @@ fn shiftNormal(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
         .Int => {
-            const int_info = lhs_ty.intInfo(self.target.*);
+            const int_info = lhs_ty.intInfo(mod);
             if (int_info.bits <= 32) {
                 // Generate a shl_exact/shr_exact
                 const result: MCValue = switch (tag) {
@@ -3833,7 +3851,8 @@ fn booleanOp(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Bool => {
             const lhs_immediate = try lhs_bind.resolveToImmediate(self);
             const rhs_immediate = try rhs_bind.resolveToImmediate(self);
@@ -3866,17 +3885,17 @@ fn ptrArithmetic(
     rhs_ty: Type,
     maybe_inst: ?Air.Inst.Index,
 ) InnerError!MCValue {
-    switch (lhs_ty.zigTypeTag()) {
+    const mod = self.bin_file.options.module.?;
+    switch (lhs_ty.zigTypeTag(mod)) {
         .Pointer => {
-            const mod = self.bin_file.options.module.?;
             assert(rhs_ty.eql(Type.usize, mod));

             const ptr_ty = lhs_ty;
-            const elem_ty = switch (ptr_ty.ptrSize()) {
-                .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
-                else => ptr_ty.childType(),
+            const elem_ty = switch (ptr_ty.ptrSize(mod)) {
+                .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
+                else => ptr_ty.childType(mod),
             };
-            const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+            const elem_size = @intCast(u32, elem_ty.abiSize(mod));

             const base_tag: Air.Inst.Tag = switch (tag) {
                 .ptr_add => .add,
@@ -3903,11 +3922,12 @@ fn ptrArithmetic(
 }

 fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);

     const tag: Mir.Inst.Tag = switch (abi_size) {
-        1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb,
-        2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh,
+        1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb,
+        2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh,
         3, 4 => .ldr,
         else => unreachable,
     };
@@ -3924,7 +3944,7 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type)
     } };

     const data: Mir.Inst.Data = switch (abi_size) {
-        1 => if (ty.isSignedInt()) rr_extra_offset else rr_offset,
+        1 => if (ty.isSignedInt(mod)) rr_extra_offset else rr_offset,
         2 => rr_extra_offset,
         3, 4 => rr_offset,
         else => unreachable,
@@ -3937,7 +3957,8 @@ fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type)
 }

 fn genStrRegister(self: *Self, source_reg: Register, addr_reg: Register, ty: Type) !void {
-    const abi_size = ty.abiSize(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = ty.abiSize(mod);

     const tag: Mir.Inst.Tag = switch (abi_size) {
         1 => .strb,
@@ -4051,14 +4072,14 @@ fn genInlineMemset(
 ) !void {
     const dst_reg = switch (dst) {
         .register => |r| r,
-        else => try self.copyToTmpRegister(Type.initTag(.manyptr_u8), dst),
+        else => try self.copyToTmpRegister(Type.manyptr_u8, dst),
     };
     const dst_reg_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);

     const val_reg = switch (val) {
         .register => |r| r,
-        else => try self.copyToTmpRegister(Type.initTag(.u8), val),
+        else => try self.copyToTmpRegister(Type.u8, val),
     };
     const val_reg_lock = self.register_manager.lockReg(val_reg);
     defer if (val_reg_lock) |lock| self.register_manager.unlockReg(lock);
@@ -4143,7 +4164,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
     while (self.args[arg_index] == .none) arg_index += 1;
     self.arg_index = arg_index + 1;

-    const ty = self.air.typeOfIndex(inst);
+    const ty = self.typeOfIndex(inst);
     const tag = self.air.instructions.items(.tag)[inst];
     const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
     const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
@@ -4196,11 +4217,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     const callee = pl_op.operand;
     const extra = self.air.extraData(Air.Call, pl_op.payload);
     const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
-    const ty = self.air.typeOf(callee);
+    const ty = self.typeOf(callee);
+    const mod = self.bin_file.options.module.?;

-    const fn_ty = switch (ty.zigTypeTag()) {
+    const fn_ty = switch (ty.zigTypeTag(mod)) {
         .Fn => ty,
-        .Pointer => ty.childType(),
+        .Pointer => ty.childType(mod),
         else => unreachable,
     };

@@ -4225,16 +4247,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
     // untouched by the parameter passing code
     const r0_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
         log.debug("airCall: return by reference", .{});
-        const ret_ty = fn_ty.fnReturnType();
-        const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
-        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
+        const ret_ty = fn_ty.fnReturnType(mod);
+        const ret_abi_size = @intCast(u32, ret_ty.abiSize(mod));
+        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(mod));
         const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst);

-        var ptr_ty_payload: Type.Payload.ElemType = .{
-            .base = .{ .tag = .single_mut_pointer },
-            .data = ret_ty,
-        };
-        const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+        const ptr_ty = try mod.singleMutPtrType(ret_ty);

         try self.register_manager.getReg(.r0, null);
         try self.genSetReg(ptr_ty, .r0, .{ .ptr_stack_offset = stack_offset });
@@ -4249,7 +4267,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier

     for (info.args, 0..) |mc_arg, arg_i| {
         const arg = args[arg_i];
-        const arg_ty = self.air.typeOf(arg);
+        const arg_ty = self.typeOf(arg);
         const arg_mcv = try self.resolveInst(args[arg_i]);

         switch (mc_arg) {
@@ -4270,16 +4288,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier

     // Due to incremental compilation, how function calls are generated depends
     // on linking.
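Aside (not part of the patch): when `info.return_value` is `.stack_offset`, `airCall` above allocates the return slot in the caller and passes its address in r0 before the call; `airRet` then stores through `self.ret_mcv` instead of returning in a register. The same contract expressed in plain Zig, with hypothetical names:

```zig
const std = @import("std");

const Big = struct { a: u64, b: u64, c: u64 };

// The callee's "return" is a store through the caller-provided address,
// mirroring the pointer handed over in r0.
fn producer(out: *Big) void {
    out.* = .{ .a = 1, .b = 2, .c = 3 };
}

pub fn main() void {
    var slot: Big = undefined; // caller-owned return slot
    producer(&slot); // &slot plays the role of r0
    std.debug.print("{d} {d} {d}\n", .{ slot.a, slot.b, slot.c });
}
```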
- if (self.air.value(callee)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; - + if (try self.air.value(callee, mod)) |func_value| { + if (func_value.getFunction(mod)) |func| { if (self.bin_file.cast(link.File.Elf)) |elf_file| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); - try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .lr, .{ .memory = got_addr }); } else if (self.bin_file.cast(link.File.MachO)) |_| { unreachable; // unsupported architecture for MachO } else { @@ -4288,16 +4304,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier @tagName(self.target.cpu.arch), }); } - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (func_value.getExternFunc(mod)) |_| { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); - try self.genSetReg(Type.initTag(.usize), .lr, mcv); + try self.genSetReg(Type.usize, .lr, mcv); } // TODO: add Instruction.supportedOn @@ -4329,7 +4345,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (RegisterManager.indexOfRegIntoTracked(reg) == null) { // Save function return value into a tracked register log.debug("airCall: copying {} as it is not tracked", .{reg}); - const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(), info.return_value); + const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(mod), info.return_value); break :result MCValue{ .register = new_reg }; } }, @@ -4353,14 +4369,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, .immediate => { - assert(ret_ty.isError()); + assert(ret_ty.isError(mod)); }, .register => |reg| { // Return result by value @@ -4371,11 +4388,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { // // self.ret_mcv is an address to where this function // should store its result into - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ret_ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ret_ty); try self.store(self.ret_mcv, operand, ptr_ty, ret_ty); }, else => unreachable, // invalid return result @@ -4388,10 +4401,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { } fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const ret_ty = self.fn_type.fnReturnType(); + const ptr_ty = self.typeOf(un_op); + const ret_ty = self.fn_type.fnReturnType(mod); switch (self.ret_mcv) { .none => {}, @@ -4411,8 +4425,8 @@ fn airRetLoad(self: *Self, inst: 
Air.Inst.Index) !void { // location. const op_inst = Air.refToIndex(un_op).?; if (self.air.instructions.items(.tag)[op_inst] != .ret_ptr) { - const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - const abi_align = ret_ty.abiAlignment(self.target.*); + const abi_size = @intCast(u32, ret_ty.abiSize(mod)); + const abi_align = ret_ty.abiAlignment(mod); const offset = try self.allocMem(abi_size, abi_align, null); @@ -4432,7 +4446,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs_ty = self.air.typeOf(bin_op.lhs); + const lhs_ty = self.typeOf(bin_op.lhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op); @@ -4448,29 +4462,28 @@ fn cmp( lhs_ty: Type, op: math.CompareOperator, ) !MCValue { - var int_buffer: Type.Payload.Bits = undefined; - const int_ty = switch (lhs_ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Optional => blk: { - var opt_buffer: Type.Payload.ElemType = undefined; - const payload_ty = lhs_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - break :blk Type.initTag(.u1); - } else if (lhs_ty.isPtrLikeOptional()) { + const payload_ty = lhs_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + break :blk Type.u1; + } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { return self.fail("TODO ARM cmp non-pointer optionals", .{}); } }, .Float => return self.fail("TODO ARM cmp floats", .{}), - .Enum => lhs_ty.intTagType(&int_buffer), + .Enum => lhs_ty.intTagType(mod), .Int => lhs_ty, - .Bool => Type.initTag(.u1), + .Bool => Type.u1, .Pointer => Type.usize, - .ErrorSet => Type.initTag(.u16), + .ErrorSet => Type.u16, else => unreachable, }; - const int_info = int_ty.intInfo(self.target.*); + const int_info = int_ty.intInfo(mod); if (int_info.bits <= 32) { try self.spillCompareFlagsIfOccupied(); @@ -4555,8 +4568,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; + const mod = self.bin_file.options.module.?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); @@ -4571,7 +4585,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const operand = pl_op.operand; const tag = self.air.instructions.items(.tag)[inst]; - const ty = self.air.typeOf(operand); + const ty = self.typeOf(operand); const mcv = try self.resolveInst(operand); const name = self.air.nullTerminatedString(pl_op.payload); @@ -4636,8 +4650,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { // whether it needs to be spilled in the branches if (self.liveness.operandDies(inst, 0)) { const op_int = @enumToInt(pl_op.operand); - if (op_int >= Air.Inst.Ref.typed_value_map.len) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int >= Air.ref_start_index) { + const op_index = @intCast(Air.Inst.Index, 
op_int - Air.ref_start_index); self.processDeath(op_index); } } @@ -4726,7 +4740,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); + try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); @@ -4753,7 +4767,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value); + try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } @@ -4772,8 +4786,9 @@ fn isNull( operand_bind: ReadArg.Bind, operand_ty: Type, ) !MCValue { - if (operand_ty.isPtrLikeOptional()) { - assert(operand_ty.abiSize(self.target.*) == 4); + const mod = self.bin_file.options.module.?; + if (operand_ty.isPtrLikeOptional(mod)) { + assert(operand_ty.abiSize(mod) == 4); const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } }; return self.cmp(operand_bind, imm_bind, Type.usize, .eq); @@ -4797,7 +4812,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_bind: ReadArg.Bind = .{ .inst = un_op }; - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); break :result try self.isNull(operand_bind, operand_ty); }; @@ -4805,11 +4820,12 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const ptr_ty = self.typeOf(un_op); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -4823,7 +4839,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_bind: ReadArg.Bind = .{ .inst = un_op }; - const operand_ty = self.air.typeOf(un_op); + const operand_ty = self.typeOf(un_op); break :result try self.isNonNull(operand_bind, operand_ty); }; @@ -4831,11 +4847,12 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const ptr_ty = self.typeOf(un_op); + const 
elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -4850,9 +4867,10 @@ fn isErr( error_union_bind: ReadArg.Bind, error_union_ty: Type, ) !MCValue { - const error_type = error_union_ty.errorUnionSet(); + const mod = self.bin_file.options.module.?; + const error_type = error_union_ty.errorUnionSet(mod); - if (error_type.errorSetIsEmpty()) { + if (error_type.errorSetIsEmpty(mod)) { return MCValue{ .immediate = 0 }; // always false } @@ -4883,7 +4901,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; - const error_union_ty = self.air.typeOf(un_op); + const error_union_ty = self.typeOf(un_op); break :result try self.isErr(error_union_bind, error_union_ty); }; @@ -4891,11 +4909,12 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const ptr_ty = self.typeOf(un_op); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -4909,7 +4928,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; - const error_union_ty = self.air.typeOf(un_op); + const error_union_ty = self.typeOf(un_op); break :result try self.isNonErr(error_union_bind, error_union_ty); }; @@ -4917,11 +4936,12 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { } fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const elem_ty = ptr_ty.elemType(); + const ptr_ty = self.typeOf(un_op); + const elem_ty = ptr_ty.childType(mod); const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); @@ -4988,7 +5008,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; - const condition_ty = self.air.typeOf(pl_op.operand); + const condition_ty = self.typeOf(pl_op.operand); const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload); const liveness = try self.liveness.getSwitchBr( self.gpa, @@ -5131,9 +5151,10 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { } fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { + const mod = self.bin_file.options.module.?; const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + if (self.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if 
(block_mcv == .none) { @@ -5141,14 +5162,14 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { .none, .dead, .unreach => unreachable, .register, .stack_offset, .memory => operand_mcv, .immediate, .stack_argument_offset, .cpsr_flags => blk: { - const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block); - try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv); + const new_mcv = try self.allocRegOrMem(self.typeOfIndex(block), true, block); + try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv); break :blk new_mcv; }, else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}), }; } else { - try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -5212,7 +5233,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(self.air.typeOf(input), reg, arg_mcv); + try self.genSetReg(self.typeOf(input), reg, arg_mcv); } { @@ -5301,7 +5322,8 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5332,7 +5354,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro 1, 4 => { const offset = if (math.cast(u12, stack_offset)) |imm| blk: { break :blk Instruction.Offset.imm(imm); - } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none); + } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }), .none); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb, @@ -5355,7 +5377,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro 2 => { const offset = if (stack_offset <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset })); _ = try self.addInst(.{ .tag = .strh, @@ -5378,11 +5400,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0); + const wrapped_ty = ty.structFieldType(0, mod); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg }); - const overflow_bit_ty = ty.structFieldType(1); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*)); + const overflow_bit_ty = ty.structFieldType(1, mod); + const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); // C flag: movcs reg, #1 @@ -5420,11 +5442,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg = try self.copyToTmpRegister(ty, 
mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } else { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ty); // TODO call extern memcpy const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); @@ -5466,6 +5484,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -5640,17 +5659,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void }, .stack_offset => |off| { // TODO: maybe addressing from sp instead of fp - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb else .ldrb, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh else .ldrh, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb else .ldrb, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh else .ldrh, 3, 4 => .ldr, else => unreachable, }; const extra_offset = switch (abi_size) { - 1 => ty.isSignedInt(), + 1 => ty.isSignedInt(mod), 2 => true, 3, 4 => false, else => unreachable, @@ -5659,7 +5678,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void if (extra_offset) { const offset = if (off <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, off)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.usize), MCValue{ .immediate = off })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off })); _ = try self.addInst(.{ .tag = tag, @@ -5675,7 +5694,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } else { const offset = if (off <= math.maxInt(u12)) blk: { break :blk Instruction.Offset.imm(@intCast(u12, off)); - } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.usize), MCValue{ .immediate = off }), .none); + } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.usize, MCValue{ .immediate = off }), .none); _ = try self.addInst(.{ .tag = tag, @@ -5691,11 +5710,11 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } }, .stack_argument_offset => |off| { - const abi_size = ty.abiSize(self.target.*); + const abi_size = ty.abiSize(mod); const tag: Mir.Inst.Tag = switch (abi_size) { - 1 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument, - 2 => if (ty.isSignedInt()) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument, + 1 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsb_stack_argument else .ldrb_stack_argument, + 2 => if (ty.isSignedInt(mod)) Mir.Inst.Tag.ldrsh_stack_argument else .ldrh_stack_argument, 3, 4 => .ldr_stack_argument, else => unreachable, }; @@ -5712,7 +5731,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); 
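[editor's note] Both memcpy-style fallbacks in this file (in `genSetStack` above, and again in `genSetStackArgument` below) need a `*T` pointer type for the copy loop. The old idiom faked a one-off `Type` on the stack; the new code interns it through the Module instead, so the type needs no local backing memory:

    // Before: a temporary Type backed by stack memory.
    //     var ptr_ty_payload: Type.Payload.ElemType = .{
    //         .base = .{ .tag = .single_mut_pointer },
    //         .data = ty,
    //     };
    //     const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
    // After: one InternPool query via the Module handle.
    const ptr_ty = try mod.singleMutPtrType(ty);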
switch (mcv) { .dead => unreachable, .none, .unreach => return, @@ -5732,7 +5752,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I 1, 4 => { const offset = if (math.cast(u12, stack_offset)) |imm| blk: { break :blk Instruction.Offset.imm(imm); - } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset }), .none); + } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset }), .none); const tag: Mir.Inst.Tag = switch (abi_size) { 1 => .strb, @@ -5752,7 +5772,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I 2 => { const offset = if (stack_offset <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, stack_offset)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = stack_offset })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.u32, MCValue{ .immediate = stack_offset })); _ = try self.addInst(.{ .tag = .strh, @@ -5779,11 +5799,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArgument(ty, stack_offset, MCValue{ .register = reg }); } else { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ty); // TODO call extern memcpy const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp); @@ -5862,7 +5878,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { }; defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); - const dest_ty = self.air.typeOfIndex(inst); + const dest_ty = self.typeOfIndex(inst); const dest = try self.allocRegOrMem(dest_ty, true, inst); try self.setRegOrMem(dest_ty, dest, operand); break :result dest; @@ -5871,16 +5887,17 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); - const array_ty = ptr_ty.childType(); - const array_len = @intCast(u32, array_ty.arrayLen()); + const array_ty = ptr_ty.childType(mod); + const array_len = @intCast(u32, array_ty.arrayLen(mod)); const stack_offset = try self.allocMem(8, 8, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - 4, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - 4, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -5989,8 +6006,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const vector_ty = self.air.typeOfIndex(inst); - const len = vector_ty.vectorLen(); + const mod = self.bin_file.options.module.?; + const vector_ty = self.typeOfIndex(inst); + const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; 
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = res: { @@ -6038,9 +6056,10 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const body = self.air.extra[extra.end..][0..extra.data.body_len]; const result: MCValue = result: { const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; - const error_union_ty = self.air.typeOf(pl_op.operand); - const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const error_union_align = error_union_ty.abiAlignment(self.target.*); + const error_union_ty = self.typeOf(pl_op.operand); + const mod = self.bin_file.options.module.?; + const error_union_size = @intCast(u32, error_union_ty.abiSize(mod)); + const error_union_align = error_union_ty.abiAlignment(mod); // The error union will die in the body. However, we need the // error union after the body in order to extract the payload @@ -6069,37 +6088,32 @@ fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } + const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) + const inst_ty = self.typeOf(inst); + if (!inst_ty.hasRuntimeBitsIgnoreComptime(mod) and !inst_ty.isError(mod)) return MCValue{ .none = {} }; - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ + .ty = inst_ty, + .val = (try self.air.value(inst, mod)).?, + }); + switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outer most table. const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + const interned = self.air.instructions.items(.data)[inst_index].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } @@ -6152,12 +6166,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. 
.return_value = undefined, .stack_byte_count = undefined, @@ -6165,7 +6178,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -6180,12 +6193,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var ncrn: usize = 0; // Next Core Register Number var nsaa: u32 = 0; // Next stacked argument address - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); // TODO handle cases where multiple registers are used if (ret_ty_size <= 4) { result.return_value = .{ .register = c_abi_int_return_regs[0] }; @@ -6199,11 +6212,11 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { } } - for (param_types, 0..) |ty, i| { - if (ty.abiAlignment(self.target.*) == 8) + for (fn_info.param_types, 0..) |ty, i| { + if (ty.toType().abiAlignment(mod) == 8) ncrn = std.mem.alignForwardGeneric(usize, ncrn, 2); - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (std.math.divCeil(u32, param_size, 4) catch unreachable <= 4 - ncrn) { if (param_size <= 4) { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; @@ -6215,7 +6228,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { return self.fail("TODO MCValues split between registers and stack", .{}); } else { ncrn = 4; - if (ty.abiAlignment(self.target.*) == 8) + if (ty.toType().abiAlignment(mod) == 8) nsaa = std.mem.alignForwardGeneric(u32, nsaa, 8); result.args[i] = .{ .stack_argument_offset = nsaa }; @@ -6227,14 +6240,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { result.stack_align = 8; }, .Unspecified => { - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); if (ret_ty_size == 0) { - assert(ret_ty.isError()); + assert(ret_ty.isError(mod)); result.return_value = .{ .immediate = 0 }; } else if (ret_ty_size <= 4) { result.return_value = .{ .register = .r0 }; @@ -6249,10 +6262,10 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var stack_offset: u32 = 0; - for (param_types, 0..) |ty, i| { - if (ty.abiSize(self.target.*) > 0) { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); - const param_alignment = ty.abiAlignment(self.target.*); + for (fn_info.param_types, 0..) 
|ty, i| { + if (ty.toType().abiSize(mod) > 0) { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); + const param_alignment = ty.toType().abiAlignment(mod); stack_offset = std.mem.alignForwardGeneric(u32, stack_offset, param_alignment); result.args[i] = .{ .stack_argument_offset = stack_offset }; @@ -6301,3 +6314,13 @@ fn parseRegName(name: []const u8) ?Register { } return std.meta.stringToEnum(Register, name); } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, &mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, &mod.intern_pool); +} diff --git a/src/arch/arm/abi.zig b/src/arch/arm/abi.zig index 8b9ec45e24..e4a07f22bf 100644 --- a/src/arch/arm/abi.zig +++ b/src/arch/arm/abi.zig @@ -1,8 +1,10 @@ const std = @import("std"); +const assert = std.debug.assert; const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); pub const Class = union(enum) { memory, @@ -22,28 +24,28 @@ pub const Class = union(enum) { pub const Context = enum { ret, arg }; -pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { - std.debug.assert(ty.hasRuntimeBitsIgnoreComptime()); +pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class { + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); var maybe_float_bits: ?u16 = null; const max_byval_size = 512; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => { - const bit_size = ty.bitSize(target); - if (ty.containerLayout() == .Packed) { + const bit_size = ty.bitSize(mod); + if (ty.containerLayout(mod) == .Packed) { if (bit_size > 64) return .memory; return .byval; } if (bit_size > max_byval_size) return .memory; - const float_count = countFloats(ty, target, &maybe_float_bits); + const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= byval_float_count) return .byval; - const fields = ty.structFieldCount(); + const fields = ty.structFieldCount(mod); var i: u32 = 0; while (i < fields) : (i += 1) { - const field_ty = ty.structFieldType(i); - const field_alignment = ty.structFieldAlign(i, target); - const field_size = field_ty.bitSize(target); + const field_ty = ty.structFieldType(i, mod); + const field_alignment = ty.structFieldAlign(i, mod); + const field_size = field_ty.bitSize(mod); if (field_size > 32 or field_alignment > 32) { return Class.arrSize(bit_size, 64); } @@ -51,17 +53,17 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { return Class.arrSize(bit_size, 32); }, .Union => { - const bit_size = ty.bitSize(target); - if (ty.containerLayout() == .Packed) { + const bit_size = ty.bitSize(mod); + if (ty.containerLayout(mod) == .Packed) { if (bit_size > 64) return .memory; return .byval; } if (bit_size > max_byval_size) return .memory; - const float_count = countFloats(ty, target, &maybe_float_bits); + const float_count = countFloats(ty, mod, &maybe_float_bits); if (float_count <= byval_float_count) return .byval; - for (ty.unionFields().values()) |field| { - if (field.ty.bitSize(target) > 32 or field.normalAlignment(target) > 32) { + for (ty.unionFields(mod).values()) |field| { + if (field.ty.bitSize(mod) > 32 or field.normalAlignment(mod) > 32) { return Class.arrSize(bit_size, 64); } } @@ -71,28 +73,28 @@ pub 
fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { .Int => { // TODO this is incorrect for _BitInt(128) but implementing // this correctly makes implementing compiler-rt impossible. - // const bit_size = ty.bitSize(target); + // const bit_size = ty.bitSize(mod); // if (bit_size > 64) return .memory; return .byval; }, .Enum, .ErrorSet => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > 64) return .memory; return .byval; }, .Vector => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); // TODO is this controlled by a cpu feature? if (ctx == .ret and bit_size > 128) return .memory; if (bit_size > 512) return .memory; return .byval; }, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + assert(ty.isPtrLikeOptional(mod)); return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + assert(!ty.isSlice(mod)); return .byval; }, .ErrorUnion, @@ -114,14 +116,15 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class { } const byval_float_count = 4; -fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 { +fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 { + const target = mod.getTarget(); const invalid = std.math.maxInt(u32); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Union => { - const fields = ty.unionFields(); + const fields = ty.unionFields(mod); var max_count: u32 = 0; for (fields.values()) |field| { - const field_count = countFloats(field.ty, target, maybe_float_bits); + const field_count = countFloats(field.ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; if (field_count > max_count) max_count = field_count; if (max_count > byval_float_count) return invalid; @@ -129,12 +132,12 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 { return max_count; }, .Struct => { - const fields_len = ty.structFieldCount(); + const fields_len = ty.structFieldCount(mod); var count: u32 = 0; var i: u32 = 0; while (i < fields_len) : (i += 1) { - const field_ty = ty.structFieldType(i); - const field_count = countFloats(field_ty, target, maybe_float_bits); + const field_ty = ty.structFieldType(i, mod); + const field_count = countFloats(field_ty, mod, maybe_float_bits); if (field_count == invalid) return invalid; count += field_count; if (count > byval_float_count) return invalid; diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 5fb07c5fdc..809c388532 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -217,7 +217,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -228,6 +228,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -347,7 +348,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { } fn gen(self: *Self) !void { - const cc = self.fn_type.fnCallingConvention(); + const mod = self.bin_file.options.module.?; + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // TODO Finish function prologue and epilogue for riscv64. 
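[editor's note] The same two private helpers are appended to each backend in this diff (see the ends of the arm and riscv64 CodeGen files): `Air.typeOf` now needs the `InternPool` to decode interned `Ref`s, so it is wrapped once instead of threading the pool through every call site. A sketch of the resulting call-site pattern; `exampleAbiSize` is hypothetical, the rest mirrors the surrounding code:

    fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
        const mod = self.bin_file.options.module.?;
        return self.air.typeOf(inst, &mod.intern_pool);
    }

    // Hypothetical call site: every Type query that used to take
    // `self.target.*` now takes the Module handle instead.
    fn exampleAbiSize(self: *Self, inst: Air.Inst.Ref) u32 {
        const mod = self.bin_file.options.module.?;
        const ty = self.typeOf(inst); // was: self.air.typeOf(inst)
        return @intCast(u32, ty.abiSize(mod)); // was: ty.abiSize(self.target.*)
    }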
@@ -470,13 +472,14 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; - } const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); @@ -656,8 +659,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -727,8 +729,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. + assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -755,8 +756,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; @@ -804,23 +805,23 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u /// Use a pointer instruction as the basis for allocating stack memory. 
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const elem_ty = self.air.typeOfIndex(inst).elemType(); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst).childType(mod); + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); return self.allocMem(inst, abi_size, abi_align); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const elem_ty = self.air.typeOfIndex(inst); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst); + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); if (abi_align > self.stack_align) self.stack_align = abi_align; @@ -845,7 +846,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void assert(reg == reg_mcv.register); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } /// Copies a value to a register without tracking the register. The register is not considered @@ -862,7 +863,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { /// This can have a side effect of spilling instructions to the stack to free up a register. 
fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { const reg = try self.register_manager.allocReg(reg_owner, gp); - try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv); + try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } @@ -893,10 +894,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); - const operand_ty = self.air.typeOf(ty_op.operand); + const mod = self.bin_file.options.module.?; + const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); + const info_a = operand_ty.intInfo(mod); + const info_b = self.typeOfIndex(inst).intInfo(mod); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); @@ -1068,18 +1070,18 @@ fn binOp( lhs_ty: Type, rhs_ty: Type, ) InnerError!MCValue { + const mod = self.bin_file.options.module.?; switch (tag) { // Arithmetic operations on integers and floats .add, .sub, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // TODO immediate operands return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty); @@ -1093,14 +1095,14 @@ fn binOp( .ptr_add, .ptr_sub, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), + const elem_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); if (elem_size == 1) { const base_tag: Air.Inst.Tag = switch (tag) { @@ -1125,8 +1127,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -1137,8 +1139,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try 
self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty); return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -1331,10 +1333,11 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void { fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const optional_ty = self.air.typeOfIndex(inst); + const mod = self.bin_file.options.module.?; + const optional_ty = self.typeOfIndex(inst); // Optional with a zero-bit payload type is just a boolean true - if (optional_ty.abiSize(self.target.*) == 1) + if (optional_ty.abiSize(mod) == 1) break :result MCValue{ .immediate = 1 }; return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); @@ -1498,7 +1501,8 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { - const elem_ty = ptr_ty.elemType(); + const mod = self.bin_file.options.module.?; + const elem_ty = ptr_ty.childType(mod); switch (ptr) { .none => unreachable, .undef => unreachable, @@ -1523,14 +1527,15 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); + const elem_ty = self.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; @@ -1542,7 +1547,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); + try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1583,8 +1588,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const value_ty = self.air.typeOf(bin_op.rhs); + const ptr_ty = self.typeOf(bin_op.lhs); + const value_ty = self.typeOf(bin_op.rhs); try self.store(ptr, value, ptr_ty, value_ty); @@ -1644,7 +1649,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { const arg_index = self.arg_index; self.arg_index += 1; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); _ = ty; const result = self.args[arg_index]; @@ -1698,9 +1703,10 @@ fn airFence(self: *Self) !void { } fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void { + const mod = self.bin_file.options.module.?; if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{}); const pl_op = self.air.instructions.items(.data)[inst].pl_op; - const fn_ty = self.air.typeOf(pl_op.operand); + const fn_ty = self.typeOf(pl_op.operand); const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, 
pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); @@ -1713,7 +1719,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier if (self.bin_file.cast(link.File.Elf)) |elf_file| { for (info.args, 0..) |mc_arg, arg_i| { const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -1736,14 +1742,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } } - if (self.air.value(callee)) |func_value| { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; + if (try self.air.value(callee, mod)) |func_value| { + if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); _ = try atom.getOrCreateOffsetTableEntry(elf_file); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file)); - try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jalr, .data = .{ .i_type = .{ @@ -1752,7 +1757,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .imm12 = 0, } }, }); - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); @@ -1796,7 +1801,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier } fn ret(self: *Self, mcv: MCValue) !void { - const ret_ty = self.fn_type.fnReturnType(); + const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); // Just add space for an instruction, patch this later const index = try self.addInst(.{ @@ -1825,10 +1831,10 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); const mod = self.bin_file.options.module.?; - assert(ty.eql(self.air.typeOf(bin_op.rhs), mod)); - if (ty.zigTypeTag() == .ErrorSet) + assert(ty.eql(self.typeOf(bin_op.rhs), mod)); + if (ty.zigTypeTag(mod) == .ErrorSet) return self.fail("TODO implement cmp for errors", .{}); const lhs = try self.resolveInst(bin_op.lhs); @@ -1869,8 +1875,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; + const mod = self.bin_file.options.module.?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); @@ -1946,7 +1953,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + try 
self.load(operand, operand_ptr, self.typeOf(un_op)); break :result try self.isNull(operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -1973,7 +1980,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + try self.load(operand, operand_ptr, self.typeOf(un_op)); break :result try self.isNonNull(operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -2000,7 +2007,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + try self.load(operand, operand_ptr, self.typeOf(un_op)); break :result try self.isErr(operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -2027,7 +2034,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + try self.load(operand, operand_ptr, self.typeOf(un_op)); break :result try self.isNonErr(operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -2107,13 +2114,14 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + const mod = self.bin_file.options.module.?; + if (self.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { block_data.mcv = operand_mcv; } else { - try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -2176,7 +2184,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(self.air.typeOf(input), reg, arg_mcv); + try self.genSetReg(self.typeOf(input), reg, arg_mcv); } { @@ -2372,7 +2380,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); const dest = try self.allocRegOrMem(inst, true); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand); + try self.setRegOrMem(self.typeOfIndex(inst), dest, operand); break :result dest; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -2489,8 +2497,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const vector_ty = self.air.typeOfIndex(inst); - const len = vector_ty.vectorLen(); + const mod = self.bin_file.options.module.?; + const vector_ty = self.typeOfIndex(inst); + const len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = res: { @@ -2533,37 +2542,32 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. 
- const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBits()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } + const mod = self.bin_file.options.module.?; // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBits()) + const inst_ty = self.typeOf(inst); + if (!inst_ty.hasRuntimeBits(mod)) return MCValue{ .none = {} }; - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{ + .ty = inst_ty, + .val = (try self.air.value(inst, mod)).?, + }); + switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { + .interned => { // Constants have static lifetimes, so they are always memoized in the outermost table. const branch = &self.branch_stack.items[0]; const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + const interned = self.air.instructions.items(.data)[inst_index].interned; gop.value_ptr.* = try self.genTypedValue(.{ .ty = inst_ty, - .val = self.air.values[ty_pl.payload], + .val = interned.toValue(), }); } return gop.value_ptr.*; }, - .const_ty => unreachable, else => return self.getResolvedInstValue(inst_index), } } @@ -2616,12 +2620,11 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -2629,7 +2632,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -2649,8 +2652,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var next_stack_offset: u32 = 0; const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 }; - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + for (fn_info.param_types, 0..)
|ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -2680,14 +2683,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}), } - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBits(mod)) { result.return_value = .{ .none = {} }; } else switch (cc) { .Naked => unreachable, .Unspecified, .C => { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); if (ret_ty_size <= 8) { result.return_value = .{ .register = .a0 }; } else if (ret_ty_size <= 16) { @@ -2731,3 +2734,13 @@ fn parseRegName(name: []const u8) ?Register { } return std.meta.stringToEnum(Register, name); } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, &mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, &mod.intern_pool); +} diff --git a/src/arch/riscv64/abi.zig b/src/arch/riscv64/abi.zig index bec1b49a4e..41a1850635 100644 --- a/src/arch/riscv64/abi.zig +++ b/src/arch/riscv64/abi.zig @@ -3,17 +3,19 @@ const bits = @import("bits.zig"); const Register = bits.Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); pub const Class = enum { memory, byval, integer, double_integer }; -pub fn classifyType(ty: Type, target: std.Target) Class { - std.debug.assert(ty.hasRuntimeBitsIgnoreComptime()); +pub fn classifyType(ty: Type, mod: *Module) Class { + const target = mod.getTarget(); + std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod)); const max_byval_size = target.ptrBitWidth() * 2; - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct => { - const bit_size = ty.bitSize(target); - if (ty.containerLayout() == .Packed) { + const bit_size = ty.bitSize(mod); + if (ty.containerLayout(mod) == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; } @@ -23,8 +25,8 @@ pub fn classifyType(ty: Type, target: std.Target) Class { return .integer; }, .Union => { - const bit_size = ty.bitSize(target); - if (ty.containerLayout() == .Packed) { + const bit_size = ty.bitSize(mod); + if (ty.containerLayout(mod) == .Packed) { if (bit_size > max_byval_size) return .memory; return .byval; } @@ -36,21 +38,21 @@ pub fn classifyType(ty: Type, target: std.Target) Class { .Bool => return .integer, .Float => return .byval, .Int, .Enum, .ErrorSet => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > max_byval_size) return .memory; return .byval; }, .Vector => { - const bit_size = ty.bitSize(target); + const bit_size = ty.bitSize(mod); if (bit_size > max_byval_size) return .memory; return .integer; }, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + std.debug.assert(ty.isPtrLikeOptional(mod)); return .byval; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + std.debug.assert(!ty.isSlice(mod)); return .byval; }, .ErrorUnion, diff --git a/src/arch/sparc64/CodeGen.zig 
b/src/arch/sparc64/CodeGen.zig index b70bc0f73d..b660126604 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -260,7 +260,7 @@ const BigTomb = struct { pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -271,12 +271,11 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; - log.debug("fn {s}", .{fn_owner_decl.name}); - var branch_stack = std.ArrayList(Branch).init(bin_file.allocator); defer { assert(branch_stack.items.len == 1); @@ -363,7 +362,8 @@ pub fn generate( } fn gen(self: *Self) !void { - const cc = self.fn_type.fnCallingConvention(); + const mod = self.bin_file.options.module.?; + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { // TODO Finish function prologue and epilogue for sparc64. @@ -490,13 +490,14 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { // TODO: remove now-redundant isUnused calls from AIR handler functions - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) { + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; - } const old_air_bookkeeping = self.air_bookkeeping; try self.ensureProcessDeathCapacity(Liveness.bpi); @@ -676,8 +677,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -758,18 +758,18 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); switch (int_info.bits) { 32, 64 => { // Only say yes if the operation is @@ -836,8 +836,9 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const vector_ty = self.air.typeOfIndex(inst); - const len = vector_ty.vectorLen(); + const mod = self.bin_file.options.module.?; + const vector_ty = self.typeOfIndex(inst); + const 
len = vector_ty.vectorLen(mod); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = res: { @@ -869,19 +870,20 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_ty = self.air.typeOf(ty_op.operand); + const ptr_ty = self.typeOf(ty_op.operand); const ptr = try self.resolveInst(ty_op.operand); - const array_ty = ptr_ty.childType(); - const array_len = @intCast(u32, array_ty.arrayLen()); + const array_ty = ptr_ty.childType(mod); + const array_len = @intCast(u32, array_ty.arrayLen(mod)); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); const stack_offset = try self.allocMem(inst, ptr_bytes * 2, ptr_bytes * 2); try self.genSetStack(ptr_ty, stack_offset, ptr); - try self.genSetStack(Type.initTag(.usize), stack_offset - ptr_bytes, .{ .immediate = array_len }); + try self.genSetStack(Type.usize, stack_offset - ptr_bytes, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -935,7 +937,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const arg_mcv = try self.resolveInst(input); try self.register_manager.getReg(reg, null); - try self.genSetReg(self.air.typeOf(input), reg, arg_mcv); + try self.genSetReg(self.typeOf(input), reg, arg_mcv); } { @@ -1008,17 +1010,17 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } fn airArg(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const arg_index = self.arg_index; self.arg_index += 1; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const arg = self.args[arg_index]; const mcv = blk: { switch (arg) { .stack_offset => |off| { - const mod = self.bin_file.options.module.?; - const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse { + const abi_size = math.cast(u32, ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); }; const offset = off + abi_size; @@ -1063,8 +1065,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else @@ -1088,8 +1090,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else @@ -1115,7 +1117,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); const 
dest = try self.allocRegOrMem(inst, true); - try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand); + try self.setRegOrMem(self.typeOfIndex(inst), dest, operand); break :result dest; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1203,6 +1205,7 @@ fn airBreakpoint(self: *Self) !void { } fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; // We have a hardware byteswapper in SPARCv9; don't let mainstream compilers mislead you. @@ -1217,15 +1220,15 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void { // TODO: Fold byteswap+store into a single ST*A and load+byteswap into a single LD*A. const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - switch (operand_ty.zigTypeTag()) { + const operand_ty = self.typeOf(ty_op.operand); + switch (operand_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO byteswap for vectors", .{}), .Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits == 8) break :result operand; const abi_size = int_info.bits >> 3; - const abi_align = operand_ty.abiAlignment(self.target.*); + const abi_align = operand_ty.abiAlignment(mod); const opposite_endian_asi = switch (self.target.cpu.arch.endian()) { Endian.Big => ASI.asi_primary_little, Endian.Little => ASI.asi_primary, @@ -1293,10 +1296,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]); - const ty = self.air.typeOf(callee); - const fn_ty = switch (ty.zigTypeTag()) { + const ty = self.typeOf(callee); + const mod = self.bin_file.options.module.?; + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; @@ -1316,7 +1320,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier for (info.args, 0..) |mc_arg, arg_i| { const arg = args[arg_i]; - const arg_ty = self.air.typeOf(arg); + const arg_ty = self.typeOf(arg); const arg_mcv = try self.resolveInst(arg); switch (mc_arg) { @@ -1337,10 +1341,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier // Due to incremental compilation, how function calls are generated depends // on linking.
- if (self.air.value(callee)) |func_value| { + if (try self.air.value(callee, mod)) |func_value| { if (self.bin_file.tag == link.File.Elf.base_tag) { - if (func_value.castTag(.function)) |func_payload| { - const func = func_payload.data; + if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| { const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl); const atom = elf_file.getAtom(atom_index); @@ -1348,7 +1351,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file)); } else unreachable; - try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr }); + try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr }); _ = try self.addInst(.{ .tag = .jmpl, @@ -1367,14 +1370,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .tag = .nop, .data = .{ .nop = {} }, }); - } else if (func_value.castTag(.extern_fn)) |_| { + } else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) { return self.fail("TODO implement calling extern functions", .{}); } else { return self.fail("TODO implement calling bitcasted functions", .{}); } } else @panic("TODO SPARCv9 currently does not support non-ELF binaries"); } else { - assert(ty.zigTypeTag() == .Pointer); + assert(ty.zigTypeTag(mod) == .Pointer); const mcv = try self.resolveInst(callee); try self.genSetReg(ty, .o7, mcv); @@ -1422,25 +1425,24 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void { fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); + const lhs_ty = self.typeOf(bin_op.lhs); - var int_buffer: Type.Payload.Bits = undefined; - const int_ty = switch (lhs_ty.zigTypeTag()) { + const int_ty = switch (lhs_ty.zigTypeTag(mod)) { .Vector => unreachable, // Handled by cmp_vector. 
- .Enum => lhs_ty.intTagType(&int_buffer), + .Enum => lhs_ty.intTagType(mod), .Int => lhs_ty, - .Bool => Type.initTag(.u1), + .Bool => Type.u1, .Pointer => Type.usize, - .ErrorSet => Type.initTag(.u16), + .ErrorSet => Type.u16, .Optional => blk: { - var opt_buffer: Type.Payload.ElemType = undefined; - const payload_ty = lhs_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - break :blk Type.initTag(.u1); - } else if (lhs_ty.isPtrLikeOptional()) { + const payload_ty = lhs_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + break :blk Type.u1; + } else if (lhs_ty.isPtrLikeOptional(mod)) { break :blk Type.usize; } else { return self.fail("TODO SPARCv9 cmp non-pointer optionals", .{}); @@ -1450,7 +1452,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { else => unreachable, }; - const int_info = int_ty.intInfo(self.target.*); + const int_info = int_ty.intInfo(mod); if (int_info.bits <= 64) { _ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, BinOpMetadata{ .lhs = bin_op.lhs, @@ -1512,8 +1514,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { // whether it needs to be spilled in the branches if (self.liveness.operandDies(inst, 0)) { const op_int = @enumToInt(pl_op.operand); - if (op_int >= Air.Inst.Ref.typed_value_map.len) { - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int >= Air.ref_start_index) { + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } } @@ -1603,7 +1605,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); + try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); @@ -1630,7 +1632,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. 
- try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value); + try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } @@ -1656,8 +1658,9 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { } fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { - const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + const ty_fn = self.air.instructions.items(.data)[inst].ty_fn; + const mod = self.bin_file.options.module.?; + const function = mod.funcPtr(ty_fn.func); // TODO emit debug info for function change _ = function; return self.finishAir(inst, .dead, .{ .none, .none, .none }); @@ -1752,10 +1755,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); - const operand_ty = self.air.typeOf(ty_op.operand); + const mod = self.bin_file.options.module.?; + const operand_ty = self.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); + const info_a = operand_ty.intInfo(mod); + const info_b = self.typeOfIndex(inst).intInfo(mod); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); @@ -1777,7 +1781,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); break :result try self.isErr(ty, operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -1787,7 +1791,7 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); + const ty = self.typeOf(un_op); break :result try self.isNonErr(ty, operand); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); @@ -1812,15 +1816,16 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { } fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const elem_ty = self.air.typeOfIndex(inst); - const elem_size = elem_ty.abiSize(self.target.*); + const elem_ty = self.typeOfIndex(inst); + const elem_size = elem_ty.abiSize(mod); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBits(mod)) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); - const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod); if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; @@ -1835,7 +1840,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { break :blk try self.allocRegOrMem(inst, true); } }; - try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); + try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand)); break :result dst_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ 
-1878,8 +1883,8 @@ fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead @@ -1893,8 +1898,8 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); assert(lhs_ty.eql(rhs_ty, self.bin_file.options.module.?)); if (self.liveness.isUnused(inst)) @@ -2037,18 +2042,18 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { //const tag = self.air.instructions.items(.tag)[inst]; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}), .Int => { - const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); switch (int_info.bits) { 1...32 => { try self.spillConditionFlagsIfOccupied(); @@ -2101,9 +2106,10 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); + const operand_ty = self.typeOf(ty_op.operand); switch (operand) { .dead => unreachable, .unreach => unreachable, @@ -2116,7 +2122,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }; }, else => { - switch (operand_ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag(mod)) { .Bool => { const op_reg = switch (operand) { .register => |r| r, @@ -2150,7 +2156,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { }, .Vector => return self.fail("TODO bitwise not for vectors", .{}), .Int => { - const int_info = operand_ty.intInfo(self.target.*); + const int_info = operand_ty.intInfo(mod); if (int_info.bits <= 64) { const op_reg = switch (operand) { .register => |r| r, @@ -2280,8 +2286,8 @@ fn airRem(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = 
self.typeOf(bin_op.rhs); // TODO add safety check @@ -2332,16 +2338,17 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; + const mod = self.bin_file.options.module.?; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs = try self.resolveInst(extra.lhs); const rhs = try self.resolveInst(extra.rhs); - const lhs_ty = self.air.typeOf(extra.lhs); - const rhs_ty = self.air.typeOf(extra.rhs); + const lhs_ty = self.typeOf(extra.lhs); + const rhs_ty = self.typeOf(extra.rhs); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl_with_overflow for vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { try self.spillConditionFlagsIfOccupied(); @@ -2423,9 +2430,9 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const len_ty = self.typeOf(bin_op.rhs); const ptr_bits = self.target.ptrBitWidth(); const ptr_bytes = @divExact(ptr_bits, 8); @@ -2439,6 +2446,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { } fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -2447,12 +2455,11 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { const slice_mcv = try self.resolveInst(bin_op.lhs); const index_mcv = try self.resolveInst(bin_op.rhs); - const slice_ty = self.air.typeOf(bin_op.lhs); - const elem_ty = slice_ty.childType(); - const elem_size = elem_ty.abiSize(self.target.*); + const slice_ty = self.typeOf(bin_op.lhs); + const elem_ty = slice_ty.childType(mod); + const elem_size = elem_ty.abiSize(mod); - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); + const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod); const index_lock: ?RegisterLock = if (index_mcv == .register) self.register_manager.lockRegAssumeUnused(index_mcv.register) @@ -2537,8 +2544,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); - const value_ty = self.air.typeOf(bin_op.rhs); + const ptr_ty = self.typeOf(bin_op.lhs); + const value_ty = self.typeOf(bin_op.rhs); try self.store(ptr, value, ptr_ty, value_ty); @@ -2564,9 +2571,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const operand = extra.struct_operand; const index = extra.field_index; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const struct_ty = self.air.typeOf(operand); - const struct_field_offset = @intCast(u32,
struct_ty.structFieldOffset(index, self.target.*)); + const struct_ty = self.typeOf(operand); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .dead, .unreach => unreachable, @@ -2651,8 +2659,8 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const operand_ty = self.air.typeOf(ty_op.operand); - const dest_ty = self.air.typeOfIndex(inst); + const operand_ty = self.typeOf(ty_op.operand); + const dest_ty = self.typeOfIndex(inst); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { break :blk try self.trunc(inst, operand, operand_ty, dest_ty); @@ -2666,7 +2674,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; const result: MCValue = result: { - const error_union_ty = self.air.typeOf(pl_op.operand); + const error_union_ty = self.typeOf(pl_op.operand); const error_union = try self.resolveInst(pl_op.operand); const is_err_result = try self.isErr(error_union_ty, error_union); const reloc = try self.condBr(is_err_result); @@ -2696,12 +2704,13 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const error_union_ty = self.air.typeOf(ty_op.operand); - const payload_ty = error_union_ty.errorUnionPayload(); + const error_union_ty = self.typeOf(ty_op.operand); + const payload_ty = error_union_ty.errorUnionPayload(mod); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBits()) break :result mcv; + if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement unwrap error union error for non-empty payloads", .{}); }; @@ -2709,11 +2718,12 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { } fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const error_union_ty = self.air.typeOf(ty_op.operand); - const payload_ty = error_union_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBits()) break :result MCValue.none; + const error_union_ty = self.typeOf(ty_op.operand); + const payload_ty = error_union_ty.errorUnionPayload(mod); + if (!payload_ty.hasRuntimeBits(mod)) break :result MCValue.none; return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{}); }; @@ -2722,12 +2732,13 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { /// E to E!T fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); - const payload_ty = error_union_ty.errorUnionPayload(); + const payload_ty = error_union_ty.errorUnionPayload(mod); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasRuntimeBits()) break :result 
mcv; + if (!payload_ty.hasRuntimeBits(mod)) break :result mcv; return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); }; @@ -2742,12 +2753,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { } fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const optional_ty = self.air.typeOfIndex(inst); + const optional_ty = self.typeOfIndex(inst); // Optional with a zero-bit payload type is just a boolean true - if (optional_ty.abiSize(self.target.*) == 1) + if (optional_ty.abiSize(mod) == 1) break :result MCValue{ .immediate = 1 }; return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}); @@ -2782,9 +2794,10 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const elem_ty = self.air.typeOfIndex(inst).elemType(); + const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst).childType(mod); - if (!elem_ty.hasRuntimeBits()) { + if (!elem_ty.hasRuntimeBits(mod)) { // As this stack item will never be dereferenced at runtime, // return the stack offset 0. Stack offset 0 will be where all // zero-sized stack allocations live as non-zero-sized @@ -2792,22 +2805,21 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return @as(u32, 0); } - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); return self.allocMem(inst, abi_size, abi_align); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const elem_ty = self.air.typeOfIndex(inst); - const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const elem_ty = self.typeOfIndex(inst); + const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = elem_ty.abiAlignment(mod); if (abi_align > self.stack_align) self.stack_align = abi_align; @@ -2860,12 +2872,12 @@ fn binOp( .xor, .cmp_eq, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO binary operations on floats", .{}), .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // Only say yes if the operation is // commutative, i.e. 
we can swap both of the @@ -2934,10 +2946,10 @@ fn binOp( const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); // Truncate if necessary - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const result_reg = result.register; try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); @@ -2951,11 +2963,11 @@ fn binOp( }, .div_trunc => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const rhs_immediate_ok = switch (tag) { .div_trunc => rhs == .immediate and rhs.immediate <= std.math.maxInt(u12), @@ -2984,14 +2996,14 @@ fn binOp( }, .ptr_add => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Pointer => { const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), + const elem_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; - const elem_size = elem_ty.abiSize(self.target.*); + const elem_size = elem_ty.abiSize(mod); if (elem_size == 1) { const base_tag: Mir.Inst.Tag = switch (tag) { @@ -3005,7 +3017,7 @@ fn binOp( // multiplying it with elem_size const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null); - const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null); + const addr = try self.binOp(tag, lhs, offset, Type.manyptr_u8, Type.usize, null); return addr; } }, @@ -3016,7 +3028,7 @@ fn binOp( .bool_and, .bool_or, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Bool => { assert(lhs != .immediate); // should have been handled by Sema assert(rhs != .immediate); // should have been handled by Sema @@ -3046,10 +3058,10 @@ fn binOp( const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); // Truncate if necessary - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // 32 and 64 bit operands doesn't need truncating if (int_info.bits == 32 or int_info.bits == 64) return result; @@ -3068,10 +3080,10 @@ fn binOp( .shl_exact, .shr_exact, => { - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO binary operations on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { const rhs_immediate_ok = rhs == .immediate; @@ -3393,7 +3405,8 @@ fn binOpRegister( fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasRuntimeBits()) { + const mod = self.bin_file.options.module.?; + if (self.typeOf(operand).hasRuntimeBits(mod)) { const operand_mcv = try self.resolveInst(operand); const 
block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -3402,13 +3415,13 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { .register, .stack_offset, .memory => operand_mcv, .immediate => blk: { const new_mcv = try self.allocRegOrMem(block, true); - try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), new_mcv, operand_mcv); break :blk new_mcv; }, else => return self.fail("TODO implement block_data.mcv = operand_mcv for {}", .{operand_mcv}), }; } else { - try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); + try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); @@ -3512,16 +3525,17 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { /// Given an error union, returns the payload fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { - const err_ty = error_union_ty.errorUnionSet(); - const payload_ty = error_union_ty.errorUnionPayload(); - if (err_ty.errorSetIsEmpty()) { + const mod = self.bin_file.options.module.?; + const err_ty = error_union_ty.errorUnionSet(mod); + const payload_ty = error_union_ty.errorUnionPayload(mod); + if (err_ty.errorSetIsEmpty(mod)) { return error_union_mcv; } - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); + const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); switch (error_union_mcv) { .register => return self.fail("TODO errUnionPayload for registers", .{}), .stack_offset => |off| { @@ -3555,8 +3569,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live tomb_bits >>= 1; if (!dies) continue; const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + if (op_int < Air.ref_start_index) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index); self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; @@ -3730,6 +3744,7 @@ fn genLoadASI(self: *Self, value_reg: Register, addr_reg: Register, off_reg: Reg } fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + const mod = self.bin_file.options.module.?; switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -3928,19 +3943,20 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
try self.genSetReg(ty, reg, .{ .immediate = addr }); - try self.genLoad(reg, reg, i13, 0, ty.abiSize(self.target.*)); + try self.genLoad(reg, reg, i13, 0, ty.abiSize(mod)); }, .stack_offset => |off| { const real_offset = realStackOffset(off); const simm13 = math.cast(i13, real_offset) orelse return self.fail("TODO larger stack offsets: {}", .{real_offset}); - try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(self.target.*)); + try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(mod)); }, } } fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { - const abi_size = ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); switch (mcv) { .dead => unreachable, .unreach, .none => return, // Nothing to do. @@ -3948,7 +3964,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro if (!self.wantSafety()) return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. - switch (ty.abiSize(self.target.*)) { + switch (ty.abiSize(mod)) { 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), @@ -3974,11 +3990,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg_lock = self.register_manager.lockReg(rwo.reg); defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg); - const wrapped_ty = ty.structFieldType(0); + const wrapped_ty = ty.structFieldType(0, mod); try self.genSetStack(wrapped_ty, stack_offset, .{ .register = rwo.reg }); - const overflow_bit_ty = ty.structFieldType(1); - const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*)); + const overflow_bit_ty = ty.structFieldType(1, mod); + const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, mod)); const cond_reg = try self.register_manager.allocReg(null, gp); // TODO handle floating point CCRs @@ -4024,11 +4040,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } else { - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const ptr_ty = try mod.singleMutPtrType(ty); const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp); const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs); @@ -4152,13 +4164,14 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { - const error_type = ty.errorUnionSet(); - const payload_type = ty.errorUnionPayload(); + const mod = self.bin_file.options.module.?; + const error_type = ty.errorUnionSet(mod); + const payload_type = ty.errorUnionPayload(mod); - if (!error_type.hasRuntimeBits()) { + if (!error_type.hasRuntimeBits(mod)) { return MCValue{ .immediate = 0 }; // always false - } else if (!payload_type.hasRuntimeBits()) { - if (error_type.abiSize(self.target.*) <= 8) { + } else if (!payload_type.hasRuntimeBits(mod)) { + if (error_type.abiSize(mod) <= 8) { const reg_mcv: MCValue = switch (operand) { .register => operand, else => .{ .register = try self.copyToTmpRegister(error_type, operand) }, @@ 
-4249,8 +4262,9 @@ fn jump(self: *Self, inst: Mir.Inst.Index) !void { } fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void { - const elem_ty = ptr_ty.elemType(); - const elem_size = elem_ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const elem_ty = ptr_ty.childType(mod); + const elem_size = elem_ty.abiSize(mod); switch (ptr) { .none => unreachable, @@ -4321,11 +4335,11 @@ fn minMax( ) InnerError!MCValue { const mod = self.bin_file.options.module.?; assert(lhs_ty.eql(rhs_ty, mod)); - switch (lhs_ty.zigTypeTag()) { + switch (lhs_ty.zigTypeTag(mod)) { .Float => return self.fail("TODO min/max on floats", .{}), .Vector => return self.fail("TODO min/max on vectors", .{}), .Int => { - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); if (int_info.bits <= 64) { // TODO skip register setting when one of the operands // is a small (fits in i13) immediate. @@ -4406,8 +4420,7 @@ fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { /// Asserts there is already capacity to insert into top branch inst_table. fn processDeath(self: *Self, inst: Air.Inst.Index) void { - const air_tags = self.air.instructions.items(.tag); - if (air_tags[inst] == .constant) return; // Constants are immortal. + assert(self.air.instructions.items(.tag)[inst] != .interned); // When editing this function, note that the logic must synchronize with `reuseOperand`. const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -4441,12 +4454,11 @@ fn realStackOffset(off: u32) u32 { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = self.bin_file.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), + .args = try self.gpa.alloc(MCValue, fn_info.param_types.len), // These undefined values must be populated before returning from this function. .return_value = undefined, .stack_byte_count = undefined, @@ -4454,7 +4466,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_ty.fnReturnType(mod); switch (cc) { .Naked => { @@ -4477,8 +4489,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) .callee => abi.c_abi_int_param_regs_callee_view, }; - for (param_types, 0..) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); + for (fn_info.param_types, 0..) 
|ty, i| { + const param_size = @intCast(u32, ty.toType().abiSize(mod)); if (param_size <= 8) { if (next_register < argument_registers.len) { result.args[i] = .{ .register = argument_registers[next_register] }; @@ -4505,12 +4517,12 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) result.stack_byte_count = next_stack_offset; result.stack_align = 16; - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBits(mod)) { result.return_value = .{ .none = {} }; } else { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod)); // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller. if (ret_ty_size <= 8) { result.return_value = switch (role) { @@ -4528,44 +4540,41 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) return result; } -fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { - // First section of indexes correspond to a set number of constant values. - const ref_int = @enumToInt(inst); - if (ref_int < Air.Inst.Ref.typed_value_map.len) { - const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { - return MCValue{ .none = {} }; - } - return self.genTypedValue(tv); - } +fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { + const mod = self.bin_file.options.module.?; + const ty = self.typeOf(ref); // If the type has no codegen bits, no need to store it. - const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) - return MCValue{ .none = {} }; + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; - const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); - switch (self.air.instructions.items(.tag)[inst_index]) { - .constant => { - // Constants have static lifetimes, so they are always memoized in the outer most table. - const branch = &self.branch_stack.items[0]; - const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); - if (!gop.found_existing) { - const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; - gop.value_ptr.* = try self.genTypedValue(.{ - .ty = inst_ty, - .val = self.air.values[ty_pl.payload], - }); - } - return gop.value_ptr.*; - }, - .const_ty => unreachable, - else => return self.getResolvedInstValue(inst_index), + if (Air.refToIndex(ref)) |inst| { + switch (self.air.instructions.items(.tag)[inst]) { + .interned => { + // Constants have static lifetimes, so they are always memoized in the outermost table.
+ const branch = &self.branch_stack.items[0]; + const gop = try branch.inst_table.getOrPut(self.gpa, inst); + if (!gop.found_existing) { + const interned = self.air.instructions.items(.data)[inst].interned; + gop.value_ptr.* = try self.genTypedValue(.{ + .ty = ty, + .val = interned.toValue(), + }); + } + return gop.value_ptr.*; + }, + else => return self.getResolvedInstValue(inst), + } } + + return self.genTypedValue(.{ + .ty = ty, + .val = (try self.air.value(ref, mod)).?, + }); } fn ret(self: *Self, mcv: MCValue) !void { - const ret_ty = self.fn_type.fnReturnType(); + const mod = self.bin_file.options.module.?; + const ret_ty = self.fn_type.fnReturnType(mod); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); // Just add space for a branch instruction, patch this later @@ -4638,7 +4647,7 @@ fn spillConditionFlagsIfOccupied(self: *Self) !void { else => unreachable, // mcv doesn't occupy the compare flags }; - try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); + try self.setRegOrMem(self.typeOfIndex(inst_to_save), new_mcv, mcv); log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -4662,11 +4671,12 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void assert(reg == reg_mcv.register); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) InnerError!void { - const abi_size = value_ty.abiSize(self.target.*); + const mod = self.bin_file.options.module.?; + const abi_size = value_ty.abiSize(mod); switch (ptr) { .none => unreachable, @@ -4707,10 +4717,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue { return if (self.liveness.isUnused(inst)) .dead else result: { + const mod = self.bin_file.options.module.?; const mcv = try self.resolveInst(operand); - const ptr_ty = self.air.typeOf(operand); - const struct_ty = ptr_ty.childType(); - const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const ptr_ty = self.typeOf(operand); + const struct_ty = ptr_ty.childType(mod); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, mod)); switch (mcv) { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; @@ -4748,8 +4759,9 @@ fn trunc( operand_ty: Type, dest_ty: Type, ) !MCValue { - const info_a = operand_ty.intInfo(self.target.*); - const info_b = dest_ty.intInfo(self.target.*); + const mod = self.bin_file.options.module.?; + const info_a = operand_ty.intInfo(mod); + const info_b = dest_ty.intInfo(mod); if (info_b.bits <= 64) { const operand_reg = switch (operand) { @@ -4866,3 +4878,13 @@ fn wantSafety(self: *Self) bool { .ReleaseSmall => false, }; } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, &mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, &mod.intern_pool); +} diff --git a/src/arch/wasm/CodeGen.zig 
b/src/arch/wasm/CodeGen.zig index d4be9bf139..877db4b623 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -11,6 +11,7 @@ const log = std.log.scoped(.codegen); const codegen = @import("../../codegen.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const Decl = Module.Decl; const Type = @import("../../type.zig").Type; const Value = @import("../../value.zig").Value; @@ -764,8 +765,9 @@ pub fn deinit(func: *CodeGen) void { /// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError { + const mod = func.bin_file.base.options.module.?; const src = LazySrcLoc.nodeOffset(0); - const src_loc = src.toSrcLoc(func.decl); + const src_loc = src.toSrcLoc(func.decl, mod); func.err_msg = try Module.ErrorMsg.create(func.gpa, src_loc, fmt, args); return error.CodegenFail; } @@ -788,9 +790,10 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref); assert(!gop.found_existing); - const val = func.air.value(ref).?; - const ty = func.air.typeOf(ref); - if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt() and !ty.isError()) { + const mod = func.bin_file.base.options.module.?; + const val = (try func.air.value(ref, mod)).?; + const ty = func.typeOf(ref); + if (!ty.hasRuntimeBitsIgnoreComptime(mod) and !ty.isInt(mod) and !ty.isError(mod)) { gop.value_ptr.* = WValue{ .none = {} }; return gop.value_ptr.*; } @@ -801,7 +804,7 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue { // // In the other cases, we will simply lower the constant to a value that fits // into a single local (such as a pointer, integer, bool, etc). - const result = if (isByRef(ty, func.target)) blk: { + const result = if (isByRef(ty, mod)) blk: { const sym_index = try func.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, func.decl_index); break :blk WValue{ .memory = sym_index }; } else try func.lowerConstant(val, ty); @@ -880,7 +883,7 @@ fn iterateBigTomb(func: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !B fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void { const inst = Air.refToIndex(ref) orelse return; - if (func.air.instructions.items(.tag)[inst] == .constant) return; + assert(func.air.instructions.items(.tag)[inst] != .interned); // Branches are currently only allowed to free locals allocated // within their own branch. // TODO: Upon branch consolidation free any locals if needed. 
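The wasm hunks below repeat the same mechanical migration as the sparc64 ones above: helpers that took a bare `std.Target` now take `*Module`, since type queries (`abiSize`, `intInfo`, `zigTypeTag`, `containerLayout`, ...) need the InternPool owned by the Module, and the target is recovered through `mod.getTarget()` only where still required. A minimal sketch of the migrated shape, with `exampleQuery` as a hypothetical name that is not part of this patch:

fn exampleQuery(ty: Type, mod: *Module) u64 {
    // Target-specific data is still reachable: the Module carries the target.
    const target = mod.getTarget();
    _ = target; // only needed for calls that stay target-based, e.g. ty.floatBits(target)
    // Type-layout queries now go through the Module (and its InternPool).
    return ty.abiSize(mod); // was: ty.abiSize(target)
}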
@@ -987,8 +990,9 @@ fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 } /// Using a given `Type`, returns the corresponding wasm value type -fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { - return switch (ty.zigTypeTag()) { +fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype { + const target = mod.getTarget(); + return switch (ty.zigTypeTag(mod)) { .Float => blk: { const bits = ty.floatBits(target); if (bits == 16) return wasm.Valtype.i32; // stored/loaded as u16 @@ -998,30 +1002,26 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { return wasm.Valtype.i32; // represented as pointer to stack }, .Int, .Enum => blk: { - const info = ty.intInfo(target); + const info = ty.intInfo(mod); if (info.bits <= 32) break :blk wasm.Valtype.i32; if (info.bits > 32 and info.bits <= 128) break :blk wasm.Valtype.i64; break :blk wasm.Valtype.i32; // represented as pointer to stack }, - .Struct => switch (ty.containerLayout()) { + .Struct => switch (ty.containerLayout(mod)) { .Packed => { - const struct_obj = ty.castTag(.@"struct").?.data; - return typeToValtype(struct_obj.backing_int_ty, target); + const struct_obj = mod.typeToStruct(ty).?; + return typeToValtype(struct_obj.backing_int_ty, mod); }, else => wasm.Valtype.i32, }, - .Vector => switch (determineSimdStoreStrategy(ty, target)) { + .Vector => switch (determineSimdStoreStrategy(ty, mod)) { .direct => wasm.Valtype.v128, .unrolled => wasm.Valtype.i32, }, - .Union => switch (ty.containerLayout()) { + .Union => switch (ty.containerLayout(mod)) { .Packed => { - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, ty.bitSize(target)), - }; - const int_ty = Type.initPayload(&int_ty_payload.base); - return typeToValtype(int_ty, target); + const int_ty = mod.intType(.unsigned, @intCast(u16, ty.bitSize(mod))) catch @panic("out of memory"); + return typeToValtype(int_ty, mod); }, else => wasm.Valtype.i32, }, @@ -1030,17 +1030,17 @@ fn typeToValtype(ty: Type, target: std.Target) wasm.Valtype { } /// Using a given `Type`, returns the byte representation of its wasm value type -fn genValtype(ty: Type, target: std.Target) u8 { - return wasm.valtype(typeToValtype(ty, target)); +fn genValtype(ty: Type, mod: *Module) u8 { + return wasm.valtype(typeToValtype(ty, mod)); } /// Using a given `Type`, returns the corresponding wasm value type /// Unlike `genValtype`, this also allows `void` to create a block /// with no return type -fn genBlockType(ty: Type, target: std.Target) u8 { - return switch (ty.tag()) { - .void, .noreturn => wasm.block_empty, - else => genValtype(ty, target), +fn genBlockType(ty: Type, mod: *Module) u8 { + return switch (ty.ip_index) { + .void_type, .noreturn_type => wasm.block_empty, + else => genValtype(ty, mod), }; } @@ -1101,7 +1101,8 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue { /// Creates one local for a given `Type`. /// Returns a corresponding `WValue` with `local` as active tag fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { - const valtype = typeToValtype(ty, func.target); + const mod = func.bin_file.base.options.module.?; + const valtype = typeToValtype(ty, mod); switch (valtype) { .i32 => if (func.free_locals_i32.popOrNull()) |index| { log.debug("reusing local ({d}) of type {}", .{ index, valtype }); @@ -1132,7 +1133,8 @@ fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue { /// Ensures a new local will be created. This is useful when a /// zero-initialized local is needed.
fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { - try func.locals.append(func.gpa, genValtype(ty, func.target)); + const mod = func.bin_file.base.options.module.?; + try func.locals.append(func.gpa, genValtype(ty, mod)); const initial_index = func.local_index; func.local_index += 1; return WValue{ .local = .{ .value = initial_index, .references = 1 } }; @@ -1140,48 +1142,55 @@ fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue { /// Generates a `wasm.Type` from a given function type. /// Memory is owned by the caller. -fn genFunctype(gpa: Allocator, cc: std.builtin.CallingConvention, params: []const Type, return_type: Type, target: std.Target) !wasm.Type { +fn genFunctype( + gpa: Allocator, + cc: std.builtin.CallingConvention, + params: []const InternPool.Index, + return_type: Type, + mod: *Module, +) !wasm.Type { var temp_params = std.ArrayList(wasm.Valtype).init(gpa); defer temp_params.deinit(); var returns = std.ArrayList(wasm.Valtype).init(gpa); defer returns.deinit(); - if (firstParamSRet(cc, return_type, target)) { + if (firstParamSRet(cc, return_type, mod)) { try temp_params.append(.i32); // memory address is always a 32-bit handle - } else if (return_type.hasRuntimeBitsIgnoreComptime()) { + } else if (return_type.hasRuntimeBitsIgnoreComptime(mod)) { if (cc == .C) { - const res_classes = abi.classifyType(return_type, target); + const res_classes = abi.classifyType(return_type, mod); assert(res_classes[0] == .direct and res_classes[1] == .none); - const scalar_type = abi.scalarType(return_type, target); - try returns.append(typeToValtype(scalar_type, target)); + const scalar_type = abi.scalarType(return_type, mod); + try returns.append(typeToValtype(scalar_type, mod)); } else { - try returns.append(typeToValtype(return_type, target)); + try returns.append(typeToValtype(return_type, mod)); } - } else if (return_type.isError()) { + } else if (return_type.isError(mod)) { try returns.append(.i32); } // param types - for (params) |param_type| { - if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; + for (params) |param_type_ip| { + const param_type = param_type_ip.toType(); + if (!param_type.hasRuntimeBitsIgnoreComptime(mod)) continue; switch (cc) { .C => { - const param_classes = abi.classifyType(param_type, target); + const param_classes = abi.classifyType(param_type, mod); for (param_classes) |class| { if (class == .none) continue; if (class == .direct) { - const scalar_type = abi.scalarType(param_type, target); - try temp_params.append(typeToValtype(scalar_type, target)); + const scalar_type = abi.scalarType(param_type, mod); + try temp_params.append(typeToValtype(scalar_type, mod)); } else { - try temp_params.append(typeToValtype(param_type, target)); + try temp_params.append(typeToValtype(param_type, mod)); } } }, - else => if (isByRef(param_type, target)) + else => if (isByRef(param_type, mod)) try temp_params.append(.i32) else - try temp_params.append(typeToValtype(param_type, target)), + try temp_params.append(typeToValtype(param_type, mod)), } } @@ -1194,20 +1203,22 @@ fn genFunctype(gpa: Allocator, cc: std.builtin.CallingConvention, params: []cons pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - func: *Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), debug_output: codegen.DebugInfoOutput, ) codegen.CodeGenError!codegen.Result { _ = src_loc; + const mod = bin_file.options.module.?; + const func = mod.funcPtr(func_index); var code_gen: CodeGen = .{ .gpa = bin_file.allocator, .air 
= air, .liveness = liveness, .code = code, .decl_index = func.owner_decl, - .decl = bin_file.options.module.?.declPtr(func.owner_decl), + .decl = mod.declPtr(func.owner_decl), .err_msg = undefined, .locals = .{}, .target = bin_file.options.target, @@ -1226,8 +1237,9 @@ pub fn generate( } fn genFunc(func: *CodeGen) InnerError!void { - const fn_info = func.decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target); + const mod = func.bin_file.base.options.module.?; + const fn_info = mod.typeToFunc(func.decl.ty).?; + var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); _ = try func.bin_file.storeDeclType(func.decl_index, func_type); @@ -1253,8 +1265,8 @@ fn genFunc(func: *CodeGen) InnerError!void { // we emit an unreachable instruction to tell the stack validator that part will never be reached. if (func_type.returns.len != 0 and func.air.instructions.len > 0) { const inst = @intCast(u32, func.air.instructions.len - 1); - const last_inst_ty = func.air.typeOfIndex(inst); - if (!last_inst_ty.hasRuntimeBitsIgnoreComptime() or last_inst_ty.isNoReturn()) { + const last_inst_ty = func.typeOfIndex(inst); + if (!last_inst_ty.hasRuntimeBitsIgnoreComptime(mod) or last_inst_ty.isNoReturn(mod)) { try func.addTag(.@"unreachable"); } } @@ -1335,10 +1347,9 @@ const CallWValues = struct { }; fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen()); - defer func.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + const mod = func.bin_file.base.options.module.?; + const fn_info = mod.typeToFunc(fn_ty).?; + const cc = fn_info.cc; var result: CallWValues = .{ .args = &.{}, .return_value = .none, @@ -1350,8 +1361,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV // Check if we store the result as a pointer to the stack rather than // by value - const fn_info = fn_ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { // the sret arg will be passed as first argument, therefore we // set the `return_value` before allocating locals for regular args. 
result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } }; @@ -1360,8 +1370,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV switch (cc) { .Unspecified => { - for (param_types) |ty| { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + for (fn_info.param_types) |ty| { + if (!ty.toType().hasRuntimeBitsIgnoreComptime(mod)) { continue; } @@ -1370,8 +1380,8 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV } }, .C => { - for (param_types) |ty| { - const ty_classes = abi.classifyType(ty, func.target); + for (fn_info.param_types) |ty| { + const ty_classes = abi.classifyType(ty.toType(), mod); for (ty_classes) |class| { if (class == .none) continue; try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } }); @@ -1385,11 +1395,11 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV return result; } -fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, target: std.Target) bool { +fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, mod: *Module) bool { switch (cc) { - .Unspecified, .Inline => return isByRef(return_type, target), + .Unspecified, .Inline => return isByRef(return_type, mod), .C => { - const ty_classes = abi.classifyType(return_type, target); + const ty_classes = abi.classifyType(return_type, mod); if (ty_classes[0] == .indirect) return true; if (ty_classes[0] == .direct and ty_classes[1] == .direct) return true; return false; @@ -1405,16 +1415,17 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } - const ty_classes = abi.classifyType(ty, func.target); + const mod = func.bin_file.base.options.module.?; + const ty_classes = abi.classifyType(ty, mod); assert(ty_classes[0] != .none); - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Struct, .Union => { if (ty_classes[0] == .indirect) { return func.lowerToStack(value); } assert(ty_classes[0] == .direct); - const scalar_type = abi.scalarType(ty, func.target); - const abi_size = scalar_type.abiSize(func.target); + const scalar_type = abi.scalarType(ty, mod); + const abi_size = scalar_type.abiSize(mod); try func.emitWValue(value); // When the value lives in the virtual stack, we must load it onto the actual stack @@ -1422,12 +1433,12 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: const opcode = buildOpcode(.{ .op = .load, .width = @intCast(u8, abi_size), - .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, func.target), + .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, + .valtype1 = typeToValtype(scalar_type, mod), }); try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = value.offset(), - .alignment = scalar_type.abiAlignment(func.target), + .alignment = scalar_type.abiAlignment(mod), }); } }, @@ -1436,7 +1447,7 @@ fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: return func.lowerToStack(value); } assert(ty_classes[0] == .direct and ty_classes[1] == .direct); - assert(ty.abiSize(func.target) == 16); + assert(ty.abiSize(mod) == 16); // in this case we have an integer or float that must be lowered as 2 i64's. 
try func.emitWValue(value); try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 }); @@ -1503,18 +1514,18 @@ fn restoreStackPointer(func: *CodeGen) !void { /// /// Asserts Type has codegenbits fn allocStack(func: *CodeGen, ty: Type) !WValue { - assert(ty.hasRuntimeBitsIgnoreComptime()); + const mod = func.bin_file.base.options.module.?; + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); if (func.initial_stack_value == .none) { try func.initializeStack(); } - const abi_size = std.math.cast(u32, ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; + const abi_size = std.math.cast(u32, ty.abiSize(mod)) orelse { return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - ty.fmt(module), ty.abiSize(func.target), + ty.fmt(mod), ty.abiSize(mod), }); }; - const abi_align = ty.abiAlignment(func.target); + const abi_align = ty.abiAlignment(mod); if (abi_align > func.stack_alignment) { func.stack_alignment = abi_align; @@ -1531,22 +1542,22 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue { /// This is different from allocStack where this will use the pointer's alignment /// if it is set, to ensure the stack alignment will be set correctly. fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue { - const ptr_ty = func.air.typeOfIndex(inst); - const pointee_ty = ptr_ty.childType(); + const mod = func.bin_file.base.options.module.?; + const ptr_ty = func.typeOfIndex(inst); + const pointee_ty = ptr_ty.childType(mod); if (func.initial_stack_value == .none) { try func.initializeStack(); } - if (!pointee_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pointee_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.allocStack(Type.usize); // create a value containing just the stack pointer. } - const abi_alignment = ptr_ty.ptrAlignment(func.target); - const abi_size = std.math.cast(u32, pointee_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; + const abi_alignment = ptr_ty.ptrAlignment(mod); + const abi_size = std.math.cast(u32, pointee_ty.abiSize(mod)) orelse { return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{ - pointee_ty.fmt(module), pointee_ty.abiSize(func.target), + pointee_ty.fmt(mod), pointee_ty.abiSize(mod), }); }; if (abi_alignment > func.stack_alignment) { @@ -1704,8 +1715,9 @@ fn arch(func: *const CodeGen) std.Target.Cpu.Arch { /// For a given `Type`, will return true when the type will be passed /// by reference, rather than by value -fn isByRef(ty: Type, target: std.Target) bool { - switch (ty.zigTypeTag()) { +fn isByRef(ty: Type, mod: *Module) bool { + const target = mod.getTarget(); + switch (ty.zigTypeTag(mod)) { .Type, .ComptimeInt, .ComptimeFloat, @@ -1726,44 +1738,42 @@ fn isByRef(ty: Type, target: std.Target) bool { .Array, .Frame, - => return ty.hasRuntimeBitsIgnoreComptime(), + => return ty.hasRuntimeBitsIgnoreComptime(mod), .Union => { - if (ty.castTag(.@"union")) |union_ty| { - if (union_ty.data.layout == .Packed) { - return ty.abiSize(target) > 8; + if (mod.typeToUnion(ty)) |union_obj| { + if (union_obj.layout == .Packed) { + return ty.abiSize(mod) > 8; } } - return ty.hasRuntimeBitsIgnoreComptime(); + return ty.hasRuntimeBitsIgnoreComptime(mod); }, .Struct => { - if (ty.castTag(.@"struct")) |struct_ty| { - const struct_obj = struct_ty.data; + if (mod.typeToStruct(ty)) |struct_obj| { if (struct_obj.layout == .Packed and struct_obj.haveFieldTypes()) { - return isByRef(struct_obj.backing_int_ty, target); + return isByRef(struct_obj.backing_int_ty, 
mod); } } - return ty.hasRuntimeBitsIgnoreComptime(); + return ty.hasRuntimeBitsIgnoreComptime(mod); }, - .Vector => return determineSimdStoreStrategy(ty, target) == .unrolled, - .Int => return ty.intInfo(target).bits > 64, + .Vector => return determineSimdStoreStrategy(ty, mod) == .unrolled, + .Int => return ty.intInfo(mod).bits > 64, .Float => return ty.floatBits(target) > 64, .ErrorUnion => { - const pl_ty = ty.errorUnionPayload(); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + const pl_ty = ty.errorUnionPayload(mod); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return false; } return true; }, .Optional => { - if (ty.isPtrLikeOptional()) return false; - var buf: Type.Payload.ElemType = undefined; - const pl_type = ty.optionalChild(&buf); - if (pl_type.zigTypeTag() == .ErrorSet) return false; - return pl_type.hasRuntimeBitsIgnoreComptime(); + if (ty.isPtrLikeOptional(mod)) return false; + const pl_type = ty.optionalChild(mod); + if (pl_type.zigTypeTag(mod) == .ErrorSet) return false; + return pl_type.hasRuntimeBitsIgnoreComptime(mod); }, .Pointer => { // Slices act like structs and will be passed by reference - if (ty.isSlice()) return true; + if (ty.isSlice(mod)) return true; return false; }, } } @@ -1778,10 +1788,11 @@ const SimdStoreStrategy = enum { /// This means when a given type is 128 bits and either the simd128 or relaxed-simd /// features are enabled, the function will return `.direct`. This would allow storing /// it with a single instruction, rather than an unrolled version. -fn determineSimdStoreStrategy(ty: Type, target: std.Target) SimdStoreStrategy { - std.debug.assert(ty.zigTypeTag() == .Vector); - if (ty.bitSize(target) != 128) return .unrolled; +fn determineSimdStoreStrategy(ty: Type, mod: *Module) SimdStoreStrategy { + std.debug.assert(ty.zigTypeTag(mod) == .Vector); + if (ty.bitSize(mod) != 128) return .unrolled; const hasFeature = std.Target.wasm.featureSetHas; + const target = mod.getTarget(); const features = target.cpu.features; if (hasFeature(features, .relaxed_simd) or hasFeature(features, .simd128)) { return .direct; @@ -1821,8 +1832,7 @@ fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: en fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const air_tags = func.air.instructions.items(.tag); return switch (air_tags[inst]) { - .constant => unreachable, - .const_ty => unreachable, + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .add => func.airBinOp(inst, .add), .add_sat => func.airSatBinOp(inst, .add), @@ -2062,8 +2072,11 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; + const ip = &mod.intern_pool; + for (body) |inst| { - if (func.liveness.isUnused(inst) and !func.air.mustLower(inst)) { + if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) { continue; } const old_bookkeeping_value = func.air_bookkeeping; @@ -2080,36 +2093,37 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void { } fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const fn_info = func.decl.ty.fnInfo(); - const ret_ty = fn_info.return_type; + const fn_info = mod.typeToFunc(func.decl.ty).?; + const ret_ty = fn_info.return_type.toType(); // result must be stored in the stack
and we return a pointer // to the stack instead if (func.return_value != .none) { try func.store(func.return_value, operand, ret_ty, 0); - } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) { - switch (ret_ty.zigTypeTag()) { + } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + switch (ret_ty.zigTypeTag(mod)) { // Aggregate types can be lowered as a singular value .Struct, .Union => { - const scalar_type = abi.scalarType(ret_ty, func.target); + const scalar_type = abi.scalarType(ret_ty, mod); try func.emitWValue(operand); const opcode = buildOpcode(.{ .op = .load, - .width = @intCast(u8, scalar_type.abiSize(func.target) * 8), - .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned, - .valtype1 = typeToValtype(scalar_type, func.target), + .width = @intCast(u8, scalar_type.abiSize(mod) * 8), + .signedness = if (scalar_type.isSignedInt(mod)) .signed else .unsigned, + .valtype1 = typeToValtype(scalar_type, mod), }); try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{ .offset = operand.offset(), - .alignment = scalar_type.abiAlignment(func.target), + .alignment = scalar_type.abiAlignment(mod), }); }, else => try func.emitWValue(operand), } } else { - if (!ret_ty.hasRuntimeBitsIgnoreComptime() and ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and ret_ty.isError(mod)) { try func.addImm32(0); } else { try func.emitWValue(operand); @@ -2122,15 +2136,16 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { - const child_type = func.air.typeOfIndex(inst).childType(); + const mod = func.bin_file.base.options.module.?; + const child_type = func.typeOfIndex(inst).childType(mod); var result = result: { - if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime()) { + if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(mod)) { break :result try func.allocStack(Type.usize); // create pointer to void } - const fn_info = func.decl.ty.fnInfo(); - if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + const fn_info = mod.typeToFunc(func.decl.ty).?; + if (firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { break :result func.return_value; } @@ -2141,16 +2156,17 @@ fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const ret_ty = func.air.typeOf(un_op).childType(); + const ret_ty = func.typeOf(un_op).childType(mod); - const fn_info = func.decl.ty.fnInfo(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { - if (ret_ty.isError()) { + const fn_info = mod.typeToFunc(func.decl.ty).?; + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { + if (ret_ty.isError(mod)) { try func.addImm32(0); } - } else if (!firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) { + } else if (!firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod)) { // leave on the stack _ = try func.load(operand, ret_ty, 0); } @@ -2165,42 +2181,48 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif const pl_op = func.air.instructions.items(.data)[inst].pl_op; const extra = func.air.extraData(Air.Call, pl_op.payload); const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]); - const ty = func.air.typeOf(pl_op.operand); + const ty = 
func.typeOf(pl_op.operand); - const fn_ty = switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + const fn_ty = switch (ty.zigTypeTag(mod)) { .Fn => ty, - .Pointer => ty.childType(), + .Pointer => ty.childType(mod), else => unreachable, }; - const ret_ty = fn_ty.fnReturnType(); - const fn_info = fn_ty.fnInfo(); - const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target); + const ret_ty = fn_ty.fnReturnType(mod); + const fn_info = mod.typeToFunc(fn_ty).?; + const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type.toType(), mod); const callee: ?Decl.Index = blk: { - const func_val = func.air.value(pl_op.operand) orelse break :blk null; - const module = func.bin_file.base.options.module.?; + const func_val = (try func.air.value(pl_op.operand, mod)) orelse break :blk null; - if (func_val.castTag(.function)) |function| { - _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl); - break :blk function.data.owner_decl; - } else if (func_val.castTag(.extern_fn)) |extern_fn| { - const ext_decl = module.declPtr(extern_fn.data.owner_decl); - const ext_info = ext_decl.ty.fnInfo(); - var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target); + if (func_val.getFunction(mod)) |function| { + _ = try func.bin_file.getOrCreateAtomForDecl(function.owner_decl); + break :blk function.owner_decl; + } else if (func_val.getExternFunc(mod)) |extern_func| { + const ext_decl = mod.declPtr(extern_func.decl); + const ext_info = mod.typeToFunc(ext_decl.ty).?; + var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type.toType(), mod); defer func_type.deinit(func.gpa); - const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl); + const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_func.decl); const atom = func.bin_file.getAtomPtr(atom_index); - const type_index = try func.bin_file.storeDeclType(extern_fn.data.owner_decl, func_type); + const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type); try func.bin_file.addOrUpdateImport( - mem.sliceTo(ext_decl.name, 0), + mod.intern_pool.stringToSlice(ext_decl.name), atom.getSymbolIndex().?, - ext_decl.getExternFn().?.lib_name, + mod.intern_pool.stringToSliceUnwrap(ext_decl.getOwnedExternFunc(mod).?.lib_name), type_index, ); - break :blk extern_fn.data.owner_decl; - } else if (func_val.castTag(.decl_ref)) |decl_ref| { - _ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data); - break :blk decl_ref.data; + break :blk extern_func.decl; + } else switch (mod.intern_pool.indexToKey(func_val.ip_index)) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| { + _ = try func.bin_file.getOrCreateAtomForDecl(decl); + break :blk decl; + }, + else => {}, + }, + else => {}, } return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()}); }; @@ -2214,10 +2236,10 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif for (args) |arg| { const arg_val = try func.resolveInst(arg); - const arg_ty = func.air.typeOf(arg); - if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue; + const arg_ty = func.typeOf(arg); + if (!arg_ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val); + try func.lowerArg(mod.typeToFunc(fn_ty).?.cc, arg_ty, arg_val); } if (callee) |direct| { @@ -2226,11 +2248,11 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: 
std.builtin.CallModif } else { // in this case we call a function pointer // so load its value onto the stack - std.debug.assert(ty.zigTypeTag() == .Pointer); + std.debug.assert(ty.zigTypeTag(mod) == .Pointer); const operand = try func.resolveInst(pl_op.operand); try func.emitWValue(operand); - var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target); + var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type.toType(), mod); defer fn_type.deinit(func.gpa); const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type); @@ -2238,18 +2260,18 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif } const result_value = result_value: { - if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { + if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod) and !ret_ty.isError(mod)) { break :result_value WValue{ .none = {} }; - } else if (ret_ty.isNoReturn()) { + } else if (ret_ty.isNoReturn(mod)) { try func.addTag(.@"unreachable"); break :result_value WValue{ .none = {} }; } else if (first_param_sret) { break :result_value sret; // TODO: Make this less fragile and optimize - } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag() == .Struct or ret_ty.zigTypeTag() == .Union) { + } else if (mod.typeToFunc(fn_ty).?.cc == .C and ret_ty.zigTypeTag(mod) == .Struct or ret_ty.zigTypeTag(mod) == .Union) { const result_local = try func.allocLocal(ret_ty); try func.addLabel(.local_set, result_local.local.value); - const scalar_type = abi.scalarType(ret_ty, func.target); + const scalar_type = abi.scalarType(ret_ty, mod); const result = try func.allocStack(scalar_type); try func.store(result, result_local, scalar_type, 0); break :result_value result; @@ -2272,6 +2294,7 @@ fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; if (safety) { // TODO if the value is undef, write 0xaa bytes to dest } else { @@ -2281,26 +2304,22 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); - const ptr_info = ptr_ty.ptrInfo().data; - const ty = ptr_ty.childType(); + const ptr_ty = func.typeOf(bin_op.lhs); + const ptr_info = ptr_ty.ptrInfo(mod); + const ty = ptr_ty.childType(mod); if (ptr_info.host_size == 0) { try func.store(lhs, rhs, ty, 0); } else { // at this point we have a non-natural alignment, we must // load the value, and then shift+or the rhs into the result location. 
- var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const int_elem_ty = Type.initPayload(&int_ty_payload.base); + const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8); - if (isByRef(int_elem_ty, func.target)) { + if (isByRef(int_elem_ty, mod)) { return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{}); } - var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(func.target))) - 1); + var mask = @intCast(u64, (@as(u65, 1) << @intCast(u7, ty.bitSize(mod))) - 1); mask <<= @intCast(u6, ptr_info.bit_offset); mask ^= ~@as(u64, 0); const shift_val = if (ptr_info.host_size <= 4) @@ -2329,11 +2348,12 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void { assert(!(lhs != .stack and rhs == .stack)); - const abi_size = ty.abiSize(func.target); - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + const abi_size = ty.abiSize(mod); + switch (ty.zigTypeTag(mod)) { .ErrorUnion => { - const pl_ty = ty.errorUnionPayload(); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + const pl_ty = ty.errorUnionPayload(mod); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.anyerror, 0); } @@ -2341,26 +2361,25 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { return func.store(lhs, rhs, Type.usize, 0); } - var buf: Type.Payload.ElemType = undefined; - const pl_ty = ty.optionalChild(&buf); - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + const pl_ty = ty.optionalChild(mod); + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.store(lhs, rhs, Type.u8, 0); } - if (pl_ty.zigTypeTag() == .ErrorSet) { + if (pl_ty.zigTypeTag(mod) == .ErrorSet) { return func.store(lhs, rhs, Type.anyerror, 0); } const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Struct, .Array, .Union => if (isByRef(ty, func.target)) { + .Struct, .Array, .Union => if (isByRef(ty, mod)) { const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); }, - .Vector => switch (determineSimdStoreStrategy(ty, func.target)) { + .Vector => switch (determineSimdStoreStrategy(ty, mod)) { .unrolled => { const len = @intCast(u32, abi_size); return func.memcpy(lhs, rhs, .{ .imm32 = len }); @@ -2374,13 +2393,13 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_store), offset + lhs.offset(), - ty.abiAlignment(func.target), + ty.abiAlignment(mod), }); return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); }, }, .Pointer => { - if (ty.isSlice()) { + if (ty.isSlice(mod)) { // store pointer first // lower it to the stack so we do not have to store rhs into a local first try func.emitWValue(lhs); @@ -2404,7 +2423,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset()); return; } else if (abi_size > 16) { - try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(func.target)) }); + try func.memcpy(lhs, rhs, .{ .imm32 = @intCast(u32, ty.abiSize(mod)) }); }, else => if (abi_size > 8) { return 
func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{ @@ -2418,7 +2437,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE // into lhs, so we calculate that and emit that instead try func.lowerToStack(rhs); - const valtype = typeToValtype(ty, func.target); + const valtype = typeToValtype(ty, mod); const opcode = buildOpcode(.{ .valtype1 = valtype, .width = @intCast(u8, abi_size * 8), @@ -2428,21 +2447,22 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE // store rhs value at stack pointer's location in memory try func.addMemArg( Mir.Inst.Tag.fromOpcode(opcode), - .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(func.target) }, + .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(mod) }, ); } fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const ty = func.air.getRefType(ty_op.ty); - const ptr_ty = func.air.typeOf(ty_op.operand); - const ptr_info = ptr_ty.ptrInfo().data; + const ptr_ty = func.typeOf(ty_op.operand); + const ptr_info = ptr_ty.ptrInfo(mod); - if (!ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{ty_op.operand}); + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{ty_op.operand}); const result = result: { - if (isByRef(ty, func.target)) { + if (isByRef(ty, mod)) { const new_local = try func.allocStack(ty); try func.store(new_local, operand, ty, 0); break :result new_local; @@ -2455,11 +2475,7 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // at this point we have a non-natural alignment, we must // shift the value to obtain the correct bit. - var int_ty_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = ptr_info.host_size * 8, - }; - const int_elem_ty = Type.initPayload(&int_ty_payload.base); + const int_elem_ty = try mod.intType(.unsigned, ptr_info.host_size * 8); const shift_val = if (ptr_info.host_size <= 4) WValue{ .imm32 = ptr_info.bit_offset } else if (ptr_info.host_size <= 8) @@ -2479,25 +2495,26 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Loads an operand from the linear memory section. /// NOTE: Leaves the value on the stack. 
fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; // load local's value from memory by its stack position try func.emitWValue(operand); - if (ty.zigTypeTag() == .Vector) { + if (ty.zigTypeTag(mod) == .Vector) { // TODO: Add helper functions for simd opcodes const extra_index = @intCast(u32, func.mir_extra.items.len); // stores as := opcode, offset, alignment (opcode::memarg) try func.mir_extra.appendSlice(func.gpa, &[_]u32{ std.wasm.simdOpcode(.v128_load), offset + operand.offset(), - ty.abiAlignment(func.target), + ty.abiAlignment(mod), }); try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } }); return WValue{ .stack = {} }; } - const abi_size = @intCast(u8, ty.abiSize(func.target)); + const abi_size = @intCast(u8, ty.abiSize(mod)); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, func.target), + .valtype1 = typeToValtype(ty, mod), .width = abi_size * 8, .op = .load, .signedness = .unsigned, @@ -2505,19 +2522,20 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu try func.addMemArg( Mir.Inst.Tag.fromOpcode(opcode), - .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(func.target) }, + .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(mod) }, ); return WValue{ .stack = {} }; } fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const arg_index = func.arg_index; const arg = func.args[arg_index]; - const cc = func.decl.ty.fnInfo().cc; - const arg_ty = func.air.typeOfIndex(inst); + const cc = mod.typeToFunc(func.decl.ty).?.cc; + const arg_ty = func.typeOfIndex(inst); if (cc == .C) { - const arg_classes = abi.classifyType(arg_ty, func.target); + const arg_classes = abi.classifyType(arg_ty, mod); for (arg_classes) |class| { if (class != .none) { func.arg_index += 1; @@ -2527,7 +2545,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When we have an argument that's passed using more than a single parameter, // we combine them into a single stack value if (arg_classes[0] == .direct and arg_classes[1] == .direct) { - if (arg_ty.zigTypeTag() != .Int and arg_ty.zigTypeTag() != .Float) { + if (arg_ty.zigTypeTag(mod) != .Int and arg_ty.zigTypeTag(mod) != .Float) { return func.fail( "TODO: Implement C-ABI argument for type '{}'", .{arg_ty.fmt(func.bin_file.base.options.module.?)}, @@ -2557,11 +2575,12 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.air.typeOf(bin_op.lhs); - const rhs_ty = func.air.typeOf(bin_op.rhs); + const lhs_ty = func.typeOf(bin_op.lhs); + const rhs_ty = func.typeOf(bin_op.rhs); // For certain operations, such as shifting, the types are different. // When converting this to a WebAssembly type, they *must* match to perform @@ -2570,10 +2589,10 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. 
const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse { + const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?; + const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -2593,6 +2612,7 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { /// Performs a binary operation on the given `WValue`s /// NOTE: This leaves the value on top of the stack. fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; assert(!(lhs != .stack and rhs == .stack)); if (ty.isAnyFloat()) { @@ -2600,8 +2620,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError! return func.floatOp(float_op, ty, &.{ lhs, rhs }); } - if (isByRef(ty, func.target)) { - if (ty.zigTypeTag() == .Int) { + if (isByRef(ty, mod)) { + if (ty.zigTypeTag(mod) == .Int) { return func.binOpBigInt(lhs, rhs, ty, op); } else { return func.fail( @@ -2613,8 +2633,8 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError! const opcode: wasm.Opcode = buildOpcode(.{ .op = op, - .valtype1 = typeToValtype(ty, func.target), - .signedness = if (ty.isSignedInt()) .signed else .unsigned, + .valtype1 = typeToValtype(ty, mod), + .signedness = if (ty.isSignedInt(mod)) .signed else .unsigned, }); try func.emitWValue(lhs); try func.emitWValue(rhs); @@ -2625,14 +2645,15 @@
} fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue { - if (ty.intInfo(func.target).bits > 128) { + const mod = func.bin_file.base.options.module.?; + if (ty.intInfo(mod).bits > 128) { return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{}); } switch (op) { - .mul => return func.callIntrinsic("__multi3", &.{ ty, ty }, ty, &.{ lhs, rhs }), - .shr => return func.callIntrinsic("__lshrti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }), - .shl => return func.callIntrinsic("__ashlti3", &.{ ty, Type.i32 }, ty, &.{ lhs, rhs }), + .mul => return func.callIntrinsic("__multi3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }), + .shr => return func.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), + .shl => return func.callIntrinsic("__ashlti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }), .xor => { const result = try func.allocStack(ty); try func.emitWValue(result); @@ -2756,14 +2777,15 @@ const FloatOp = enum { fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void { const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const ty = func.air.typeOf(un_op); + const ty = func.typeOf(un_op); const result = try (try func.floatOp(op, ty, &.{operand})).toLocal(func, ty); func.finishAir(inst, result, &.{un_op}); } fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue { - if (ty.zigTypeTag() == .Vector) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement floatOps for vectors", .{}); } @@ -2773,7 +2795,7 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In for (args) |operand| { try func.emitWValue(operand); } - const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, func.target) }); + const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, mod) }); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); return .stack; } @@ -2821,20 +2843,21 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In }; // fma requires three operands - var param_types_buffer: [3]Type = .{ ty, ty, ty }; + var param_types_buffer: [3]InternPool.Index = .{ ty.ip_index, ty.ip_index, ty.ip_index }; const param_types = param_types_buffer[0..args.len]; return func.callIntrinsic(fn_name, param_types, ty, args); } fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const lhs_ty = func.air.typeOf(bin_op.lhs); - const rhs_ty = func.air.typeOf(bin_op.rhs); + const lhs_ty = func.typeOf(bin_op.lhs); + const rhs_ty = func.typeOf(bin_op.rhs); - if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.zigTypeTag(mod) == .Vector or rhs_ty.zigTypeTag(mod) == .Vector) { return func.fail("TODO: Implement wrapping arithmetic for vectors", .{}); } @@ -2845,10 +2868,10 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { // For big integers we can ignore this as we will call into compiler-rt which handles this. 
const result = switch (op) { .shr, .shl => res: { - const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(func.target))) orelse { + const lhs_wasm_bits = toWasmBits(@intCast(u16, lhs_ty.bitSize(mod))) orelse { return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)}); }; - const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(func.target))).?; + const rhs_wasm_bits = toWasmBits(@intCast(u16, rhs_ty.bitSize(mod))).?; const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128) blk: { const tmp = try func.intcast(rhs, rhs_ty, lhs_ty); break :blk try tmp.toLocal(func, lhs_ty); @@ -2877,8 +2900,9 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr /// Asserts `Type` is <= 128 bits. /// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack. fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { - assert(ty.abiSize(func.target) <= 16); - const bitsize = @intCast(u16, ty.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + assert(ty.abiSize(mod) <= 16); + const bitsize = @intCast(u16, ty.bitSize(mod)); const wasm_bits = toWasmBits(bitsize) orelse { return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize}); }; @@ -2914,43 +2938,67 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { return WValue{ .stack = {} }; } -fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue { - switch (ptr_val.tag()) { - .decl_ref_mut => { - const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); +fn lowerParentPtr(func: *CodeGen, ptr_val: Value) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; + const ptr = mod.intern_pool.indexToKey(ptr_val.ip_index).ptr; + switch (ptr.addr) { + .decl => |decl_index| { + return func.lowerParentPtrDecl(ptr_val, decl_index, 0); }, - .decl_ref => { - const decl_index = ptr_val.castTag(.decl_ref).?.data; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + .mut_decl => |mut_decl| { + const decl_index = mut_decl.decl; + return func.lowerParentPtrDecl(ptr_val, decl_index, 0); }, - .variable => { - const decl_index = ptr_val.castTag(.variable).?.data.owner_decl; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); + .int, .eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}), + .opt_payload => |base_ptr| { + return func.lowerParentPtr(base_ptr.toValue()); }, - .field_ptr => { - const field_ptr = ptr_val.castTag(.field_ptr).?.data; - const parent_ty = field_ptr.container_ty; + .comptime_field => unreachable, + .elem => |elem| { + const index = elem.index; + const elem_type = mod.intern_pool.typeOf(elem.base).toType().elemType2(mod); + const offset = index * elem_type.abiSize(mod); + const array_ptr = try func.lowerParentPtr(elem.base.toValue()); - const field_offset = switch (parent_ty.zigTypeTag()) { - .Struct => switch (parent_ty.containerLayout()) { - .Packed => parent_ty.packedStructFieldByteOffset(field_ptr.field_index, func.target), - else => parent_ty.structFieldOffset(field_ptr.field_index, func.target), + return switch (array_ptr) { + .memory => |ptr_| WValue{ + .memory_offset = .{ + .pointer = ptr_, + .offset = @intCast(u32, offset), + }, }, - .Union => switch (parent_ty.containerLayout()) { + .memory_offset => |mem_off| WValue{ + .memory_offset = .{ + .pointer = mem_off.pointer, + .offset = 
@intCast(u32, offset) + mem_off.offset, + }, + }, + else => unreachable, + }; + }, + .field => |field| { + const parent_ty = mod.intern_pool.typeOf(field.base).toType().childType(mod); + const parent_ptr = try func.lowerParentPtr(field.base.toValue()); + + const offset = switch (parent_ty.zigTypeTag(mod)) { + .Struct => switch (parent_ty.containerLayout(mod)) { + .Packed => parent_ty.packedStructFieldByteOffset(@intCast(usize, field.index), mod), + else => parent_ty.structFieldOffset(@intCast(usize, field.index), mod), + }, + .Union => switch (parent_ty.containerLayout(mod)) { .Packed => 0, else => blk: { - const layout: Module.Union.Layout = parent_ty.unionGetLayout(func.target); + const layout: Module.Union.Layout = parent_ty.unionGetLayout(mod); if (layout.payload_size == 0) break :blk 0; if (layout.payload_align > layout.tag_align) break :blk 0; // tag is stored first so calculate offset from where payload starts - const field_offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); - break :blk field_offset; + const offset = @intCast(u32, std.mem.alignForwardGeneric(u64, layout.tag_size, layout.tag_align)); + break :blk offset; }, }, - .Pointer => switch (parent_ty.ptrSize()) { - .Slice => switch (field_ptr.field_index) { + .Pointer => switch (parent_ty.ptrSize(mod)) { + .Slice => switch (field.index) { 0 => 0, 1 => func.ptrSize(), else => unreachable, @@ -2959,51 +3007,51 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue }, else => unreachable, }; - return func.lowerParentPtr(field_ptr.container_ptr, offset + @intCast(u32, field_offset)); + + return switch (parent_ptr) { + .memory => |ptr_| WValue{ + .memory_offset = .{ + .pointer = ptr_, + .offset = @intCast(u32, offset), + }, + }, + .memory_offset => |mem_off| WValue{ + .memory_offset = .{ + .pointer = mem_off.pointer, + .offset = @intCast(u32, offset) + mem_off.offset, + }, + }, + else => unreachable, + }; }, - .elem_ptr => { - const elem_ptr = ptr_val.castTag(.elem_ptr).?.data; - const index = elem_ptr.index; - const elem_offset = index * elem_ptr.elem_ty.abiSize(func.target); - return func.lowerParentPtr(elem_ptr.array_ptr, offset + @intCast(u32, elem_offset)); - }, - .opt_payload_ptr => { - const payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data; - return func.lowerParentPtr(payload_ptr.container_ptr, offset); - }, - else => |tag| return func.fail("TODO: Implement lowerParentPtr for tag: {}", .{tag}), } } fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { - const module = func.bin_file.base.options.module.?; - const decl = module.declPtr(decl_index); - module.markDeclAlive(decl); - var ptr_ty_payload: Type.Payload.ElemType = .{ - .base = .{ .tag = .single_mut_pointer }, - .data = decl.ty, - }; - const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + const mod = func.bin_file.base.options.module.?; + const decl = mod.declPtr(decl_index); + try mod.markDeclAlive(decl); + const ptr_ty = try mod.singleMutPtrType(decl.ty); return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index, offset); } fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Index, offset: u32) InnerError!WValue { - if (tv.ty.isSlice()) { + const mod = func.bin_file.base.options.module.?; + if (tv.ty.isSlice(mod)) { return WValue{ .memory = try func.bin_file.lowerUnnamedConst(tv, decl_index) }; } - const module = func.bin_file.base.options.module.?; - const decl = 
module.declPtr(decl_index); - if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) { + const decl = mod.declPtr(decl_index); + if (decl.ty.zigTypeTag(mod) != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime(mod)) { return WValue{ .imm32 = 0xaaaaaaaa }; } - module.markDeclAlive(decl); + try mod.markDeclAlive(decl); const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index); const atom = func.bin_file.getAtom(atom_index); const target_sym_index = atom.sym_index; - if (decl.ty.zigTypeTag() == .Fn) { + if (decl.ty.zigTypeTag(mod) == .Fn) { try func.bin_file.addTableFunction(target_sym_index); return WValue{ .function_index = target_sym_index }; } else if (offset == 0) { @@ -3028,142 +3076,201 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo( } fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; var val = arg_val; - if (val.castTag(.runtime_value)) |rt| { - val = rt.data; + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .runtime_value => |rt| val = rt.val.toValue(), + else => {}, } - if (val.isUndefDeep()) return func.emitUndefined(ty); - if (val.castTag(.decl_ref)) |decl_ref| { - const decl_index = decl_ref.data; - return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); - } - if (val.castTag(.decl_ref_mut)) |decl_ref_mut| { - const decl_index = decl_ref_mut.data.decl_index; - return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index, 0); - } - const target = func.target; - switch (ty.zigTypeTag()) { - .Void => return WValue{ .none = {} }, - .Int => { - const int_info = ty.intInfo(func.target); + if (val.isUndefDeep(mod)) return func.emitUndefined(ty); + + if (val.ip_index == .none) switch (ty.zigTypeTag(mod)) { + .Array => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}), + .Struct => { + const struct_obj = mod.typeToStruct(ty).?; + assert(struct_obj.layout == .Packed); + var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer + val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; + const int_val = try mod.intValue( + struct_obj.backing_int_ty, + std.mem.readIntLittle(u64, &buf), + ); + return func.lowerConstant(int_val, struct_obj.backing_int_ty); + }, + .Vector => { + assert(determineSimdStoreStrategy(ty, mod) == .direct); + var buf: [16]u8 = undefined; + val.writeToMemory(ty, mod, &buf) catch unreachable; + return func.storeSimdImmd(buf); + }, + .Frame, + .AnyFrame, + => return func.fail("Wasm TODO: LowerConstant for type {}", .{ty.fmt(mod)}), + .Float, + .Union, + .Optional, + .ErrorUnion, + .ErrorSet, + .Int, + .Enum, + .Bool, + .Pointer, + => unreachable, // handled below + .Type, + .Void, + .NoReturn, + .ComptimeFloat, + .ComptimeInt, + .Undefined, + .Null, + .Opaque, + .EnumLiteral, + .Fn, + => unreachable, // comptime-only types + }; + + switch (mod.intern_pool.indexToKey(val.ip_index)) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => 
unreachable, // non-runtime values + .false, .true => return WValue{ .imm32 = switch (simple_value) { + .false => 0, + .true => 1, + else => unreachable, + } }, + }, + .variable, + .extern_func, + .func, + .enum_literal, + .empty_enum_value, + => unreachable, // non-runtime values + .int => { + const int_info = ty.intInfo(mod); switch (int_info.signedness) { .signed => switch (int_info.bits) { 0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement( - val.toSignedInt(target), + val.toSignedInt(mod), @intCast(u6, int_info.bits), )) }, 33...64 => return WValue{ .imm64 = toTwosComplement( - val.toSignedInt(target), + val.toSignedInt(mod), @intCast(u7, int_info.bits), ) }, else => unreachable, }, .unsigned => switch (int_info.bits) { - 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, - 33...64 => return WValue{ .imm64 = val.toUnsignedInt(target) }, + 0...32 => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(mod)) }, + 33...64 => return WValue{ .imm64 = val.toUnsignedInt(mod) }, else => unreachable, }, } }, - .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, - .Float => switch (ty.floatBits(func.target)) { - 16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16)) }, - 32 => return WValue{ .float32 = val.toFloat(f32) }, - 64 => return WValue{ .float64 = val.toFloat(f64) }, - else => unreachable, + .err => |err| { + const int = try mod.getErrorValue(err.name); + return WValue{ .imm32 = int }; }, - .Pointer => switch (val.tag()) { - .field_ptr, .elem_ptr, .opt_payload_ptr => return func.lowerParentPtr(val, 0), - .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) }, - .zero, .null_value => return WValue{ .imm32 = 0 }, - else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}), - }, - .Enum => { - if (val.castTag(.enum_field_index)) |field_index| { - switch (ty.tag()) { - .enum_simple => return WValue{ .imm32 = field_index.data }, - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - const tag_val = enum_full.values.keys()[field_index.data]; - return func.lowerConstant(tag_val, enum_full.tag_ty); - } else { - return WValue{ .imm32 = field_index.data }; - } - }, - .enum_numbered => { - const index = field_index.data; - const enum_data = ty.castTag(.enum_numbered).?.data; - const enum_val = enum_data.values.keys()[index]; - return func.lowerConstant(enum_val, enum_data.tag_ty); - }, - else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}), - } - } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_buffer); - return func.lowerConstant(val, int_tag_ty); - } - }, - .ErrorSet => switch (val.tag()) { - .@"error" => { - const kv = try func.bin_file.base.options.module.?.getErrorValue(val.getError().?); - return WValue{ .imm32 = kv.value }; - }, - else => return WValue{ .imm32 = 0 }, - }, - .ErrorUnion => { - const error_type = ty.errorUnionSet(); - const payload_type = ty.errorUnionPayload(); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + .error_union => |error_union| { + const err_tv: TypedValue = switch (error_union.val) { + .err_name => |err_name| .{ + .ty = ty.errorUnionSet(mod), + .val = (try mod.intern(.{ .err = .{ + .ty = ty.errorUnionSet(mod).toIntern(), + .name = err_name, + } })).toValue(), + }, + .payload => .{ + .ty = Type.err_int, + .val = try mod.intValue(Type.err_int, 0), + }, 
+ }; + const payload_type = ty.errorUnionPayload(mod); + if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) { // We use the error type directly as the type. - const is_pl = val.errorUnionIsPayload(); - const err_val = if (!is_pl) val else Value.initTag(.zero); - return func.lowerConstant(err_val, error_type); + return func.lowerConstant(err_tv.val, err_tv.ty); } + return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{}); }, - .Optional => if (ty.optionalReprIsPayload()) { - var buf: Type.Payload.ElemType = undefined; - const pl_ty = ty.optionalChild(&buf); - if (val.castTag(.opt_payload)) |payload| { - return func.lowerConstant(payload.data, pl_ty); - } else if (val.isNull()) { - return WValue{ .imm32 = 0 }; + .enum_tag => |enum_tag| { + const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int); + return func.lowerConstant(enum_tag.int.toValue(), int_tag_ty.toType()); + }, + .float => |float| switch (float.storage) { + .f16 => |f16_val| return WValue{ .imm32 = @bitCast(u16, f16_val) }, + .f32 => |f32_val| return WValue{ .float32 = f32_val }, + .f64 => |f64_val| return WValue{ .float64 = f64_val }, + else => unreachable, + }, + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl, 0), + .mut_decl => |mut_decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, mut_decl.decl, 0), + .int => |int| return func.lowerConstant(int.toValue(), mod.intern_pool.typeOf(int).toType()), + .opt_payload, .elem, .field => return func.lowerParentPtr(val), + else => return func.fail("Wasm TODO: lowerConstant for other const addr tag {}", .{ptr.addr}), + }, + .opt => if (ty.optionalReprIsPayload(mod)) { + const pl_ty = ty.optionalChild(mod); + if (val.optionalValue(mod)) |payload| { + return func.lowerConstant(payload, pl_ty); } else { - return func.lowerConstant(val, pl_ty); + return WValue{ .imm32 = 0 }; } } else { - const is_pl = val.tag() == .opt_payload; - return WValue{ .imm32 = @boolToInt(is_pl) }; + return WValue{ .imm32 = @boolToInt(!val.isNull(mod)) }; }, - .Struct => { - const struct_obj = ty.castTag(.@"struct").?.data; - assert(struct_obj.layout == .Packed); - var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer - val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; - var payload: Value.Payload.U64 = .{ - .base = .{ .tag = .int_u64 }, - .data = std.mem.readIntLittle(u64, &buf), - }; - const int_val = Value.initPayload(&payload.base); - return func.lowerConstant(int_val, struct_obj.backing_int_ty); + .aggregate => switch (mod.intern_pool.indexToKey(ty.ip_index)) { + .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(mod)}), + .vector_type => { + assert(determineSimdStoreStrategy(ty, mod) == .direct); + var buf: [16]u8 = undefined; + val.writeToMemory(ty, mod, &buf) catch unreachable; + return func.storeSimdImmd(buf); + }, + .struct_type, .anon_struct_type => { + const struct_obj = mod.typeToStruct(ty).?; + assert(struct_obj.layout == .Packed); + var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer + val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable; + const int_val = try mod.intValue( + struct_obj.backing_int_ty, + std.mem.readIntLittle(u64, &buf), + ); + return func.lowerConstant(int_val, struct_obj.backing_int_ty); + }, + else => unreachable, }, - .Vector => { - assert(determineSimdStoreStrategy(ty, target) == .direct); - var buf: 
[16]u8 = undefined; - val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable; - return func.storeSimdImmd(buf); - }, - .Union => { + .un => |union_obj| { // in this case we have a packed union which will not be passed by reference. - const union_ty = ty.cast(Type.Payload.Union).?.data; - const union_obj = val.castTag(.@"union").?.data; - const field_index = ty.unionTagFieldIndex(union_obj.tag, func.bin_file.base.options.module.?).?; - const field_ty = union_ty.fields.values()[field_index].ty; - return func.lowerConstant(union_obj.val, field_ty); + const field_index = ty.unionTagFieldIndex(union_obj.tag.toValue(), func.bin_file.base.options.module.?).?; + const field_ty = ty.unionFields(mod).values()[field_index].ty; + return func.lowerConstant(union_obj.val.toValue(), field_ty); }, - else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}), + .memoized_call => unreachable, } } @@ -3176,9 +3283,10 @@ fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue { } fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { - switch (ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + switch (ty.zigTypeTag(mod)) { .Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa }, - .Int, .Enum => switch (ty.intInfo(func.target).bits) { + .Int, .Enum => switch (ty.intInfo(mod).bits) { 0...32 => return WValue{ .imm32 = 0xaaaaaaaa }, 33...64 => return WValue{ .imm64 = 0xaaaaaaaaaaaaaaaa }, else => unreachable, @@ -3195,9 +3303,8 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { else => unreachable, }, .Optional => { - var buf: Type.Payload.ElemType = undefined; - const pl_ty = ty.optionalChild(&buf); - if (ty.optionalReprIsPayload()) { + const pl_ty = ty.optionalChild(mod); + if (ty.optionalReprIsPayload(mod)) { return func.emitUndefined(pl_ty); } return WValue{ .imm32 = 0xaaaaaaaa }; @@ -3206,11 +3313,11 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { return WValue{ .imm32 = 0xaaaaaaaa }; }, .Struct => { - const struct_obj = ty.castTag(.@"struct").?.data; + const struct_obj = mod.typeToStruct(ty).?; assert(struct_obj.layout == .Packed); return func.emitUndefined(struct_obj.backing_int_ty); }, - else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag()}), + else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(mod)}), } } @@ -3218,56 +3325,52 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue { /// It's illegal to provide a value with a type that cannot be represented /// as an integer value. 
fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 { - const target = func.target; - switch (ty.zigTypeTag()) { - .Enum => { - if (val.castTag(.enum_field_index)) |field_index| { - switch (ty.tag()) { - .enum_simple => return @bitCast(i32, field_index.data), - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - const tag_val = enum_full.values.keys()[field_index.data]; - return func.valueAsI32(tag_val, enum_full.tag_ty); - } else return @bitCast(i32, field_index.data); - }, - .enum_numbered => { - const index = field_index.data; - const enum_data = ty.castTag(.enum_numbered).?.data; - return func.valueAsI32(enum_data.values.keys()[index], enum_data.tag_ty); - }, - else => unreachable, - } - } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = ty.intTagType(&int_tag_buffer); - return func.valueAsI32(val, int_tag_ty); - } + const mod = func.bin_file.base.options.module.?; + + switch (val.ip_index) { + .none => {}, + .bool_true => return 1, + .bool_false => return 0, + else => return switch (mod.intern_pool.indexToKey(val.ip_index)) { + .enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, mod), + .int => |int| intStorageAsI32(int.storage, mod), + .ptr => |ptr| intIndexAsI32(&mod.intern_pool, ptr.addr.int, mod), + .err => |err| @bitCast(i32, @intCast(Module.ErrorInt, mod.global_error_set.getIndex(err.name).?)), + else => unreachable, }, - .Int => switch (ty.intInfo(func.target).signedness) { - .signed => return @truncate(i32, val.toSignedInt(target)), - .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(target))), - }, - .ErrorSet => { - const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) 
catch unreachable; // passed invalid `Value` to function - return @bitCast(i32, kv.value); - }, - .Bool => return @intCast(i32, val.toSignedInt(target)), - .Pointer => return @intCast(i32, val.toSignedInt(target)), - else => unreachable, // Programmer called this function for an illegal type } + + return switch (ty.zigTypeTag(mod)) { + .ErrorSet => @bitCast(i32, val.getErrorInt(mod)), + else => unreachable, // Programmer called this function for an illegal type + }; +} + +fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, mod: *Module) i32 { + return intStorageAsI32(ip.indexToKey(int).int.storage, mod); +} + +fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 { + return switch (storage) { + .i64 => |x| @intCast(i32, x), + .u64 => |x| @bitCast(i32, @intCast(u32, x)), + .big_int => unreachable, + .lazy_align => |ty| @bitCast(i32, ty.toType().abiAlignment(mod)), + .lazy_size => |ty| @bitCast(i32, @intCast(u32, ty.toType().abiSize(mod))), + }; } fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const block_ty = func.air.getRefType(ty_pl.ty); - const wasm_block_ty = genBlockType(block_ty, func.target); + const wasm_block_ty = genBlockType(block_ty, mod); const extra = func.air.extraData(Air.Block, ty_pl.payload); const body = func.air.extra[extra.end..][0..extra.data.body_len]; // if wasm_block_ty is non-empty, we create a register to store the temporary value const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: { - const ty: Type = if (isByRef(block_ty, func.target)) Type.u32 else block_ty; + const ty: Type = if (isByRef(block_ty, mod)) Type.u32 else block_ty; break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten } else WValue.none; @@ -3369,7 +3472,7 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const operand_ty = func.air.typeOf(bin_op.lhs); + const operand_ty = func.typeOf(bin_op.lhs); const result = try (try func.cmp(lhs, rhs, operand_ty, op)).toLocal(func, Type.u32); // comparison result is always 32 bits func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs }); } @@ -3379,16 +3482,16 @@ fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) In /// NOTE: This leaves the result on top of the stack, rather than a new local. fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue { assert(!(lhs != .stack and rhs == .stack)); - if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = ty.optionalChild(&buf); - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Optional and !ty.optionalReprIsPayload(mod)) { + const payload_ty = ty.optionalChild(mod); + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { // When we hit this case, we must check the value of optionals // that are not pointers. 
This means first checking against non-null for // both lhs and rhs, as well as checking that the payloads of lhs and rhs match return func.cmpOptionals(lhs, rhs, ty, op); } - } else if (isByRef(ty, func.target)) { + } else if (isByRef(ty, mod)) { return func.cmpBigInt(lhs, rhs, ty, op); } else if (ty.isAnyFloat() and ty.floatBits(func.target) == 16) { return func.cmpFloat16(lhs, rhs, op); @@ -3401,13 +3504,13 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO const signedness: std.builtin.Signedness = blk: { // by default we treat the operand type as unsigned (i.e. bools and enum values) - if (ty.zigTypeTag() != .Int) break :blk .unsigned; // in case of an actual integer, we emit the correct signedness - break :blk ty.intInfo(func.target).signedness; + if (ty.zigTypeTag(mod) != .Int) break :blk .unsigned; + break :blk ty.intInfo(mod).signedness; }; const opcode: wasm.Opcode = buildOpcode(.{ - .valtype1 = typeToValtype(ty, func.target), + .valtype1 = typeToValtype(ty, mod), .op = switch (op) { .lt => .lt, .lte => .le, @@ -3464,11 +3567,12 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const br = func.air.instructions.items(.data)[inst].br; const block = func.blocks.get(br.block_inst).?; // if operand has codegen bits we should break with a value - if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime()) { + if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(mod)) { const operand = try func.resolveInst(br.operand); try func.lowerToStack(operand); @@ -3489,17 +3593,18 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const operand_ty = func.air.typeOf(ty_op.operand); + const operand_ty = func.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; const result = result: { - if (operand_ty.zigTypeTag() == .Bool) { + if (operand_ty.zigTypeTag(mod) == .Bool) { try func.emitWValue(operand); try func.addTag(.i32_eqz); const not_tmp = try func.allocLocal(operand_ty); try func.addLabel(.local_set, not_tmp.local.value); break :result not_tmp; } else { - const operand_bits = operand_ty.intInfo(func.target).bits; + const operand_bits = operand_ty.intInfo(mod).bits; const wasm_bits = toWasmBits(operand_bits) orelse { return func.fail("TODO: Implement binary NOT for integer with bitsize '{d}'", .{operand_bits}); }; @@ -3554,8 +3659,8 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; const result = result: { const operand = try func.resolveInst(ty_op.operand); - const wanted_ty = func.air.typeOfIndex(inst); - const given_ty = func.air.typeOf(ty_op.operand); + const wanted_ty = func.typeOfIndex(inst); + const given_ty = func.typeOf(ty_op.operand); if (given_ty.isAnyFloat() or wanted_ty.isAnyFloat()) { const bitcast_result = try func.bitcast(wanted_ty, given_ty, operand); break :result try bitcast_result.toLocal(func, wanted_ty); @@ -3566,16 +3671,17 @@ } fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn + const mod = func.bin_file.base.options.module.?; // if we bitcast a float to or from an integer we must use the 'reinterpret' instruction if
(!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand; - if (wanted_ty.tag() == .f16 or given_ty.tag() == .f16) return operand; - if (wanted_ty.bitSize(func.target) > 64) return operand; - assert((wanted_ty.isInt() and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt())); + if (wanted_ty.ip_index == .f16_type or given_ty.ip_index == .f16_type) return operand; + if (wanted_ty.bitSize(mod) > 64) return operand; + assert((wanted_ty.isInt(mod) and given_ty.isAnyFloat()) or (wanted_ty.isAnyFloat() and given_ty.isInt(mod))); const opcode = buildOpcode(.{ .op = .reinterpret, - .valtype1 = typeToValtype(wanted_ty, func.target), - .valtype2 = typeToValtype(given_ty, func.target), + .valtype1 = typeToValtype(wanted_ty, mod), + .valtype2 = typeToValtype(given_ty, mod), }); try func.emitWValue(operand); try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); @@ -3583,19 +3689,21 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn } fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.StructField, ty_pl.payload); const struct_ptr = try func.resolveInst(extra.data.struct_operand); - const struct_ty = func.air.typeOf(extra.data.struct_operand).childType(); + const struct_ty = func.typeOf(extra.data.struct_operand).childType(mod); const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ty, extra.data.field_index); func.finishAir(inst, result, &.{extra.data.struct_operand}); } fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try func.resolveInst(ty_op.operand); - const struct_ty = func.air.typeOf(ty_op.operand).childType(); + const struct_ty = func.typeOf(ty_op.operand).childType(mod); const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ty, index); func.finishAir(inst, result, &.{ty_op.operand}); @@ -3609,19 +3717,20 @@ fn structFieldPtr( struct_ty: Type, index: u32, ) InnerError!WValue { - const result_ty = func.air.typeOfIndex(inst); - const offset = switch (struct_ty.containerLayout()) { - .Packed => switch (struct_ty.zigTypeTag()) { + const mod = func.bin_file.base.options.module.?; + const result_ty = func.typeOfIndex(inst); + const offset = switch (struct_ty.containerLayout(mod)) { + .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => offset: { - if (result_ty.ptrInfo().data.host_size != 0) { + if (result_ty.ptrInfo(mod).host_size != 0) { break :offset @as(u32, 0); } - break :offset struct_ty.packedStructFieldByteOffset(index, func.target); + break :offset struct_ty.packedStructFieldByteOffset(index, mod); }, .Union => 0, else => unreachable, }, - else => struct_ty.structFieldOffset(index, func.target), + else => struct_ty.structFieldOffset(index, mod), }; // save a load and store when we can simply reuse the operand if (offset == 0) { @@ -3636,22 +3745,23 @@ fn structFieldPtr( } fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data; - const struct_ty = func.air.typeOf(struct_field.struct_operand); + const struct_ty = 
func.typeOf(struct_field.struct_operand); const operand = try func.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; - const field_ty = struct_ty.structFieldType(field_index); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); + const field_ty = struct_ty.structFieldType(field_index, mod); + if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand}); - const result = switch (struct_ty.containerLayout()) { - .Packed => switch (struct_ty.zigTypeTag()) { + const result = switch (struct_ty.containerLayout(mod)) { + .Packed => switch (struct_ty.zigTypeTag(mod)) { .Struct => result: { - const struct_obj = struct_ty.castTag(.@"struct").?.data; - const offset = struct_obj.packedFieldBitOffset(func.target, field_index); + const struct_obj = mod.typeToStruct(struct_ty).?; + const offset = struct_obj.packedFieldBitOffset(mod, field_index); const backing_ty = struct_obj.backing_int_ty; - const wasm_bits = toWasmBits(backing_ty.intInfo(func.target).bits) orelse { + const wasm_bits = toWasmBits(backing_ty.intInfo(mod).bits) orelse { return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{}); }; const const_wvalue = if (wasm_bits == 32) @@ -3667,25 +3777,17 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else try func.binOp(operand, const_wvalue, backing_ty, .shr); - if (field_ty.zigTypeTag() == .Float) { - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&payload.base); + if (field_ty.zigTypeTag(mod) == .Float) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); - } else if (field_ty.isPtrAtRuntime() and struct_obj.fields.count() == 1) { + } else if (field_ty.isPtrAtRuntime(mod) and struct_obj.fields.count() == 1) { // In this case we do not have to perform any transformations, // we can simply reuse the operand. 
break :result func.reuseOperand(struct_field.struct_operand, operand); - } else if (field_ty.isPtrAtRuntime()) { - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&payload.base); + } else if (field_ty.isPtrAtRuntime(mod)) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(shifted_value, int_type, backing_ty); break :result try truncated.toLocal(func, field_ty); } @@ -3693,8 +3795,8 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :result try truncated.toLocal(func, field_ty); }, .Union => result: { - if (isByRef(struct_ty, func.target)) { - if (!isByRef(field_ty, func.target)) { + if (isByRef(struct_ty, mod)) { + if (!isByRef(field_ty, mod)) { const val = try func.load(operand, field_ty, 0); break :result try val.toLocal(func, field_ty); } else { @@ -3704,26 +3806,14 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } } - var payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, struct_ty.bitSize(func.target)), - }; - const union_int_type = Type.initPayload(&payload.base); - if (field_ty.zigTypeTag() == .Float) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + const union_int_type = try mod.intType(.unsigned, @intCast(u16, struct_ty.bitSize(mod))); + if (field_ty.zigTypeTag(mod) == .Float) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(operand, int_type, union_int_type); const bitcasted = try func.bitcast(field_ty, int_type, truncated); break :result try bitcasted.toLocal(func, field_ty); - } else if (field_ty.isPtrAtRuntime()) { - var int_payload: Type.Payload.Bits = .{ - .base = .{ .tag = .int_unsigned }, - .data = @intCast(u16, field_ty.bitSize(func.target)), - }; - const int_type = Type.initPayload(&int_payload.base); + } else if (field_ty.isPtrAtRuntime(mod)) { + const int_type = try mod.intType(.unsigned, @intCast(u16, field_ty.bitSize(mod))); const truncated = try func.trunc(operand, int_type, union_int_type); break :result try truncated.toLocal(func, field_ty); } @@ -3733,11 +3823,10 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => unreachable, }, else => result: { - const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)}); + const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, mod)) orelse { + return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(mod)}); }; - if (isByRef(field_ty, func.target)) { + if (isByRef(field_ty, mod)) { switch (operand) { .stack_offset => |stack_offset| { break :result WValue{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } }; @@ -3754,11 +3843,12 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; // result type is always 'noreturn' const blocktype = wasm.block_empty; const pl_op = 
func.air.instructions.items(.data)[inst].pl_op; const target = try func.resolveInst(pl_op.operand); - const target_ty = func.air.typeOf(pl_op.operand); + const target_ty = func.typeOf(pl_op.operand); const switch_br = func.air.extraData(Air.SwitchBr, pl_op.payload); const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.data.cases_len + 1); defer func.gpa.free(liveness.deaths); @@ -3787,7 +3877,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { errdefer func.gpa.free(values); for (items, 0..) |ref, i| { - const item_val = func.air.value(ref).?; + const item_val = (try func.air.value(ref, mod)).?; const int_val = func.valueAsI32(item_val, target_ty); if (lowest_maybe == null or int_val < lowest_maybe.?) { lowest_maybe = int_val; @@ -3810,7 +3900,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // When the target is an integer size larger than u32, we have no way to use the value // as an index; therefore we also use an if/else-chain for those cases. // TODO: Benchmark this to find a proper value; LLVM seems to draw the line at '40~45'. - const is_sparse = highest - lowest > 50 or target_ty.bitSize(func.target) > 32; + const is_sparse = highest - lowest > 50 or target_ty.bitSize(mod) > 32; const else_body = func.air.extra[extra_index..][0..switch_br.data.else_body_len]; const has_else_body = else_body.len != 0; @@ -3855,7 +3945,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // for errors that are not present in any branch. This is fine as this default // case will never be hit for those cases, but we do save runtime cost and size // by using a jump table for this instead of if-else chains. - break :blk if (has_else_body or target_ty.zigTypeTag() == .ErrorSet) case_i else unreachable; + break :blk if (has_else_body or target_ty.zigTypeTag(mod) == .ErrorSet) case_i else unreachable; }; func.mir_extra.appendAssumeCapacity(idx); } else if (has_else_body) { @@ -3866,10 +3956,10 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const signedness: std.builtin.Signedness = blk: { // by default we treat the operand type as unsigned (i.e. bools and enum values) - if (target_ty.zigTypeTag() != .Int) break :blk .unsigned; // in case of an actual integer, we emit the correct signedness - break :blk target_ty.intInfo(func.target).signedness; + if (target_ty.zigTypeTag(mod) != .Int) break :blk .unsigned; + break :blk target_ty.intInfo(mod).signedness; }; try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @boolToInt(has_else_body)); @@ -3882,7 +3972,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const val = try func.lowerConstant(case.values[0].value, target_ty); try func.emitWValue(val); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, func.target), + .valtype1 = typeToValtype(target_ty, mod), .op = .ne, // not equal, because we want to jump out of this block if it does not match the condition.
.signedness = signedness, }); @@ -3896,7 +3986,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const val = try func.lowerConstant(value.value, target_ty); try func.emitWValue(val); const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, func.target), + .valtype1 = typeToValtype(target_ty, mod), .op = .eq, .signedness = signedness, }); @@ -3933,13 +4023,14 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const err_union_ty = func.air.typeOf(un_op); - const pl_ty = err_union_ty.errorUnionPayload(); + const err_union_ty = func.typeOf(un_op); + const pl_ty = err_union_ty.errorUnionPayload(mod); const result = result: { - if (err_union_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { switch (opcode) { .i32_ne => break :result WValue{ .imm32 = 0 }, .i32_eq => break :result WValue{ .imm32 = 1 }, @@ -3948,10 +4039,10 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } try func.emitWValue(operand); - if (pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { try func.addMemArg(.i32_load16_u, .{ - .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, func.target)), - .alignment = Type.anyerror.abiAlignment(func.target), + .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, mod)), + .alignment = Type.anyerror.abiAlignment(mod), }); } @@ -3967,23 +4058,24 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro } fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOf(ty_op.operand); - const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; - const payload_ty = err_ty.errorUnionPayload(); + const op_ty = func.typeOf(ty_op.operand); + const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty; + const payload_ty = err_ty.errorUnionPayload(mod); const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { if (op_is_ptr) { break :result func.reuseOperand(ty_op.operand, operand); } break :result WValue{ .none = {} }; } - const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)); - if (op_is_ptr or isByRef(payload_ty, func.target)) { + const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, mod)); + if (op_is_ptr or isByRef(payload_ty, mod)) { break :result try func.buildPointerOffset(operand, pl_offset, .new); } @@ -3994,48 +4086,50 @@ fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: boo } fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOf(ty_op.operand); - const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; - const payload_ty = err_ty.errorUnionPayload(); + const op_ty = 
func.typeOf(ty_op.operand); + const err_ty = if (op_is_ptr) op_ty.childType(mod) else op_ty; + const payload_ty = err_ty.errorUnionPayload(mod); const result = result: { - if (err_ty.errorUnionSet().errorSetIsEmpty()) { + if (err_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) { break :result WValue{ .imm32 = 0 }; } - if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime()) { + if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, func.target))); + const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, mod))); break :result try error_val.toLocal(func, Type.anyerror); }; func.finishAir(inst, result, &.{ty_op.operand}); } fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const err_ty = func.air.typeOfIndex(inst); + const err_ty = func.typeOfIndex(inst); - const pl_ty = func.air.typeOf(ty_op.operand); + const pl_ty = func.typeOf(ty_op.operand); const result = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } const err_union = try func.allocStack(err_ty); - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new); + const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); try func.store(payload_ptr, operand, pl_ty, 0); // ensure we also write '0' to the error part, so any present stack value gets overwritten by it. 
try func.emitWValue(err_union); try func.addImm32(0); - const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target)); + const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod)); try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 }); break :result err_union; }; @@ -4043,24 +4137,25 @@ fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void } fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const err_ty = func.air.getRefType(ty_op.ty); - const pl_ty = err_ty.errorUnionPayload(); + const pl_ty = err_ty.errorUnionPayload(mod); const result = result: { - if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } const err_union = try func.allocStack(err_ty); // store error value - try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, func.target))); + try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, mod))); // write 'undefined' to the payload - const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new); - const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(func.target)); + const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, mod)), .new); + const len = @intCast(u32, err_ty.errorUnionPayload(mod).abiSize(mod)); try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa }); break :result err_union; @@ -4073,16 +4168,17 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty = func.air.getRefType(ty_op.ty); const operand = try func.resolveInst(ty_op.operand); - const operand_ty = func.air.typeOf(ty_op.operand); - if (ty.zigTypeTag() == .Vector or operand_ty.zigTypeTag() == .Vector) { + const operand_ty = func.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; + if (ty.zigTypeTag(mod) == .Vector or operand_ty.zigTypeTag(mod) == .Vector) { return func.fail("todo Wasm intcast for vectors", .{}); } - if (ty.abiSize(func.target) > 16 or operand_ty.abiSize(func.target) > 16) { + if (ty.abiSize(mod) > 16 or operand_ty.abiSize(mod) > 16) { return func.fail("todo Wasm intcast for bitsize > 128", .{}); } - const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(func.target))).?; - const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const op_bits = toWasmBits(@intCast(u16, operand_ty.bitSize(mod))).?; + const wanted_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; const result = if (op_bits == wanted_bits) func.reuseOperand(ty_op.operand, operand) else @@ -4096,8 +4192,9 @@ fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { /// Asserts type's bitsize <= 128 /// NOTE: May leave the result on the top of the stack. 
fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue { - const given_bitsize = @intCast(u16, given.bitSize(func.target)); - const wanted_bitsize = @intCast(u16, wanted.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + const given_bitsize = @intCast(u16, given.bitSize(mod)); + const wanted_bitsize = @intCast(u16, wanted.bitSize(mod)); assert(given_bitsize <= 128); assert(wanted_bitsize <= 128); @@ -4110,7 +4207,7 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro try func.addTag(.i32_wrap_i64); } else if (op_bits == 32 and wanted_bits > 32 and wanted_bits <= 64) { try func.emitWValue(operand); - try func.addTag(if (wanted.isSignedInt()) .i64_extend_i32_s else .i64_extend_i32_u); + try func.addTag(if (wanted.isSignedInt(mod)) .i64_extend_i32_s else .i64_extend_i32_u); } else if (wanted_bits == 128) { // for 128-bit integers we store the integer in the virtual stack, rather than a local const stack_ptr = try func.allocStack(wanted); @@ -4119,14 +4216,14 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro // for 32-bit integers, we first coerce the value into a 64-bit integer before storing it // meaning fewer store operations are required. const lhs = if (op_bits == 32) blk: { - break :blk try func.intcast(operand, given, if (wanted.isSignedInt()) Type.i64 else Type.u64); + break :blk try func.intcast(operand, given, if (wanted.isSignedInt(mod)) Type.i64 else Type.u64); } else operand; // store msb first try func.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset()); // For signed integers we shift msb by 63 (64-bit integer - 1 sign bit) and store the remaining value - if (wanted.isSignedInt()) { + if (wanted.isSignedInt(mod)) { try func.emitWValue(stack_ptr); const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr); try func.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset()); @@ -4141,11 +4238,12 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro } fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const op_ty = func.air.typeOf(un_op); - const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty; + const op_ty = func.typeOf(un_op); + const optional_ty = if (op_kind == .ptr) op_ty.childType(mod) else op_ty; const is_null = try func.isNull(operand, optional_ty, opcode); const result = try is_null.toLocal(func, optional_ty); func.finishAir(inst, result, &.{un_op}); @@ -4154,20 +4252,19 @@ /// For a given type and operand, checks if it's considered `null`.
/// NOTE: Leaves the result on the stack fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue { + const mod = func.bin_file.base.options.module.?; try func.emitWValue(operand); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = optional_ty.optionalChild(&buf); - if (!optional_ty.optionalReprIsPayload()) { + const payload_ty = optional_ty.optionalChild(mod); + if (!optional_ty.optionalReprIsPayload(mod)) { // When payload is zero-bits, we can treat operand as a value, rather than // a pointer to the stack value - if (payload_ty.hasRuntimeBitsIgnoreComptime()) { - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(module)}); + if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(mod)}); }; try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 }); } - } else if (payload_ty.isSlice()) { + } else if (payload_ty.isSlice(mod)) { switch (func.arch()) { .wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }), .wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }), @@ -4183,18 +4280,19 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod } fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const opt_ty = func.air.typeOf(ty_op.operand); - const payload_ty = func.air.typeOfIndex(inst); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const opt_ty = func.typeOf(ty_op.operand); + const payload_ty = func.typeOfIndex(inst); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.finishAir(inst, .none, &.{ty_op.operand}); } const result = result: { const operand = try func.resolveInst(ty_op.operand); - if (opt_ty.optionalReprIsPayload()) break :result func.reuseOperand(ty_op.operand, operand); + if (opt_ty.optionalReprIsPayload(mod)) break :result func.reuseOperand(ty_op.operand, operand); - if (isByRef(payload_ty, func.target)) { + if (isByRef(payload_ty, mod)) { break :result try func.buildPointerOffset(operand, 0, .new); } @@ -4205,14 +4303,14 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const opt_ty = func.air.typeOf(ty_op.operand).childType(); + const opt_ty = func.typeOf(ty_op.operand).childType(mod); const result = result: { - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.optionalReprIsPayload()) { + const payload_ty = opt_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod) or opt_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } @@ -4222,22 +4320,21 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airOptionalPayloadPtrSet(func: *CodeGen, 
inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const opt_ty = func.air.typeOf(ty_op.operand).childType(); - var buf: Type.Payload.ElemType = undefined; - const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const opt_ty = func.typeOf(ty_op.operand).childType(mod); + const payload_ty = opt_ty.optionalChild(mod); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); } - if (opt_ty.optionalReprIsPayload()) { + if (opt_ty.optionalReprIsPayload(mod)) { return func.finishAir(inst, operand, &.{ty_op.operand}); } - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)}); + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(mod)}); }; try func.emitWValue(operand); @@ -4250,11 +4347,12 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[inst].ty_op; - const payload_ty = func.air.typeOf(ty_op.operand); + const payload_ty = func.typeOf(ty_op.operand); + const mod = func.bin_file.base.options.module.?; const result = result: { - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - const non_null_bit = try func.allocStack(Type.initTag(.u1)); + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + const non_null_bit = try func.allocStack(Type.u1); try func.emitWValue(non_null_bit); try func.addImm32(1); try func.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 }); @@ -4262,13 +4360,12 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } const operand = try func.resolveInst(ty_op.operand); - const op_ty = func.air.typeOfIndex(inst); - if (op_ty.optionalReprIsPayload()) { + const op_ty = func.typeOfIndex(inst); + if (op_ty.optionalReprIsPayload(mod)) { break :result func.reuseOperand(ty_op.operand, operand); } - const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse { - const module = func.bin_file.base.options.module.?; - return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)}); + const offset = std.math.cast(u32, payload_ty.abiSize(mod)) orelse { + return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(mod)}); }; // Create optional type, set the non-null bit, and store the operand inside the optional type @@ -4291,7 +4388,7 @@ fn airSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const slice_ty = func.air.typeOfIndex(inst); + const slice_ty = func.typeOfIndex(inst); const slice = try func.allocStack(slice_ty); try func.store(slice, lhs, Type.usize, 0); @@ -4308,13 +4405,14 @@ fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = 
func.air.instructions.items(.data)[inst].bin_op; - const slice_ty = func.air.typeOf(bin_op.lhs); + const slice_ty = func.typeOf(bin_op.lhs); const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); - const elem_ty = slice_ty.childType(); - const elem_size = elem_ty.abiSize(func.target); + const elem_ty = slice_ty.childType(mod); + const elem_size = elem_ty.abiSize(mod); // load pointer onto the stack _ = try func.load(slice, Type.usize, 0); @@ -4328,7 +4426,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result_ptr = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result_ptr.local.value); - const result = if (!isByRef(elem_ty, func.target)) result: { + const result = if (!isByRef(elem_ty, mod)) result: { const elem_val = try func.load(result_ptr, elem_ty, 0); break :result try elem_val.toLocal(func, elem_ty); } else result_ptr; @@ -4337,11 +4435,12 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; - const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const elem_size = elem_ty.abiSize(func.target); + const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod); + const elem_size = elem_ty.abiSize(mod); const slice = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); @@ -4380,7 +4479,7 @@ fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const operand = try func.resolveInst(ty_op.operand); const wanted_ty = func.air.getRefType(ty_op.ty); - const op_ty = func.air.typeOf(ty_op.operand); + const op_ty = func.typeOf(ty_op.operand); const result = try func.trunc(operand, wanted_ty, op_ty); func.finishAir(inst, try result.toLocal(func, wanted_ty), &.{ty_op.operand}); @@ -4389,13 +4488,14 @@ /// Truncates a given operand to a given type, discarding any overflowed bits.
fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue { - const given_bits = @intCast(u16, given_ty.bitSize(func.target)); + const mod = func.bin_file.base.options.module.?; + const given_bits = @intCast(u16, given_ty.bitSize(mod)); if (toWasmBits(given_bits) == null) { return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits}); } var result = try func.intcast(operand, given_ty, wanted_ty); - const wanted_bits = @intCast(u16, wanted_ty.bitSize(func.target)); + const wanted_bits = @intCast(u16, wanted_ty.bitSize(mod)); const wasm_bits = toWasmBits(wanted_bits).?; if (wasm_bits != wanted_bits) { result = try func.wrapOperand(result, wanted_ty); @@ -4412,32 +4512,34 @@ fn airBoolToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); - const array_ty = func.air.typeOf(ty_op.operand).childType(); + const array_ty = func.typeOf(ty_op.operand).childType(mod); const slice_ty = func.air.getRefType(ty_op.ty); // create a slice on the stack const slice_local = try func.allocStack(slice_ty); // store the array ptr in the slice - if (array_ty.hasRuntimeBitsIgnoreComptime()) { + if (array_ty.hasRuntimeBitsIgnoreComptime(mod)) { try func.store(slice_local, operand, Type.usize, 0); } // store the length of the array in the slice - const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen()) }; + const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen(mod)) }; try func.store(slice_local, len, Type.usize, func.ptrSize()); func.finishAir(inst, slice_local, &.{ty_op.operand}); } fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const un_op = func.air.instructions.items(.data)[inst].un_op; const operand = try func.resolveInst(un_op); - const ptr_ty = func.air.typeOf(un_op); - const result = if (ptr_ty.isSlice()) + const ptr_ty = func.typeOf(un_op); + const result = if (ptr_ty.isSlice(mod)) try func.slicePtr(operand) else switch (operand) { // for stack offset, return a pointer to this offset. 
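For context on `trunc` above: when the wanted bit width is not a native wasm width (32 or 64 bits), the wrap performed by `wrapOperand` presumably comes down to masking off the high bits. A minimal sketch of that masking, assuming an unsigned value already widened to 64 bits and a wanted width of at most 64 bits (`wrapToBits` is a hypothetical helper, not part of this patch):

fn wrapToBits(value: u64, bits: u7) u64 {
    // A 64-bit value is already a native wasm width; nothing to discard.
    if (bits == 64) return value;
    // Keep only the low `bits` bits, e.g. wrapToBits(0x1FF, 8) == 0xFF.
    const mask = (@as(u64, 1) << @intCast(u6, bits)) - 1;
    return value & mask;
}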
@@ -4448,16 +4550,17 @@ fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ptr_ty = func.air.typeOf(bin_op.lhs); + const ptr_ty = func.typeOf(bin_op.lhs); const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); - const elem_ty = ptr_ty.childType(); - const elem_size = elem_ty.abiSize(func.target); + const elem_ty = ptr_ty.childType(mod); + const elem_size = elem_ty.abiSize(mod); // load pointer onto the stack - if (ptr_ty.isSlice()) { + if (ptr_ty.isSlice(mod)) { _ = try func.load(ptr, Type.usize, 0); } else { try func.lowerToStack(ptr); @@ -4472,7 +4575,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const elem_result = val: { var result = try func.allocLocal(Type.usize); try func.addLabel(.local_set, result.local.value); - if (isByRef(elem_ty, func.target)) { + if (isByRef(elem_ty, mod)) { break :val result; } defer result.free(func); // only free if it's not returned like above @@ -4484,18 +4587,19 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; - const ptr_ty = func.air.typeOf(bin_op.lhs); - const elem_ty = func.air.getRefType(ty_pl.ty).childType(); - const elem_size = elem_ty.abiSize(func.target); + const ptr_ty = func.typeOf(bin_op.lhs); + const elem_ty = func.air.getRefType(ty_pl.ty).childType(mod); + const elem_size = elem_ty.abiSize(mod); const ptr = try func.resolveInst(bin_op.lhs); const index = try func.resolveInst(bin_op.rhs); // load pointer onto the stack - if (ptr_ty.isSlice()) { + if (ptr_ty.isSlice(mod)) { _ = try func.load(ptr, Type.usize, 0); } else { try func.lowerToStack(ptr); @@ -4513,24 +4617,25 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data; const ptr = try func.resolveInst(bin_op.lhs); const offset = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); - const pointee_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), + const ptr_ty = func.typeOf(bin_op.lhs); + const pointee_ty = switch (ptr_ty.ptrSize(mod)) { + .One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type + else => ptr_ty.childType(mod), }; - const valtype = typeToValtype(Type.usize, func.target); + const valtype = typeToValtype(Type.usize, mod); const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul }); const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op }); try func.lowerToStack(ptr); try func.emitWValue(offset); - try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(func.target)))); + try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(mod)))); try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode)); try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode)); @@ -4540,6 
@@ -4540,6 +4645,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
 }
 
 fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     if (safety) {
         // TODO if the value is undef, write 0xaa bytes to dest
     } else {
@@ -4548,18 +4654,18 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const ptr = try func.resolveInst(bin_op.lhs);
-    const ptr_ty = func.air.typeOf(bin_op.lhs);
+    const ptr_ty = func.typeOf(bin_op.lhs);
     const value = try func.resolveInst(bin_op.rhs);
-    const len = switch (ptr_ty.ptrSize()) {
+    const len = switch (ptr_ty.ptrSize(mod)) {
         .Slice => try func.sliceLen(ptr),
-        .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType().arrayLen()) }),
+        .One => @as(WValue, .{ .imm32 = @intCast(u32, ptr_ty.childType(mod).arrayLen(mod)) }),
         .C, .Many => unreachable,
     };
-    const elem_ty = if (ptr_ty.ptrSize() == .One)
-        ptr_ty.childType().childType()
+    const elem_ty = if (ptr_ty.ptrSize(mod) == .One)
+        ptr_ty.childType(mod).childType(mod)
     else
-        ptr_ty.childType();
+        ptr_ty.childType(mod);
 
     const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty);
     try func.memset(elem_ty, dst_ptr, len, value);
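The length operand above depends on the pointer kind: a slice carries its length at runtime, while a pointer to an array carries it in the type. A minimal sketch of the user-facing operation being lowered:

    const std = @import("std");

    test "@memset fills every element of the destination" {
        var buf: [8]u8 = undefined;
        const slice: []u8 = &buf; // length known at runtime via slice.len
        @memset(slice, 0xaa); // a *[8]u8 destination takes its length from the type
        try std.testing.expect(std.mem.allEqual(u8, &buf, 0xaa));
    }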
@@ -4572,7 +4678,8 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
 /// this to wasm's memset instruction. When the feature is not present,
 /// we implement it manually.
 fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!WValue {
-    const abi_size = @intCast(u32, elem_ty.abiSize(func.target));
+    const mod = func.bin_file.base.options.module.?;
+    const abi_size = @intCast(u32, elem_ty.abiSize(mod));
 
     // When bulk_memory is enabled, we lower it to wasm's memset instruction.
     // If not, we lower it ourselves.
@@ -4660,30 +4767,31 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue
 }
 
 fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
-    const array_ty = func.air.typeOf(bin_op.lhs);
+    const array_ty = func.typeOf(bin_op.lhs);
     const array = try func.resolveInst(bin_op.lhs);
     const index = try func.resolveInst(bin_op.rhs);
-    const elem_ty = array_ty.childType();
-    const elem_size = elem_ty.abiSize(func.target);
+    const elem_ty = array_ty.childType(mod);
+    const elem_size = elem_ty.abiSize(mod);
 
-    if (isByRef(array_ty, func.target)) {
+    if (isByRef(array_ty, mod)) {
         try func.lowerToStack(array);
         try func.emitWValue(index);
         try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
         try func.addTag(.i32_mul);
         try func.addTag(.i32_add);
     } else {
-        std.debug.assert(array_ty.zigTypeTag() == .Vector);
+        std.debug.assert(array_ty.zigTypeTag(mod) == .Vector);
 
         switch (index) {
             inline .imm32, .imm64 => |lane| {
-                const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(func.target)) {
-                    8 => if (elem_ty.isSignedInt()) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
-                    16 => if (elem_ty.isSignedInt()) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
-                    32 => if (elem_ty.isInt()) .i32x4_extract_lane else .f32x4_extract_lane,
-                    64 => if (elem_ty.isInt()) .i64x2_extract_lane else .f64x2_extract_lane,
+                const opcode: wasm.SimdOpcode = switch (elem_ty.bitSize(mod)) {
+                    8 => if (elem_ty.isSignedInt(mod)) .i8x16_extract_lane_s else .i8x16_extract_lane_u,
+                    16 => if (elem_ty.isSignedInt(mod)) .i16x8_extract_lane_s else .i16x8_extract_lane_u,
+                    32 => if (elem_ty.isInt(mod)) .i32x4_extract_lane else .f32x4_extract_lane,
+                    64 => if (elem_ty.isInt(mod)) .i64x2_extract_lane else .f64x2_extract_lane,
                     else => unreachable,
                 };
@@ -4715,7 +4823,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     var result = try func.allocLocal(Type.usize);
     try func.addLabel(.local_set, result.local.value);
-    if (isByRef(elem_ty, func.target)) {
+    if (isByRef(elem_ty, mod)) {
         break :val result;
     }
     defer result.free(func); // only free if no longer needed and not returned like above
@@ -4728,22 +4836,23 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
-    const dest_ty = func.air.typeOfIndex(inst);
-    const op_ty = func.air.typeOf(ty_op.operand);
+    const dest_ty = func.typeOfIndex(inst);
+    const op_ty = func.typeOf(ty_op.operand);
 
-    if (op_ty.abiSize(func.target) > 8) {
+    if (op_ty.abiSize(mod) > 8) {
         return func.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{});
     }
 
     try func.emitWValue(operand);
     const op = buildOpcode(.{
         .op = .trunc,
-        .valtype1 = typeToValtype(dest_ty, func.target),
-        .valtype2 = typeToValtype(op_ty, func.target),
-        .signedness = if (dest_ty.isSignedInt()) .signed else .unsigned,
+        .valtype1 = typeToValtype(dest_ty, mod),
+        .valtype2 = typeToValtype(op_ty, mod),
+        .signedness = if (dest_ty.isSignedInt(mod)) .signed else .unsigned,
     });
     try func.addTag(Mir.Inst.Tag.fromOpcode(op));
 
     const wrapped = try func.wrapOperand(.{ .stack = {} }, dest_ty);
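airFloatToInt emits a wasm `trunc` opcode (round toward zero) and then wraps the result to the destination's bit width. The user-visible behavior, written with the builtin names of the Zig version this diff targets:

    const std = @import("std");

    test "float to int conversion truncates toward zero" {
        const x: f32 = -3.9;
        try std.testing.expectEqual(@as(i32, -3), @floatToInt(i32, x));
    }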
@@ -4752,22 +4861,23 @@ fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
-    const dest_ty = func.air.typeOfIndex(inst);
-    const op_ty = func.air.typeOf(ty_op.operand);
+    const dest_ty = func.typeOfIndex(inst);
+    const op_ty = func.typeOf(ty_op.operand);
 
-    if (op_ty.abiSize(func.target) > 8) {
+    if (op_ty.abiSize(mod) > 8) {
         return func.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{});
    }
 
     try func.emitWValue(operand);
     const op = buildOpcode(.{
         .op = .convert,
-        .valtype1 = typeToValtype(dest_ty, func.target),
-        .valtype2 = typeToValtype(op_ty, func.target),
-        .signedness = if (op_ty.isSignedInt()) .signed else .unsigned,
+        .valtype1 = typeToValtype(dest_ty, mod),
+        .valtype2 = typeToValtype(op_ty, mod),
+        .signedness = if (op_ty.isSignedInt(mod)) .signed else .unsigned,
     });
     try func.addTag(Mir.Inst.Tag.fromOpcode(op));
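airIntToFloat picks wasm's `convert` opcode, with the `_s`/`_u` variant chosen from the operand's signedness. Sketch (era-appropriate builtin names, not part of the patch):

    const std = @import("std");

    test "int to float picks a signed or unsigned convert" {
        try std.testing.expectEqual(@as(f64, -5), @intToFloat(f64, @as(i32, -5))); // f64.convert_i32_s
        try std.testing.expectEqual(@as(f64, 5), @intToFloat(f64, @as(u32, 5))); // f64.convert_i32_u
    }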
@@ -4777,18 +4887,19 @@ fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
     const operand = try func.resolveInst(ty_op.operand);
 
-    const ty = func.air.typeOfIndex(inst);
-    const elem_ty = ty.childType();
+    const ty = func.typeOfIndex(inst);
+    const elem_ty = ty.childType(mod);
 
-    if (determineSimdStoreStrategy(ty, func.target) == .direct) blk: {
+    if (determineSimdStoreStrategy(ty, mod) == .direct) blk: {
         switch (operand) {
             // when the operand lives in the linear memory section, we can directly
             // load and splat the value at once. Meaning we do not first have to load
             // the scalar value onto the stack.
             .stack_offset, .memory, .memory_offset => {
-                const opcode = switch (elem_ty.bitSize(func.target)) {
+                const opcode = switch (elem_ty.bitSize(mod)) {
                     8 => std.wasm.simdOpcode(.v128_load8_splat),
                     16 => std.wasm.simdOpcode(.v128_load16_splat),
                     32 => std.wasm.simdOpcode(.v128_load32_splat),
@@ -4803,18 +4914,18 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 try func.mir_extra.appendSlice(func.gpa, &[_]u32{
                     opcode,
                     operand.offset(),
-                    elem_ty.abiAlignment(func.target),
+                    elem_ty.abiAlignment(mod),
                 });
                 try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
                 try func.addLabel(.local_set, result.local.value);
                 return func.finishAir(inst, result, &.{ty_op.operand});
             },
             .local => {
-                const opcode = switch (elem_ty.bitSize(func.target)) {
+                const opcode = switch (elem_ty.bitSize(mod)) {
                     8 => std.wasm.simdOpcode(.i8x16_splat),
                     16 => std.wasm.simdOpcode(.i16x8_splat),
-                    32 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
-                    64 => if (elem_ty.isInt()) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
+                    32 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i32x4_splat) else std.wasm.simdOpcode(.f32x4_splat),
+                    64 => if (elem_ty.isInt(mod)) std.wasm.simdOpcode(.i64x2_splat) else std.wasm.simdOpcode(.f64x2_splat),
                     else => break :blk, // Cannot make use of simd-instructions
                 };
                 const result = try func.allocLocal(ty);
@@ -4828,14 +4939,14 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             else => unreachable,
         }
     }
-    const elem_size = elem_ty.bitSize(func.target);
-    const vector_len = @intCast(usize, ty.vectorLen());
+    const elem_size = elem_ty.bitSize(mod);
+    const vector_len = @intCast(usize, ty.vectorLen(mod));
     if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
         return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
    }
 
     const result = try func.allocStack(ty);
-    const elem_byte_size = @intCast(u32, elem_ty.abiSize(func.target));
+    const elem_byte_size = @intCast(u32, elem_ty.abiSize(mod));
     var index: usize = 0;
     var offset: u32 = 0;
     while (index < vector_len) : (index += 1) {
@@ -4855,26 +4966,25 @@ fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
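The splat lowering above chooses between `v128_loadN_splat`, the `iNxM_splat` opcodes, and a scalar store loop depending on where the operand lives; the lane reads come back through the `extract_lane` opcodes handled in airArrayElemVal. At the language level (illustrative sketch):

    const std = @import("std");

    test "a splatted vector repeats one scalar in every lane" {
        const arr = [4]u32{ 7, 7, 7, 7 };
        const v: @Vector(4, u32) = arr; // same result a splat of 7 produces
        try std.testing.expectEqual(@as(u32, 7), v[2]); // lane read = extract_lane
    }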
 
 fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const inst_ty = func.air.typeOfIndex(inst);
+    const mod = func.bin_file.base.options.module.?;
+    const inst_ty = func.typeOfIndex(inst);
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data;
 
     const a = try func.resolveInst(extra.a);
     const b = try func.resolveInst(extra.b);
-    const mask = func.air.values[extra.mask];
+    const mask = extra.mask.toValue();
     const mask_len = extra.mask_len;
 
-    const child_ty = inst_ty.childType();
-    const elem_size = child_ty.abiSize(func.target);
+    const child_ty = inst_ty.childType(mod);
+    const elem_size = child_ty.abiSize(mod);
 
-    const module = func.bin_file.base.options.module.?;
     // TODO: One of them could be by ref; handle in loop
-    if (isByRef(func.air.typeOf(extra.a), func.target) or isByRef(inst_ty, func.target)) {
+    if (isByRef(func.typeOf(extra.a), mod) or isByRef(inst_ty, mod)) {
         const result = try func.allocStack(inst_ty);
 
         for (0..mask_len) |index| {
-            var buf: Value.ElemValueBuffer = undefined;
-            const value = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target);
+            const value = (try mask.elemValue(mod, index)).toSignedInt(mod);
 
             try func.emitWValue(result);
@@ -4894,8 +5004,7 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         var lanes = std.mem.asBytes(operands[1..]);
         for (0..@intCast(usize, mask_len)) |index| {
-            var buf: Value.ElemValueBuffer = undefined;
-            const mask_elem = mask.elemValueBuffer(module, index, &buf).toSignedInt(func.target);
+            const mask_elem = (try mask.elemValue(mod, index)).toSignedInt(mod);
             const base_index = if (mask_elem >= 0)
                 @intCast(u8, @intCast(i64, elem_size) * mask_elem)
             else
@@ -4926,25 +5035,26 @@ fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
-    const result_ty = func.air.typeOfIndex(inst);
-    const len = @intCast(usize, result_ty.arrayLen());
+    const result_ty = func.typeOfIndex(inst);
+    const len = @intCast(usize, result_ty.arrayLen(mod));
     const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
 
     const result: WValue = result_value: {
-        switch (result_ty.zigTypeTag()) {
+        switch (result_ty.zigTypeTag(mod)) {
             .Array => {
                 const result = try func.allocStack(result_ty);
-                const elem_ty = result_ty.childType();
-                const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
-                const sentinel = if (result_ty.sentinel()) |sent| blk: {
+                const elem_ty = result_ty.childType(mod);
+                const elem_size = @intCast(u32, elem_ty.abiSize(mod));
+                const sentinel = if (result_ty.sentinel(mod)) |sent| blk: {
                     break :blk try func.lowerConstant(sent, elem_ty);
                 } else null;
 
                 // When the element type is by reference, we must copy the entire
                 // value. It is therefore safer to move the offset pointer and store
                 // each value individually, instead of using store offsets.
-                if (isByRef(elem_ty, func.target)) {
+                if (isByRef(elem_ty, mod)) {
                     // copy stack pointer into a temporary local, which is
                     // moved for each element to store each value in the right position.
                     const offset = try func.buildPointerOffset(result, 0, .new);
@@ -4972,18 +5082,18 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 }
                 break :result_value result;
             },
-            .Struct => switch (result_ty.containerLayout()) {
+            .Struct => switch (result_ty.containerLayout(mod)) {
                 .Packed => {
-                    if (isByRef(result_ty, func.target)) {
+                    if (isByRef(result_ty, mod)) {
                         return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
                     }
-                    const struct_obj = result_ty.castTag(.@"struct").?.data;
+                    const struct_obj = mod.typeToStruct(result_ty).?;
                     const fields = struct_obj.fields.values();
                     const backing_type = struct_obj.backing_int_ty;
 
                     // ensure the result is zero'd
                     const result = try func.allocLocal(backing_type);
-                    if (struct_obj.backing_int_ty.bitSize(func.target) <= 32)
+                    if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
                         try func.addImm32(0)
                     else
                         try func.addImm64(0);
@@ -4992,20 +5102,16 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
                     var current_bit: u16 = 0;
                    for (elements, 0..) |elem, elem_index| {
                         const field = fields[elem_index];
-                        if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue;
+                        if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
 
-                        const shift_val = if (struct_obj.backing_int_ty.bitSize(func.target) <= 32)
+                        const shift_val = if (struct_obj.backing_int_ty.bitSize(mod) <= 32)
                             WValue{ .imm32 = current_bit }
                         else
                             WValue{ .imm64 = current_bit };
 
                         const value = try func.resolveInst(elem);
-                        const value_bit_size = @intCast(u16, field.ty.bitSize(func.target));
-                        var int_ty_payload: Type.Payload.Bits = .{
-                            .base = .{ .tag = .int_unsigned },
-                            .data = value_bit_size,
-                        };
-                        const int_ty = Type.initPayload(&int_ty_payload.base);
+                        const value_bit_size = @intCast(u16, field.ty.bitSize(mod));
+                        const int_ty = try mod.intType(.unsigned, value_bit_size);
 
                         // load our current result on stack so we can perform all transformations
                         // using only stack values. Saving the cost of loads and stores.
@@ -5027,10 +5133,10 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     const result = try func.allocStack(result_ty);
                     const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
                     for (elements, 0..) |elem, elem_index| {
-                        if (result_ty.structFieldValueComptime(elem_index) != null) continue;
+                        if ((try result_ty.structFieldValueComptime(mod, elem_index)) != null) continue;
 
-                        const elem_ty = result_ty.structFieldType(elem_index);
-                        const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
+                        const elem_ty = result_ty.structFieldType(elem_index, mod);
+                        const elem_size = @intCast(u32, elem_ty.abiSize(mod));
                         const value = try func.resolveInst(elem);
                         try func.store(offset, value, elem_ty, 0);
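The packed-struct branch above shifts each field's integer representation by its running bit offset and ORs it into the backing integer. A standalone sketch of the layout being produced, with a hypothetical struct `P` and the two-argument builtins of the Zig version this diff targets:

    const std = @import("std");

    test "packed struct fields are shifted into one backing integer" {
        const P = packed struct { a: u4, b: u12 };
        const p = P{ .a = 0xF, .b = 0x123 };
        // b occupies bits 4..15: backing = (0x123 << 4) | 0xF
        try std.testing.expectEqual(@as(u16, 0x123F), @bitCast(u16, p));
    }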
@@ -5058,39 +5164,36 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
 
     const result = result: {
-        const union_ty = func.air.typeOfIndex(inst);
-        const layout = union_ty.unionGetLayout(func.target);
-        const union_obj = union_ty.cast(Type.Payload.Union).?.data;
+        const union_ty = func.typeOfIndex(inst);
+        const layout = union_ty.unionGetLayout(mod);
+        const union_obj = mod.typeToUnion(union_ty).?;
         const field = union_obj.fields.values()[extra.field_index];
         const field_name = union_obj.fields.keys()[extra.field_index];
 
         const tag_int = blk: {
-            const tag_ty = union_ty.unionTagTypeHypothetical();
-            const enum_field_index = tag_ty.enumFieldIndex(field_name).?;
-            var tag_val_payload: Value.Payload.U32 = .{
-                .base = .{ .tag = .enum_field_index },
-                .data = @intCast(u32, enum_field_index),
-            };
-            const tag_val = Value.initPayload(&tag_val_payload.base);
+            const tag_ty = union_ty.unionTagTypeHypothetical(mod);
+            const enum_field_index = tag_ty.enumFieldIndex(field_name, mod).?;
+            const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
             break :blk try func.lowerConstant(tag_val, tag_ty);
         };
 
         if (layout.payload_size == 0) {
             if (layout.tag_size == 0) {
                 break :result WValue{ .none = {} };
             }
-            assert(!isByRef(union_ty, func.target));
+            assert(!isByRef(union_ty, mod));
             break :result tag_int;
         }
 
-        if (isByRef(union_ty, func.target)) {
+        if (isByRef(union_ty, mod)) {
             const result_ptr = try func.allocStack(union_ty);
             const payload = try func.resolveInst(extra.init);
             if (layout.tag_align >= layout.payload_align) {
-                if (isByRef(field.ty, func.target)) {
+                if (isByRef(field.ty, mod)) {
                     const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
                     try func.store(payload_ptr, payload, field.ty, 0);
                 } else {
@@ -5114,26 +5217,14 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             break :result result_ptr;
         } else {
             const operand = try func.resolveInst(extra.init);
-            var payload: Type.Payload.Bits = .{
-                .base = .{ .tag = .int_unsigned },
-                .data = @intCast(u16, union_ty.bitSize(func.target)),
-            };
-            const union_int_type = Type.initPayload(&payload.base);
-            if (field.ty.zigTypeTag() == .Float) {
-                var int_payload: Type.Payload.Bits = .{
-                    .base = .{ .tag = .int_unsigned },
-                    .data = @intCast(u16, field.ty.bitSize(func.target)),
-                };
-                const int_type = Type.initPayload(&int_payload.base);
+            const union_int_type = try mod.intType(.unsigned, @intCast(u16, union_ty.bitSize(mod)));
+            if (field.ty.zigTypeTag(mod) == .Float) {
+                const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
                 const bitcasted = try func.bitcast(field.ty, int_type, operand);
                 const casted = try func.trunc(bitcasted, int_type, union_int_type);
                 break :result try casted.toLocal(func, field.ty);
-            } else if (field.ty.isPtrAtRuntime()) {
-                var int_payload: Type.Payload.Bits = .{
-                    .base = .{ .tag = .int_unsigned },
-                    .data = @intCast(u16, field.ty.bitSize(func.target)),
-                };
-                const int_type = Type.initPayload(&int_payload.base);
+            } else if (field.ty.isPtrAtRuntime(mod)) {
+                const int_type = try mod.intType(.unsigned, @intCast(u16, field.ty.bitSize(mod)));
                 const casted = try func.intcast(operand, int_type, union_int_type);
                 break :result try casted.toLocal(func, field.ty);
             }
@@ -5153,7 +5244,7 @@ fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn airWasmMemorySize(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
 
-    const result = try func.allocLocal(func.air.typeOfIndex(inst));
+    const result = try func.allocLocal(func.typeOfIndex(inst));
     try func.addLabel(.memory_size, pl_op.payload);
     try func.addLabel(.local_set, result.local.value);
     func.finishAir(inst, result, &.{pl_op.operand});
@@ -5163,7 +5254,7 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
     const operand = try func.resolveInst(pl_op.operand);
 
-    const result = try func.allocLocal(func.air.typeOfIndex(inst));
+    const result = try func.allocLocal(func.typeOfIndex(inst));
     try func.emitWValue(operand);
     try func.addLabel(.memory_grow, pl_op.payload);
     try func.addLabel(.local_set, result.local.value);
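cmpOptionals, in the next hunk, compares the null tags first and only loads the payloads when both sides are non-null. The same short-circuit, written as a plain Zig sketch with a hypothetical helper `optEql`:

    const std = @import("std");

    fn optEql(a: ?u32, b: ?u32) bool {
        if ((a == null) != (b == null)) return false; // null tags differ
        if (a == null) return true; // both null, payloads irrelevant
        return a.? == b.?; // both set: compare payloads
    }

    test "optional equality checks the null tag before the payload" {
        try std.testing.expect(optEql(5, 5));
        try std.testing.expect(!optEql(5, null));
        try std.testing.expect(optEql(null, null));
    }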
@@ -5171,14 +5262,14 @@ fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
 }
 
 fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
-    assert(operand_ty.hasRuntimeBitsIgnoreComptime());
+    const mod = func.bin_file.base.options.module.?;
+    assert(operand_ty.hasRuntimeBitsIgnoreComptime(mod));
     assert(op == .eq or op == .neq);
 
-    var buf: Type.Payload.ElemType = undefined;
-    const payload_ty = operand_ty.optionalChild(&buf);
+    const payload_ty = operand_ty.optionalChild(mod);
     // We store the final result in here that will be validated
     // if the optional is truly equal.
-    var result = try func.ensureAllocLocal(Type.initTag(.i32));
+    var result = try func.ensureAllocLocal(Type.i32);
     defer result.free(func);
 
     try func.startBlock(.block, wasm.block_empty);
@@ -5189,7 +5280,7 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
     _ = try func.load(lhs, payload_ty, 0);
     _ = try func.load(rhs, payload_ty, 0);
-    const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, func.target) });
+    const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, mod) });
     try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
     try func.addLabel(.br_if, 0);
@@ -5207,10 +5298,11 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
 /// NOTE: Leaves the result of the comparison on top of the stack.
 /// TODO: Lower this to compiler_rt call when bitsize > 128
 fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
-    assert(operand_ty.abiSize(func.target) >= 16);
+    const mod = func.bin_file.base.options.module.?;
+    assert(operand_ty.abiSize(mod) >= 16);
     assert(!(lhs != .stack and rhs == .stack));
-    if (operand_ty.bitSize(func.target) > 128) {
-        return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(func.target)});
+    if (operand_ty.bitSize(mod) > 128) {
+        return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(mod)});
     }
 
     var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
@@ -5233,7 +5325,7 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
             }
         },
         else => {
-            const ty = if (operand_ty.isSignedInt()) Type.i64 else Type.u64;
+            const ty = if (operand_ty.isSignedInt(mod)) Type.i64 else Type.u64;
             // leave those value on top of the stack for '.select'
             const lhs_low_bit = try func.load(lhs, Type.u64, 8);
             const rhs_low_bit = try func.load(rhs, Type.u64, 8);
@@ -5248,10 +5340,11 @@ fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std
 }
 
 fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
-    const un_ty = func.air.typeOf(bin_op.lhs).childType();
-    const tag_ty = func.air.typeOf(bin_op.rhs);
-    const layout = un_ty.unionGetLayout(func.target);
+    const un_ty = func.typeOf(bin_op.lhs).childType(mod);
+    const tag_ty = func.typeOf(bin_op.rhs);
+    const layout = un_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
     const union_ptr = try func.resolveInst(bin_op.lhs);
@@ -5271,11 +5364,12 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const un_ty = func.air.typeOf(ty_op.operand);
-    const tag_ty = func.air.typeOfIndex(inst);
-    const layout = un_ty.unionGetLayout(func.target);
+    const un_ty = func.typeOf(ty_op.operand);
+    const tag_ty = func.typeOfIndex(inst);
+    const layout = un_ty.unionGetLayout(mod);
     if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});
 
     const operand = try func.resolveInst(ty_op.operand);
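cmpBigInt above compares a 128-bit integer as two 64-bit limbs: one limb pair decides the ordering unless it is equal, in which case the other pair does. A sketch of that logic with a hypothetical helper `cmp128` (limb naming follows the hunk):

    const std = @import("std");

    fn cmp128(lhs_hi: u64, lhs_lo: u64, rhs_hi: u64, rhs_lo: u64) std.math.Order {
        if (lhs_hi != rhs_hi) return std.math.order(lhs_hi, rhs_hi);
        return std.math.order(lhs_lo, rhs_lo); // high limbs equal: low limbs decide
    }

    test "128-bit compare via two 64-bit limbs" {
        try std.testing.expectEqual(std.math.Order.lt, cmp128(0, 99, 1, 0));
        try std.testing.expectEqual(std.math.Order.gt, cmp128(7, 1, 7, 0));
    }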
@@ -5292,9 +5386,9 @@ fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 fn airFpext(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const dest_ty = func.air.typeOfIndex(inst);
+    const dest_ty = func.typeOfIndex(inst);
     const operand = try func.resolveInst(ty_op.operand);
-    const extended = try func.fpext(operand, func.air.typeOf(ty_op.operand), dest_ty);
+    const extended = try func.fpext(operand, func.typeOf(ty_op.operand), dest_ty);
     const result = try extended.toLocal(func, dest_ty);
     func.finishAir(inst, result, &.{ty_op.operand});
 }
@@ -5313,7 +5407,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
         // call __extendhfsf2(f16) f32
         const f32_result = try func.callIntrinsic(
             "__extendhfsf2",
-            &.{Type.f16},
+            &.{.f16_type},
             Type.f32,
             &.{operand},
         );
@@ -5331,15 +5425,15 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
         target_util.compilerRtFloatAbbrev(wanted_bits),
     }) catch unreachable;
 
-    return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand});
+    return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
 }
 
 fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const dest_ty = func.air.typeOfIndex(inst);
+    const dest_ty = func.typeOfIndex(inst);
     const operand = try func.resolveInst(ty_op.operand);
-    const truncated = try func.fptrunc(operand, func.air.typeOf(ty_op.operand), dest_ty);
+    const truncated = try func.fptrunc(operand, func.typeOf(ty_op.operand), dest_ty);
     const result = try truncated.toLocal(func, dest_ty);
     func.finishAir(inst, result, &.{ty_op.operand});
 }
@@ -5362,7 +5456,7 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
     } else operand;
 
     // call __truncsfhf2(f32) f16
-    return func.callIntrinsic("__truncsfhf2", &.{Type.f32}, Type.f16, &.{op});
+    return func.callIntrinsic("__truncsfhf2", &.{.f32_type}, Type.f16, &.{op});
     }
 
     var fn_name_buf: [12]u8 = undefined;
@@ -5371,14 +5465,15 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
         target_util.compilerRtFloatAbbrev(wanted_bits),
     }) catch unreachable;
 
-    return func.callIntrinsic(fn_name, &.{given}, wanted, &.{operand});
+    return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
 }
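Wasm has no 16-bit float type, so f16 casts round-trip through f32 via the compiler-rt routines named above (__extendhfsf2/__truncsfhf2). At the source level this is just a float cast (two-argument builtin form, as in the Zig version this diff targets):

    const std = @import("std");

    test "widening an f16 is exact" {
        const h: f16 = 1.5;
        // lowered on wasm as a call to __extendhfsf2(f16) f32
        try std.testing.expectEqual(@as(f32, 1.5), @floatCast(f32, h));
    }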
 
 fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const err_set_ty = func.air.typeOf(ty_op.operand).childType();
-    const payload_ty = err_set_ty.errorUnionPayload();
+    const err_set_ty = func.typeOf(ty_op.operand).childType(mod);
+    const payload_ty = err_set_ty.errorUnionPayload(mod);
     const operand = try func.resolveInst(ty_op.operand);
 
     // set error-tag to '0' to annotate error union is non-error
@@ -5386,26 +5481,27 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
         operand,
         .{ .imm32 = 0 },
         Type.anyerror,
-        @intCast(u32, errUnionErrorOffset(payload_ty, func.target)),
+        @intCast(u32, errUnionErrorOffset(payload_ty, mod)),
     );
 
     const result = result: {
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }
 
-        break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)), .new);
+        break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, mod)), .new);
     };
     func.finishAir(inst, result, &.{ty_op.operand});
 }
 
 fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
     const field_ptr = try func.resolveInst(extra.field_ptr);
-    const parent_ty = func.air.getRefType(ty_pl.ty).childType();
-    const field_offset = parent_ty.structFieldOffset(extra.field_index, func.target);
+    const parent_ty = func.air.getRefType(ty_pl.ty).childType(mod);
+    const field_offset = parent_ty.structFieldOffset(extra.field_index, mod);
 
     const result = if (field_offset != 0) result: {
         const base = try func.buildPointerOffset(field_ptr, 0, .new);
@@ -5420,7 +5516,8 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue {
-    if (ptr_ty.isSlice()) {
+    const mod = func.bin_file.base.options.module.?;
+    if (ptr_ty.isSlice(mod)) {
         return func.slicePtr(ptr);
     } else {
         return ptr;
@@ -5428,25 +5525,26 @@ fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue
 }
 
 fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
     const dst = try func.resolveInst(bin_op.lhs);
-    const dst_ty = func.air.typeOf(bin_op.lhs);
-    const ptr_elem_ty = dst_ty.childType();
+    const dst_ty = func.typeOf(bin_op.lhs);
+    const ptr_elem_ty = dst_ty.childType(mod);
     const src = try func.resolveInst(bin_op.rhs);
-    const src_ty = func.air.typeOf(bin_op.rhs);
-    const len = switch (dst_ty.ptrSize()) {
+    const src_ty = func.typeOf(bin_op.rhs);
+    const len = switch (dst_ty.ptrSize(mod)) {
         .Slice => blk: {
             const slice_len = try func.sliceLen(dst);
-            if (ptr_elem_ty.abiSize(func.target) != 1) {
+            if (ptr_elem_ty.abiSize(mod) != 1) {
                 try func.emitWValue(slice_len);
-                try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(func.target)) });
+                try func.emitWValue(.{ .imm32 = @intCast(u32, ptr_elem_ty.abiSize(mod)) });
                 try func.addTag(.i32_mul);
                 try func.addLabel(.local_set, slice_len.local.value);
             }
             break :blk slice_len;
         },
         .One => @as(WValue, .{
-            .imm32 = @intCast(u32, ptr_elem_ty.arrayLen() * ptr_elem_ty.childType().abiSize(func.target)),
+            .imm32 = @intCast(u32, ptr_elem_ty.arrayLen(mod) * ptr_elem_ty.childType(mod).abiSize(mod)),
         }),
         .C, .Many => unreachable,
     };
@@ -5467,17 +5565,18 @@ fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
     const operand = try func.resolveInst(ty_op.operand);
-    const op_ty = func.air.typeOf(ty_op.operand);
-    const result_ty = func.air.typeOfIndex(inst);
+    const op_ty = func.typeOf(ty_op.operand);
+    const result_ty = func.typeOfIndex(inst);
 
-    if (op_ty.zigTypeTag() == .Vector) {
+    if (op_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement @popCount for vectors", .{});
     }
 
-    const int_info = op_ty.intInfo(func.target);
+    const int_info = op_ty.intInfo(mod);
     const bits = int_info.bits;
     const wasm_bits = toWasmBits(bits) orelse {
         return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
@@ -5526,8 +5625,9 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // As the names are global and the slice elements are constant, we do not have
     // to make a copy of the ptr+value but can point towards them directly.
     const error_table_symbol = try func.bin_file.getErrorTableSymbol();
-    const name_ty = Type.initTag(.const_slice_u8_sentinel_0);
-    const abi_size = name_ty.abiSize(func.target);
+    const name_ty = Type.slice_const_u8_sentinel_0;
+    const mod = func.bin_file.base.options.module.?;
+    const abi_size = name_ty.abiSize(mod);
 
     const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
     try func.emitWValue(error_name_value);
@@ -5565,20 +5665,21 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
     const lhs_op = try func.resolveInst(extra.lhs);
     const rhs_op = try func.resolveInst(extra.rhs);
-    const lhs_ty = func.air.typeOf(extra.lhs);
+    const lhs_ty = func.typeOf(extra.lhs);
+    const mod = func.bin_file.base.options.module.?;
 
-    if (lhs_ty.zigTypeTag() == .Vector) {
+    if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
-    const int_info = lhs_ty.intInfo(func.target);
+    const int_info = lhs_ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
     };
 
     if (wasm_bits == 128) {
-        const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.air.typeOfIndex(inst), op);
+        const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.typeOfIndex(inst), op);
         return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
     }
@@ -5628,17 +5729,18 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
     var overflow_local = try overflow_bit.toLocal(func, Type.u32);
     defer overflow_local.free(func);
 
-    const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+    const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.store(result_ptr, result, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(func.target));
-    try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
+    const offset = @intCast(u32, lhs_ty.abiSize(mod));
+    try func.store(result_ptr, overflow_local, Type.u1, offset);
 
     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
 }
 
 fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, result_ty: Type, op: Op) InnerError!WValue {
+    const mod = func.bin_file.base.options.module.?;
     assert(op == .add or op == .sub);
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
     if (int_info.bits != 128) {
         return func.fail("TODO: Implement @{{add/sub}}WithOverflow for integer bitsize '{d}'", .{int_info.bits});
@@ -5689,31 +5791,32 @@ fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type,
         break :blk WValue{ .stack = {} };
     };
 
-    var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1));
+    var overflow_local = try overflow_bit.toLocal(func, Type.u1);
     defer overflow_local.free(func);
 
     const result_ptr = try func.allocStack(result_ty);
     try func.store(result_ptr, high_op_res, Type.u64, 0);
     try func.store(result_ptr, tmp_op, Type.u64, 8);
-    try func.store(result_ptr, overflow_local, Type.initTag(.u1), 16);
+    try func.store(result_ptr, overflow_local, Type.u1, 16);
 
     return result_ptr;
 }
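addSubWithOverflowBigInt above chains two 64-bit limb operations, feeding the low limb's carry into the high limb. The carry itself is what @addWithOverflow reports (tuple-returning form, as in the Zig version this diff targets):

    const std = @import("std");

    test "the low limb's carry feeds the high limb" {
        const lo = @addWithOverflow(@as(u64, std.math.maxInt(u64)), @as(u64, 1));
        try std.testing.expectEqual(@as(u64, 0), lo[0]); // wrapped low limb
        try std.testing.expectEqual(@as(u1, 1), lo[1]); // carry into the high limb
    }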
 
 fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
 
     const lhs = try func.resolveInst(extra.lhs);
     const rhs = try func.resolveInst(extra.rhs);
-    const lhs_ty = func.air.typeOf(extra.lhs);
-    const rhs_ty = func.air.typeOf(extra.rhs);
+    const lhs_ty = func.typeOf(extra.lhs);
+    const rhs_ty = func.typeOf(extra.rhs);
 
-    if (lhs_ty.zigTypeTag() == .Vector) {
+    if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
-    const int_info = lhs_ty.intInfo(func.target);
+    const int_info = lhs_ty.intInfo(mod);
     const is_signed = int_info.signedness == .signed;
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
@@ -5721,7 +5824,7 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     // Ensure rhs is coerced to lhs as they must have the same WebAssembly types
     // before we can perform any binary operation.
-    const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(func.target).bits).?;
+    const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(mod).bits).?;
     const rhs_final = if (wasm_bits != rhs_wasm_bits) blk: {
         const rhs_casted = try func.intcast(rhs, rhs_ty, lhs_ty);
         break :blk try rhs_casted.toLocal(func, lhs_ty);
@@ -5745,13 +5848,13 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         const shr = try func.binOp(result, rhs_final, lhs_ty, .shr);
         break :blk try func.cmp(.{ .stack = {} }, shr, lhs_ty, .neq);
     };
-    var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1));
+    var overflow_local = try overflow_bit.toLocal(func, Type.u1);
     defer overflow_local.free(func);
 
-    const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+    const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.store(result_ptr, result, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(func.target));
-    try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
+    const offset = @intCast(u32, lhs_ty.abiSize(mod));
+    try func.store(result_ptr, overflow_local, Type.u1, offset);
 
     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
 }
@@ -5762,18 +5865,19 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     const lhs = try func.resolveInst(extra.lhs);
     const rhs = try func.resolveInst(extra.rhs);
-    const lhs_ty = func.air.typeOf(extra.lhs);
+    const lhs_ty = func.typeOf(extra.lhs);
+    const mod = func.bin_file.base.options.module.?;
 
-    if (lhs_ty.zigTypeTag() == .Vector) {
+    if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
     // We store the bit if it's overflowed or not in this. As it's zero-initialized
     // we only need to update it if an overflow (or underflow) occurred.
-    var overflow_bit = try func.ensureAllocLocal(Type.initTag(.u1));
+    var overflow_bit = try func.ensureAllocLocal(Type.u1);
     defer overflow_bit.free(func);
 
-    const int_info = lhs_ty.intInfo(func.target);
+    const int_info = lhs_ty.intInfo(mod);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
     };
@@ -5827,7 +5931,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.addLabel(.local_set, overflow_bit.local.value);
         break :blk try func.wrapOperand(bin_op, lhs_ty);
     } else if (int_info.bits == 64 and int_info.signedness == .unsigned) blk: {
-        const new_ty = Type.initTag(.u128);
+        const new_ty = Type.u128;
         var lhs_upcast = try (try func.intcast(lhs, lhs_ty, new_ty)).toLocal(func, lhs_ty);
         defer lhs_upcast.free(func);
         var rhs_upcast = try (try func.intcast(rhs, lhs_ty, new_ty)).toLocal(func, lhs_ty);
@@ -5847,8 +5951,8 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
         const bin_op = try func.callIntrinsic(
             "__multi3",
-            &[_]Type{Type.i64} ** 4,
-            Type.initTag(.i128),
+            &[_]InternPool.Index{.i64_type} ** 4,
+            Type.i128,
             &.{ lhs, lhs_shifted, rhs, rhs_shifted },
         );
         const res = try func.allocLocal(lhs_ty);
@@ -5871,20 +5975,20 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
         const mul1 = try func.callIntrinsic(
             "__multi3",
-            &[_]Type{Type.i64} ** 4,
-            Type.initTag(.i128),
+            &[_]InternPool.Index{.i64_type} ** 4,
+            Type.i128,
             &.{ lhs_lsb, zero, rhs_msb, zero },
         );
         const mul2 = try func.callIntrinsic(
             "__multi3",
-            &[_]Type{Type.i64} ** 4,
-            Type.initTag(.i128),
+            &[_]InternPool.Index{.i64_type} ** 4,
+            Type.i128,
            &.{ rhs_lsb, zero, lhs_msb, zero },
        );
        const mul3 = try func.callIntrinsic(
            "__multi3",
-            &[_]Type{Type.i64} ** 4,
-            Type.initTag(.i128),
+            &[_]InternPool.Index{.i64_type} ** 4,
+            Type.i128,
            &.{ lhs_msb, zero, rhs_msb, zero },
        );
@@ -5912,7 +6016,7 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         _ = try func.binOp(lsb_or, mul_add_lt, Type.bool, .@"or");
         try func.addLabel(.local_set, overflow_bit.local.value);
 
-        const tmp_result = try func.allocStack(Type.initTag(.u128));
+        const tmp_result = try func.allocStack(Type.u128);
         try func.emitWValue(tmp_result);
         const mul3_msb = try func.load(mul3, Type.u64, 0);
         try func.store(.stack, mul3_msb, Type.u64, tmp_result.offset());
@@ -5922,23 +6026,24 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     var bin_op_local = try bin_op.toLocal(func, lhs_ty);
     defer bin_op_local.free(func);
 
-    const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+    const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.store(result_ptr, bin_op_local, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(func.target));
-    try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
+    const offset = @intCast(u32, lhs_ty.abiSize(mod));
+    try func.store(result_ptr, overflow_bit, Type.u1, offset);
 
     func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
 }
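The 64-bit unsigned case above detects overflow by widening to 128 bits (via __multi3 on wasm) and testing the high half. An equivalent source-level check (sketch, not part of the patch):

    const std = @import("std");

    test "64-bit multiply overflow shows up in the upper 128-bit half" {
        const a: u64 = std.math.maxInt(u64);
        const wide = @as(u128, a) * 2; // what the __multi3 call computes
        try std.testing.expect(wide >> 64 != 0); // nonzero high half == overflow
    }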
 
 fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
-    const ty = func.air.typeOfIndex(inst);
-    if (ty.zigTypeTag() == .Vector) {
+    const ty = func.typeOfIndex(inst);
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
     }
 
-    if (ty.abiSize(func.target) > 16) {
+    if (ty.abiSize(mod) > 16) {
         return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
     }
@@ -5954,18 +6059,19 @@ fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerE
     try func.addTag(.select);
 
     // store result in local
-    const result_ty = if (isByRef(ty, func.target)) Type.u32 else ty;
+    const result_ty = if (isByRef(ty, mod)) Type.u32 else ty;
     const result = try func.allocLocal(result_ty);
     try func.addLabel(.local_set, result.local.value);
     func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
 fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
     const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
 
-    const ty = func.air.typeOfIndex(inst);
-    if (ty.zigTypeTag() == .Vector) {
+    const ty = func.typeOfIndex(inst);
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@mulAdd` for vectors", .{});
     }
@@ -5980,7 +6086,7 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         // call to compiler-rt `fn fmaf(f32, f32, f32) f32`
         var result = try func.callIntrinsic(
             "fmaf",
-            &.{ Type.f32, Type.f32, Type.f32 },
+            &.{ .f32_type, .f32_type, .f32_type },
             Type.f32,
             &.{ rhs_ext, lhs_ext, addend_ext },
         );
@@ -5994,16 +6100,17 @@ fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const ty = func.air.typeOf(ty_op.operand);
-    const result_ty = func.air.typeOfIndex(inst);
-    if (ty.zigTypeTag() == .Vector) {
+    const ty = func.typeOf(ty_op.operand);
+    const result_ty = func.typeOfIndex(inst);
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@clz` for vectors", .{});
     }
 
     const operand = try func.resolveInst(ty_op.operand);
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
     };
@@ -6046,17 +6153,18 @@ fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
-    const ty = func.air.typeOf(ty_op.operand);
-    const result_ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOf(ty_op.operand);
+    const result_ty = func.typeOfIndex(inst);
 
-    if (ty.zigTypeTag() == .Vector) {
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: `@ctz` for vectors", .{});
     }
 
     const operand = try func.resolveInst(ty_op.operand);
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
         return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
     };
@@ -6113,7 +6221,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
     if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
 
     const pl_op = func.air.instructions.items(.data)[inst].pl_op;
-    const ty = func.air.typeOf(pl_op.operand);
+    const ty = func.typeOf(pl_op.operand);
     const operand = try func.resolveInst(pl_op.operand);
 
     log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), operand });
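For integer widths below the wasm word size, `@clz` runs the hardware `i32.clz`/`i64.clz` and then corrects for the unused high bits; from the language's point of view only the declared width counts. Sketch:

    const std = @import("std");

    test "@clz counts from the declared bit width, not the word width" {
        // a u24 lives in a 32-bit local with 8 unused high bits,
        // which the lowering subtracts from the hardware clz result
        try std.testing.expectEqual(@as(u5, 23), @clz(@as(u24, 1)));
    }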
@@ -6151,17 +6259,18 @@ fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const err_union = try func.resolveInst(pl_op.operand);
     const extra = func.air.extraData(Air.Try, pl_op.payload);
     const body = func.air.extra[extra.end..][0..extra.data.body_len];
-    const err_union_ty = func.air.typeOf(pl_op.operand);
+    const err_union_ty = func.typeOf(pl_op.operand);
     const result = try lowerTry(func, inst, err_union, body, err_union_ty, false);
     func.finishAir(inst, result, &.{pl_op.operand});
 }
 
 fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
     const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
     const err_union_ptr = try func.resolveInst(extra.data.ptr);
     const body = func.air.extra[extra.end..][0..extra.data.body_len];
-    const err_union_ty = func.air.typeOf(extra.data.ptr).childType();
+    const err_union_ty = func.typeOf(extra.data.ptr).childType(mod);
     const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true);
     func.finishAir(inst, result, &.{extra.data.ptr});
 }
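lowerTry, in the next hunk, branches on the error union's error tag and jumps out of a block when no error is set; `try` in user code is exactly that branch. A sketch with a hypothetical `parse`:

    const std = @import("std");

    fn parse(ok: bool) !u32 {
        if (!ok) return error.Invalid;
        return 7;
    }

    test "`try` is a branch on the error union's error tag" {
        // sugar for: parse(true) catch |err| return err;
        const v = try parse(true);
        try std.testing.expectEqual(@as(u32, 7), v);
    }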
@@ -6174,24 +6283,25 @@ fn lowerTry(
     err_union_ty: Type,
     operand_is_ptr: bool,
 ) InnerError!WValue {
+    const mod = func.bin_file.base.options.module.?;
     if (operand_is_ptr) {
         return func.fail("TODO: lowerTry for pointers", .{});
     }
 
-    const pl_ty = err_union_ty.errorUnionPayload();
-    const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime();
+    const pl_ty = err_union_ty.errorUnionPayload(mod);
+    const pl_has_bits = pl_ty.hasRuntimeBitsIgnoreComptime(mod);
 
-    if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
+    if (!err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
         // Block we can jump out of when error is not set
         try func.startBlock(.block, wasm.block_empty);
 
         // check if the error tag is set for the error union.
         try func.emitWValue(err_union);
         if (pl_has_bits) {
-            const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target));
+            const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, mod));
             try func.addMemArg(.i32_load16_u, .{
                 .offset = err_union.offset() + err_offset,
-                .alignment = Type.anyerror.abiAlignment(func.target),
+                .alignment = Type.anyerror.abiAlignment(mod),
             });
         }
         try func.addTag(.i32_eqz);
@@ -6213,8 +6323,8 @@ fn lowerTry(
         return WValue{ .none = {} };
     }
 
-    const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, func.target));
-    if (isByRef(pl_ty, func.target)) {
+    const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, mod));
+    if (isByRef(pl_ty, mod)) {
         return buildPointerOffset(func, err_union, pl_offset, .new);
     }
     const payload = try func.load(err_union, pl_ty, pl_offset);
@@ -6222,15 +6332,16 @@ fn lowerTry(
 }
 
 fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const ty_op = func.air.instructions.items(.data)[inst].ty_op;
 
-    const ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOfIndex(inst);
     const operand = try func.resolveInst(ty_op.operand);
 
-    if (ty.zigTypeTag() == .Vector) {
+    if (ty.zigTypeTag(mod) == .Vector) {
         return func.fail("TODO: @byteSwap for vectors", .{});
     }
-    const int_info = ty.intInfo(func.target);
+    const int_info = ty.intInfo(mod);
 
     // bytes are no-op
     if (int_info.bits == 8) {
@@ -6292,13 +6403,14 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
-    const ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
 
-    const result = if (ty.isSignedInt())
+    const result = if (ty.isSignedInt(mod))
         try func.divSigned(lhs, rhs, ty)
     else
         try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
@@ -6306,13 +6418,14 @@ fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 }
 
 fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const mod = func.bin_file.base.options.module.?;
     const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
-    const ty = func.air.typeOfIndex(inst);
+    const ty = func.typeOfIndex(inst);
     const lhs = try func.resolveInst(bin_op.lhs);
     const rhs = try func.resolveInst(bin_op.rhs);
 
-    const div_result = if (ty.isSignedInt())
+    const div_result = if (ty.isSignedInt(mod))
         try func.divSigned(lhs, rhs, ty)
     else
         try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
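airByteSwap above special-cases 8-bit integers (a no-op) and otherwise assembles the swap from shifts; the end result matches the builtin:

    const std = @import("std");

    test "@byteSwap reverses byte order" {
        try std.testing.expectEqual(@as(u32, 0x44332211), @byteSwap(@as(u32, 0x11223344)));
    }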
func.fail("TODO: `@divFloor` for signed integers larger than '{d}' bits", .{int_bits}); }; @@ -6414,7 +6528,8 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue { - const int_bits = ty.intInfo(func.target).bits; + const mod = func.bin_file.base.options.module.?; + const int_bits = ty.intInfo(mod).bits; const wasm_bits = toWasmBits(int_bits) orelse { return func.fail("TODO: Implement signed division for integers with bitsize '{d}'", .{int_bits}); }; @@ -6441,7 +6556,8 @@ fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WVal /// Retrieves the absolute value of a signed integer /// NOTE: Leaves the result value on the stack. fn signAbsValue(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue { - const int_bits = ty.intInfo(func.target).bits; + const mod = func.bin_file.base.options.module.?; + const int_bits = ty.intInfo(mod).bits; const wasm_bits = toWasmBits(int_bits) orelse { return func.fail("TODO: signAbsValue for signed integers larger than '{d}' bits", .{int_bits}); }; @@ -6476,11 +6592,12 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { assert(op == .add or op == .sub); const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); + const mod = func.bin_file.base.options.module.?; + const ty = func.typeOfIndex(inst); const lhs = try func.resolveInst(bin_op.lhs); const rhs = try func.resolveInst(bin_op.rhs); - const int_info = ty.intInfo(func.target); + const int_info = ty.intInfo(mod); const is_signed = int_info.signedness == .signed; if (int_info.bits > 64) { @@ -6523,7 +6640,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void { } fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op: Op) InnerError!WValue { - const int_info = ty.intInfo(func.target); + const mod = func.bin_file.base.options.module.?; + const int_info = ty.intInfo(mod); const wasm_bits = toWasmBits(int_info.bits).?; const is_wasm_bits = wasm_bits == int_info.bits; @@ -6588,8 +6706,9 @@ fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const bin_op = func.air.instructions.items(.data)[inst].bin_op; - const ty = func.air.typeOfIndex(inst); - const int_info = ty.intInfo(func.target); + const mod = func.bin_file.base.options.module.?; + const ty = func.typeOfIndex(inst); + const int_info = ty.intInfo(mod); const is_signed = int_info.signedness == .signed; if (int_info.bits > 64) { return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits}); @@ -6697,7 +6816,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { fn callIntrinsic( func: *CodeGen, name: []const u8, - param_types: []const Type, + param_types: []const InternPool.Index, return_type: Type, args: []const WValue, ) InnerError!WValue { @@ -6707,12 +6826,13 @@ fn callIntrinsic( }; // Always pass over C-ABI - var func_type = try genFunctype(func.gpa, .C, param_types, return_type, func.target); + const mod = func.bin_file.base.options.module.?; + var func_type = try genFunctype(func.gpa, .C, param_types, return_type, mod); defer func_type.deinit(func.gpa); const func_type_index = try func.bin_file.putOrGetFuncType(func_type); try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index); - const want_sret_param = 
@@ -6697,7 +6816,7 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 fn callIntrinsic(
     func: *CodeGen,
     name: []const u8,
-    param_types: []const Type,
+    param_types: []const InternPool.Index,
     return_type: Type,
     args: []const WValue,
 ) InnerError!WValue {
@@ -6707,12 +6826,13 @@ fn callIntrinsic(
     };
 
     // Always pass over C-ABI
-    var func_type = try genFunctype(func.gpa, .C, param_types, return_type, func.target);
+    const mod = func.bin_file.base.options.module.?;
+    var func_type = try genFunctype(func.gpa, .C, param_types, return_type, mod);
     defer func_type.deinit(func.gpa);
     const func_type_index = try func.bin_file.putOrGetFuncType(func_type);
     try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
 
-    const want_sret_param = firstParamSRet(.C, return_type, func.target);
+    const want_sret_param = firstParamSRet(.C, return_type, mod);
     // if we want return as first param, we allocate a pointer to stack,
     // and emit it as our first argument
     const sret = if (want_sret_param) blk: {
@@ -6724,16 +6844,16 @@ fn callIntrinsic(
     // Lower all arguments to the stack before we call our function
     for (args, 0..) |arg, arg_i| {
         assert(!(want_sret_param and arg == .stack));
-        assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime());
-        try func.lowerArg(.C, param_types[arg_i], arg);
+        assert(param_types[arg_i].toType().hasRuntimeBitsIgnoreComptime(mod));
+        try func.lowerArg(.C, param_types[arg_i].toType(), arg);
     }
 
     // Actually call our intrinsic
     try func.addLabel(.call, symbol_index);
 
-    if (!return_type.hasRuntimeBitsIgnoreComptime()) {
+    if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) {
         return WValue.none;
-    } else if (return_type.isNoReturn()) {
+    } else if (return_type.isNoReturn(mod)) {
         try func.addTag(.@"unreachable");
         return WValue.none;
     } else if (want_sret_param) {
@@ -6746,11 +6866,11 @@ fn callIntrinsic(
 fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const un_op = func.air.instructions.items(.data)[inst].un_op;
     const operand = try func.resolveInst(un_op);
-    const enum_ty = func.air.typeOf(un_op);
+    const enum_ty = func.typeOf(un_op);
 
     const func_sym_index = try func.getTagNameFunction(enum_ty);
 
-    const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+    const result_ptr = try func.allocStack(func.typeOfIndex(inst));
     try func.lowerToStack(result_ptr);
     try func.emitWValue(operand);
     try func.addLabel(.call, func_sym_index);
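getTagNameFunction, in the next hunk, materializes one zero-terminated string constant per enum field plus an if-else chain mapping tag values to them; this is what backs the builtin:

    const std = @import("std");

    test "@tagName maps a tag value to its name" {
        const Dir = enum { north, south }; // hypothetical enum for illustration
        try std.testing.expectEqualStrings("south", @tagName(Dir.south));
    }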
|tag_name_ip, field_index_usize| { + const field_index = @intCast(u32, field_index_usize); + const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); // for each tag name, create an unnamed const, // and then get a pointer to its value. - var name_ty_payload: Type.Payload.Len = .{ - .base = .{ .tag = .array_u8_sentinel_0 }, - .data = @intCast(u64, tag_name.len), - }; - const name_ty = Type.initPayload(&name_ty_payload.base); - const string_bytes = &module.string_literal_bytes; - try string_bytes.ensureUnusedCapacity(module.gpa, tag_name.len); - const gop = try module.string_literal_table.getOrPutContextAdapted(module.gpa, tag_name, Module.StringLiteralAdapter{ - .bytes = string_bytes, - }, Module.StringLiteralContext{ - .bytes = string_bytes, + const name_ty = try mod.arrayType(.{ + .len = tag_name.len, + .child = .u8_type, + .sentinel = .zero_u8, }); - if (!gop.found_existing) { - gop.key_ptr.* = .{ - .index = @intCast(u32, string_bytes.items.len), - .len = @intCast(u32, tag_name.len), - }; - string_bytes.appendSliceAssumeCapacity(tag_name); - gop.value_ptr.* = .none; - } - var name_val_payload: Value.Payload.StrLit = .{ - .base = .{ .tag = .str_lit }, - .data = gop.key_ptr.*, - }; - const name_val = Value.initPayload(&name_val_payload.base); + const name_val = try mod.intern(.{ .aggregate = .{ + .ty = name_ty.toIntern(), + .storage = .{ .bytes = tag_name }, + } }); const tag_sym_index = try func.bin_file.lowerUnnamedConst( - .{ .ty = name_ty, .val = name_val }, + .{ .ty = name_ty, .val = name_val.toValue() }, enum_decl_index, ); @@ -6839,11 +6943,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { try writer.writeByte(std.wasm.opcode(.local_get)); try leb.writeULEB128(writer, @as(u32, 1)); - var tag_val_payload: Value.Payload.U32 = .{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, field_index), - }; - const tag_value = try func.lowerConstant(Value.initPayload(&tag_val_payload.base), enum_ty); + const tag_val = try mod.enumValueFieldIndex(enum_ty, field_index); + const tag_value = try func.lowerConstant(tag_val, enum_ty); switch (tag_value) { .imm32 => |value| { @@ -6928,27 +7029,27 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 { // finish function body try writer.writeByte(std.wasm.opcode(.end)); - const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty}, slice_ty, func.target); + const slice_ty = Type.slice_const_u8_sentinel_0; + const func_type = try genFunctype(arena, .Unspecified, &.{int_tag_ty.ip_index}, slice_ty, mod); return func.bin_file.createFunction(func_name, func_type, &body_list, &relocs); } fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_op = func.air.instructions.items(.data)[inst].ty_op; const operand = try func.resolveInst(ty_op.operand); const error_set_ty = func.air.getRefType(ty_op.ty); const result = try func.allocLocal(Type.bool); - const names = error_set_ty.errorSetNames(); + const names = error_set_ty.errorSetNames(mod); var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len); defer values.deinit(); - const module = func.bin_file.base.options.module.?; var lowest: ?u32 = null; var highest: ?u32 = null; for (names) |name| { - const err_int = module.global_error_set.get(name).?; + const err_int = @intCast(Module.ErrorInt, mod.global_error_set.getIndex(name).?); if (lowest) |*l| { if (err_int < l.*) { l.* = err_int; @@ 
-7019,12 +7120,13 @@ inline fn useAtomicFeature(func: *const CodeGen) bool { } fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const ty_pl = func.air.instructions.items(.data)[inst].ty_pl; const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - const ptr_ty = func.air.typeOf(extra.ptr); - const ty = ptr_ty.childType(); - const result_ty = func.air.typeOfIndex(inst); + const ptr_ty = func.typeOf(extra.ptr); + const ty = ptr_ty.childType(mod); + const result_ty = func.typeOfIndex(inst); const ptr_operand = try func.resolveInst(extra.ptr); const expected_val = try func.resolveInst(extra.expected_value); @@ -7037,7 +7139,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(ptr_operand); try func.lowerToStack(expected_val); try func.lowerToStack(new_val); - try func.addAtomicMemArg(switch (ty.abiSize(func.target)) { + try func.addAtomicMemArg(switch (ty.abiSize(mod)) { 1 => .i32_atomic_rmw8_cmpxchg_u, 2 => .i32_atomic_rmw16_cmpxchg_u, 4 => .i32_atomic_rmw_cmpxchg, @@ -7045,14 +7147,14 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}), }, .{ .offset = ptr_operand.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }); try func.addLabel(.local_tee, val_local.local.value); _ = try func.cmp(.stack, expected_val, ty, .eq); try func.addLabel(.local_set, cmp_result.local.value); break :val val_local; } else val: { - if (ty.abiSize(func.target) > 8) { + if (ty.abiSize(mod) > 8) { return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{}); } const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty); @@ -7068,7 +7170,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :val ptr_val; }; - const result_ptr = if (isByRef(result_ty, func.target)) val: { + const result_ptr = if (isByRef(result_ty, mod)) val: { try func.emitWValue(cmp_result); try func.addImm32(-1); try func.addTag(.i32_xor); @@ -7076,7 +7178,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.i32_and); const and_result = try WValue.toLocal(.stack, func, Type.bool); const result_ptr = try func.allocStack(result_ty); - try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(func.target))); + try func.store(result_ptr, and_result, Type.bool, @intCast(u32, ty.abiSize(mod))); try func.store(result_ptr, ptr_val, ty, 0); break :val result_ptr; } else val: { @@ -7087,16 +7189,17 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { break :val try WValue.toLocal(.stack, func, result_ty); }; - return func.finishAir(inst, result_ptr, &.{ extra.ptr, extra.new_value, extra.expected_value }); + return func.finishAir(inst, result_ptr, &.{ extra.ptr, extra.expected_value, extra.new_value }); } fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const atomic_load = func.air.instructions.items(.data)[inst].atomic_load; const ptr = try func.resolveInst(atomic_load.ptr); - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); if (func.useAtomicFeature()) { - const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) { + const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { 1 => .i32_atomic_load8_u, 2 => 
.i32_atomic_load16_u, 4 => .i32_atomic_load, @@ -7106,7 +7209,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(ptr); try func.addAtomicMemArg(tag, .{ .offset = ptr.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }); } else { _ = try func.load(ptr, ty, 0); @@ -7117,12 +7220,13 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = func.bin_file.base.options.module.?; const pl_op = func.air.instructions.items(.data)[inst].pl_op; const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data; const ptr = try func.resolveInst(pl_op.operand); const operand = try func.resolveInst(extra.operand); - const ty = func.air.typeOfIndex(inst); + const ty = func.typeOfIndex(inst); const op: std.builtin.AtomicRmwOp = extra.op(); if (func.useAtomicFeature()) { @@ -7140,7 +7244,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.emitWValue(ptr); try func.emitWValue(value); if (op == .Nand) { - const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; const and_res = try func.binOp(value, operand, ty, .@"and"); if (wasm_bits == 32) @@ -7157,7 +7261,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.addTag(.select); } try func.addAtomicMemArg( - switch (ty.abiSize(func.target)) { + switch (ty.abiSize(mod)) { 1 => .i32_atomic_rmw8_cmpxchg_u, 2 => .i32_atomic_rmw16_cmpxchg_u, 4 => .i32_atomic_rmw_cmpxchg, @@ -7166,7 +7270,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }, .{ .offset = ptr.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }, ); const select_res = try func.allocLocal(ty); @@ -7185,7 +7289,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { else => { try func.emitWValue(ptr); try func.emitWValue(operand); - const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) { + const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { 1 => switch (op) { .Xchg => .i32_atomic_rmw8_xchg_u, .Add => .i32_atomic_rmw8_add_u, @@ -7226,7 +7330,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { }; try func.addAtomicMemArg(tag, .{ .offset = ptr.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }); const result = try WValue.toLocal(.stack, func, ty); return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand }); @@ -7255,7 +7359,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .Xor => .xor, else => unreachable, }); - if (ty.isInt() and (op == .Add or op == .Sub)) { + if (ty.isInt(mod) and (op == .Add or op == .Sub)) { _ = try func.wrapOperand(.stack, ty); } try func.store(.stack, .stack, ty, ptr.offset()); @@ -7271,7 +7375,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.store(.stack, .stack, ty, ptr.offset()); }, .Nand => { - const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(func.target))).?; + const wasm_bits = toWasmBits(@intCast(u16, ty.bitSize(mod))).?; try func.emitWValue(ptr); const and_res = try func.binOp(result, operand, ty, .@"and"); @@ -7302,15 +7406,16 @@ fn airFence(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { } fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const mod = 
func.bin_file.base.options.module.?; const bin_op = func.air.instructions.items(.data)[inst].bin_op; const ptr = try func.resolveInst(bin_op.lhs); const operand = try func.resolveInst(bin_op.rhs); - const ptr_ty = func.air.typeOf(bin_op.lhs); - const ty = ptr_ty.childType(); + const ptr_ty = func.typeOf(bin_op.lhs); + const ty = ptr_ty.childType(mod); if (func.useAtomicFeature()) { - const tag: wasm.AtomicsOpcode = switch (ty.abiSize(func.target)) { + const tag: wasm.AtomicsOpcode = switch (ty.abiSize(mod)) { 1 => .i32_atomic_store8, 2 => .i32_atomic_store16, 4 => .i32_atomic_store, @@ -7321,7 +7426,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.lowerToStack(operand); try func.addAtomicMemArg(tag, .{ .offset = ptr.offset(), - .alignment = ty.abiAlignment(func.target), + .alignment = ty.abiAlignment(mod), }); } else { try func.store(ptr, operand, ty, 0); @@ -7338,3 +7443,13 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const result = try WValue.toLocal(.stack, func, Type.usize); return func.finishAir(inst, result, &.{}); } + +fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type { + const mod = func.bin_file.base.options.module.?; + return func.air.typeOf(inst, &mod.intern_pool); +} + +fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type { + const mod = func.bin_file.base.options.module.?; + return func.air.typeOfIndex(inst, &mod.intern_pool); +} diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index bfa5324dc6..45ad1d7eb3 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -254,7 +254,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { @setCold(true); std.debug.assert(emit.error_msg == null); const mod = emit.bin_file.base.options.module.?; - emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(), format, args); + emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(mod), format, args); return error.EmitFail; } diff --git a/src/arch/wasm/abi.zig b/src/arch/wasm/abi.zig index 4692f65dd1..92b0f4dc40 100644 --- a/src/arch/wasm/abi.zig +++ b/src/arch/wasm/abi.zig @@ -5,9 +5,11 @@ //! Note: Above mentioned document is not an official specification, therefore called a convention. const std = @import("std"); -const Type = @import("../../type.zig").Type; const Target = std.Target; +const Type = @import("../../type.zig").Type; +const Module = @import("../../Module.zig"); + /// Defines how to pass a type as part of a function signature, /// both for parameters as well as return values. pub const Class = enum { direct, indirect, none }; @@ -19,27 +21,28 @@ const direct: [2]Class = .{ .direct, .none }; /// Classifies a given Zig type to determine how they must be passed /// or returned as value within a wasm function. /// When all elements result in `.none`, no value must be passed in or returned. 
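The abi.zig hunks below apply the same mechanical migration used throughout this commit: classification and layout queries that previously took a `std.Target` now take the `*Module`, which supplies both the target (via `mod.getTarget()`) and the intern pool. A minimal sketch of how a typical call site changes under this pattern, assuming the types imported by the surrounding file; the wrapper function itself is hypothetical, not part of the patch:

// Sketch only; `func.bin_file.base.options.module` is the access path the
// wasm backend uses above, and `CodeGen`/`Type` are the file's own types.
fn exampleAbiSize(func: *CodeGen, ty: Type) u64 {
    // Before this commit: ty.abiSize(func.target)
    const mod = func.bin_file.base.options.module.?;
    return ty.abiSize(mod); // `mod` carries target info plus the intern pool
}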
-pub fn classifyType(ty: Type, target: Target) [2]Class { - if (!ty.hasRuntimeBitsIgnoreComptime()) return none; - switch (ty.zigTypeTag()) { +pub fn classifyType(ty: Type, mod: *Module) [2]Class { + const target = mod.getTarget(); + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none; + switch (ty.zigTypeTag(mod)) { .Struct => { - if (ty.containerLayout() == .Packed) { - if (ty.bitSize(target) <= 64) return direct; + if (ty.containerLayout(mod) == .Packed) { + if (ty.bitSize(mod) <= 64) return direct; return .{ .direct, .direct }; } // When the struct type is non-scalar - if (ty.structFieldCount() > 1) return memory; + if (ty.structFieldCount(mod) > 1) return memory; // When the struct's alignment is non-natural - const field = ty.structFields().values()[0]; + const field = ty.structFields(mod).values()[0]; if (field.abi_align != 0) { - if (field.abi_align > field.ty.abiAlignment(target)) { + if (field.abi_align > field.ty.abiAlignment(mod)) { return memory; } } - return classifyType(field.ty, target); + return classifyType(field.ty, mod); }, .Int, .Enum, .ErrorSet, .Vector => { - const int_bits = ty.intInfo(target).bits; + const int_bits = ty.intInfo(mod).bits; if (int_bits <= 64) return direct; if (int_bits <= 128) return .{ .direct, .direct }; return memory; @@ -53,22 +56,22 @@ pub fn classifyType(ty: Type, target: Target) [2]Class { .Bool => return direct, .Array => return memory, .Optional => { - std.debug.assert(ty.isPtrLikeOptional()); + std.debug.assert(ty.isPtrLikeOptional(mod)); return direct; }, .Pointer => { - std.debug.assert(!ty.isSlice()); + std.debug.assert(!ty.isSlice(mod)); return direct; }, .Union => { - if (ty.containerLayout() == .Packed) { - if (ty.bitSize(target) <= 64) return direct; + if (ty.containerLayout(mod) == .Packed) { + if (ty.bitSize(mod) <= 64) return direct; return .{ .direct, .direct }; } - const layout = ty.unionGetLayout(target); + const layout = ty.unionGetLayout(mod); std.debug.assert(layout.tag_size == 0); - if (ty.unionFields().count() > 1) return memory; - return classifyType(ty.unionFields().values()[0].ty, target); + if (ty.unionFields(mod).count() > 1) return memory; + return classifyType(ty.unionFields(mod).values()[0].ty, mod); }, .ErrorUnion, .Frame, @@ -90,29 +93,29 @@ pub fn classifyType(ty: Type, target: Target) [2]Class { /// Returns the scalar type a given type can represent. /// Asserts given type can be represented as scalar, such as /// a struct with a single scalar field. 
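For reference, callers read the `[2]Class` returned by `classifyType` to pick a passing strategy; `firstParamSRet` in the `callIntrinsic` hunk earlier is one such consumer. A hedged sketch of that kind of check, assuming the new `classifyType(ty, mod)` signature; the helper name is illustrative only:

const abi = @import("abi.zig");

// Sketch: a value travels through an sret pointer when its first class
// is `.indirect`; `.none` in both slots means nothing is passed at all.
fn needsSret(ty: Type, mod: *Module) bool {
    const classes = abi.classifyType(ty, mod);
    return classes[0] == .indirect;
}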
-pub fn scalarType(ty: Type, target: std.Target) Type { - switch (ty.zigTypeTag()) { +pub fn scalarType(ty: Type, mod: *Module) Type { + switch (ty.zigTypeTag(mod)) { .Struct => { - switch (ty.containerLayout()) { + switch (ty.containerLayout(mod)) { .Packed => { - const struct_obj = ty.castTag(.@"struct").?.data; - return scalarType(struct_obj.backing_int_ty, target); + const struct_obj = mod.typeToStruct(ty).?; + return scalarType(struct_obj.backing_int_ty, mod); }, else => { - std.debug.assert(ty.structFieldCount() == 1); - return scalarType(ty.structFieldType(0), target); + std.debug.assert(ty.structFieldCount(mod) == 1); + return scalarType(ty.structFieldType(0, mod), mod); }, } }, .Union => { - if (ty.containerLayout() != .Packed) { - const layout = ty.unionGetLayout(target); + if (ty.containerLayout(mod) != .Packed) { + const layout = ty.unionGetLayout(mod); if (layout.payload_size == 0 and layout.tag_size != 0) { - return scalarType(ty.unionTagTypeSafety().?, target); + return scalarType(ty.unionTagTypeSafety(mod).?, mod); } - std.debug.assert(ty.unionFields().count() == 1); + std.debug.assert(ty.unionFields(mod).count() == 1); } - return scalarType(ty.unionFields().values()[0].ty, target); + return scalarType(ty.unionFields(mod).values()[0].ty, mod); }, else => return ty, } diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index b614200e41..a1b57516ee 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -26,6 +26,7 @@ const Liveness = @import("../../Liveness.zig"); const Lower = @import("Lower.zig"); const Mir = @import("Mir.zig"); const Module = @import("../../Module.zig"); +const InternPool = @import("../../InternPool.zig"); const Target = std.Target; const Type = @import("../../type.zig").Type; const TypedValue = @import("../../TypedValue.zig"); @@ -112,10 +113,10 @@ const Owner = union(enum) { mod_fn: *const Module.Fn, lazy_sym: link.File.LazySymbol, - fn getDecl(owner: Owner) Module.Decl.Index { + fn getDecl(owner: Owner, mod: *Module) Module.Decl.Index { return switch (owner) { .mod_fn => |mod_fn| mod_fn.owner_decl, - .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(), + .lazy_sym => |lazy_sym| lazy_sym.ty.getOwnerDecl(mod), }; } @@ -447,7 +448,7 @@ const InstTracking = struct { else => unreachable, } tracking_log.debug("spill %{d} from {} to {}", .{ inst, self.short, self.long }); - try function.genCopy(function.air.typeOfIndex(inst), self.long, self.short); + try function.genCopy(function.typeOfIndex(inst), self.long, self.short); } fn reuseFrame(self: *InstTracking) void { @@ -537,7 +538,7 @@ const InstTracking = struct { inst: Air.Inst.Index, target: InstTracking, ) !void { - const ty = function.air.typeOfIndex(inst); + const ty = function.typeOfIndex(inst); if ((self.long == .none or self.long == .reserved_frame) and target.long == .load_frame) try function.genCopy(ty, target.long, self.short); try function.genCopy(ty, target.short, self.short); @@ -605,14 +606,14 @@ const FrameAlloc = struct { .ref_count = 0, }; } - fn initType(ty: Type, target: Target) FrameAlloc { - return init(.{ .size = ty.abiSize(target), .alignment = ty.abiAlignment(target) }); + fn initType(ty: Type, mod: *Module) FrameAlloc { + return init(.{ .size = ty.abiSize(mod), .alignment = ty.abiAlignment(mod) }); } }; const StackAllocation = struct { inst: ?Air.Inst.Index, - /// TODO do we need size? should be determined by inst.ty.abiSize(self.target.*) + /// TODO do we need size? 
should be determined by inst.ty.abiSize(mod) size: u32, }; @@ -631,7 +632,7 @@ const Self = @This(); pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - module_fn: *Module.Fn, + module_fn_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -642,6 +643,7 @@ pub fn generate( } const mod = bin_file.options.module.?; + const module_fn = mod.funcPtr(module_fn_index); const fn_owner_decl = mod.declPtr(module_fn.owner_decl); assert(fn_owner_decl.has_tv); const fn_type = fn_owner_decl.ty; @@ -686,7 +688,7 @@ pub fn generate( @enumToInt(FrameIndex.stack_frame), FrameAlloc.init(.{ .size = 0, - .alignment = if (mod.align_stack_fns.get(module_fn)) |set_align_stack| + .alignment = if (mod.align_stack_fns.get(module_fn_index)) |set_align_stack| set_align_stack.alignment else 1, @@ -697,7 +699,8 @@ pub fn generate( FrameAlloc.init(.{ .size = 0, .alignment = 1 }), ); - var call_info = function.resolveCallingConventionValues(fn_type, &.{}, .args_frame) catch |err| switch (err) { + const fn_info = mod.typeToFunc(fn_type).?; + var call_info = function.resolveCallingConventionValues(fn_info, &.{}, .args_frame) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, error.OutOfRegisters => return Result{ .fail = try ErrorMsg.create( @@ -714,12 +717,12 @@ pub fn generate( function.args = call_info.args; function.ret_mcv = call_info.return_value; function.frame_allocs.set(@enumToInt(FrameIndex.ret_addr), FrameAlloc.init(.{ - .size = Type.usize.abiSize(function.target.*), - .alignment = @min(Type.usize.abiAlignment(function.target.*), call_info.stack_align), + .size = Type.usize.abiSize(mod), + .alignment = @min(Type.usize.abiAlignment(mod), call_info.stack_align), })); function.frame_allocs.set(@enumToInt(FrameIndex.base_ptr), FrameAlloc.init(.{ - .size = Type.usize.abiSize(function.target.*), - .alignment = @min(Type.usize.abiAlignment(function.target.*) * 2, call_info.stack_align), + .size = Type.usize.abiSize(mod), + .alignment = @min(Type.usize.abiAlignment(mod) * 2, call_info.stack_align), })); function.frame_allocs.set( @enumToInt(FrameIndex.args_frame), @@ -1565,7 +1568,8 @@ fn asmMemoryRegisterImmediate( } fn gen(self: *Self) InnerError!void { - const cc = self.fn_type.fnCallingConvention(); + const mod = self.bin_file.options.module.?; + const cc = self.fn_type.fnCallingConvention(mod); if (cc != .Naked) { try self.asmRegister(.{ ._, .push }, .rbp); const backpatch_push_callee_preserved_regs = try self.asmPlaceholder(); @@ -1582,7 +1586,7 @@ fn gen(self: *Self) InnerError!void { // register which the callee is free to clobber. Therefore, we purposely // spill it to stack immediately. 
const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(Type.usize, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(Type.usize, mod)); try self.genSetMem( .{ .frame = frame_index }, 0, @@ -1724,6 +1728,8 @@ fn gen(self: *Self) InnerError!void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const mod = self.bin_file.options.module.?; + const ip = &mod.intern_pool; const air_tags = self.air.instructions.items(.tag); for (body) |inst| { @@ -1732,7 +1738,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { try self.mir_to_air_map.put(self.gpa, mir_inst, inst); } - if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) continue; + if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue; wip_mir_log.debug("{}", .{self.fmtAir(inst)}); verbose_tracking_log.debug("{}", .{self.fmtTracking()}); @@ -1916,8 +1922,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .ptr_elem_val => try self.airPtrElemVal(inst), .ptr_elem_ptr => try self.airPtrElemPtr(inst), - .constant => unreachable, // excluded from function bodies - .const_ty => unreachable, // excluded from function bodies + .inferred_alloc, .inferred_alloc_comptime, .interned => unreachable, .unreach => if (self.wantSafety()) try self.airTrap() else self.finishAirBookkeeping(), .optional_payload => try self.airOptionalPayload(inst), @@ -1999,7 +2004,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { } fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { - switch (lazy_sym.ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + switch (lazy_sym.ty.zigTypeTag(mod)) { .Enum => { const enum_ty = lazy_sym.ty; wip_mir_log.debug("{}.@tagName:", .{enum_ty.fmt(self.bin_file.options.module.?)}); @@ -2011,7 +2017,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { const ret_reg = param_regs[0]; const enum_mcv = MCValue{ .register = param_regs[1] }; - var exitlude_jump_relocs = try self.gpa.alloc(u32, enum_ty.enumFieldCount()); + var exitlude_jump_relocs = try self.gpa.alloc(u32, enum_ty.enumFieldCount(mod)); defer self.gpa.free(exitlude_jump_relocs); const data_reg = try self.register_manager.allocReg(null, gp); @@ -2020,16 +2026,10 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void { try self.genLazySymbolRef(.lea, data_reg, .{ .kind = .const_data, .ty = enum_ty }); var data_off: i32 = 0; - for ( - exitlude_jump_relocs, - enum_ty.enumFields().keys(), - 0.., - ) |*exitlude_jump_reloc, tag_name, index| { - var tag_pl = Value.Payload.U32{ - .base = .{ .tag = .enum_field_index }, - .data = @intCast(u32, index), - }; - const tag_val = Value.initPayload(&tag_pl.base); + for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, index_usize| { + const index = @intCast(u32, index_usize); + const tag_name = mod.intern_pool.stringToSlice(enum_ty.enumFields(mod)[index_usize]); + const tag_val = try mod.enumValueFieldIndex(enum_ty, index); const tag_mcv = try self.genTypedValue(.{ .ty = enum_ty, .val = tag_val }); try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv); const skip_reloc = try self.asmJccReloc(undefined, .ne); @@ -2092,10 +2092,8 @@ fn feed(self: *Self, bt: *Liveness.BigTomb, operand: Air.Inst.Ref) void { /// Asserts there is already capacity to insert into top branch inst_table. 
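Both `genBody` above and `processDeath` just below reflect the new AIR encoding: the `.constant` and `.const_ty` tags are gone, replaced by a single `.interned` tag whose payload is an InternPool index, so deciding whether an unused instruction can be skipped now requires consulting the intern pool. A condensed sketch of the skip path, using the same names as the hunk:

// Sketch of the genBody skip logic after this change.
const mod = self.bin_file.options.module.?;
const ip = &mod.intern_pool;
for (body) |inst| {
    // An unused instruction may still need lowering for its side effects;
    // `mustLower` now takes the intern pool to make that call.
    if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue;
    // ... lower `inst` ...
}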
fn processDeath(self: *Self, inst: Air.Inst.Index) void { - switch (self.air.instructions.items(.tag)[inst]) { - .constant, .const_ty => unreachable, - else => self.inst_tracking.getPtr(inst).?.die(self, inst), - } + assert(self.air.instructions.items(.tag)[inst] != .interned); + self.inst_tracking.getPtr(inst).?.die(self, inst); } /// Called when there are no operands, and the instruction is always unreferenced. @@ -2126,10 +2124,7 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live const dies = @truncate(u1, tomb_bits) != 0; tomb_bits >>= 1; if (!dies) continue; - const op_int = @enumToInt(op); - if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); - self.processDeath(op_index); + self.processDeath(Air.refToIndexAllowNone(op) orelse continue); } self.finishAirResult(inst, result); } @@ -2252,19 +2247,19 @@ fn allocFrameIndex(self: *Self, alloc: FrameAlloc) !FrameIndex { /// Use a pointer instruction as the basis for allocating stack memory. fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !FrameIndex { - const ptr_ty = self.air.typeOfIndex(inst); - const val_ty = ptr_ty.childType(); + const mod = self.bin_file.options.module.?; + const ptr_ty = self.typeOfIndex(inst); + const val_ty = ptr_ty.childType(mod); return self.allocFrameIndex(FrameAlloc.init(.{ - .size = math.cast(u32, val_ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + .size = math.cast(u32, val_ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{val_ty.fmt(mod)}); }, - .alignment = @max(ptr_ty.ptrAlignment(self.target.*), 1), + .alignment = @max(ptr_ty.ptrAlignment(mod), 1), })); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - return self.allocRegOrMemAdvanced(self.air.typeOfIndex(inst), inst, reg_ok); + return self.allocRegOrMemAdvanced(self.typeOfIndex(inst), inst, reg_ok); } fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue { @@ -2272,20 +2267,20 @@ fn allocTempRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool) !MCValue { } fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: bool) !MCValue { - const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse { - const mod = self.bin_file.options.module.?; + const mod = self.bin_file.options.module.?; + const abi_size = math.cast(u32, ty.abiSize(mod)) orelse { return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)}); }; if (reg_ok) need_mem: { - if (abi_size <= @as(u32, switch (ty.zigTypeTag()) { + if (abi_size <= @as(u32, switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 16, 32, 64, 128 => 16, 80 => break :need_mem, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { 16, 32, 64, 128 => if (self.hasFeature(.avx)) 32 else 16, 80 => break :need_mem, else => unreachable, @@ -2294,18 +2289,18 @@ fn allocRegOrMemAdvanced(self: *Self, ty: Type, inst: ?Air.Inst.Index, reg_ok: b }, else => 8, })) { - if (self.register_manager.tryAllocReg(inst, regClassForType(ty))) |reg| { + if (self.register_manager.tryAllocReg(inst, regClassForType(ty, mod))) |reg| { return MCValue{ .register = registerAlias(reg, abi_size) }; } } } - const frame_index = try 
self.allocFrameIndex(FrameAlloc.initType(ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ty, mod)); return .{ .load_frame = .{ .index = frame_index } }; } -fn regClassForType(ty: Type) RegisterManager.RegisterBitSet { - return switch (ty.zigTypeTag()) { +fn regClassForType(ty: Type, mod: *Module) RegisterManager.RegisterBitSet { + return switch (ty.zigTypeTag(mod)) { .Float, .Vector => sse, else => gp, }; @@ -2449,7 +2444,8 @@ pub fn spillRegisters(self: *Self, registers: []const Register) !void { /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { - const reg = try self.register_manager.allocReg(null, regClassForType(ty)); + const mod = self.bin_file.options.module.?; + const reg = try self.register_manager.allocReg(null, regClassForType(ty, mod)); try self.genSetReg(reg, ty, mcv); return reg; } @@ -2464,7 +2460,8 @@ fn copyToRegisterWithInstTracking( ty: Type, mcv: MCValue, ) !MCValue { - const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty)); + const mod = self.bin_file.options.module.?; + const reg: Register = try self.register_manager.allocReg(reg_owner, regClassForType(ty, mod)); try self.genSetReg(reg, ty, mcv); return MCValue{ .register = reg }; } @@ -2481,7 +2478,7 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { .load_frame => .{ .register_offset = .{ .reg = (try self.copyToRegisterWithInstTracking( inst, - self.air.typeOfIndex(inst), + self.typeOfIndex(inst), self.ret_mcv.long, )).register, .off = self.ret_mcv.short.indirect.off, @@ -2492,9 +2489,9 @@ fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_bits = dst_ty.floatBits(self.target.*); - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_bits = src_ty.floatBits(self.target.*); const src_mcv = try self.resolveInst(ty_op.operand); @@ -2558,9 +2555,9 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { fn airFpext(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); + const dst_ty = self.typeOfIndex(inst); const dst_bits = dst_ty.floatBits(self.target.*); - const src_ty = self.air.typeOf(ty_op.operand); + const src_ty = self.typeOf(ty_op.operand); const src_bits = src_ty.floatBits(self.target.*); const src_mcv = try self.resolveInst(ty_op.operand); @@ -2618,14 +2615,15 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void { } fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = result: { - const src_ty = self.air.typeOf(ty_op.operand); - const src_int_info = src_ty.intInfo(self.target.*); + const src_ty = self.typeOf(ty_op.operand); + const src_int_info = src_ty.intInfo(mod); - const dst_ty = self.air.typeOfIndex(inst); - const dst_int_info = dst_ty.intInfo(self.target.*); - const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); + const dst_ty = self.typeOfIndex(inst); + const dst_int_info = dst_ty.intInfo(mod); + const abi_size = @intCast(u32, 
dst_ty.abiSize(mod)); const min_ty = if (dst_int_info.bits < src_int_info.bits) dst_ty else src_ty; const extend = switch (src_int_info.signedness) { @@ -2670,14 +2668,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { const high_bits = src_int_info.bits % 64; if (high_bits > 0) { - var high_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (extend) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = high_bits, - }; - const high_ty = Type.initPayload(&high_pl.base); + const high_ty = try mod.intType(extend, high_bits); try self.truncateRegister(high_ty, high_reg); try self.genCopy(Type.usize, high_mcv, .{ .register = high_reg }); } @@ -2706,12 +2697,13 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { } fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const dst_ty = self.air.typeOfIndex(inst); - const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*)); - const src_ty = self.air.typeOf(ty_op.operand); - const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*)); + const dst_ty = self.typeOfIndex(inst); + const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod)); + const src_ty = self.typeOf(ty_op.operand); + const src_abi_size = @intCast(u32, src_ty.abiSize(mod)); const result = result: { const src_mcv = try self.resolveInst(ty_op.operand); @@ -2724,13 +2716,13 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { else try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv); - if (dst_ty.zigTypeTag() == .Vector) { - assert(src_ty.zigTypeTag() == .Vector and dst_ty.vectorLen() == src_ty.vectorLen()); - const dst_info = dst_ty.childType().intInfo(self.target.*); - const src_info = src_ty.childType().intInfo(self.target.*); + if (dst_ty.zigTypeTag(mod) == .Vector) { + assert(src_ty.zigTypeTag(mod) == .Vector and dst_ty.vectorLen(mod) == src_ty.vectorLen(mod)); + const dst_info = dst_ty.childType(mod).intInfo(mod); + const src_info = src_ty.childType(mod).intInfo(mod); const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_info.bits) { 8 => switch (src_info.bits) { - 16 => switch (dst_ty.vectorLen()) { + 16 => switch (dst_ty.vectorLen(mod)) { 1...8 => if (self.hasFeature(.avx)) .{ .vp_b, .ackusw } else .{ .p_b, .ackusw }, 9...16 => if (self.hasFeature(.avx2)) .{ .vp_b, .ackusw } else null, else => null, @@ -2738,7 +2730,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { else => null, }, 16 => switch (src_info.bits) { - 32 => switch (dst_ty.vectorLen()) { + 32 => switch (dst_ty.vectorLen(mod)) { 1...4 => if (self.hasFeature(.avx)) .{ .vp_w, .ackusd } else if (self.hasFeature(.sse4_1)) @@ -2755,29 +2747,21 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { dst_ty.fmt(self.bin_file.options.module.?), }); - var mask_pl = Value.Payload.U64{ - .base = .{ .tag = .int_u64 }, - .data = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits), - }; - const mask_val = Value.initPayload(&mask_pl.base); + const elem_ty = src_ty.childType(mod); + const mask_val = try mod.intValue(elem_ty, @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - dst_info.bits)); - var splat_pl = Value.Payload.SubValue{ - .base = .{ .tag = .repeated }, - .data = mask_val, - }; - const splat_val = Value.initPayload(&splat_pl.base); + const splat_ty = try mod.vectorType(.{ + .len = @intCast(u32, @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits)), + .child = elem_ty.ip_index, + }); + const splat_abi_size = 
@intCast(u32, splat_ty.abiSize(mod)); - var full_pl = Type.Payload.Array{ - .base = .{ .tag = .vector }, - .data = .{ - .len = @divExact(@as(u64, if (src_abi_size > 16) 256 else 128), src_info.bits), - .elem_type = src_ty.childType(), - }, - }; - const full_ty = Type.initPayload(&full_pl.base); - const full_abi_size = @intCast(u32, full_ty.abiSize(self.target.*)); + const splat_val = try mod.intern(.{ .aggregate = .{ + .ty = splat_ty.ip_index, + .storage = .{ .repeated_elem = mask_val.ip_index }, + } }); - const splat_mcv = try self.genTypedValue(.{ .ty = full_ty, .val = splat_val }); + const splat_mcv = try self.genTypedValue(.{ .ty = splat_ty, .val = splat_val.toValue() }); const splat_addr_mcv: MCValue = switch (splat_mcv) { .memory, .indirect, .load_frame => splat_mcv.address(), else => .{ .register = try self.copyToTmpRegister(Type.usize, splat_mcv.address()) }, @@ -2789,14 +2773,14 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { .{ .vp_, .@"and" }, dst_reg, dst_reg, - splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)), + splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)), ); try self.asmRegisterRegisterRegister(mir_tag, dst_reg, dst_reg, dst_reg); } else { try self.asmRegisterMemory( .{ .p_, .@"and" }, dst_reg, - splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(full_abi_size)), + splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)), ); try self.asmRegisterRegister(mir_tag, dst_reg, dst_reg); } @@ -2819,7 +2803,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); const operand = try self.resolveInst(un_op); const dst_mcv = if (self.reuseOperand(inst, un_op, 0, operand)) @@ -2831,20 +2815,21 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { } fn airSlice(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const slice_ty = self.air.typeOfIndex(inst); + const slice_ty = self.typeOfIndex(inst); const ptr = try self.resolveInst(bin_op.lhs); - const ptr_ty = self.air.typeOf(bin_op.lhs); + const ptr_ty = self.typeOf(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); - const len_ty = self.air.typeOf(bin_op.rhs); + const len_ty = self.typeOf(bin_op.rhs); - const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, self.target.*)); + const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod)); try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, ptr_ty.abiSize(self.target.*)), + @intCast(i32, ptr_ty.abiSize(mod)), len_ty, len, ); @@ -2873,23 +2858,24 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void } fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { + const mod = self.bin_file.options.module.?; const air_tag = self.air.instructions.items(.tag); const air_data = self.air.instructions.items(.data); - const dst_ty = self.air.typeOf(dst_air); - const dst_info = dst_ty.intInfo(self.target.*); + const dst_ty = self.typeOf(dst_air); + const dst_info = dst_ty.intInfo(mod); if (Air.refToIndex(dst_air)) |inst| { switch (air_tag[inst]) { - .constant => { - const src_val = 
self.air.values[air_data[inst].ty_pl.payload]; + .interned => { + const src_val = air_data[inst].interned.toValue(); var space: Value.BigIntSpace = undefined; - const src_int = src_val.toBigInt(&space, self.target.*); + const src_int = src_val.toBigInt(&space, mod); return @intCast(u16, src_int.bitCountTwosComp()) + @boolToInt(src_int.positive and dst_info.signedness == .signed); }, .intcast => { - const src_ty = self.air.typeOf(air_data[inst].ty_op.operand); - const src_info = src_ty.intInfo(self.target.*); + const src_ty = self.typeOf(air_data[inst].ty_op.operand); + const src_info = src_ty.intInfo(mod); return @min(switch (src_info.signedness) { .signed => switch (dst_info.signedness) { .signed => src_info.bits, @@ -2908,20 +2894,18 @@ fn activeIntBits(self: *Self, dst_air: Air.Inst.Ref) u16 { } fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result = result: { const tag = self.air.instructions.items(.tag)[inst]; - const dst_ty = self.air.typeOfIndex(inst); - switch (dst_ty.zigTypeTag()) { + const dst_ty = self.typeOfIndex(inst); + switch (dst_ty.zigTypeTag(mod)) { .Float, .Vector => break :result try self.genBinOp(inst, tag, bin_op.lhs, bin_op.rhs), else => {}, } - const dst_info = dst_ty.intInfo(self.target.*); - var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, .data = switch (tag) { + const dst_info = dst_ty.intInfo(mod); + const src_ty = try mod.intType(dst_info.signedness, switch (tag) { else => unreachable, .mul, .mulwrap => math.max3( self.activeIntBits(bin_op.lhs), @@ -2929,8 +2913,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { dst_info.bits / 2, ), .div_trunc, .div_floor, .div_exact, .rem, .mod => dst_info.bits, - } }; - const src_ty = Type.initPayload(&src_pl.base); + }); try self.spillEflagsIfOccupied(); try self.spillRegisters(&.{ .rax, .rdx }); @@ -2942,8 +2925,9 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void { } fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); const lhs_mcv = try self.resolveInst(bin_op.lhs); const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv)) @@ -2968,7 +2952,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const reg_extra_bits = self.regExtraBits(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { if (reg_extra_bits > 0) { try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -2994,7 +2978,7 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .o; } else cc: { try self.genSetReg(limit_reg, ty, .{ - .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(self.target.*)), + .immediate = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - ty.bitSize(mod)), }); try self.genBinOpMir(.{ ._, .add }, ty, dst_mcv, rhs_mcv); @@ -3005,14 +2989,14 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( 
registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), cc, ); - if (reg_extra_bits > 0 and ty.isSignedInt()) { + if (reg_extra_bits > 0 and ty.isSignedInt(mod)) { try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3020,8 +3004,9 @@ fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { } fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); const lhs_mcv = try self.resolveInst(bin_op.lhs); const dst_mcv = if (lhs_mcv.isRegister() and self.reuseOperand(inst, bin_op.lhs, 0, lhs_mcv)) @@ -3046,7 +3031,7 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { const reg_bits = self.regBitSize(ty); const reg_extra_bits = self.regExtraBits(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { if (reg_extra_bits > 0) { try self.genShiftBinOpMir(.{ ._l, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3076,14 +3061,14 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { break :cc .c; }; - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_reg, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), cc, ); - if (reg_extra_bits > 0 and ty.isSignedInt()) { + if (reg_extra_bits > 0 and ty.isSignedInt(mod)) { try self.genShiftBinOpMir(.{ ._r, .sa }, ty, dst_mcv, .{ .immediate = reg_extra_bits }); } @@ -3091,8 +3076,9 @@ fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { } fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.typeOf(bin_op.lhs); + const ty = self.typeOf(bin_op.lhs); try self.spillRegisters(&.{ .rax, .rdx }); const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx }); @@ -3118,7 +3104,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { defer self.register_manager.unlockReg(limit_lock); const reg_bits = self.regBitSize(ty); - const cc: Condition = if (ty.isSignedInt()) cc: { + const cc: Condition = if (ty.isSignedInt(mod)) cc: { try self.genSetReg(limit_reg, ty, lhs_mcv); try self.genBinOpMir(.{ ._, .xor }, ty, limit_mcv, rhs_mcv); try self.genShiftBinOpMir(.{ ._, .sa }, ty, limit_mcv, .{ .immediate = reg_bits - 1 }); @@ -3134,7 +3120,7 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { }; const dst_mcv = try self.genMulDivBinOp(.mul, inst, ty, ty, lhs_mcv, rhs_mcv); - const cmov_abi_size = @max(@intCast(u32, ty.abiSize(self.target.*)), 2); + const cmov_abi_size = @max(@intCast(u32, ty.abiSize(mod)), 2); try self.asmCmovccRegisterRegister( registerAlias(dst_mcv.register, cmov_abi_size), registerAlias(limit_reg, cmov_abi_size), @@ -3145,12 +3131,13 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { } fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = result: { const tag = self.air.instructions.items(.tag)[inst]; - const ty = self.air.typeOf(bin_op.lhs); - switch (ty.zigTypeTag()) { + const ty = self.typeOf(bin_op.lhs); + switch 
(ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement add/sub with overflow for Vector type", .{}), .Int => { try self.spillEflagsIfOccupied(); @@ -3160,13 +3147,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .sub_with_overflow => .sub, else => unreachable, }, bin_op.lhs, bin_op.rhs); - const int_info = ty.intInfo(self.target.*); + const int_info = ty.intInfo(mod); const cc: Condition = switch (int_info.signedness) { .unsigned => .c, .signed => .o, }; - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { switch (partial_mcv) { .register => |reg| { @@ -3177,16 +3164,16 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), Type.u1, .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), ty, partial_mcv, ); @@ -3194,7 +3181,7 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, @@ -3205,12 +3192,13 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = result: { - const lhs_ty = self.air.typeOf(bin_op.lhs); - const rhs_ty = self.air.typeOf(bin_op.rhs); - switch (lhs_ty.zigTypeTag()) { + const lhs_ty = self.typeOf(bin_op.lhs); + const rhs_ty = self.typeOf(bin_op.rhs); + switch (lhs_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement shl with overflow for Vector type", .{}), .Int => { try self.spillEflagsIfOccupied(); @@ -3219,7 +3207,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const int_info = lhs_ty.intInfo(self.target.*); + const int_info = lhs_ty.intInfo(mod); const partial_mcv = try self.genShiftBinOp(.shl, null, lhs, rhs, lhs_ty, rhs_ty); const partial_lock = switch (partial_mcv) { @@ -3238,7 +3226,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, tmp_mcv, lhs); const cc = Condition.ne; - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); if (int_info.bits >= 8 and math.isPowerOfTwo(int_info.bits)) { switch (partial_mcv) { .register => |reg| { @@ -3249,24 +3237,24 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, 
tuple_ty.structFieldOffset(1, self.target.*)), - tuple_ty.structFieldType(1), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + tuple_ty.structFieldType(1, mod), .{ .eflags = cc }, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), - tuple_ty.structFieldType(0), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), + tuple_ty.structFieldType(0, mod), partial_mcv, ); break :result .{ .load_frame = .{ .index = frame_index } }; } const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, @@ -3283,29 +3271,20 @@ fn genSetFrameTruncatedOverflowCompare( src_mcv: MCValue, overflow_cc: ?Condition, ) !void { + const mod = self.bin_file.options.module.?; const src_lock = switch (src_mcv) { .register => |reg| self.register_manager.lockReg(reg), else => null, }; defer if (src_lock) |lock| self.register_manager.unlockReg(lock); - const ty = tuple_ty.structFieldType(0); - const int_info = ty.intInfo(self.target.*); + const ty = tuple_ty.structFieldType(0, mod); + const int_info = ty.intInfo(mod); - var hi_limb_pl = Type.Payload.Bits{ - .base = .{ .tag = switch (int_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, - .data = (int_info.bits - 1) % 64 + 1, - }; - const hi_limb_ty = Type.initPayload(&hi_limb_pl.base); + const hi_limb_bits = (int_info.bits - 1) % 64 + 1; + const hi_limb_ty = try mod.intType(int_info.signedness, hi_limb_bits); - var rest_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = int_info.bits - hi_limb_pl.data, - }; - const rest_ty = Type.initPayload(&rest_pl.base); + const rest_ty = try mod.intType(.unsigned, int_info.bits - hi_limb_bits); const temp_regs = try self.register_manager.allocRegs(3, .{ null, null, null }, gp); const temp_locks = self.register_manager.lockRegsAssumeUnused(3, temp_regs); @@ -3335,7 +3314,7 @@ fn genSetFrameTruncatedOverflowCompare( ); } - const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)); + const payload_off = @intCast(i32, tuple_ty.structFieldOffset(0, mod)); if (hi_limb_off > 0) try self.genSetMem(.{ .frame = frame_index }, payload_off, rest_ty, src_mcv); try self.genSetMem( .{ .frame = frame_index }, @@ -3345,23 +3324,24 @@ fn genSetFrameTruncatedOverflowCompare( ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), - tuple_ty.structFieldType(1), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + tuple_ty.structFieldType(1, mod), if (overflow_cc) |_| .{ .register = overflow_reg.to8() } else .{ .eflags = .ne }, ); } fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const dst_ty = self.air.typeOf(bin_op.lhs); - const result: MCValue = switch (dst_ty.zigTypeTag()) { + const dst_ty = self.typeOf(bin_op.lhs); + const result: MCValue = switch (dst_ty.zigTypeTag(mod)) { .Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}), .Int => result: { try self.spillEflagsIfOccupied(); try self.spillRegisters(&.{ .rax, .rdx }); - const dst_info = dst_ty.intInfo(self.target.*); + const dst_info = 
dst_ty.intInfo(mod); const cc: Condition = switch (dst_info.signedness) { .unsigned => .c, .signed => .o, @@ -3369,16 +3349,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const lhs_active_bits = self.activeIntBits(bin_op.lhs); const rhs_active_bits = self.activeIntBits(bin_op.rhs); - var src_pl = Type.Payload.Bits{ .base = .{ .tag = switch (dst_info.signedness) { - .signed => .int_signed, - .unsigned => .int_unsigned, - } }, .data = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2) }; - const src_ty = Type.initPayload(&src_pl.base); + const src_bits = math.max3(lhs_active_bits, rhs_active_bits, dst_info.bits / 2); + const src_ty = try mod.intType(dst_info.signedness, src_bits); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - const tuple_ty = self.air.typeOfIndex(inst); + const tuple_ty = self.typeOfIndex(inst); const extra_bits = if (dst_info.bits <= 64) self.regExtraBits(dst_ty) else @@ -3391,27 +3368,27 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result .{ .register_overflow = .{ .reg = reg, .eflags = cc } }; } else { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); try self.genSetFrameTruncatedOverflowCompare(tuple_ty, frame_index, partial_mcv, cc); break :result .{ .load_frame = .{ .index = frame_index } }; }, else => { // For now, this is the only supported multiply that doesn't fit in a register. - assert(dst_info.bits <= 128 and src_pl.data == 64); + assert(dst_info.bits <= 128 and src_bits == 64); const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(tuple_ty, mod)); if (dst_info.bits >= lhs_active_bits + rhs_active_bits) { try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(0, self.target.*)), - tuple_ty.structFieldType(0), + @intCast(i32, tuple_ty.structFieldOffset(0, mod)), + tuple_ty.structFieldType(0, mod), partial_mcv, ); try self.genSetMem( .{ .frame = frame_index }, - @intCast(i32, tuple_ty.structFieldOffset(1, self.target.*)), - tuple_ty.structFieldType(1), + @intCast(i32, tuple_ty.structFieldOffset(1, mod)), + tuple_ty.structFieldType(1, mod), .{ .immediate = 0 }, // cc being set is impossible ); } else try self.genSetFrameTruncatedOverflowCompare( @@ -3433,7 +3410,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { /// Clobbers .rax and .rdx registers. /// Quotient is saved in .rax and remainder in .rdx. fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue, rhs: MCValue) !void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); if (abi_size > 8) { return self.fail("TODO implement genIntMulDivOpMir for ABI size larger than 8", .{}); } @@ -3472,8 +3450,9 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue /// Always returns a register. /// Clobbers .rax and .rdx registers. 
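`genInlineIntDivFloor`, whose hunk follows, emits a truncating division plus a fix-up, since x86 `idiv` rounds toward zero while `@divFloor` rounds toward negative infinity. The correction it encodes amounts to the standard identity, sketched here as plain Zig arithmetic rather than the emitted MIR:

// Floor division via truncating division: subtract one when the remainder
// is nonzero and the operands have opposite signs.
fn divFloorSketch(a: i64, b: i64) i64 {
    const q = @divTrunc(a, b);
    const r = @rem(a, b);
    return if (r != 0 and (r < 0) != (b < 0)) q - 1 else q;
}
// e.g. divFloorSketch(-7, 2) == -4, while @divTrunc(-7, 2) == -3.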
@@ -3472,8 +3450,9 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue
 /// Always returns a register.
 /// Clobbers .rax and .rdx registers.
 fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue {
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
-    const int_info = ty.intInfo(self.target.*);
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, ty.abiSize(mod));
+    const int_info = ty.intInfo(mod);
     const dividend: Register = switch (lhs) {
         .register => |reg| reg,
         else => try self.copyToTmpRegister(ty, lhs),
@@ -3531,8 +3510,8 @@ fn airShlShrBinOp(self: *Self, inst: Air.Inst.Index) !void {
     try self.register_manager.getReg(.rcx, null);
     const lhs = try self.resolveInst(bin_op.lhs);
     const rhs = try self.resolveInst(bin_op.rhs);
-    const lhs_ty = self.air.typeOf(bin_op.lhs);
-    const rhs_ty = self.air.typeOf(bin_op.rhs);
+    const lhs_ty = self.typeOf(bin_op.lhs);
+    const rhs_ty = self.typeOf(bin_op.rhs);
 
     const result = try self.genShiftBinOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
@@ -3549,7 +3528,7 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) !void {
 fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
-        const pl_ty = self.air.typeOfIndex(inst);
+        const pl_ty = self.typeOfIndex(inst);
 
         const opt_mcv = try self.resolveInst(ty_op.operand);
         if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv)) {
@@ -3574,7 +3553,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void {
 fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
 
     const opt_mcv = try self.resolveInst(ty_op.operand);
     const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
@@ -3585,14 +3564,15 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result = result: {
-        const dst_ty = self.air.typeOfIndex(inst);
-        const src_ty = self.air.typeOf(ty_op.operand);
-        const opt_ty = src_ty.childType();
+        const dst_ty = self.typeOfIndex(inst);
+        const src_ty = self.typeOf(ty_op.operand);
+        const opt_ty = src_ty.childType(mod);
         const src_mcv = try self.resolveInst(ty_op.operand);
 
-        if (opt_ty.optionalReprIsPayload()) {
+        if (opt_ty.optionalReprIsPayload(mod)) {
             break :result if (self.liveness.isUnused(inst))
                 .unreach
             else if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
@@ -3609,8 +3589,8 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         else
             try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
 
-        const pl_ty = dst_ty.childType();
-        const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*));
+        const pl_ty = dst_ty.childType(mod);
+        const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod));
         try self.genSetMem(.{ .reg = dst_mcv.getReg().? }, pl_abi_size, Type.bool, .{ .immediate = 1 });
         break :result if (self.liveness.isUnused(inst)) .unreach else dst_mcv;
     };
@@ -3618,22 +3598,23 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const err_union_ty = self.air.typeOf(ty_op.operand);
-    const err_ty = err_union_ty.errorUnionSet();
-    const payload_ty = err_union_ty.errorUnionPayload();
+    const err_union_ty = self.typeOf(ty_op.operand);
+    const err_ty = err_union_ty.errorUnionSet(mod);
+    const payload_ty = err_union_ty.errorUnionPayload(mod);
     const operand = try self.resolveInst(ty_op.operand);
 
     const result: MCValue = result: {
-        if (err_ty.errorSetIsEmpty()) {
+        if (err_ty.errorSetIsEmpty(mod)) {
             break :result MCValue{ .immediate = 0 };
         }
 
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
             break :result operand;
         }
 
-        const err_off = errUnionErrorOffset(payload_ty, self.target.*);
+        const err_off = errUnionErrorOffset(payload_ty, mod);
         switch (operand) {
             .register => |reg| {
                 // TODO reuse operand
@@ -3666,7 +3647,7 @@ fn airUnwrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const err_union_ty = self.air.typeOf(ty_op.operand);
+    const err_union_ty = self.typeOf(ty_op.operand);
     const operand = try self.resolveInst(ty_op.operand);
     const result = try self.genUnwrapErrorUnionPayloadMir(inst, err_union_ty, operand);
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@@ -3678,12 +3659,13 @@ fn genUnwrapErrorUnionPayloadMir(
     err_union_ty: Type,
     err_union: MCValue,
 ) !MCValue {
-    const payload_ty = err_union_ty.errorUnionPayload();
+    const mod = self.bin_file.options.module.?;
+    const payload_ty = err_union_ty.errorUnionPayload(mod);
 
     const result: MCValue = result: {
-        if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result .none;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
 
-        const payload_off = errUnionPayloadOffset(payload_ty, self.target.*);
+        const payload_off = errUnionPayloadOffset(payload_ty, mod);
         switch (err_union) {
             .load_frame => |frame_addr| break :result .{ .load_frame = .{
                 .index = frame_addr.index,
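Note (illustration, not part of the diff): errUnionErrorOffset and
errUnionPayloadOffset locate the error code and payload inside an error union's
in-memory layout; the change only swaps the target argument for the Module. A
standalone sketch of the Zig-level semantics these unwrap paths implement:

    const std = @import("std");

    test "error union carries error code and payload side by side" {
        const eu: anyerror!u64 = error.Oops;
        // The representation holds both fields, so it is at least as large
        // as payload plus error code.
        try std.testing.expect(@sizeOf(anyerror!u64) >= @sizeOf(u64) + @sizeOf(anyerror));
        try std.testing.expectError(error.Oops, eu);
    }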
@@ -3720,9 +3702,10 @@ fn genUnwrapErrorUnionPayloadMir(
 
 // *(E!T) -> E
 fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_mcv = try self.resolveInst(ty_op.operand);
     const src_reg = switch (src_mcv) {
         .register => |reg| reg,
@@ -3736,11 +3719,11 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
     defer self.register_manager.unlockReg(dst_lock);
 
-    const eu_ty = src_ty.childType();
-    const pl_ty = eu_ty.errorUnionPayload();
-    const err_ty = eu_ty.errorUnionSet();
-    const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
-    const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*));
+    const eu_ty = src_ty.childType(mod);
+    const pl_ty = eu_ty.errorUnionPayload(mod);
+    const err_ty = eu_ty.errorUnionSet(mod);
+    const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
+    const err_abi_size = @intCast(u32, err_ty.abiSize(mod));
     try self.asmRegisterMemory(
         .{ ._, .mov },
         registerAlias(dst_reg, err_abi_size),
@@ -3755,9 +3738,10 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 // *(E!T) -> *T
 fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_mcv = try self.resolveInst(ty_op.operand);
     const src_reg = switch (src_mcv) {
         .register => |reg| reg,
@@ -3766,7 +3750,7 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
     const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
     defer self.register_manager.unlockReg(src_lock);
 
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
         src_reg
     else
@@ -3775,10 +3759,10 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const eu_ty = src_ty.childType();
-    const pl_ty = eu_ty.errorUnionPayload();
-    const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
-    const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+    const eu_ty = src_ty.childType(mod);
+    const pl_ty = eu_ty.errorUnionPayload(mod);
+    const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
+    const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
     try self.asmRegisterMemory(
         .{ ._, .lea },
         registerAlias(dst_reg, dst_abi_size),
@@ -3789,9 +3773,10 @@ fn airUnwrapErrUnionPayloadPtr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
    const result: MCValue = result: {
-        const src_ty = self.air.typeOf(ty_op.operand);
+        const src_ty = self.typeOf(ty_op.operand);
         const src_mcv = try self.resolveInst(ty_op.operand);
         const src_reg = switch (src_mcv) {
             .register => |reg| reg,
@@ -3800,11 +3785,11 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
         defer self.register_manager.unlockReg(src_lock);
 
-        const eu_ty = src_ty.childType();
-        const pl_ty = eu_ty.errorUnionPayload();
-        const err_ty = eu_ty.errorUnionSet();
-        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
-        const err_abi_size = @intCast(u32, err_ty.abiSize(self.target.*));
+        const eu_ty = src_ty.childType(mod);
+        const pl_ty = eu_ty.errorUnionPayload(mod);
+        const err_ty = eu_ty.errorUnionSet(mod);
+        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
+        const err_abi_size = @intCast(u32, err_ty.abiSize(mod));
         try self.asmMemoryImmediate(
             .{ ._, .mov },
             Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{
@@ -3816,7 +3801,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
 
         if (self.liveness.isUnused(inst)) break :result .unreach;
 
-        const dst_ty = self.air.typeOfIndex(inst);
+        const dst_ty = self.typeOfIndex(inst);
         const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
             src_reg
         else
@@ -3824,8 +3809,8 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
         const dst_lock = self.register_manager.lockReg(dst_reg);
         defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
-        const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
+        const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
         try self.asmRegisterMemory(
             .{ ._, .lea },
             registerAlias(dst_reg, dst_abi_size),
@@ -3853,14 +3838,15 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
-        const pl_ty = self.air.typeOf(ty_op.operand);
-        if (!pl_ty.hasRuntimeBits()) break :result .{ .immediate = 1 };
+        const pl_ty = self.typeOf(ty_op.operand);
+        if (!pl_ty.hasRuntimeBits(mod)) break :result .{ .immediate = 1 };
 
-        const opt_ty = self.air.typeOfIndex(inst);
+        const opt_ty = self.typeOfIndex(inst);
         const pl_mcv = try self.resolveInst(ty_op.operand);
-        const same_repr = opt_ty.optionalReprIsPayload();
+        const same_repr = opt_ty.optionalReprIsPayload(mod);
         if (same_repr and self.reuseOperand(inst, ty_op.operand, 0, pl_mcv)) break :result pl_mcv;
 
         const pl_lock: ?RegisterLock = switch (pl_mcv) {
@@ -3873,7 +3859,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
         try self.genCopy(pl_ty, opt_mcv, pl_mcv);
 
         if (!same_repr) {
-            const pl_abi_size = @intCast(i32, pl_ty.abiSize(self.target.*));
+            const pl_abi_size = @intCast(i32, pl_ty.abiSize(mod));
             switch (opt_mcv) {
                 else => unreachable,
@@ -3900,19 +3886,20 @@
 /// T to E!T
 fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
     const eu_ty = self.air.getRefType(ty_op.ty);
-    const pl_ty = eu_ty.errorUnionPayload();
-    const err_ty = eu_ty.errorUnionSet();
+    const pl_ty = eu_ty.errorUnionPayload(mod);
+    const err_ty = eu_ty.errorUnionSet(mod);
     const operand = try self.resolveInst(ty_op.operand);
 
     const result: MCValue = result: {
-        if (!pl_ty.hasRuntimeBitsIgnoreComptime()) break :result .{ .immediate = 0 };
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .{ .immediate = 0 };
 
-        const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, self.target.*));
-        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
-        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
+        const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod));
+        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
+        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
         try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, operand);
         try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, .{ .immediate = 0 });
         break :result .{ .load_frame = .{ .index = frame_index } };
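Note (illustration, not part of the diff): the two genSetMem calls above realize
the language-level rule that wrapping a payload into an error union stores the
payload plus a zero error code. A minimal Zig-level sketch of that semantics:

    const std = @import("std");

    test "T to E!T stores the payload and a zero error code" {
        const pl: u32 = 42;
        const eu: error{Oops}!u32 = pl;
        try std.testing.expectEqual(@as(u32, 42), try eu);
    }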
@@ -3922,18 +3909,19 @@
 /// E to E!T
 fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
     const eu_ty = self.air.getRefType(ty_op.ty);
-    const pl_ty = eu_ty.errorUnionPayload();
-    const err_ty = eu_ty.errorUnionSet();
+    const pl_ty = eu_ty.errorUnionPayload(mod);
+    const err_ty = eu_ty.errorUnionSet(mod);
 
     const result: MCValue = result: {
-        if (!pl_ty.hasRuntimeBitsIgnoreComptime()) break :result try self.resolveInst(ty_op.operand);
+        if (!pl_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result try self.resolveInst(ty_op.operand);
 
-        const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, self.target.*));
-        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
-        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
+        const frame_index = try self.allocFrameIndex(FrameAlloc.initType(eu_ty, mod));
+        const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, mod));
+        const err_off = @intCast(i32, errUnionErrorOffset(pl_ty, mod));
         try self.genSetMem(.{ .frame = frame_index }, pl_off, pl_ty, .undef);
         const operand = try self.resolveInst(ty_op.operand);
         try self.genSetMem(.{ .frame = frame_index }, err_off, err_ty, operand);
@@ -3949,7 +3937,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
         if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv;
 
         const dst_mcv = try self.allocRegOrMem(inst, true);
-        const dst_ty = self.air.typeOfIndex(inst);
+        const dst_ty = self.typeOfIndex(inst);
         try self.genCopy(dst_ty, dst_mcv, src_mcv);
         break :result dst_mcv;
     };
@@ -3974,9 +3962,10 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_mcv = try self.resolveInst(ty_op.operand);
     const src_reg = switch (src_mcv) {
         .register => |reg| reg,
@@ -3985,7 +3974,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
     defer self.register_manager.unlockReg(src_lock);
 
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
     const dst_reg = if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
         src_reg
     else
@@ -3994,7 +3983,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+    const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
     try self.asmRegisterMemory(
         .{ ._, .lea },
         registerAlias(dst_reg, dst_abi_size),
@@ -4010,7 +3999,7 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
 fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const dst_ty = self.air.typeOfIndex(inst);
+    const dst_ty = self.typeOfIndex(inst);
 
     const opt_mcv = try self.resolveInst(ty_op.operand);
     const dst_mcv = if (self.reuseOperand(inst, ty_op.operand, 0, opt_mcv))
@@ -4041,7 +4030,8 @@ fn elemOffset(self: *Self, index_ty: Type, index: MCValue, elem_size: u64) !Register {
 }
 
 fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
-    const slice_ty = self.air.typeOf(lhs);
+    const mod = self.bin_file.options.module.?;
+    const slice_ty = self.typeOf(lhs);
     const slice_mcv = try self.resolveInst(lhs);
     const slice_mcv_lock: ?RegisterLock = switch (slice_mcv) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4049,12 +4039,11 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
     };
     defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const elem_ty = slice_ty.childType();
-    const elem_size = elem_ty.abiSize(self.target.*);
-    var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-    const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
+    const elem_ty = slice_ty.childType(mod);
+    const elem_size = elem_ty.abiSize(mod);
+    const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
 
-    const index_ty = self.air.typeOf(rhs);
+    const index_ty = self.typeOf(rhs);
     const index_mcv = try self.resolveInst(rhs);
     const index_mcv_lock: ?RegisterLock = switch (index_mcv) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4077,11 +4066,11 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
 }
 
 fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const slice_ty = self.air.typeOf(bin_op.lhs);
+    const slice_ty = self.typeOf(bin_op.lhs);
 
-    var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-    const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf);
+    const slice_ptr_field_type = slice_ty.slicePtrFieldType(mod);
 
     const elem_ptr = try self.genSliceElemPtr(bin_op.lhs, bin_op.rhs);
     const dst_mcv = try self.allocRegOrMem(inst, false);
     try self.load(dst_mcv, slice_ptr_field_type, elem_ptr);
@@ -4097,9 +4086,10 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
-    const array_ty = self.air.typeOf(bin_op.lhs);
+    const array_ty = self.typeOf(bin_op.lhs);
     const array = try self.resolveInst(bin_op.lhs);
     const array_lock: ?RegisterLock = switch (array) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4107,10 +4097,10 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (array_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const elem_ty = array_ty.childType();
-    const elem_abi_size = elem_ty.abiSize(self.target.*);
+    const elem_ty = array_ty.childType(mod);
+    const elem_abi_size = elem_ty.abiSize(mod);
 
-    const index_ty = self.air.typeOf(bin_op.rhs);
+    const index_ty = self.typeOf(bin_op.rhs);
     const index = try self.resolveInst(bin_op.rhs);
     const index_lock: ?RegisterLock = switch (index) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4125,7 +4115,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const addr_reg = try self.register_manager.allocReg(null, gp);
     switch (array) {
         .register => {
-            const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, self.target.*));
+            const frame_index = try self.allocFrameIndex(FrameAlloc.initType(array_ty, mod));
             try self.genSetMem(.{ .frame = frame_index }, 0, array_ty, array);
             try self.asmRegisterMemory(
                 .{ ._, .lea },
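Note (illustration, not part of the diff): genSliceElemPtr and elemOffset
compute an element pointer as base plus index scaled by the element's ABI size.
A standalone sketch of that addressing arithmetic:

    const std = @import("std");

    fn elemByteOffset(index: u64, elem_abi_size: u64) u64 {
        // What elemOffset materializes into a register before the add/lea.
        return index * elem_abi_size;
    }

    test "element pointers are base plus scaled index" {
        try std.testing.expectEqual(@as(u64, 24), elemByteOffset(3, @sizeOf(u64)));
    }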
@@ -4162,15 +4152,16 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
 
     // this is identical to the `airPtrElemPtr` codegen except here an
     // additional `mov` is needed at the end to get the actual value
 
-    const elem_ty = ptr_ty.elemType2();
-    const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
-    const index_ty = self.air.typeOf(bin_op.rhs);
+    const elem_ty = ptr_ty.elemType2(mod);
+    const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod));
+    const index_ty = self.typeOf(bin_op.rhs);
     const index_mcv = try self.resolveInst(bin_op.rhs);
     const index_lock = switch (index_mcv) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4207,10 +4198,11 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
 
-    const ptr_ty = self.air.typeOf(extra.lhs);
+    const ptr_ty = self.typeOf(extra.lhs);
     const ptr = try self.resolveInst(extra.lhs);
     const ptr_lock: ?RegisterLock = switch (ptr) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4218,9 +4210,9 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const elem_ty = ptr_ty.elemType2();
-    const elem_abi_size = elem_ty.abiSize(self.target.*);
-    const index_ty = self.air.typeOf(extra.rhs);
+    const elem_ty = ptr_ty.elemType2(mod);
+    const elem_abi_size = elem_ty.abiSize(mod);
+    const index_ty = self.typeOf(extra.rhs);
     const index = try self.resolveInst(extra.rhs);
     const index_lock: ?RegisterLock = switch (index) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -4239,11 +4231,12 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const ptr_union_ty = self.air.typeOf(bin_op.lhs);
-    const union_ty = ptr_union_ty.childType();
-    const tag_ty = self.air.typeOf(bin_op.rhs);
-    const layout = union_ty.unionGetLayout(self.target.*);
+    const ptr_union_ty = self.typeOf(bin_op.lhs);
+    const union_ty = ptr_union_ty.childType(mod);
+    const tag_ty = self.typeOf(bin_op.rhs);
+    const layout = union_ty.unionGetLayout(mod);
 
     if (layout.tag_size == 0) {
         return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -4275,20 +4268,19 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
         break :blk MCValue{ .register = reg };
     } else ptr;
 
-    var ptr_tag_pl = ptr_union_ty.ptrInfo();
-    ptr_tag_pl.data.pointee_type = tag_ty;
-    const ptr_tag_ty = Type.initPayload(&ptr_tag_pl.base);
+    const ptr_tag_ty = try mod.adjustPtrTypeChild(ptr_union_ty, tag_ty);
     try self.store(ptr_tag_ty, adjusted_ptr, tag);
 
     return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
 fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const tag_ty = self.air.typeOfIndex(inst);
-    const union_ty = self.air.typeOf(ty_op.operand);
-    const layout = union_ty.unionGetLayout(self.target.*);
+    const tag_ty = self.typeOfIndex(inst);
+    const union_ty = self.typeOf(ty_op.operand);
+    const layout = union_ty.unionGetLayout(mod);
 
     if (layout.tag_size == 0) {
         return self.finishAir(inst, .none, .{ ty_op.operand, .none, .none });
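Note (illustration, not part of the diff): both tag accessors bail out when
layout.tag_size == 0, i.e. when the union stores no tag at runtime. A
standalone sketch of the layout fact this relies on:

    const std = @import("std");

    test "tagged unions reserve space for the tag" {
        const Untagged = extern union { a: u64, b: u32 };
        const Tagged = union(enum) { a: u64, b: u32 };
        // For the untagged union the tag size is zero, so setting a tag
        // would be a no-op; the tagged union must be strictly larger.
        try std.testing.expect(@sizeOf(Tagged) > @sizeOf(Untagged));
    }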
@@ -4302,7 +4294,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const tag_abi_size = tag_ty.abiSize(self.target.*);
+    const tag_abi_size = tag_ty.abiSize(mod);
     const dst_mcv: MCValue = blk: {
         switch (operand) {
             .load_frame => |frame_addr| {
@@ -4337,10 +4329,11 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airClz(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result = result: {
-        const dst_ty = self.air.typeOfIndex(inst);
-        const src_ty = self.air.typeOf(ty_op.operand);
+        const dst_ty = self.typeOfIndex(inst);
+        const src_ty = self.typeOf(ty_op.operand);
 
         const src_mcv = try self.resolveInst(ty_op.operand);
         const mat_src_mcv = switch (src_mcv) {
@@ -4358,7 +4351,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
         const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
         defer self.register_manager.unlockReg(dst_lock);
 
-        const src_bits = src_ty.bitSize(self.target.*);
+        const src_bits = src_ty.bitSize(mod);
         if (self.hasFeature(.lzcnt)) {
             if (src_bits <= 8) {
                 const wide_reg = try self.copyToTmpRegister(src_ty, mat_src_mcv);
@@ -4405,7 +4398,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
         }
 
         if (src_bits > 64)
-            return self.fail("TODO airClz of {}", .{src_ty.fmt(self.bin_file.options.module.?)});
+            return self.fail("TODO airClz of {}", .{src_ty.fmt(mod)});
         if (math.isPowerOfTwo(src_bits)) {
             const imm_reg = try self.copyToTmpRegister(dst_ty, .{
                 .immediate = src_bits ^ (src_bits - 1),
@@ -4422,7 +4415,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
                 try self.genBinOpMir(.{ ._, .bsr }, Type.u16, dst_mcv, .{ .register = wide_reg });
             } else try self.genBinOpMir(.{ ._, .bsr }, src_ty, dst_mcv, mat_src_mcv);
 
-            const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2);
+            const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
             try self.asmCmovccRegisterRegister(
                 registerAlias(dst_reg, cmov_abi_size),
                 registerAlias(imm_reg, cmov_abi_size),
@@ -4449,7 +4442,7 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
                 .{ .register = wide_reg },
             );
 
-            const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2);
+            const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
             try self.asmCmovccRegisterRegister(
                 registerAlias(imm_reg, cmov_abi_size),
                 registerAlias(dst_reg, cmov_abi_size),
@@ -4465,11 +4458,12 @@ fn airClz(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result = result: {
-        const dst_ty = self.air.typeOfIndex(inst);
-        const src_ty = self.air.typeOf(ty_op.operand);
-        const src_bits = src_ty.bitSize(self.target.*);
+        const dst_ty = self.typeOfIndex(inst);
+        const src_ty = self.typeOf(ty_op.operand);
+        const src_bits = src_ty.bitSize(mod);
 
         const src_mcv = try self.resolveInst(ty_op.operand);
         const mat_src_mcv = switch (src_mcv) {
@@ -4548,7 +4542,7 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
             try self.genBinOpMir(.{ ._, .bsf }, Type.u16, dst_mcv, .{ .register = wide_reg });
         } else try self.genBinOpMir(.{ ._, .bsf }, src_ty, dst_mcv, mat_src_mcv);
 
-        const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(self.target.*)), 2);
+        const cmov_abi_size = @max(@intCast(u32, dst_ty.abiSize(mod)), 2);
         try self.asmCmovccRegisterRegister(
             registerAlias(dst_reg, cmov_abi_size),
             registerAlias(width_reg, cmov_abi_size),
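Note (illustration, not part of the diff): the non-lzcnt path derives a count of
leading zeros from `bsr`, which returns the index of the highest set bit for a
nonzero input (the cmov handles zero). A standalone sketch of the relationship:

    const std = @import("std");

    test "clz is bit width minus one minus bsr" {
        const x: u32 = 0x1234;
        // std.math.log2_int is the floor log2, i.e. what x86 `bsr` computes.
        const bsr = std.math.log2_int(u32, x);
        try std.testing.expectEqual(@as(u32, @clz(x)), 31 - @as(u32, bsr));
    }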
@@ -4560,10 +4554,11 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airPopcount(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
     const result: MCValue = result: {
-        const src_ty = self.air.typeOf(ty_op.operand);
-        const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+        const src_ty = self.typeOf(ty_op.operand);
+        const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
         const src_mcv = try self.resolveInst(ty_op.operand);
 
         if (self.hasFeature(.popcnt)) {
@@ -4729,16 +4724,17 @@ fn byteSwap(self: *Self, inst: Air.Inst.Index, src_ty: Type, src_mcv: MCValue, m
 }
 
 fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const src_ty = self.air.typeOf(ty_op.operand);
+    const src_ty = self.typeOf(ty_op.operand);
     const src_mcv = try self.resolveInst(ty_op.operand);
 
     const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, true);
     switch (self.regExtraBits(src_ty)) {
         0 => {},
         else => |extra| try self.genBinOpMir(
-            if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh },
+            if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh },
             src_ty,
             dst_mcv,
             .{ .immediate = extra },
@@ -4749,10 +4745,11 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
 
-    const src_ty = self.air.typeOf(ty_op.operand);
-    const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+    const src_ty = self.typeOf(ty_op.operand);
+    const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
     const src_mcv = try self.resolveInst(ty_op.operand);
 
     const dst_mcv = try self.byteSwap(inst, src_ty, src_mcv, false);
@@ -4847,7 +4844,7 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
     switch (self.regExtraBits(src_ty)) {
         0 => {},
         else => |extra| try self.genBinOpMir(
-            if (src_ty.isSignedInt()) .{ ._r, .sa } else .{ ._r, .sh },
+            if (src_ty.isSignedInt(mod)) .{ ._r, .sa } else .{ ._r, .sh },
             src_ty,
             dst_mcv,
             .{ .immediate = extra },
@@ -4858,17 +4855,18 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const tag = self.air.instructions.items(.tag)[inst];
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const ty = self.air.typeOf(un_op);
-    const abi_size: u32 = switch (ty.abiSize(self.target.*)) {
+    const ty = self.typeOf(un_op);
+    const abi_size: u32 = switch (ty.abiSize(mod)) {
         1...16 => 16,
         17...32 => 32,
         else => return self.fail("TODO implement airFloatSign for {}", .{
-            ty.fmt(self.bin_file.options.module.?),
+            ty.fmt(mod),
        }),
     };
-    const scalar_bits = ty.scalarType().floatBits(self.target.*);
+    const scalar_bits = ty.scalarType(mod).floatBits(self.target.*);
 
     const src_mcv = try self.resolveInst(un_op);
     const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
@@ -4884,42 +4882,14 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
     const dst_lock = self.register_manager.lockReg(dst_reg);
     defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
-    var arena = std.heap.ArenaAllocator.init(self.gpa);
-    defer arena.deinit();
+    const vec_ty = try mod.vectorType(.{
+        .len = @divExact(abi_size * 8, scalar_bits),
+        .child = (try mod.intType(.signed, scalar_bits)).ip_index,
+    });
 
-    const ExpectedContents = struct {
-        scalar: union {
-            i64: Value.Payload.I64,
-            big: struct {
-                limbs: [
-                    @max(
                        std.math.big.int.Managed.default_capacity,
                        std.math.big.int.calcTwosCompLimbCount(128),
                    )
-                ]std.math.big.Limb,
-                pl: Value.Payload.BigInt,
-            },
-        },
-        repeated: Value.Payload.SubValue,
-    };
-    var stack align(@alignOf(ExpectedContents)) =
-        std.heap.stackFallback(@sizeOf(ExpectedContents), arena.allocator());
-
-    var int_pl = Type.Payload.Bits{
-        .base = .{ .tag = .int_signed },
-        .data = scalar_bits,
-    };
-    var vec_pl = Type.Payload.Array{
-        .base = .{ .tag = .vector },
-        .data = .{
-            .len = @divExact(abi_size * 8, scalar_bits),
-            .elem_type = Type.initPayload(&int_pl.base),
-        },
-    };
-    const vec_ty = Type.initPayload(&vec_pl.base);
     const sign_val = switch (tag) {
-        .neg => try vec_ty.minInt(stack.get(), self.target.*),
-        .fabs => try vec_ty.maxInt(stack.get(), self.target.*),
+        .neg => try vec_ty.minInt(mod, vec_ty),
+        .fabs => try vec_ty.maxInt(mod, vec_ty),
         else => unreachable,
     };
@@ -4993,7 +4963,7 @@ fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airRound(self: *Self, inst: Air.Inst.Index, mode: u4) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const ty = self.air.typeOf(un_op);
+    const ty = self.typeOf(un_op);
 
     const src_mcv = try self.resolveInst(un_op);
     const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv))
@@ -5008,25 +4978,26 @@ fn airRound(self: *Self, inst: Air.Inst.Index, mode: u4) !void {
 }
 
 fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4) !void {
+    const mod = self.bin_file.options.module.?;
     if (!self.hasFeature(.sse4_1))
         return self.fail("TODO implement genRound without sse4_1 feature", .{});
 
-    const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) {
+    const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) {
         .Float => switch (ty.floatBits(self.target.*)) {
             32 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
             64 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
             16, 80, 128 => null,
             else => unreachable,
         },
-        .Vector => switch (ty.childType().zigTypeTag()) {
-            .Float => switch (ty.childType().floatBits(self.target.*)) {
-                32 => switch (ty.vectorLen()) {
+        .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+            .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+                32 => switch (ty.vectorLen(mod)) {
                     1 => if (self.hasFeature(.avx)) .{ .v_ss, .round } else .{ ._ss, .round },
                     2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else .{ ._ps, .round },
                     5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .round } else null,
                     else => null,
                 },
-                64 => switch (ty.vectorLen()) {
+                64 => switch (ty.vectorLen(mod)) {
                     1 => if (self.hasFeature(.avx)) .{ .v_sd, .round } else .{ ._sd, .round },
                     2 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else .{ ._pd, .round },
                     3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .round } else null,
@@ -5041,7 +5012,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4
     })) |tag| tag else return self.fail("TODO implement genRound for {}", .{
         ty.fmt(self.bin_file.options.module.?),
     });
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     const dst_alias = registerAlias(dst_reg, abi_size);
     switch (mir_tag[0]) {
         .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemoryImmediate(
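Note (illustration, not part of the diff): airFloatSign reinterprets the float
vector as signed integers and uses minInt (only the sign bit set) as the xor
mask for negation and maxInt (every bit but the sign) as the and mask for fabs.
A standalone scalar sketch of the same bit trick, using the current single-
argument @bitCast spelling:

    const std = @import("std");

    test "negate and fabs as integer masking" {
        const sign_bit: u64 = @as(u64, 1) << 63; // bit pattern of minInt(i64)
        const x: f64 = 1.5;
        const neg: f64 = @bitCast(@as(u64, @bitCast(x)) ^ sign_bit);
        try std.testing.expectEqual(@as(f64, -1.5), neg);
        const abs: f64 = @bitCast(@as(u64, @bitCast(neg)) & ~sign_bit);
        try std.testing.expectEqual(@as(f64, 1.5), abs);
    }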
@@ -5078,9 +5049,10 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: u4
 }
 
 fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const ty = self.air.typeOf(un_op);
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const ty = self.typeOf(un_op);
+    const abi_size = @intCast(u32, ty.abiSize(mod));
 
     const src_mcv = try self.resolveInst(un_op);
     const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, un_op, 0, src_mcv))
@@ -5092,7 +5064,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
     defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
 
     const result: MCValue = result: {
-        const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag()) {
+        const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (ty.zigTypeTag(mod)) {
             .Float => switch (ty.floatBits(self.target.*)) {
                 16 => if (self.hasFeature(.f16c)) {
                     const mat_src_reg = if (src_mcv.isRegister())
@@ -5114,9 +5086,9 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
                 80, 128 => null,
                 else => unreachable,
             },
-            .Vector => switch (ty.childType().zigTypeTag()) {
-                .Float => switch (ty.childType().floatBits(self.target.*)) {
-                    16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen()) {
+            .Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+                .Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+                    16 => if (self.hasFeature(.f16c)) switch (ty.vectorLen(mod)) {
                         1 => {
                             try self.asmRegisterRegister(
                                 .{ .v_ps, .cvtph2 },
@@ -5167,13 +5139,13 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
                         },
                         else => null,
                     } else null,
-                    32 => switch (ty.vectorLen()) {
+                    32 => switch (ty.vectorLen(mod)) {
                         1 => if (self.hasFeature(.avx)) .{ .v_ss, .sqrt } else .{ ._ss, .sqrt },
                         2...4 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else .{ ._ps, .sqrt },
                         5...8 => if (self.hasFeature(.avx)) .{ .v_ps, .sqrt } else null,
                         else => null,
                     },
-                    64 => switch (ty.vectorLen()) {
+                    64 => switch (ty.vectorLen(mod)) {
                         1 => if (self.hasFeature(.avx)) .{ .v_sd, .sqrt } else .{ ._sd, .sqrt },
                         2 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else .{ ._pd, .sqrt },
                         3...4 => if (self.hasFeature(.avx)) .{ .v_pd, .sqrt } else null,
@@ -5186,7 +5158,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
             },
             else => unreachable,
         })) |tag| tag else return self.fail("TODO implement airSqrt for {}", .{
-            ty.fmt(self.bin_file.options.module.?),
+            ty.fmt(mod),
        });
         switch (mir_tag[0]) {
             .v_ss, .v_sd => if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory(
@@ -5274,10 +5246,11 @@ fn reuseOperandAdvanced(
 }
 
 fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
-    const ptr_info = ptr_ty.ptrInfo().data;
+    const mod = self.bin_file.options.module.?;
+    const ptr_info = ptr_ty.ptrInfo(mod);
 
     const val_ty = ptr_info.pointee_type;
-    const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
+    const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
     const limb_abi_size: u32 = @min(val_abi_size, 8);
     const limb_abi_bits = limb_abi_size * 8;
     const val_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size);
@@ -5347,7 +5320,8 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
 }
 
 fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
-    const dst_ty = ptr_ty.childType();
+    const mod = self.bin_file.options.module.?;
+    const dst_ty = ptr_ty.childType(mod);
     switch (ptr_mcv) {
         .none,
         .unreach,
@@ -5382,20 +5356,21 @@ fn load(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
 }
 
 fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const elem_ty = self.air.typeOfIndex(inst);
+    const elem_ty = self.typeOfIndex(inst);
     const result: MCValue = result: {
-        if (!elem_ty.hasRuntimeBitsIgnoreComptime()) break :result .none;
+        if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
 
         try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
         const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
         defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
 
-        const ptr_ty = self.air.typeOf(ty_op.operand);
-        const elem_size = elem_ty.abiSize(self.target.*);
+        const ptr_ty = self.typeOf(ty_op.operand);
+        const elem_size = elem_ty.abiSize(mod);
 
-        const elem_rc = regClassForType(elem_ty);
-        const ptr_rc = regClassForType(ptr_ty);
+        const elem_rc = regClassForType(elem_ty, mod);
+        const ptr_rc = regClassForType(ptr_ty, mod);
 
         const ptr_mcv = try self.resolveInst(ty_op.operand);
         const dst_mcv = if (elem_size <= 8 and elem_rc.supersetOf(ptr_rc) and
@@ -5405,7 +5380,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
         else
             try self.allocRegOrMem(inst, true);
 
-        if (ptr_ty.ptrInfo().data.host_size > 0) {
+        if (ptr_ty.ptrInfo(mod).host_size > 0) {
             try self.packedLoad(dst_mcv, ptr_ty, ptr_mcv);
         } else {
             try self.load(dst_mcv, ptr_ty, ptr_mcv);
@@ -5416,13 +5391,14 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
-    const ptr_info = ptr_ty.ptrInfo().data;
-    const src_ty = ptr_ty.childType();
+    const mod = self.bin_file.options.module.?;
+    const ptr_info = ptr_ty.ptrInfo(mod);
+    const src_ty = ptr_ty.childType(mod);
 
     const limb_abi_size: u16 = @min(ptr_info.host_size, 8);
     const limb_abi_bits = limb_abi_size * 8;
 
-    const src_bit_size = src_ty.bitSize(self.target.*);
+    const src_bit_size = src_ty.bitSize(mod);
     const src_byte_off = @intCast(i32, ptr_info.bit_offset / limb_abi_bits * limb_abi_size);
     const src_bit_off = ptr_info.bit_offset % limb_abi_bits;
@@ -5489,7 +5465,8 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In
 }
 
 fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
-    const src_ty = ptr_ty.childType();
+    const mod = self.bin_file.options.module.?;
+    const src_ty = ptr_ty.childType(mod);
     switch (ptr_mcv) {
         .none,
         .unreach,
@@ -5524,6 +5501,7 @@ fn store(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerErr
 }
 
 fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
+    const mod = self.bin_file.options.module.?;
     if (safety) {
         // TODO if the value is undef, write 0xaa bytes to dest
     } else {
@@ -5531,9 +5509,9 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
     }
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
     const ptr_mcv = try self.resolveInst(bin_op.lhs);
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
+    const ptr_ty = self.typeOf(bin_op.lhs);
     const src_mcv = try self.resolveInst(bin_op.rhs);
-    if (ptr_ty.ptrInfo().data.host_size > 0) {
+    if (ptr_ty.ptrInfo(mod).host_size > 0) {
         try self.packedStore(ptr_ty, ptr_mcv, src_mcv);
     } else {
         try self.store(ptr_ty, ptr_mcv, src_mcv);
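Note (illustration, not part of the diff): packedLoad and packedStore split a
packed pointer's bit_offset into a limb-aligned byte offset plus a residual bit
offset within that limb, exactly as in the expressions above. A standalone
sketch of that arithmetic:

    const std = @import("std");

    test "packed access splits a bit offset into limb and bit parts" {
        const limb_abi_size: u32 = 8;
        const limb_abi_bits: u32 = limb_abi_size * 8;
        const bit_offset: u32 = 70;
        // Byte offset of the containing 64-bit limb, then the offset inside it.
        try std.testing.expectEqual(@as(u32, 8), bit_offset / limb_abi_bits * limb_abi_size);
        try std.testing.expectEqual(@as(u32, 6), bit_offset % limb_abi_bits);
    }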
@@ -5555,14 +5533,15 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
 }
 
 fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32) !MCValue {
-    const ptr_field_ty = self.air.typeOfIndex(inst);
-    const ptr_container_ty = self.air.typeOf(operand);
-    const container_ty = ptr_container_ty.childType();
-    const field_offset = @intCast(i32, switch (container_ty.containerLayout()) {
-        .Auto, .Extern => container_ty.structFieldOffset(index, self.target.*),
-        .Packed => if (container_ty.zigTypeTag() == .Struct and
-            ptr_field_ty.ptrInfo().data.host_size == 0)
-            container_ty.packedStructFieldByteOffset(index, self.target.*)
+    const mod = self.bin_file.options.module.?;
+    const ptr_field_ty = self.typeOfIndex(inst);
+    const ptr_container_ty = self.typeOf(operand);
+    const container_ty = ptr_container_ty.childType(mod);
+    const field_offset = @intCast(i32, switch (container_ty.containerLayout(mod)) {
+        .Auto, .Extern => container_ty.structFieldOffset(index, mod),
+        .Packed => if (container_ty.zigTypeTag(mod) == .Struct and
+            ptr_field_ty.ptrInfo(mod).host_size == 0)
+            container_ty.packedStructFieldByteOffset(index, mod)
         else
             0,
     });
@@ -5577,24 +5556,25 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32
 }
 
 fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
     const result: MCValue = result: {
         const operand = extra.struct_operand;
         const index = extra.field_index;
 
-        const container_ty = self.air.typeOf(operand);
-        const container_rc = regClassForType(container_ty);
-        const field_ty = container_ty.structFieldType(index);
-        if (!field_ty.hasRuntimeBitsIgnoreComptime()) break :result .none;
-        const field_rc = regClassForType(field_ty);
+        const container_ty = self.typeOf(operand);
+        const container_rc = regClassForType(container_ty, mod);
+        const field_ty = container_ty.structFieldType(index, mod);
+        if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) break :result .none;
+        const field_rc = regClassForType(field_ty, mod);
         const field_is_gp = field_rc.supersetOf(gp);
 
         const src_mcv = try self.resolveInst(operand);
-        const field_off = switch (container_ty.containerLayout()) {
-            .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, self.target.*) * 8),
-            .Packed => if (container_ty.castTag(.@"struct")) |struct_obj|
-                struct_obj.data.packedFieldBitOffset(self.target.*, index)
+        const field_off = switch (container_ty.containerLayout(mod)) {
+            .Auto, .Extern => @intCast(u32, container_ty.structFieldOffset(index, mod) * 8),
+            .Packed => if (mod.typeToStruct(container_ty)) |struct_obj|
+                struct_obj.packedFieldBitOffset(mod, index)
            else
                0,
        };
@@ -5611,7 +5591,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
             break :result dst_mcv;
         }
 
-        const field_abi_size = @intCast(u32, field_ty.abiSize(self.target.*));
+        const field_abi_size = @intCast(u32, field_ty.abiSize(mod));
         const limb_abi_size: u32 = @min(field_abi_size, 8);
         const limb_abi_bits = limb_abi_size * 8;
         const field_byte_off = @intCast(i32, field_off / limb_abi_bits * limb_abi_size);
@@ -5733,12 +5713,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
+    const mod = self.bin_file.options.module.?;
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
-    const inst_ty = self.air.typeOfIndex(inst);
-    const parent_ty = inst_ty.childType();
-    const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, self.target.*));
+    const inst_ty = self.typeOfIndex(inst);
+    const parent_ty = inst_ty.childType(mod);
+    const field_offset = @intCast(i32, parent_ty.structFieldOffset(extra.field_index, mod));
 
     const src_mcv = try self.resolveInst(extra.field_ptr);
     const dst_mcv = if (src_mcv.isRegisterOffset() and
@@ -5751,9 +5732,10 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
-    const src_ty = self.air.typeOf(src_air);
+    const mod = self.bin_file.options.module.?;
+    const src_ty = self.typeOf(src_air);
     const src_mcv = try self.resolveInst(src_air);
-    if (src_ty.zigTypeTag() == .Vector) {
+    if (src_ty.zigTypeTag(mod) == .Vector) {
         return self.fail("TODO implement genUnOp for {}", .{src_ty.fmt(self.bin_file.options.module.?)});
     }
@@ -5786,28 +5768,22 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
     switch (tag) {
         .not => {
-            const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(self.target.*), 8));
-            const int_info = if (src_ty.tag() == .bool)
+            const limb_abi_size = @intCast(u16, @min(src_ty.abiSize(mod), 8));
+            const int_info = if (src_ty.ip_index == .bool_type)
                 std.builtin.Type.Int{ .signedness = .unsigned, .bits = 1 }
             else
-                src_ty.intInfo(self.target.*);
+                src_ty.intInfo(mod);
             var byte_off: i32 = 0;
             while (byte_off * 8 < int_info.bits) : (byte_off += limb_abi_size) {
-                var limb_pl = Type.Payload.Bits{
-                    .base = .{ .tag = switch (int_info.signedness) {
-                        .signed => .int_signed,
-                        .unsigned => .int_unsigned,
-                    } },
-                    .data = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8)),
-                };
-                const limb_ty = Type.initPayload(&limb_pl.base);
+                const limb_bits = @intCast(u16, @min(int_info.bits - byte_off * 8, limb_abi_size * 8));
+                const limb_ty = try mod.intType(int_info.signedness, limb_bits);
                 const limb_mcv = switch (byte_off) {
                     0 => dst_mcv,
                     else => dst_mcv.address().offset(byte_off).deref(),
                 };
 
-                if (limb_pl.base.tag == .int_unsigned and self.regExtraBits(limb_ty) > 0) {
-                    const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_pl.data);
+                if (int_info.signedness == .unsigned and self.regExtraBits(limb_ty) > 0) {
+                    const mask = @as(u64, math.maxInt(u64)) >> @intCast(u6, 64 - limb_bits);
                     try self.genBinOpMir(.{ ._, .xor }, limb_ty, limb_mcv, .{ .immediate = mask });
                 } else try self.genUnOpMir(.{ ._, .not }, limb_ty, limb_mcv);
             }
@@ -5819,7 +5795,8 @@ fn genUnOp(self: *Self, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air:
 }
 
 fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MCValue) !void {
-    const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const abi_size = @intCast(u32, dst_ty.abiSize(mod));
     if (abi_size > 8) return self.fail("TODO implement {} for {}", .{
         mir_tag,
         dst_ty.fmt(self.bin_file.options.module.?),
@@ -5866,6 +5843,7 @@ fn genShiftBinOpMir(
     lhs_mcv: MCValue,
     shift_mcv: MCValue,
 ) !void {
+    const mod = self.bin_file.options.module.?;
     const rhs_mcv: MCValue = rhs: {
         switch (shift_mcv) {
             .immediate => |imm| switch (imm) {
@@ -5880,7 +5858,7 @@ fn genShiftBinOpMir(
         break :rhs .{ .register = .rcx };
     };
 
-    const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+    const abi_size = @intCast(u32, ty.abiSize(mod));
     if (abi_size <= 8) {
         switch (lhs_mcv) {
             .register => |lhs_reg| switch (rhs_mcv) {
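Note (illustration, not part of the diff): for unsigned limbs with unused
register bits, genUnOp implements `not` as xor against a mask of only the
valid bits, so the padding bits stay zero. A standalone sketch of that mask:

    const std = @import("std");

    test "not of a u20 as xor with its value mask" {
        const bits = 20;
        const mask: u64 = std.math.maxInt(u64) >> (64 - bits);
        const x: u64 = 0x12345;
        // Equivalent to ~x truncated to 20 bits.
        try std.testing.expectEqual(@as(u64, 0xEDCBA), x ^ mask);
    }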
@@ -6099,13 +6077,14 @@ fn genShiftBinOp(
     lhs_ty: Type,
     rhs_ty: Type,
 ) !MCValue {
-    if (lhs_ty.zigTypeTag() == .Vector) {
+    const mod = self.bin_file.options.module.?;
+    if (lhs_ty.zigTypeTag(mod) == .Vector) {
         return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()});
     }
 
-    assert(rhs_ty.abiSize(self.target.*) == 1);
+    assert(rhs_ty.abiSize(mod) == 1);
 
-    const lhs_abi_size = lhs_ty.abiSize(self.target.*);
+    const lhs_abi_size = lhs_ty.abiSize(mod);
     if (lhs_abi_size > 16) {
         return self.fail("TODO implement genShiftBinOp for {}", .{lhs_ty.fmtDebug()});
     }
@@ -6136,7 +6115,7 @@ fn genShiftBinOp(
         break :dst dst_mcv;
     };
 
-    const signedness = lhs_ty.intInfo(self.target.*).signedness;
+    const signedness = lhs_ty.intInfo(mod).signedness;
     try self.genShiftBinOpMir(switch (air_tag) {
         .shl, .shl_exact => switch (signedness) {
             .signed => .{ ._l, .sa },
@@ -6163,11 +6142,12 @@ fn genMulDivBinOp(
     lhs: MCValue,
     rhs: MCValue,
 ) !MCValue {
-    if (dst_ty.zigTypeTag() == .Vector or dst_ty.zigTypeTag() == .Float) {
+    const mod = self.bin_file.options.module.?;
+    if (dst_ty.zigTypeTag(mod) == .Vector or dst_ty.zigTypeTag(mod) == .Float) {
         return self.fail("TODO implement genMulDivBinOp for {}", .{dst_ty.fmtDebug()});
     }
-    const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
-    const src_abi_size = @intCast(u32, src_ty.abiSize(self.target.*));
+    const dst_abi_size = @intCast(u32, dst_ty.abiSize(mod));
+    const src_abi_size = @intCast(u32, src_ty.abiSize(mod));
     if (switch (tag) {
         else => unreachable,
         .mul, .mulwrap => dst_abi_size != src_abi_size and dst_abi_size != src_abi_size * 2,
@@ -6184,7 +6164,7 @@ fn genMulDivBinOp(
     const reg_locks = self.register_manager.lockRegs(2, .{ .rax, .rdx });
     defer for (reg_locks) |reg_lock| if (reg_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const signedness = ty.intInfo(self.target.*).signedness;
+    const signedness = ty.intInfo(mod).signedness;
     switch (tag) {
         .mul,
         .mulwrap,
@@ -6338,13 +6318,14 @@ fn genBinOp(
     lhs_air: Air.Inst.Ref,
     rhs_air: Air.Inst.Ref,
 ) !MCValue {
-    const lhs_ty = self.air.typeOf(lhs_air);
-    const rhs_ty = self.air.typeOf(rhs_air);
-    const abi_size = @intCast(u32, lhs_ty.abiSize(self.target.*));
+    const mod = self.bin_file.options.module.?;
+    const lhs_ty = self.typeOf(lhs_air);
+    const rhs_ty = self.typeOf(rhs_air);
+    const abi_size = @intCast(u32, lhs_ty.abiSize(mod));
 
     const maybe_mask_reg = switch (air_tag) {
         else => null,
-        .max, .min => if (lhs_ty.scalarType().isRuntimeFloat()) registerAlias(
+        .max, .min => if (lhs_ty.scalarType(mod).isRuntimeFloat()) registerAlias(
             if (!self.hasFeature(.avx) and self.hasFeature(.sse4_1)) mask: {
                 try self.register_manager.getReg(.xmm0, null);
                 break :mask .xmm0;
@@ -6384,7 +6365,7 @@ fn genBinOp(
         else => false,
     };
 
-    const vec_op = switch (lhs_ty.zigTypeTag()) {
+    const vec_op = switch (lhs_ty.zigTypeTag(mod)) {
         else => false,
         .Float, .Vector => true,
     };
@@ -6456,7 +6437,7 @@ fn genBinOp(
             const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
             defer self.register_manager.unlockReg(tmp_lock);
 
-            const elem_size = lhs_ty.elemType2().abiSize(self.target.*);
+            const elem_size = lhs_ty.elemType2(mod).abiSize(mod);
             try self.genIntMulComplexOpMir(rhs_ty, tmp_mcv, .{ .immediate = elem_size });
             try self.genBinOpMir(
                 switch (air_tag) {
@@ -6506,7 +6487,7 @@ fn genBinOp(
 
                 try self.genBinOpMir(.{ ._, .cmp }, lhs_ty, dst_mcv, mat_src_mcv);
 
-                const int_info = lhs_ty.intInfo(self.target.*);
+                const int_info = lhs_ty.intInfo(mod);
                 const cc: Condition = switch (int_info.signedness) {
                     .unsigned => switch (air_tag) {
                         .min => .a,
@@ -6520,7 +6501,7 @@ fn genBinOp(
                    },
                 };
 
-                const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(self.target.*)), 2);
+                const cmov_abi_size = @max(@intCast(u32, lhs_ty.abiSize(mod)), 2);
                 const tmp_reg = switch (dst_mcv) {
                     .register => |reg| reg,
                     else => try self.copyToTmpRegister(lhs_ty, dst_mcv),
@@ -6581,7 +6562,7 @@ fn genBinOp(
     }
 
     const dst_reg = registerAlias(dst_mcv.getReg().?, abi_size);
-    const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+    const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
         else => unreachable,
         .Float => switch (lhs_ty.floatBits(self.target.*)) {
             16 => if (self.hasFeature(.f16c)) {
@@ -6657,10 +6638,10 @@ fn genBinOp(
             80, 128 => null,
             else => unreachable,
         },
-        .Vector => switch (lhs_ty.childType().zigTypeTag()) {
+        .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
             else => null,
-            .Int => switch (lhs_ty.childType().intInfo(self.target.*).bits) {
-                8 => switch (lhs_ty.vectorLen()) {
+            .Int => switch (lhs_ty.childType(mod).intInfo(mod).bits) {
+                8 => switch (lhs_ty.vectorLen(mod)) {
                     1...16 => switch (air_tag) {
                         .add,
                         .addwrap,
@@ -6671,7 +6652,7 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
                         .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
                         .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
-                        .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_b, .mins }
                            else if (self.hasFeature(.sse4_1))
@@ -6685,7 +6666,7 @@ fn genBinOp(
                            else
                                null,
                        },
-                        .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_b, .maxs }
                            else if (self.hasFeature(.sse4_1))
@@ -6711,11 +6692,11 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
                         .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
                         .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
-                        .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .mins } else null,
                             .unsigned => if (self.hasFeature(.avx)) .{ .vp_b, .minu } else null,
                        },
-                        .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null,
                             .unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null,
                        },
@@ -6723,7 +6704,7 @@ fn genBinOp(
                    },
                     else => null,
                 },
-                16 => switch (lhs_ty.vectorLen()) {
+                16 => switch (lhs_ty.vectorLen(mod)) {
                     1...8 => switch (air_tag) {
                         .add,
                         .addwrap,
@@ -6737,7 +6718,7 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
                         .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
                         .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
-                        .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_w, .mins }
                            else
@@ -6747,7 +6728,7 @@ fn genBinOp(
                            else
                                .{ .p_w, .minu },
                        },
-                        .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_w, .maxs }
                            else
@@ -6772,11 +6753,11 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
                         .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
                         .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
-                        .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .mins } else null,
                             .unsigned => if (self.hasFeature(.avx)) .{ .vp_w, .minu } else null,
                        },
-                        .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null,
                             .unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null,
                        },
@@ -6784,7 +6765,7 @@ fn genBinOp(
                    },
                     else => null,
                 },
-                32 => switch (lhs_ty.vectorLen()) {
+                32 => switch (lhs_ty.vectorLen(mod)) {
                     1...4 => switch (air_tag) {
                         .add,
                         .addwrap,
@@ -6803,7 +6784,7 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
                         .bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
                         .xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
-                        .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_d, .mins }
                            else if (self.hasFeature(.sse4_1))
@@ -6817,7 +6798,7 @@ fn genBinOp(
                            else
                                null,
                        },
-                        .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx))
                                 .{ .vp_d, .maxs }
                            else if (self.hasFeature(.sse4_1))
@@ -6846,11 +6827,11 @@ fn genBinOp(
                         .bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
                         .bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
                         .xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
-                        .min => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .mins } else null,
                             .unsigned => if (self.hasFeature(.avx)) .{ .vp_d, .minu } else null,
                        },
-                        .max => switch (lhs_ty.childType().intInfo(self.target.*).signedness) {
+                        .max => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
                             .signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null,
                             .unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null,
                        },
@@ -6858,7 +6839,7 @@ fn genBinOp(
                    },
                     else => null,
                 },
-                64 => switch (lhs_ty.vectorLen()) {
+                64 => switch (lhs_ty.vectorLen(mod)) {
                     1...2 => switch (air_tag) {
                         .add,
                         .addwrap,
@@ -6887,8 +6868,8 @@ fn genBinOp(
                    },
                     else => null,
                 },
-            .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
-                16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen()) {
+            .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+                16 => if (self.hasFeature(.f16c)) switch (lhs_ty.vectorLen(mod)) {
                     1 => {
                         const tmp_reg = (try self.register_manager.allocReg(null, sse)).to128();
                         const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
@@ -7063,7 +7044,7 @@ fn genBinOp(
                    },
                     else => null,
                 } else null,
-                32 => switch (lhs_ty.vectorLen()) {
+                32 => switch (lhs_ty.vectorLen(mod)) {
                     1 => switch (air_tag) {
                         .add => if (self.hasFeature(.avx)) .{ .v_ss, .add } else .{ ._ss, .add },
                         .sub => if (self.hasFeature(.avx)) .{ .v_ss, .sub } else .{ ._ss, .sub },
@@ -7101,7 +7082,7 @@ fn genBinOp(
                    } else null,
                     else => null,
                 },
-                64 => switch (lhs_ty.vectorLen()) {
+                64 => switch (lhs_ty.vectorLen(mod)) {
                     1 => switch (air_tag) {
                         .add => if (self.hasFeature(.avx)) .{ .v_sd, .add } else .{ ._sd, .add },
                         .sub => if (self.hasFeature(.avx)) .{ .v_sd, .sub } else .{ ._sd, .sub },
@@ -7206,21 +7187,21 @@ fn genBinOp(
             const rhs_copy_reg = registerAlias(src_mcv.getReg().?, abi_size);
 
             try self.asmRegisterRegisterRegisterImmediate(
-                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
                     .Float => switch (lhs_ty.floatBits(self.target.*)) {
                         32 => .{ .v_ss, .cmp },
                         64 => .{ .v_sd, .cmp },
                         16, 80, 128 => null,
                         else => unreachable,
                    },
-                    .Vector => switch (lhs_ty.childType().zigTypeTag()) {
-                        .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
-                            32 => switch (lhs_ty.vectorLen()) {
+                    .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+                        .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+                            32 => switch (lhs_ty.vectorLen(mod)) {
                                 1 => .{ .v_ss, .cmp },
                                 2...8 => .{ .v_ps, .cmp },
                                 else => null,
                            },
-                            64 => switch (lhs_ty.vectorLen()) {
+                            64 => switch (lhs_ty.vectorLen(mod)) {
                                 1 => .{ .v_sd, .cmp },
                                 2...4 => .{ .v_pd, .cmp },
                                 else => null,
@@ -7240,20 +7221,20 @@ fn genBinOp(
                 Immediate.u(3), // unord
             );
             try self.asmRegisterRegisterRegisterRegister(
-                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
                     .Float => switch (lhs_ty.floatBits(self.target.*)) {
                         32 => .{ .v_ps, .blendv },
                         64 => .{ .v_pd, .blendv },
                         16, 80, 128 => null,
                         else => unreachable,
                    },
-                    .Vector => switch (lhs_ty.childType().zigTypeTag()) {
-                        .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
-                            32 => switch (lhs_ty.vectorLen()) {
+                    .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+                        .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+                            32 => switch (lhs_ty.vectorLen(mod)) {
                                 1...8 => .{ .v_ps, .blendv },
                                 else => null,
                            },
-                            64 => switch (lhs_ty.vectorLen()) {
+                            64 => switch (lhs_ty.vectorLen(mod)) {
                                 1...4 => .{ .v_pd, .blendv },
                                 else => null,
                            },
@@ -7274,21 +7255,21 @@ fn genBinOp(
         } else {
             const has_blend = self.hasFeature(.sse4_1);
             try self.asmRegisterRegisterImmediate(
-                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
                     .Float => switch (lhs_ty.floatBits(self.target.*)) {
                         32 => .{ ._ss, .cmp },
                         64 => .{ ._sd, .cmp },
                         16, 80, 128 => null,
                         else => unreachable,
                    },
-                    .Vector => switch (lhs_ty.childType().zigTypeTag()) {
-                        .Float => switch (lhs_ty.childType().floatBits(self.target.*)) {
-                            32 => switch (lhs_ty.vectorLen()) {
+                    .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) {
+                        .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) {
+                            32 => switch (lhs_ty.vectorLen(mod)) {
                                 1 => .{ ._ss, .cmp },
                                 2...4 => .{ ._ps, .cmp },
                                 else => null,
                            },
-                            64 => switch (lhs_ty.vectorLen()) {
+                            64 => switch (lhs_ty.vectorLen(mod)) {
                                 1 => .{ ._sd, .cmp },
                                 2 => .{ ._pd, .cmp },
                                 else => null,
@@ -7307,20 +7288,20 @@ fn genBinOp(
                 Immediate.u(if (has_blend) 3 else 7), // unord, ord
             );
             if (has_blend) try self.asmRegisterRegisterRegister(
-                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) {
+                if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) {
                     .Float => switch (lhs_ty.floatBits(self.target.*)) {
                         32 => .{ ._ps, .blendv },
                         64 => .{ ._pd, .blendv },
                         16, 80, 128 => null,
                         else => unreachable,
                    },
- .Vector => switch (lhs_ty.childType().zigTypeTag()) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .blendv }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .blendv }, else => null, }, @@ -7338,20 +7319,20 @@ fn genBinOp( mask_reg, ) else { try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .@"and" }, 64 => .{ ._pd, .@"and" }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .@"and" }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .@"and" }, else => null, }, @@ -7368,20 +7349,20 @@ fn genBinOp( mask_reg, ); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .andn }, 64 => .{ ._pd, .andn }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .andn }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .andn }, else => null, }, @@ -7398,20 +7379,20 @@ fn genBinOp( lhs_copy_reg.?, ); try self.asmRegisterRegister( - if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag()) { + if (@as(?Mir.Inst.FixedTag, switch (lhs_ty.zigTypeTag(mod)) { .Float => switch (lhs_ty.floatBits(self.target.*)) { 32 => .{ ._ps, .@"or" }, 64 => .{ ._pd, .@"or" }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (lhs_ty.childType().zigTypeTag()) { - .Float => switch (lhs_ty.childType().floatBits(self.target.*)) { - 32 => switch (lhs_ty.vectorLen()) { + .Vector => switch (lhs_ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (lhs_ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (lhs_ty.vectorLen(mod)) { 1...4 => .{ ._ps, .@"or" }, else => null, }, - 64 => switch (lhs_ty.vectorLen()) { + 64 => switch (lhs_ty.vectorLen(mod)) { 1...2 => .{ ._pd, .@"or" }, else => null, }, @@ -7442,7 +7423,8 @@ fn genBinOpMir( dst_mcv: MCValue, src_mcv: MCValue, ) !void { - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const mod = self.bin_file.options.module.?; + const abi_size = @intCast(u32, ty.abiSize(mod)); switch (dst_mcv) { .none, .unreach, @@ -7562,11 +7544,7 @@ fn genBinOpMir( .load_got, .load_tlv, => { - var ptr_pl = Type.Payload.ElemType{ - .base = .{ .tag = .single_const_pointer }, - .data = ty, - }; - const ptr_ty = 
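Review note on the genBinOp hunks above: the change is mechanical, not behavioral. Type queries that used to consult the target directly (`childType()`, `vectorLen()`, `intInfo(self.target.*)`) now take the `Module`, since type data lives in the InternPool. A minimal sketch of the pattern, assuming `self.bin_file.options.module` is populated as elsewhere in this backend:

    // Sketch only; restates the pattern applied throughout genBinOp.
    const mod = self.bin_file.options.module.?;
    const elem_ty = lhs_ty.childType(mod); // was: lhs_ty.childType()
    const len = lhs_ty.vectorLen(mod); // was: lhs_ty.vectorLen()
    const signedness = elem_ty.intInfo(mod).signedness; // was: intInfo(self.target.*)

Note that `floatBits(self.target.*)` keeps its target parameter throughout: float widths remain a target property rather than an InternPool one.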
@@ -7442,7 +7423,8 @@ fn genBinOpMir(
 dst_mcv: MCValue,
 src_mcv: MCValue,
 ) !void {
-const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+const mod = self.bin_file.options.module.?;
+const abi_size = @intCast(u32, ty.abiSize(mod));
 switch (dst_mcv) {
 .none,
 .unreach,
@@ -7562,11 +7544,7 @@ fn genBinOpMir(
 .load_got,
 .load_tlv,
 => {
-var ptr_pl = Type.Payload.ElemType{
-.base = .{ .tag = .single_const_pointer },
-.data = ty,
-};
-const ptr_ty = Type.initPayload(&ptr_pl.base);
+const ptr_ty = try mod.singleConstPtrType(ty);
 const addr_reg = try self.copyToTmpRegister(ptr_ty, src_mcv.address());
 return self.genBinOpMir(mir_tag, ty, dst_mcv, .{
 .indirect = .{ .reg = addr_reg },
@@ -7640,7 +7618,7 @@ fn genBinOpMir(
 defer if (src_info) |info| self.register_manager.unlockReg(info.addr_lock);
 
 const ty_signedness =
-if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned;
+if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned;
 const limb_ty = if (abi_size <= 8) ty else switch (ty_signedness) {
 .signed => Type.usize,
 .unsigned => Type.isize,
 };
@@ -7796,7 +7774,8 @@ fn genBinOpMir(
 /// Performs multi-operand integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv.
 /// Does not support byte-size operands.
 fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void {
-const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+const mod = self.bin_file.options.module.?;
+const abi_size = @intCast(u32, dst_ty.abiSize(mod));
 switch (dst_mcv) {
 .none,
 .unreach,
@@ -7896,6 +7875,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
 }
 
 fn airArg(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 // skip zero-bit arguments as they don't have a corresponding arg instruction
 var arg_index = self.arg_index;
 while (self.args[arg_index] == .none) arg_index += 1;
@@ -7909,9 +7889,9 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
 else => return self.fail("TODO implement arg for {}", .{dst_mcv}),
 }
 
-const ty = self.air.typeOfIndex(inst);
+const ty = self.typeOfIndex(inst);
 const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
-const name = self.owner.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
+const name = self.owner.mod_fn.getParamName(mod, src_index);
 try self.genArgDbgInfo(ty, name, dst_mcv);
 
 break :result dst_mcv;
@@ -7920,6 +7900,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
+const mod = self.bin_file.options.module.?;
 switch (self.debug_output) {
 .dwarf => |dw| {
 const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) {
@@ -7938,7 +7919,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
 // TODO: this might need adjusting like the linkers do.
 // Instead of flattening the owner and passing Decl.Index here we may
 // want to special case LazySymbol in DWARF linker too.
-try dw.genArgDbgInfo(name, ty, self.owner.getDecl(), loc);
+try dw.genArgDbgInfo(name, ty, self.owner.getDecl(mod), loc);
 },
 .plan9 => {},
 .none => {},
@@ -7952,6 +7933,7 @@ fn genVarDbgInfo(
 mcv: MCValue,
 name: [:0]const u8,
 ) !void {
+const mod = self.bin_file.options.module.?;
 const is_ptr = switch (tag) {
 .dbg_var_ptr => true,
 .dbg_var_val => false,
@@ -7982,7 +7964,7 @@ fn genVarDbgInfo(
 // TODO: this might need adjusting like the linkers do.
 // Instead of flattening the owner and passing Decl.Index here we may
 // want to special case LazySymbol in DWARF linker too.
-try dw.genVarDbgInfo(name, ty, self.owner.getDecl(), is_ptr, loc);
+try dw.genVarDbgInfo(name, ty, self.owner.getDecl(mod), is_ptr, loc);
 },
 .plan9 => {},
 .none => {},
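The genBinOpMir hunk above also replaces the old stack-constructed pointer type with an interned one. Before, a `Type.Payload.ElemType` was built on the stack and wrapped with `Type.initPayload`; now the pointer type is requested from the Module:

    // Restating the change, with the removed pattern kept as a comment.
    // Before:
    //   var ptr_pl = Type.Payload.ElemType{
    //       .base = .{ .tag = .single_const_pointer },
    //       .data = ty,
    //   };
    //   const ptr_ty = Type.initPayload(&ptr_pl.base);
    // After:
    const ptr_ty = try mod.singleConstPtrType(ty);

The new form is fallible (`try`) because interning may allocate.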
@@ -8022,20 +8004,23 @@ fn airFence(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
+const mod = self.bin_file.options.module.?;
 if (modifier == .always_tail) return self.fail("TODO implement tail calls for x86_64", .{});
 const pl_op = self.air.instructions.items(.data)[inst].pl_op;
 const callee = pl_op.operand;
 const extra = self.air.extraData(Air.Call, pl_op.payload);
 const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
-const ty = self.air.typeOf(callee);
+const ty = self.typeOf(callee);
 
-const fn_ty = switch (ty.zigTypeTag()) {
+const fn_ty = switch (ty.zigTypeTag(mod)) {
 .Fn => ty,
-.Pointer => ty.childType(),
+.Pointer => ty.childType(mod),
 else => unreachable,
 };
 
-var info = try self.resolveCallingConventionValues(fn_ty, args[fn_ty.fnParamLen()..], .call_frame);
+const fn_info = mod.typeToFunc(fn_ty).?;
+
+var info = try self.resolveCallingConventionValues(fn_info, args[fn_info.param_types.len..], .call_frame);
 defer info.deinit(self);
 
 // We need a properly aligned and sized call frame to be able to call this function.
@@ -8062,7 +8047,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 else => unreachable,
 }
 for (args, info.args) |arg, mc_arg| {
-const arg_ty = self.air.typeOf(arg);
+const arg_ty = self.typeOf(arg);
 const arg_mcv = try self.resolveInst(arg);
 switch (mc_arg) {
 .none => {},
@@ -8076,8 +8061,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 const ret_lock = switch (info.return_value.long) {
 .none, .unreach => null,
 .indirect => |reg_off| lock: {
-const ret_ty = fn_ty.fnReturnType();
-const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, self.target.*));
+const ret_ty = fn_info.return_type.toType();
+const frame_index = try self.allocFrameIndex(FrameAlloc.initType(ret_ty, mod));
 try self.genSetReg(reg_off.reg, Type.usize, .{
 .lea_frame = .{ .index = frame_index, .off = -reg_off.off },
 });
@@ -8089,7 +8074,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 defer if (ret_lock) |lock| self.register_manager.unlockReg(lock);
 
 for (args, info.args) |arg, mc_arg| {
-const arg_ty = self.air.typeOf(arg);
+const arg_ty = self.typeOf(arg);
 const arg_mcv = try self.resolveInst(arg);
 switch (mc_arg) {
 .none, .load_frame => {},
@@ -8100,15 +8085,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 // Due to incremental compilation, how function calls are generated depends
 // on linking.
-const mod = self.bin_file.options.module.?;
-if (self.air.value(callee)) |func_value| {
-if (if (func_value.castTag(.function)) |func_payload|
-func_payload.data.owner_decl
-else if (func_value.castTag(.decl_ref)) |decl_ref_payload|
-decl_ref_payload.data
-else
-null) |owner_decl|
-{
+if (try self.air.value(callee, mod)) |func_value| {
+const func_key = mod.intern_pool.indexToKey(func_value.ip_index);
+if (switch (func_key) {
+.func => |func| mod.funcPtr(func.index).owner_decl,
+.ptr => |ptr| switch (ptr.addr) {
+.decl => |decl| decl,
+else => null,
+},
+else => null,
+}) |owner_decl| {
 if (self.bin_file.cast(link.File.Elf)) |elf_file| {
 const atom_index = try elf_file.getOrCreateAtomForDecl(owner_decl);
 const atom = elf_file.getAtom(atom_index);
@@ -8141,10 +8127,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 .disp = @intCast(i32, fn_got_addr),
 }));
 } else unreachable;
-} else if (func_value.castTag(.extern_fn)) |func_payload| {
-const extern_fn = func_payload.data;
-const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0);
-const lib_name = mem.sliceTo(extern_fn.lib_name, 0);
+} else if (func_value.getExternFunc(mod)) |extern_func| {
+const decl_name = mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name);
+const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name);
 if (self.bin_file.cast(link.File.Coff)) |coff_file| {
 const atom_index = try self.owner.getSymbolIndex(self);
 const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name);
@@ -8178,7 +8163,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 return self.fail("TODO implement calling bitcasted functions", .{});
 }
 } else {
-assert(ty.zigTypeTag() == .Pointer);
+assert(ty.zigTypeTag(mod) == .Pointer);
 const mcv = try self.resolveInst(callee);
 try self.genSetReg(.rax, Type.usize, mcv);
 try self.asmRegister(.{ ._, .call }, .rax);
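The airCall hunk above changes how the callee is classified. Instead of `castTag` probes on legacy value payloads, the callee's `Value` is resolved through the InternPool key, and the owner `Decl` is extracted from either a function or a decl pointer:

    // Sketch of the new resolution, restating the hunk; key and field names as shown there.
    if (try self.air.value(callee, mod)) |func_value| {
        const owner_decl = switch (mod.intern_pool.indexToKey(func_value.ip_index)) {
            .func => |func| mod.funcPtr(func.index).owner_decl,
            .ptr => |ptr| switch (ptr.addr) {
                .decl => |decl| decl,
                else => null,
            },
            else => null,
        };
        _ = owner_decl; // linker-specific dispatch follows, as in the hunk
    }

Extern functions take the other branch via `getExternFunc`, with names now read out of the InternPool string table (`stringToSlice` / `stringToSliceUnwrap`) instead of `mem.sliceTo` on raw decl names.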
@@ -8193,9 +8178,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
 }
 
 fn airRet(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const operand = try self.resolveInst(un_op);
-const ret_ty = self.fn_type.fnReturnType();
+const ret_ty = self.fn_type.fnReturnType(mod);
 switch (self.ret_mcv.short) {
 .none => {},
 .register => try self.genCopy(ret_ty, self.ret_mcv.short, operand),
@@ -8219,7 +8205,7 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
 fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const ptr = try self.resolveInst(un_op);
-const ptr_ty = self.air.typeOf(un_op);
+const ptr_ty = self.typeOf(un_op);
 switch (self.ret_mcv.short) {
 .none => {},
 .register => try self.load(self.ret_mcv.short, ptr_ty, ptr),
@@ -8234,8 +8220,9 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
+const mod = self.bin_file.options.module.?;
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-const ty = self.air.typeOf(bin_op.lhs);
+const ty = self.typeOf(bin_op.lhs);
 
 try self.spillEflagsIfOccupied();
 self.eflags_inst = inst;
@@ -8255,9 +8242,9 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
 defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
 
 const result = MCValue{
-.eflags = switch (ty.zigTypeTag()) {
+.eflags = switch (ty.zigTypeTag(mod)) {
 else => result: {
-const abi_size = @intCast(u16, ty.abiSize(self.target.*));
+const abi_size = @intCast(u16, ty.abiSize(mod));
 const may_flip: enum {
 may_flip,
 must_flip,
@@ -8290,7 +8277,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
 defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
 
 break :result Condition.fromCompareOperator(
-if (ty.isAbiInt()) ty.intInfo(self.target.*).signedness else .unsigned,
+if (ty.isAbiInt(mod)) ty.intInfo(mod).signedness else .unsigned,
 result_op: {
 const flipped_op = if (flipped) op.reverse() else op;
 if (abi_size > 8) switch (flipped_op) {
@@ -8404,7 +8391,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
 try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp2_reg, tmp1_reg);
 try self.genBinOpMir(.{ ._ss, .ucomi }, ty, tmp1_mcv, tmp2_mcv);
 } else return self.fail("TODO implement airCmp for {}", .{
-ty.fmt(self.bin_file.options.module.?),
+ty.fmt(mod),
 }),
 32 => try self.genBinOpMir(
 .{ ._ss, .ucomi },
@@ -8419,7 +8406,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
 src_mcv,
 ),
 else => return self.fail("TODO implement airCmp for {}", .{
-ty.fmt(self.bin_file.options.module.?),
+ty.fmt(mod),
 }),
 }
 
@@ -8453,8 +8440,8 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
 try self.spillEflagsIfOccupied();
 self.eflags_inst = inst;
 
-const op_ty = self.air.typeOf(un_op);
-const op_abi_size = @intCast(u32, op_ty.abiSize(self.target.*));
+const op_ty = self.typeOf(un_op);
+const op_abi_size = @intCast(u32, op_ty.abiSize(mod));
 const op_mcv = try self.resolveInst(un_op);
 const dst_reg = switch (op_mcv) {
 .register => |reg| reg,
@@ -8473,16 +8460,17 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
 const pl_op = self.air.instructions.items(.data)[inst].pl_op;
 const extra = self.air.extraData(Air.Try, pl_op.payload);
 const body = self.air.extra[extra.end..][0..extra.data.body_len];
-const err_union_ty = self.air.typeOf(pl_op.operand);
+const err_union_ty = self.typeOf(pl_op.operand);
 const result = try self.genTry(inst, pl_op.operand, body, err_union_ty, false);
 return self.finishAir(inst, result, .{ .none, .none, .none });
 }
 
 fn airTryPtr(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
 const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
 const body = self.air.extra[extra.end..][0..extra.data.body_len];
-const err_union_ty = self.air.typeOf(extra.data.ptr).childType();
+const err_union_ty = self.typeOf(extra.data.ptr).childType(mod);
 const result = try self.genTry(inst, extra.data.ptr, body, err_union_ty, true);
 return self.finishAir(inst, result, .{ .none, .none, .none });
 }
@@ -8546,8 +8534,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
-const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
+const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
+const mod = self.bin_file.options.module.?;
+const function = mod.funcPtr(ty_fn.func);
 // TODO emit debug info for function change
 _ = function;
 return self.finishAir(inst, .unreach, .{ .none, .none, .none });
@@ -8561,7 +8550,7 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void {
 fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
 const pl_op = self.air.instructions.items(.data)[inst].pl_op;
 const operand = pl_op.operand;
-const ty = self.air.typeOf(operand);
+const ty = self.typeOf(operand);
 const mcv = try self.resolveInst(operand);
 
 const name = self.air.nullTerminatedString(pl_op.payload);
@@ -8573,7 +8562,8 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
-const abi_size = ty.abiSize(self.target.*);
+const mod = self.bin_file.options.module.?;
+const abi_size = ty.abiSize(mod);
 switch (mcv) {
 .eflags => |cc| {
 // Here we map the opposites since the jump is to the false branch.
@@ -8602,7 +8592,7 @@ fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
 fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 const pl_op = self.air.instructions.items(.data)[inst].pl_op;
 const cond = try self.resolveInst(pl_op.operand);
-const cond_ty = self.air.typeOf(pl_op.operand);
+const cond_ty = self.typeOf(pl_op.operand);
 const extra = self.air.extraData(Air.CondBr, pl_op.payload);
 const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
 const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
@@ -8646,6 +8636,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MCValue {
+const mod = self.bin_file.options.module.?;
 switch (opt_mcv) {
 .register_overflow => |ro| return .{ .eflags = ro.eflags.negate() },
 else => {},
@@ -8654,14 +8645,12 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
 try self.spillEflagsIfOccupied();
 self.eflags_inst = inst;
 
-var pl_buf: Type.Payload.ElemType = undefined;
-const pl_ty = opt_ty.optionalChild(&pl_buf);
+const pl_ty = opt_ty.optionalChild(mod);
 
-var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
-const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload())
-.{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty }
+const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
+.{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
 else
-.{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool };
+.{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool };
 
 switch (opt_mcv) {
 .none,
@@ -8681,14 +8670,14 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
 
 .register => |opt_reg| {
 if (some_info.off == 0) {
-const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*));
+const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
 const alias_reg = registerAlias(opt_reg, some_abi_size);
 assert(some_abi_size * 8 == alias_reg.bitSize());
 try self.asmRegisterRegister(.{ ._, .@"test" }, alias_reg, alias_reg);
 return .{ .eflags = .z };
 }
-assert(some_info.ty.tag() == .bool);
-const opt_abi_size = @intCast(u32, opt_ty.abiSize(self.target.*));
+assert(some_info.ty.ip_index == .bool_type);
+const opt_abi_size = @intCast(u32, opt_ty.abiSize(mod));
 try self.asmRegisterImmediate(
 .{ ._, .bt },
 registerAlias(opt_reg, opt_abi_size),
@@ -8707,7 +8696,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
 defer self.register_manager.unlockReg(addr_reg_lock);
 
 try self.genSetReg(addr_reg, Type.usize, opt_mcv.address());
-const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*));
+const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
 try self.asmMemoryImmediate(
 .{ ._, .cmp },
 Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{
@@ -8720,7 +8709,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
 },
 
 .indirect, .load_frame => {
-const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*));
+const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
 try self.asmMemoryImmediate(
 .{ ._, .cmp },
 Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) {
@@ -8742,18 +8731,17 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
 }
 
 fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
+const mod = self.bin_file.options.module.?;
 try self.spillEflagsIfOccupied();
 self.eflags_inst = inst;
 
-const opt_ty = ptr_ty.childType();
-var pl_buf: Type.Payload.ElemType = undefined;
-const pl_ty = opt_ty.optionalChild(&pl_buf);
+const opt_ty = ptr_ty.childType(mod);
+const pl_ty = opt_ty.optionalChild(mod);
 
-var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
-const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload())
-.{ .off = 0, .ty = if (pl_ty.isSlice()) pl_ty.slicePtrFieldType(&ptr_buf) else pl_ty }
+const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
+.{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
 else
-.{ .off = @intCast(i32, pl_ty.abiSize(self.target.*)), .ty = Type.bool };
+.{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool };
 
 const ptr_reg = switch (ptr_mcv) {
 .register => |reg| reg,
@@ -8762,7 +8750,7 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
 const ptr_lock = self.register_manager.lockReg(ptr_reg);
 defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
-const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*));
+const some_abi_size = @intCast(u32, some_info.ty.abiSize(mod));
 try self.asmMemoryImmediate(
 .{ ._, .cmp },
 Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{
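isNull and isNullPtr above share one idea: decide where the "is some" information lives. When the optional is represented as its payload (pointer-like optionals), the payload itself is compared against zero at offset 0, using the slice's pointer field when the payload is a slice; otherwise a `bool` stored right after the payload is tested:

    // Restating the probe computed in both functions above.
    const some_info: struct { off: i32, ty: Type } = if (opt_ty.optionalReprIsPayload(mod))
        .{ .off = 0, .ty = if (pl_ty.isSlice(mod)) pl_ty.slicePtrFieldType(mod) else pl_ty }
    else
        .{ .off = @intCast(i32, pl_ty.abiSize(mod)), .ty = Type.bool };

The old code needed caller-provided buffers (`Type.Payload.ElemType`, `Type.SlicePtrFieldTypeBuffer`) to materialize these types; with the InternPool they are returned directly.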
@@ -8775,9 +8763,10 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
 }
 
 fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
-const err_type = ty.errorUnionSet();
+const mod = self.bin_file.options.module.?;
+const err_type = ty.errorUnionSet(mod);
 
-if (err_type.errorSetIsEmpty()) {
+if (err_type.errorSetIsEmpty(mod)) {
 return MCValue{ .immediate = 0 }; // always false
 }
 
@@ -8786,7 +8775,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !
 self.eflags_inst = inst;
 }
 
-const err_off = errUnionErrorOffset(ty.errorUnionPayload(), self.target.*);
+const err_off = errUnionErrorOffset(ty.errorUnionPayload(mod), mod);
 switch (operand) {
 .register => |reg| {
 const eu_lock = self.register_manager.lockReg(reg);
@@ -8844,7 +8833,7 @@ fn isNonErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCVa
 fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const operand = try self.resolveInst(un_op);
-const ty = self.air.typeOf(un_op);
+const ty = self.typeOf(un_op);
 const result = try self.isNull(inst, ty, operand);
 return self.finishAir(inst, result, .{ un_op, .none, .none });
 }
@@ -8852,7 +8841,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
 fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const operand = try self.resolveInst(un_op);
-const ty = self.air.typeOf(un_op);
+const ty = self.typeOf(un_op);
 const result = try self.isNullPtr(inst, ty, operand);
 return self.finishAir(inst, result, .{ un_op, .none, .none });
 }
@@ -8860,7 +8849,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
 fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const operand = try self.resolveInst(un_op);
-const ty = self.air.typeOf(un_op);
+const ty = self.typeOf(un_op);
 const result = switch (try self.isNull(inst, ty, operand)) {
 .eflags => |cc| .{ .eflags = cc.negate() },
 else => unreachable,
@@ -8871,7 +8860,7 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const operand = try self.resolveInst(un_op);
-const ty = self.air.typeOf(un_op);
+const ty = self.typeOf(un_op);
 const result = switch (try self.isNullPtr(inst, ty, operand)) {
 .eflags => |cc| .{ .eflags = cc.negate() },
 else => unreachable,
@@ -8882,12 +8871,13 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
 fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const operand = try self.resolveInst(un_op);
-const ty = self.air.typeOf(un_op);
+const ty = self.typeOf(un_op);
 const result = try self.isErr(inst, ty, operand);
 return self.finishAir(inst, result, .{ un_op, .none, .none });
 }
 
 fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 
 const operand_ptr = try self.resolveInst(un_op);
@@ -8905,10 +8895,10 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
 break :blk try self.allocRegOrMem(inst, true);
 }
 };
-const ptr_ty = self.air.typeOf(un_op);
+const ptr_ty = self.typeOf(un_op);
 try self.load(operand, ptr_ty, operand_ptr);
 
-const result = try self.isErr(inst, ptr_ty.childType(), operand);
+const result = try self.isErr(inst, ptr_ty.childType(mod), operand);
 
 return self.finishAir(inst, result, .{ un_op, .none, .none });
 }
@@ -8916,12 +8906,13 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
 fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 const operand = try self.resolveInst(un_op);
-const ty = self.air.typeOf(un_op);
+const ty = self.typeOf(un_op);
 const result = try self.isNonErr(inst, ty, operand);
 return self.finishAir(inst, result, .{ un_op, .none, .none });
 }
 
 fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 const un_op = self.air.instructions.items(.data)[inst].un_op;
 
 const operand_ptr = try self.resolveInst(un_op);
@@ -8939,10 +8930,10 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
 break :blk try self.allocRegOrMem(inst, true);
 }
 };
-const ptr_ty = self.air.typeOf(un_op);
+const ptr_ty = self.typeOf(un_op);
 try self.load(operand, ptr_ty, operand_ptr);
 
-const result = try self.isNonErr(inst, ptr_ty.childType(), operand);
+const result = try self.isNonErr(inst, ptr_ty.childType(mod), operand);
 
 return self.finishAir(inst, result, .{ un_op, .none, .none });
 }
@@ -9005,7 +8996,7 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
 fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void {
 const pl_op = self.air.instructions.items(.data)[inst].pl_op;
 const condition = try self.resolveInst(pl_op.operand);
-const condition_ty = self.air.typeOf(pl_op.operand);
+const condition_ty = self.typeOf(pl_op.operand);
 const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
 var extra_index: usize = switch_br.end;
 var case_i: u32 = 0;
@@ -9088,12 +9079,13 @@ fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
 }
 
 fn airBr(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 const br = self.air.instructions.items(.data)[inst].br;
 
 const src_mcv = try self.resolveInst(br.operand);
 
-const block_ty = self.air.typeOfIndex(br.block_inst);
+const block_ty = self.typeOfIndex(br.block_inst);
 const block_unused =
-!block_ty.hasRuntimeBitsIgnoreComptime() or self.liveness.isUnused(br.block_inst);
+!block_ty.hasRuntimeBitsIgnoreComptime(mod) or self.liveness.isUnused(br.block_inst);
 const block_tracking = self.inst_tracking.getPtr(br.block_inst).?;
 const block_data = self.blocks.getPtr(br.block_inst).?;
 const first_br = block_data.relocs.items.len == 0;
@@ -9216,7 +9208,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
 const arg_mcv = try self.resolveInst(input);
 try self.register_manager.getReg(reg, null);
-try self.genSetReg(reg, self.air.typeOf(input), arg_mcv);
+try self.genSetReg(reg, self.typeOf(input), arg_mcv);
 }
 
 {
@@ -9402,7 +9394,8 @@ const MoveStrategy = union(enum) {
 };
 };
 fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
-switch (ty.zigTypeTag()) {
+const mod = self.bin_file.options.module.?;
+switch (ty.zigTypeTag(mod)) {
 else => return .{ .move = .{ ._, .mov } },
 .Float => switch (ty.floatBits(self.target.*)) {
 16 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{
@@ -9419,9 +9412,9 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
 else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
 else => {},
 },
-.Vector => switch (ty.childType().zigTypeTag()) {
-.Int => switch (ty.childType().intInfo(self.target.*).bits) {
-8 => switch (ty.vectorLen()) {
+.Vector => switch (ty.childType(mod).zigTypeTag(mod)) {
+.Int => switch (ty.childType(mod).intInfo(mod).bits) {
+8 => switch (ty.vectorLen(mod)) {
 1 => if (self.hasFeature(.avx)) return .{ .vex_insert_extract = .{
 .insert = .{ .vp_b, .insr },
 .extract = .{ .vp_b, .extr },
@@ -9451,7 +9444,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
 return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
 else => {},
 },
-16 => switch (ty.vectorLen()) {
+16 => switch (ty.vectorLen(mod)) {
 1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{
 .insert = .{ .vp_w, .insr },
 .extract = .{ .vp_w, .extr },
@@ -9474,7 +9467,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
 return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
 else => {},
 },
-32 => switch (ty.vectorLen()) {
+32 => switch (ty.vectorLen(mod)) {
 1 => return .{ .move = if (self.hasFeature(.avx))
 .{ .v_d, .mov }
 else
@@ -9490,7 +9483,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
 return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
 else => {},
 },
-64 => switch (ty.vectorLen()) {
+64 => switch (ty.vectorLen(mod)) {
 1 => return .{ .move = if (self.hasFeature(.avx))
 .{ .v_q, .mov }
 else
@@ -9502,7 +9495,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
 return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
 else => {},
 },
-128 => switch (ty.vectorLen()) {
+128 => switch (ty.vectorLen(mod)) {
 1 => return .{ .move = if (self.hasFeature(.avx))
 if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
 else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -9510,15 +9503,15 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
 return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
 else => {},
 },
-256 => switch (ty.vectorLen()) {
+256 => switch (ty.vectorLen(mod)) {
 1 => if (self.hasFeature(.avx))
 return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
 else => {},
 },
 else => {},
 },
-.Float => switch (ty.childType().floatBits(self.target.*)) {
-16 => switch (ty.vectorLen()) {
+.Float => switch (ty.childType(mod).floatBits(self.target.*)) {
+16 => switch (ty.vectorLen(mod)) {
 1 => return if (self.hasFeature(.avx)) .{ .vex_insert_extract = .{
 .insert = .{ .vp_w, .insr },
 .extract = .{ .vp_w, .extr },
@@ -9541,7 +9534,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
 return .{ .move = if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu } },
 else => {},
 },
-32 => switch (ty.vectorLen()) {
+32 => switch (ty.vectorLen(mod)) {
 1 => return .{ .move = if (self.hasFeature(.avx))
 .{ .v_ss, .mov }
 else
@@ -9557,7 +9550,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
 return .{ .move = if (aligned) .{ .v_ps, .mova } else .{ .v_ps, .movu } },
 else => {},
 },
-64 => switch (ty.vectorLen()) {
+64 => switch (ty.vectorLen(mod)) {
 1 => return .{ .move = if (self.hasFeature(.avx))
 .{ .v_sd, .mov }
 else
@@ -9569,7 +9562,7 @@ fn moveStrategy(self: *Self, ty: Type, aligned: bool) !MoveStrategy {
 return .{ .move = if (aligned) .{ .v_pd, .mova } else .{ .v_pd, .movu } },
 else => {},
 },
-128 => switch (ty.vectorLen()) {
+128 => switch (ty.vectorLen(mod)) {
 1 => return .{ .move = if (self.hasFeature(.avx))
 if (aligned) .{ .v_, .movdqa } else .{ .v_, .movdqu }
 else if (aligned) .{ ._, .movdqa } else .{ ._, .movdqu } },
@@ -9647,7 +9640,8 @@ fn genCopy(self: *Self, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError
 }
 
 fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerError!void {
-const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+const mod = self.bin_file.options.module.?;
+const abi_size = @intCast(u32, ty.abiSize(mod));
 if (abi_size * 8 > dst_reg.bitSize())
 return self.fail("genSetReg called with a value larger than dst_reg", .{});
 switch (src_mcv) {
@@ -9730,7 +9724,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
 .{ .register = try self.copyToTmpRegister(ty, src_mcv) },
 ),
 .sse => try self.asmRegisterRegister(
-if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType().zigTypeTag()) {
+if (@as(?Mir.Inst.FixedTag, switch (ty.scalarType(mod).zigTypeTag(mod)) {
 else => switch (abi_size) {
 1...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov },
 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov },
@@ -9738,7 +9732,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
 17...32 => if (self.hasFeature(.avx)) .{ .v_, .movdqa } else null,
 else => null,
 },
-.Float => switch (ty.scalarType().floatBits(self.target.*)) {
+.Float => switch (ty.scalarType(mod).floatBits(self.target.*)) {
 16, 128 => switch (abi_size) {
 2...4 => if (self.hasFeature(.avx)) .{ .v_d, .mov } else .{ ._d, .mov },
 5...8 => if (self.hasFeature(.avx)) .{ .v_q, .mov } else .{ ._q, .mov },
@@ -9789,7 +9783,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
 .indirect => try self.moveStrategy(ty, false),
 .load_frame => |frame_addr| try self.moveStrategy(
 ty,
-self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(self.target.*),
+self.getFrameAddrAlignment(frame_addr) >= ty.abiAlignment(mod),
 ),
 .lea_frame => .{ .move = .{ ._, .lea } },
 else => unreachable,
@@ -9821,7 +9815,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
 switch (try self.moveStrategy(ty, mem.isAlignedGeneric(
 u32,
 @bitCast(u32, small_addr),
-ty.abiAlignment(self.target.*),
+ty.abiAlignment(mod),
 ))) {
 .move => |tag| try self.asmRegisterMemory(tag, dst_alias, src_mem),
 .insert_extract => |ie| try self.asmRegisterMemoryImmediate(
@@ -9839,7 +9833,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
 ),
 }
 },
-.load_direct => |sym_index| switch (ty.zigTypeTag()) {
+.load_direct => |sym_index| switch (ty.zigTypeTag(mod)) {
 else => {
 const atom_index = try self.owner.getSymbolIndex(self);
 _ = try self.addInst(.{
@@ -9933,7 +9927,8 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
 }
 
 fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCValue) InnerError!void {
-const abi_size = @intCast(u32, ty.abiSize(self.target.*));
+const mod = self.bin_file.options.module.?;
+const abi_size = @intCast(u32, ty.abiSize(mod));
 const dst_ptr_mcv: MCValue = switch (base) {
 .none => .{ .immediate = @bitCast(u64, @as(i64, disp)) },
 .reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
@@ -9945,7 +9940,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
 try self.genInlineMemset(dst_ptr_mcv, .{ .immediate = 0xaa }, .{ .immediate = abi_size }),
 .immediate => |imm| switch (abi_size) {
 1, 2, 4 => {
-const immediate = if (ty.isSignedInt())
+const immediate = if (ty.isSignedInt(mod))
 Immediate.s(@truncate(i32, @bitCast(i64, imm)))
 else
 Immediate.u(@intCast(u32, imm));
@@ -9967,7 +9962,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
 while (offset < abi_size) : (offset += 4) try self.asmMemoryImmediate(
 .{ ._, .mov },
 Memory.sib(.dword, .{ .base = base, .disp = disp + offset }),
-if (ty.isSignedInt())
+if (ty.isSignedInt(mod))
 Immediate.s(@truncate(
 i32,
 @bitCast(i64, imm) >> (math.cast(u6, offset * 8) orelse 63),
@@ -9991,19 +9986,19 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
 .none => mem.isAlignedGeneric(
 u32,
 @bitCast(u32, disp),
-ty.abiAlignment(self.target.*),
+ty.abiAlignment(mod),
 ),
 .reg => |reg| switch (reg) {
 .es, .cs, .ss, .ds => mem.isAlignedGeneric(
 u32,
 @bitCast(u32, disp),
-ty.abiAlignment(self.target.*),
+ty.abiAlignment(mod),
 ),
 else => false,
 },
 .frame => |frame_index| self.getFrameAddrAlignment(
 .{ .index = frame_index, .off = disp },
-) >= ty.abiAlignment(self.target.*),
+) >= ty.abiAlignment(mod),
 })) {
 .move => |tag| try self.asmMemoryRegister(tag, dst_mem, src_alias),
 .insert_extract, .vex_insert_extract => |ie| try self.asmMemoryRegisterImmediate(
@@ -10017,14 +10012,14 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
 .register_overflow => |ro| {
 try self.genSetMem(
 base,
-disp + @intCast(i32, ty.structFieldOffset(0, self.target.*)),
-ty.structFieldType(0),
+disp + @intCast(i32, ty.structFieldOffset(0, mod)),
+ty.structFieldType(0, mod),
 .{ .register = ro.reg },
 );
 try self.genSetMem(
 base,
-disp + @intCast(i32, ty.structFieldOffset(1, self.target.*)),
-ty.structFieldType(1),
+disp + @intCast(i32, ty.structFieldOffset(1, mod)),
+ty.structFieldType(1, mod),
 .{ .eflags = ro.eflags },
 );
 },
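A note on the genSetMem hunks above: whether the aligned or unaligned SSE/AVX store form is chosen depends on what can be proven about the destination, and all three alignment comparisons now go through `ty.abiAlignment(mod)`. Restated as a standalone decision (the diff passes this expression inline to `moveStrategy`):

    // Restating the alignment proof for the store destination.
    const is_aligned = switch (base) {
        // Absolute displacement: checkable statically.
        .none => mem.isAlignedGeneric(u32, @bitCast(u32, disp), ty.abiAlignment(mod)),
        // Only flat segment-register bases are checkable; others assume unaligned.
        .reg => |reg| switch (reg) {
            .es, .cs, .ss, .ds => mem.isAlignedGeneric(u32, @bitCast(u32, disp), ty.abiAlignment(mod)),
            else => false,
        },
        // Frame slots: compare the tracked slot alignment with the type's.
        .frame => |frame_index| self.getFrameAddrAlignment(
            .{ .index = frame_index, .off = disp },
        ) >= ty.abiAlignment(mod),
    };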
@@ -10138,7 +10133,7 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
 if (self.reuseOperand(inst, un_op, 0, src_mcv)) break :result src_mcv;
 
 const dst_mcv = try self.allocRegOrMem(inst, true);
-const dst_ty = self.air.typeOfIndex(inst);
+const dst_ty = self.typeOfIndex(inst);
 try self.genCopy(dst_ty, dst_mcv, src_mcv);
 break :result dst_mcv;
 };
@@ -10146,13 +10141,14 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-const dst_ty = self.air.typeOfIndex(inst);
-const src_ty = self.air.typeOf(ty_op.operand);
+const dst_ty = self.typeOfIndex(inst);
+const src_ty = self.typeOf(ty_op.operand);
 
 const result = result: {
-const dst_rc = regClassForType(dst_ty);
-const src_rc = regClassForType(src_ty);
+const dst_rc = regClassForType(dst_ty, mod);
+const src_rc = regClassForType(src_ty, mod);
 const src_mcv = try self.resolveInst(ty_op.operand);
 
 const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
@@ -10172,13 +10168,13 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 };
 
 const dst_signedness =
-if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned;
+if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned;
 const src_signedness =
-if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned;
+if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
 if (dst_signedness == src_signedness) break :result dst_mcv;
 
-const abi_size = @intCast(u16, dst_ty.abiSize(self.target.*));
-const bit_size = @intCast(u16, dst_ty.bitSize(self.target.*));
+const abi_size = @intCast(u16, dst_ty.abiSize(mod));
+const bit_size = @intCast(u16, dst_ty.bitSize(mod));
 if (abi_size * 8 <= bit_size) break :result dst_mcv;
 
 const dst_limbs_len = math.divCeil(i32, bit_size, 64) catch unreachable;
@@ -10192,14 +10188,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 const high_lock = self.register_manager.lockReg(high_reg);
 defer if (high_lock) |lock| self.register_manager.unlockReg(lock);
 
-var high_pl = Type.Payload.Bits{
-.base = .{ .tag = switch (dst_signedness) {
-.signed => .int_signed,
-.unsigned => .int_unsigned,
-} },
-.data = bit_size % 64,
-};
-const high_ty = Type.initPayload(&high_pl.base);
+const high_ty = try mod.intType(dst_signedness, bit_size % 64);
 
 try self.truncateRegister(high_ty, high_reg);
 if (!dst_mcv.isRegister()) try self.genCopy(
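airBitCast above drops another stack-built type: the sub-64-bit "high limb" integer type used for truncation is now interned via `mod.intType`:

    // Before:
    //   var high_pl = Type.Payload.Bits{
    //       .base = .{ .tag = switch (dst_signedness) {
    //           .signed => .int_signed,
    //           .unsigned => .int_unsigned,
    //       } },
    //       .data = bit_size % 64,
    //   };
    //   const high_ty = Type.initPayload(&high_pl.base);
    // After:
    const high_ty = try mod.intType(dst_signedness, bit_size % 64);

`intType` takes the signedness enum directly, so the tag switch disappears.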
@@ -10213,19 +10202,20 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-const slice_ty = self.air.typeOfIndex(inst);
-const ptr_ty = self.air.typeOf(ty_op.operand);
+const slice_ty = self.typeOfIndex(inst);
+const ptr_ty = self.typeOf(ty_op.operand);
 const ptr = try self.resolveInst(ty_op.operand);
-const array_ty = ptr_ty.childType();
-const array_len = array_ty.arrayLen();
+const array_ty = ptr_ty.childType(mod);
+const array_len = array_ty.arrayLen(mod);
 
-const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, self.target.*));
+const frame_index = try self.allocFrameIndex(FrameAlloc.initType(slice_ty, mod));
 try self.genSetMem(.{ .frame = frame_index }, 0, ptr_ty, ptr);
 try self.genSetMem(
 .{ .frame = frame_index },
-@intCast(i32, ptr_ty.abiSize(self.target.*)),
+@intCast(i32, ptr_ty.abiSize(mod)),
 Type.usize,
 .{ .immediate = array_len },
 );
@@ -10235,20 +10225,21 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-const src_ty = self.air.typeOf(ty_op.operand);
-const src_bits = @intCast(u32, src_ty.bitSize(self.target.*));
+const src_ty = self.typeOf(ty_op.operand);
+const src_bits = @intCast(u32, src_ty.bitSize(mod));
 const src_signedness =
-if (src_ty.isAbiInt()) src_ty.intInfo(self.target.*).signedness else .unsigned;
-const dst_ty = self.air.typeOfIndex(inst);
+if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
+const dst_ty = self.typeOfIndex(inst);
 
 const src_size = math.divCeil(u32, @max(switch (src_signedness) {
 .signed => src_bits,
 .unsigned => src_bits + 1,
 }, 32), 8) catch unreachable;
 if (src_size > 8) return self.fail("TODO implement airIntToFloat from {} to {}", .{
-src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?),
+src_ty.fmt(mod), dst_ty.fmt(mod),
 });
 
 const src_mcv = try self.resolveInst(ty_op.operand);
@@ -10261,12 +10252,12 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
 
 if (src_bits < src_size * 8) try self.truncateRegister(src_ty, src_reg);
 
-const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty));
+const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod));
 const dst_mcv = MCValue{ .register = dst_reg };
 const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
 defer self.register_manager.unlockReg(dst_lock);
 
-const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag()) {
+const mir_tag = if (@as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(mod)) {
 .Float => switch (dst_ty.floatBits(self.target.*)) {
 32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 },
 64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 },
@@ -10275,7 +10266,7 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
 },
 else => null,
 })) |tag| tag else return self.fail("TODO implement airIntToFloat from {} to {}", .{
-src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?),
+src_ty.fmt(mod), dst_ty.fmt(mod),
 });
 const dst_alias = dst_reg.to128();
 const src_alias = registerAlias(src_reg, src_size);
@@ -10288,13 +10279,14 @@ fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-const src_ty = self.air.typeOf(ty_op.operand);
-const dst_ty = self.air.typeOfIndex(inst);
-const dst_bits = @intCast(u32, dst_ty.bitSize(self.target.*));
+const src_ty = self.typeOf(ty_op.operand);
+const dst_ty = self.typeOfIndex(inst);
+const dst_bits = @intCast(u32, dst_ty.bitSize(mod));
 const dst_signedness =
-if (dst_ty.isAbiInt()) dst_ty.intInfo(self.target.*).signedness else .unsigned;
+if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned;
 
 const dst_size = math.divCeil(u32, @max(switch (dst_signedness) {
 .signed => dst_bits,
@@ -10312,13 +10304,13 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
 const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
 defer self.register_manager.unlockReg(src_lock);
 
-const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty));
+const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod));
 const dst_mcv = MCValue{ .register = dst_reg };
 const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
 defer self.register_manager.unlockReg(dst_lock);
 
 try self.asmRegisterRegister(
-if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag()) {
+if (@as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag(mod)) {
 .Float => switch (src_ty.floatBits(self.target.*)) {
 32 => if (self.hasFeature(.avx)) .{ .v_, .cvttss2si } else .{ ._, .cvttss2si },
 64 => if (self.hasFeature(.avx)) .{ .v_, .cvttsd2si } else .{ ._, .cvttsd2si },
@@ -10339,12 +10331,13 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
 const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
 
-const ptr_ty = self.air.typeOf(extra.ptr);
-const val_ty = self.air.typeOf(extra.expected_value);
-const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
+const ptr_ty = self.typeOf(extra.ptr);
+const val_ty = self.typeOf(extra.expected_value);
+const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
 
 try self.spillRegisters(&.{ .rax, .rdx, .rbx, .rcx });
 const regs_lock = self.register_manager.lockRegsAssumeUnused(4, .{ .rax, .rdx, .rbx, .rcx });
@@ -10433,6 +10426,7 @@ fn atomicOp(
 rmw_op: ?std.builtin.AtomicRmwOp,
 order: std.builtin.AtomicOrder,
 ) InnerError!MCValue {
+const mod = self.bin_file.options.module.?;
 const ptr_lock = switch (ptr_mcv) {
 .register => |reg| self.register_manager.lockReg(reg),
 else => null,
@@ -10445,7 +10439,7 @@ fn atomicOp(
 };
 defer if (val_lock) |lock| self.register_manager.unlockReg(lock);
 
-const val_abi_size = @intCast(u32, val_ty.abiSize(self.target.*));
+const val_abi_size = @intCast(u32, val_ty.abiSize(mod));
 const ptr_size = Memory.PtrSize.fromSize(val_abi_size);
 const ptr_mem = switch (ptr_mcv) {
 .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size),
@@ -10539,8 +10533,8 @@ fn atomicOp(
 .Or => try self.genBinOpMir(.{ ._, .@"or" }, val_ty, tmp_mcv, val_mcv),
 .Xor => try self.genBinOpMir(.{ ._, .xor }, val_ty, tmp_mcv, val_mcv),
 .Min, .Max => {
-const cc: Condition = switch (if (val_ty.isAbiInt())
-val_ty.intInfo(self.target.*).signedness
+const cc: Condition = switch (if (val_ty.isAbiInt(mod))
+val_ty.intInfo(mod).signedness
 else
 .unsigned) {
 .unsigned => switch (op) {
@@ -10682,10 +10676,10 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
 
 const unused = self.liveness.isUnused(inst);
 
-const ptr_ty = self.air.typeOf(pl_op.operand);
+const ptr_ty = self.typeOf(pl_op.operand);
 const ptr_mcv = try self.resolveInst(pl_op.operand);
 
-const val_ty = self.air.typeOf(extra.operand);
+const val_ty = self.typeOf(extra.operand);
 const val_mcv = try self.resolveInst(extra.operand);
 
 const result =
@@ -10696,7 +10690,7 @@ fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void {
 fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
 const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
 
-const ptr_ty = self.air.typeOf(atomic_load.ptr);
+const ptr_ty = self.typeOf(atomic_load.ptr);
 const ptr_mcv = try self.resolveInst(atomic_load.ptr);
 const ptr_lock = switch (ptr_mcv) {
 .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
@@ -10717,10 +10711,10 @@ fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void {
 fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
-const ptr_ty = self.air.typeOf(bin_op.lhs);
+const ptr_ty = self.typeOf(bin_op.lhs);
 const ptr_mcv = try self.resolveInst(bin_op.lhs);
 
-const val_ty = self.air.typeOf(bin_op.rhs);
+const val_ty = self.typeOf(bin_op.rhs);
 const val_mcv = try self.resolveInst(bin_op.rhs);
 
 const result = try self.atomicOp(ptr_mcv, val_mcv, ptr_ty, val_ty, true, null, order);
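In atomicOp above, the Min/Max path picks a condition code for the compare-and-swap loop from the operand's signedness, which is now queried through the Module:

    // Restating the hunk: non-ABI-int types are treated as unsigned.
    const signedness = if (val_ty.isAbiInt(mod))
        val_ty.intInfo(mod).signedness
    else
        .unsigned;

The switch arms mapping Min/Max to concrete condition codes sit outside this hunk and are unchanged by it.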
@@ -10728,6 +10722,7 @@ fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOr
 }
 
 fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
+const mod = self.bin_file.options.module.?;
 if (safety) {
 // TODO if the value is undef, write 0xaa bytes to dest
 } else {
@@ -10737,7 +10732,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
 const dst_ptr = try self.resolveInst(bin_op.lhs);
-const dst_ptr_ty = self.air.typeOf(bin_op.lhs);
+const dst_ptr_ty = self.typeOf(bin_op.lhs);
 const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) {
 .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
 else => null,
@@ -10745,26 +10740,26 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
 defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
 const src_val = try self.resolveInst(bin_op.rhs);
-const elem_ty = self.air.typeOf(bin_op.rhs);
+const elem_ty = self.typeOf(bin_op.rhs);
 const src_val_lock: ?RegisterLock = switch (src_val) {
 .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
 else => null,
 };
 defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock);
 
-const elem_abi_size = @intCast(u31, elem_ty.abiSize(self.target.*));
+const elem_abi_size = @intCast(u31, elem_ty.abiSize(mod));
 
 if (elem_abi_size == 1) {
-const ptr: MCValue = switch (dst_ptr_ty.ptrSize()) {
+const ptr: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
 // TODO: this only handles slices stored in the stack
 .Slice => dst_ptr,
 .One => dst_ptr,
 .C, .Many => unreachable,
 };
-const len: MCValue = switch (dst_ptr_ty.ptrSize()) {
+const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
 // TODO: this only handles slices stored in the stack
 .Slice => dst_ptr.address().offset(8).deref(),
-.One => .{ .immediate = dst_ptr_ty.childType().arrayLen() },
+.One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) },
 .C, .Many => unreachable,
 };
 const len_lock: ?RegisterLock = switch (len) {
@@ -10780,10 +10775,9 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
 // Store the first element, and then rely on memcpy copying forwards.
 // Length zero requires a runtime check - so we handle arrays specially
 // here to elide it.
-switch (dst_ptr_ty.ptrSize()) {
+switch (dst_ptr_ty.ptrSize(mod)) {
 .Slice => {
-var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(&buf);
+const slice_ptr_ty = dst_ptr_ty.slicePtrFieldType(mod);
 
 // TODO: this only handles slices stored in the stack
 const ptr = dst_ptr;
@@ -10823,13 +10817,9 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
 try self.performReloc(skip_reloc);
 },
 .One => {
-var elem_ptr_pl = Type.Payload.ElemType{
-.base = .{ .tag = .single_mut_pointer },
-.data = elem_ty,
-};
-const elem_ptr_ty = Type.initPayload(&elem_ptr_pl.base);
+const elem_ptr_ty = try mod.singleMutPtrType(elem_ty);
 
-const len = dst_ptr_ty.childType().arrayLen();
+const len = dst_ptr_ty.childType(mod).arrayLen(mod);
 assert(len != 0); // prevented by Sema
 try self.store(elem_ptr_ty, dst_ptr, src_val);
 
@@ -10854,10 +10844,11 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
 }
 
 fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
+const mod = self.bin_file.options.module.?;
 const bin_op = self.air.instructions.items(.data)[inst].bin_op;
 
 const dst_ptr = try self.resolveInst(bin_op.lhs);
-const dst_ptr_ty = self.air.typeOf(bin_op.lhs);
+const dst_ptr_ty = self.typeOf(bin_op.lhs);
 const dst_ptr_lock: ?RegisterLock = switch (dst_ptr) {
 .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
 else => null,
@@ -10871,9 +10862,9 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
 };
 defer if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
-const len: MCValue = switch (dst_ptr_ty.ptrSize()) {
+const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
 .Slice => dst_ptr.address().offset(8).deref(),
-.One => .{ .immediate = dst_ptr_ty.childType().arrayLen() },
+.One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) },
 .C, .Many => unreachable,
 };
 const len_lock: ?RegisterLock = switch (len) {
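airMemset's one-element path above gets the same pointer-type treatment as genBinOpMir earlier: the mutable single-item pointer for storing through `dst_ptr` is interned rather than stack-built:

    // Before:
    //   var elem_ptr_pl = Type.Payload.ElemType{
    //       .base = .{ .tag = .single_mut_pointer },
    //       .data = elem_ty,
    //   };
    //   const elem_ptr_ty = Type.initPayload(&elem_ptr_pl.base);
    // After:
    const elem_ptr_ty = try mod.singleMutPtrType(elem_ty);

Likewise the ad-hoc `Type.SlicePtrFieldTypeBuffer` is gone; `slicePtrFieldType(mod)` returns the interned type directly.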
{ const needed_call_frame = FrameAlloc.init(.{ - .size = inst_ty.abiSize(self.target.*), - .alignment = inst_ty.abiAlignment(self.target.*), + .size = inst_ty.abiSize(mod), + .alignment = inst_ty.abiAlignment(mod), }); const frame_allocs_slice = self.frame_allocs.slice(); const stack_frame_size = @@ -10923,7 +10914,7 @@ fn airTagName(self: *Self, inst: Air.Inst.Index) !void { try self.genLazySymbolRef( .call, .rax, - link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(), mod), + link.File.LazySymbol.initDecl(.code, enum_ty.getOwnerDecl(mod), mod), ); return self.finishAir(inst, dst_mcv, .{ un_op, .none, .none }); @@ -10933,7 +10924,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { const mod = self.bin_file.options.module.?; const un_op = self.air.instructions.items(.data)[inst].un_op; - const err_ty = self.air.typeOf(un_op); + const err_ty = self.typeOf(un_op); const err_mcv = try self.resolveInst(un_op); const err_reg = try self.copyToTmpRegister(err_ty, err_mcv); const err_lock = self.register_manager.lockRegAssumeUnused(err_reg); @@ -11013,17 +11004,18 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { } fn airSplat(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const vector_ty = self.air.typeOfIndex(inst); - const dst_rc = regClassForType(vector_ty); - const scalar_ty = vector_ty.scalarType(); + const vector_ty = self.typeOfIndex(inst); + const dst_rc = regClassForType(vector_ty, mod); + const scalar_ty = vector_ty.scalarType(mod); const src_mcv = try self.resolveInst(ty_op.operand); const result: MCValue = result: { - switch (scalar_ty.zigTypeTag()) { + switch (scalar_ty.zigTypeTag(mod)) { else => {}, .Float => switch (scalar_ty.floatBits(self.target.*)) { - 32 => switch (vector_ty.vectorLen()) { + 32 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11103,7 +11095,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { }, else => {}, }, - 64 => switch (vector_ty.vectorLen()) { + 64 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11169,7 +11161,7 @@ fn airSplat(self: *Self, inst: Air.Inst.Index) !void { }, else => {}, }, - 128 => switch (vector_ty.vectorLen()) { + 128 => switch (vector_ty.vectorLen(mod)) { 1 => { if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv; const dst_reg = try self.register_manager.allocReg(inst, dst_rc); @@ -11233,36 +11225,37 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void { } fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { - const result_ty = self.air.typeOfIndex(inst); - const len = @intCast(usize, result_ty.arrayLen()); + const mod = self.bin_file.options.module.?; + const result_ty = self.typeOfIndex(inst); + const len = @intCast(usize, result_ty.arrayLen(mod)); const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); const result: MCValue = result: { - switch (result_ty.zigTypeTag()) { + switch (result_ty.zigTypeTag(mod)) { .Struct => { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(result_ty, self.target.*)); - if (result_ty.containerLayout() == .Packed) { - const 
struct_obj = result_ty.castTag(.@"struct").?.data; + try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); + if (result_ty.containerLayout(mod) == .Packed) { + const struct_obj = mod.typeToStruct(result_ty).?; try self.genInlineMemset( .{ .lea_frame = .{ .index = frame_index } }, .{ .immediate = 0 }, - .{ .immediate = result_ty.abiSize(self.target.*) }, + .{ .immediate = result_ty.abiSize(mod) }, ); for (elements, 0..) |elem, elem_i| { - if (result_ty.structFieldValueComptime(elem_i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i); - const elem_bit_size = @intCast(u32, elem_ty.bitSize(self.target.*)); + const elem_ty = result_ty.structFieldType(elem_i, mod); + const elem_bit_size = @intCast(u32, elem_ty.bitSize(mod)); if (elem_bit_size > 64) { return self.fail( "TODO airAggregateInit implement packed structs with large fields", .{}, ); } - const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + const elem_abi_size = @intCast(u32, elem_ty.abiSize(mod)); const elem_abi_bits = elem_abi_size * 8; - const elem_off = struct_obj.packedFieldBitOffset(self.target.*, elem_i); + const elem_off = struct_obj.packedFieldBitOffset(mod, elem_i); const elem_byte_off = @intCast(i32, elem_off / elem_abi_bits * elem_abi_size); const elem_bit_off = elem_off % elem_abi_bits; const elem_mcv = try self.resolveInst(elem); @@ -11322,10 +11315,10 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } } } else for (elements, 0..) |elem, elem_i| { - if (result_ty.structFieldValueComptime(elem_i) != null) continue; + if ((try result_ty.structFieldValueComptime(mod, elem_i)) != null) continue; - const elem_ty = result_ty.structFieldType(elem_i); - const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, self.target.*)); + const elem_ty = result_ty.structFieldType(elem_i, mod); + const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, mod)); const elem_mcv = try self.resolveInst(elem); const mat_elem_mcv = switch (elem_mcv) { .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, @@ -11337,9 +11330,9 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { }, .Array => { const frame_index = - try self.allocFrameIndex(FrameAlloc.initType(result_ty, self.target.*)); - const elem_ty = result_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + try self.allocFrameIndex(FrameAlloc.initType(result_ty, mod)); + const elem_ty = result_ty.childType(mod); + const elem_size = @intCast(u32, elem_ty.abiSize(mod)); for (elements, 0..) 
|elem, elem_i| { const elem_mcv = try self.resolveInst(elem); @@ -11350,7 +11343,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const elem_off = @intCast(i32, elem_size * elem_i); try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv); } - if (result_ty.sentinel()) |sentinel| try self.genSetMem( + if (result_ty.sentinel(mod)) |sentinel| try self.genSetMem( .{ .frame = frame_index }, @intCast(i32, elem_size * elements.len), elem_ty, @@ -11374,13 +11367,14 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { } fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const result: MCValue = result: { - const union_ty = self.air.typeOfIndex(inst); - const layout = union_ty.unionGetLayout(self.target.*); + const union_ty = self.typeOfIndex(inst); + const layout = union_ty.unionGetLayout(mod); - const src_ty = self.air.typeOf(extra.init); + const src_ty = self.typeOf(extra.init); const src_mcv = try self.resolveInst(extra.init); if (layout.tag_size == 0) { if (self.reuseOperand(inst, extra.init, 0, src_mcv)) break :result src_mcv; @@ -11392,15 +11386,13 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void { const dst_mcv = try self.allocRegOrMem(inst, false); - const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const union_obj = mod.typeToUnion(union_ty).?; const field_name = union_obj.fields.keys()[extra.field_index]; - const tag_ty = union_ty.unionTagTypeSafety().?; - const field_index = @intCast(u32, tag_ty.enumFieldIndex(field_name).?); - var tag_pl = Value.Payload.U32{ .base = .{ .tag = .enum_field_index }, .data = field_index }; - const tag_val = Value.initPayload(&tag_pl.base); - var tag_int_pl: Value.Payload.U64 = undefined; - const tag_int_val = tag_val.enumToInt(tag_ty, &tag_int_pl); - const tag_int = tag_int_val.toUnsignedInt(self.target.*); + const tag_ty = union_obj.tag_ty; + const field_index = tag_ty.enumFieldIndex(field_name, mod).?; + const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index); + const tag_int_val = try tag_val.enumToInt(tag_ty, mod); + const tag_int = tag_int_val.toUnsignedInt(mod); const tag_off = if (layout.tag_align < layout.payload_align) @intCast(i32, layout.payload_size) else @@ -11424,9 +11416,10 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) !void { } fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { + const mod = self.bin_file.options.module.?; const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Bin, pl_op.payload).data; - const ty = self.air.typeOfIndex(inst); + const ty = self.typeOfIndex(inst); if (!self.hasFeature(.fma)) return self.fail("TODO implement airMulAdd for {}", .{ ty.fmt(self.bin_file.options.module.?), @@ -11466,21 +11459,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { const mir_tag = if (@as( ?Mir.Inst.FixedTag, if (mem.eql(u2, &order, &.{ 1, 3, 2 }) or mem.eql(u2, &order, &.{ 3, 1, 2 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd132 }, 64 => .{ .v_sd, .fmadd132 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float 
=> switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd132 }, 2...8 => .{ .v_ps, .fmadd132 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd132 }, 2...4 => .{ .v_pd, .fmadd132 }, else => null, @@ -11493,21 +11486,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, } else if (mem.eql(u2, &order, &.{ 2, 1, 3 }) or mem.eql(u2, &order, &.{ 1, 2, 3 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd213 }, 64 => .{ .v_sd, .fmadd213 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd213 }, 2...8 => .{ .v_ps, .fmadd213 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd213 }, 2...4 => .{ .v_pd, .fmadd213 }, else => null, @@ -11520,21 +11513,21 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, } else if (mem.eql(u2, &order, &.{ 2, 3, 1 }) or mem.eql(u2, &order, &.{ 3, 2, 1 })) - switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Float => switch (ty.floatBits(self.target.*)) { 32 => .{ .v_ss, .fmadd231 }, 64 => .{ .v_sd, .fmadd231 }, 16, 80, 128 => null, else => unreachable, }, - .Vector => switch (ty.childType().zigTypeTag()) { - .Float => switch (ty.childType().floatBits(self.target.*)) { - 32 => switch (ty.vectorLen()) { + .Vector => switch (ty.childType(mod).zigTypeTag(mod)) { + .Float => switch (ty.childType(mod).floatBits(self.target.*)) { + 32 => switch (ty.vectorLen(mod)) { 1 => .{ .v_ss, .fmadd231 }, 2...8 => .{ .v_ps, .fmadd231 }, else => null, }, - 64 => switch (ty.vectorLen()) { + 64 => switch (ty.vectorLen(mod)) { 1 => .{ .v_sd, .fmadd231 }, 2...4 => .{ .v_pd, .fmadd231 }, else => null, @@ -11555,7 +11548,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { var mops: [3]MCValue = undefined; for (order, mcvs) |mop_index, mcv| mops[mop_index - 1] = mcv; - const abi_size = @intCast(u32, ty.abiSize(self.target.*)); + const abi_size = @intCast(u32, ty.abiSize(mod)); const mop1_reg = registerAlias(mops[0].getReg().?, abi_size); const mop2_reg = registerAlias(mops[1].getReg().?, abi_size); if (mops[2].isRegister()) try self.asmRegisterRegisterRegister( @@ -11573,22 +11566,22 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void { } fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { - const ty = self.air.typeOf(ref); + const mod = self.bin_file.options.module.?; + const ty = self.typeOf(ref); // If the type has no codegen bits, no need to store it. 
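The check that follows is the reason `.none` is a valid MCValue: zero-bit types such as `void`, `u0`, or an empty struct occupy no storage, so no register or stack slot is ever assigned to them. A standalone illustration of the underlying property:

    const std = @import("std");

    test "zero-bit types occupy no storage" {
        const Empty = struct {};
        try std.testing.expect(@sizeOf(Empty) == 0);
        try std.testing.expect(@sizeOf(u0) == 0);
    }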
- if (!ty.hasRuntimeBitsIgnoreComptime()) return .none; + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return .none; if (Air.refToIndex(ref)) |inst| { const mcv = switch (self.air.instructions.items(.tag)[inst]) { - .constant => tracking: { + .interned => tracking: { const gop = try self.const_tracking.getOrPut(self.gpa, inst); if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(try self.genTypedValue(.{ .ty = ty, - .val = self.air.value(ref).?, + .val = self.air.instructions.items(.data)[inst].interned.toValue(), })); break :tracking gop.value_ptr; }, - .const_ty => unreachable, else => self.inst_tracking.getPtr(inst).?, }.short; switch (mcv) { @@ -11597,13 +11590,12 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue { } } - return self.genTypedValue(.{ .ty = ty, .val = self.air.value(ref).? }); + return self.genTypedValue(.{ .ty = ty, .val = (try self.air.value(ref, mod)).? }); } fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) *InstTracking { const tracking = switch (self.air.instructions.items(.tag)[inst]) { - .constant => &self.const_tracking, - .const_ty => unreachable, + .interned => &self.const_tracking, else => &self.inst_tracking, }.getPtr(inst).?; return switch (tracking.short) { @@ -11634,7 +11626,8 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV } fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { - return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.owner.getDecl())) { + const mod = self.bin_file.options.module.?; + return switch (try codegen.genTypedValue(self.bin_file, self.src_loc, arg_tv, self.owner.getDecl(mod))) { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, @@ -11666,17 +11659,23 @@ const CallMCValues = struct { /// Caller must call `CallMCValues.deinit`. fn resolveCallingConventionValues( self: *Self, - fn_ty: Type, + fn_info: InternPool.Key.FuncType, var_args: []const Air.Inst.Ref, stack_frame_base: FrameIndex, ) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_len = fn_ty.fnParamLen(); - const param_types = try self.gpa.alloc(Type, param_len + var_args.len); + const mod = self.bin_file.options.module.?; + const cc = fn_info.cc; + const param_types = try self.gpa.alloc(Type, fn_info.param_types.len + var_args.len); defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); + + for (param_types[0..fn_info.param_types.len], fn_info.param_types) |*dest, src| { + dest.* = src.toType(); + } // TODO: promote var arg types - for (param_types[param_len..], var_args) |*param_ty, arg| param_ty.* = self.air.typeOf(arg); + for (param_types[fn_info.param_types.len..], var_args) |*param_ty, arg| { + param_ty.* = self.typeOf(arg); + } + var result: CallMCValues = .{ .args = try self.gpa.alloc(MCValue, param_types.len), // These undefined values must be populated before returning from this function. @@ -11686,7 +11685,7 @@ fn resolveCallingConventionValues( }; errdefer self.gpa.free(result.args); - const ret_ty = fn_ty.fnReturnType(); + const ret_ty = fn_info.return_type.toType(); switch (cc) { .Naked => { @@ -11702,21 +11701,21 @@ fn resolveCallingConventionValues( switch (self.target.os.tag) { .windows => { // Align the stack to 16bytes before allocating shadow stack space (if any). 
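The Windows branch below reserves the fixed Win64 shadow ("home") space: the caller always allocates room for the four register parameters (rcx, rdx, r8 and r9), whether or not the callee uses them, which is where the `4 * usize` factor comes from:

    // 4 register parameters * 8 bytes each = 32 bytes of shadow space,
    // reserved on top of the 16-byte stack alignment.
    const shadow_space = 4 * @sizeOf(u64); // == 32 on x86_64-windows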
- result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(self.target.*)); + result.stack_byte_count += @intCast(u31, 4 * Type.usize.abiSize(mod)); }, else => {}, } // Return values - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { // TODO: is this even possible for C calling convention? result.return_value = InstTracking.init(.none); } else { const classes = switch (self.target.os.tag) { - .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, self.target.*)}, - else => mem.sliceTo(&abi.classifySystemV(ret_ty, self.target.*, .ret), .none), + .windows => &[1]abi.Class{abi.classifyWindows(ret_ty, mod)}, + else => mem.sliceTo(&abi.classifySystemV(ret_ty, mod, .ret), .none), }; if (classes.len > 1) { return self.fail("TODO handle multiple classes per type", .{}); @@ -11725,7 +11724,7 @@ fn resolveCallingConventionValues( result.return_value = switch (classes[0]) { .integer => InstTracking.init(.{ .register = registerAlias( ret_reg, - @intCast(u32, ret_ty.abiSize(self.target.*)), + @intCast(u32, ret_ty.abiSize(mod)), ) }), .float, .sse => InstTracking.init(.{ .register = .xmm0 }), .memory => ret: { @@ -11744,11 +11743,11 @@ fn resolveCallingConventionValues( // Input params for (param_types, result.args) |ty, *arg| { - assert(ty.hasRuntimeBitsIgnoreComptime()); + assert(ty.hasRuntimeBitsIgnoreComptime(mod)); const classes = switch (self.target.os.tag) { - .windows => &[1]abi.Class{abi.classifyWindows(ty, self.target.*)}, - else => mem.sliceTo(&abi.classifySystemV(ty, self.target.*, .arg), .none), + .windows => &[1]abi.Class{abi.classifyWindows(ty, mod)}, + else => mem.sliceTo(&abi.classifySystemV(ty, mod, .arg), .none), }; if (classes.len > 1) { return self.fail("TODO handle multiple classes per type", .{}); @@ -11783,8 +11782,8 @@ fn resolveCallingConventionValues( }), } - const param_size = @intCast(u31, ty.abiSize(self.target.*)); - const param_align = @intCast(u31, ty.abiAlignment(self.target.*)); + const param_size = @intCast(u31, ty.abiSize(mod)); + const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11798,13 +11797,13 @@ fn resolveCallingConventionValues( result.stack_align = 16; // Return values - if (ret_ty.zigTypeTag() == .NoReturn) { + if (ret_ty.zigTypeTag(mod) == .NoReturn) { result.return_value = InstTracking.init(.unreach); - } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) { result.return_value = InstTracking.init(.none); } else { const ret_reg = abi.getCAbiIntReturnRegs(self.target.*)[0]; - const ret_ty_size = @intCast(u31, ret_ty.abiSize(self.target.*)); + const ret_ty_size = @intCast(u31, ret_ty.abiSize(mod)); if (ret_ty_size <= 8 and !ret_ty.isRuntimeFloat()) { const aliased_reg = registerAlias(ret_reg, ret_ty_size); result.return_value = .{ .short = .{ .register = aliased_reg }, .long = .none }; @@ -11819,12 +11818,12 @@ fn resolveCallingConventionValues( // Input params for (param_types, result.args) |ty, *arg| { - if (!ty.hasRuntimeBitsIgnoreComptime()) { + if (!ty.hasRuntimeBitsIgnoreComptime(mod)) { arg.* = .none; continue; } - const param_size = @intCast(u31, ty.abiSize(self.target.*)); - const param_align = @intCast(u31, ty.abiAlignment(self.target.*)); + const param_size = 
@intCast(u31, ty.abiSize(mod)); + const param_align = @intCast(u31, ty.abiAlignment(mod)); result.stack_byte_count = mem.alignForwardGeneric(u31, result.stack_byte_count, param_align); arg.* = .{ .load_frame = .{ @@ -11908,9 +11907,10 @@ fn registerAlias(reg: Register, size_bytes: u32) Register { /// Truncates the value in the register in place. /// Clobbers any remaining bits. fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { - const int_info = if (ty.isAbiInt()) ty.intInfo(self.target.*) else std.builtin.Type.Int{ + const mod = self.bin_file.options.module.?; + const int_info = if (ty.isAbiInt(mod)) ty.intInfo(mod) else std.builtin.Type.Int{ .signedness = .unsigned, - .bits = @intCast(u16, ty.bitSize(self.target.*)), + .bits = @intCast(u16, ty.bitSize(mod)), }; const max_reg_bit_width = Register.rax.bitSize(); switch (int_info.signedness) { @@ -11953,8 +11953,9 @@ fn truncateRegister(self: *Self, ty: Type, reg: Register) !void { } fn regBitSize(self: *Self, ty: Type) u64 { - const abi_size = ty.abiSize(self.target.*); - return switch (ty.zigTypeTag()) { + const mod = self.bin_file.options.module.?; + const abi_size = ty.abiSize(mod); + return switch (ty.zigTypeTag(mod)) { else => switch (abi_size) { 1 => 8, 2 => 16, @@ -11971,7 +11972,8 @@ fn regBitSize(self: *Self, ty: Type) u64 { } fn regExtraBits(self: *Self, ty: Type) u64 { - return self.regBitSize(ty) - ty.bitSize(self.target.*); + const mod = self.bin_file.options.module.?; + return self.regBitSize(ty) - ty.bitSize(mod); } fn hasFeature(self: *Self, feature: Target.x86.Feature) bool { @@ -11983,3 +11985,13 @@ fn hasAnyFeatures(self: *Self, features: anytype) bool { fn hasAllFeatures(self: *Self, features: anytype) bool { return Target.x86.featureSetHasAll(self.target.cpu.features, features); } + +fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOf(inst, &mod.intern_pool); +} + +fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { + const mod = self.bin_file.options.module.?; + return self.air.typeOfIndex(inst, &mod.intern_pool); +} diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index e79424d6d8..69df5dbf4c 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -1,10 +1,3 @@ -const std = @import("std"); -const Type = @import("../../type.zig").Type; -const Target = std.Target; -const assert = std.debug.assert; -const Register = @import("bits.zig").Register; -const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; - pub const Class = enum { integer, sse, @@ -19,7 +12,7 @@ pub const Class = enum { float_combine, }; -pub fn classifyWindows(ty: Type, target: Target) Class { +pub fn classifyWindows(ty: Type, mod: *Module) Class { // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017 // "There's a strict one-to-one correspondence between a function call's arguments // and the registers used for those arguments. Any argument that doesn't fit in 8 @@ -28,7 +21,7 @@ pub fn classifyWindows(ty: Type, target: Target) Class { // "All floating point operations are done using the 16 XMM registers." // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed // as if they were integers of the same size." 
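Concretely, the rule quoted above means only 1-, 2-, 4- and 8-byte values travel as integers; everything else is passed through memory, with oversized integers getting the dedicated `win_i128` class. Hypothetical examples, assuming x86_64-windows layout:

    const S4 = extern struct { a: u16, b: u16 }; // abiSize 4  -> .integer
    const S12 = extern struct { a: u32, b: u32, c: u32 }; // abiSize 12 -> .memory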
- switch (ty.zigTypeTag()) { + switch (ty.zigTypeTag(mod)) { .Pointer, .Int, .Bool, @@ -43,12 +36,12 @@ pub fn classifyWindows(ty: Type, target: Target) Class { .ErrorUnion, .AnyFrame, .Frame, - => switch (ty.abiSize(target)) { + => switch (ty.abiSize(mod)) { 0 => unreachable, 1, 2, 4, 8 => return .integer, - else => switch (ty.zigTypeTag()) { + else => switch (ty.zigTypeTag(mod)) { .Int => return .win_i128, - .Struct, .Union => if (ty.containerLayout() == .Packed) { + .Struct, .Union => if (ty.containerLayout(mod) == .Packed) { return .win_i128; } else { return .memory; @@ -75,14 +68,15 @@ pub const Context = enum { ret, arg, other }; /// There are a maximum of 8 possible return slots. Returned values are in /// the beginning of the array; unused slots are filled with .none. -pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { +pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class { + const target = mod.getTarget(); const memory_class = [_]Class{ .memory, .none, .none, .none, .none, .none, .none, .none, }; var result = [1]Class{.none} ** 8; - switch (ty.zigTypeTag()) { - .Pointer => switch (ty.ptrSize()) { + switch (ty.zigTypeTag(mod)) { + .Pointer => switch (ty.ptrSize(mod)) { .Slice => { result[0] = .integer; result[1] = .integer; @@ -94,7 +88,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { }, }, .Int, .Enum, .ErrorSet => { - const bits = ty.intInfo(target).bits; + const bits = ty.intInfo(mod).bits; if (bits <= 64) { result[0] = .integer; return result; @@ -164,8 +158,8 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { else => unreachable, }, .Vector => { - const elem_ty = ty.childType(); - const bits = elem_ty.bitSize(target) * ty.arrayLen(); + const elem_ty = ty.childType(mod); + const bits = elem_ty.bitSize(mod) * ty.arrayLen(mod); if (bits <= 64) return .{ .sse, .none, .none, .none, .none, .none, .none, .none, @@ -204,7 +198,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { return memory_class; }, .Optional => { - if (ty.isPtrLikeOptional()) { + if (ty.isPtrLikeOptional(mod)) { result[0] = .integer; return result; } @@ -215,8 +209,8 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { // it contains unaligned fields, it has class MEMORY" // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". - const ty_size = ty.abiSize(target); - if (ty.containerLayout() == .Packed) { + const ty_size = ty.abiSize(mod); + if (ty.containerLayout(mod) == .Packed) { assert(ty_size <= 128); result[0] = .integer; if (ty_size > 64) result[1] = .integer; @@ -227,15 +221,15 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { var result_i: usize = 0; // out of 8 var byte_i: usize = 0; // out of 8 - const fields = ty.structFields(); + const fields = ty.structFields(mod); for (fields.values()) |field| { if (field.abi_align != 0) { - if (field.abi_align < field.ty.abiAlignment(target)) { + if (field.abi_align < field.ty.abiAlignment(mod)) { return memory_class; } } - const field_size = field.ty.abiSize(target); - const field_class_array = classifySystemV(field.ty, target, .other); + const field_size = field.ty.abiSize(mod); + const field_class_array = classifySystemV(field.ty, mod, .other); const field_class = std.mem.sliceTo(&field_class_array, .none); if (byte_i + field_size <= 8) { // Combine this field with the previous one. 
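The field loop above implements the SysV rule that an aggregate is classified one eightbyte at a time, merging each field's class into the slot it overlaps. The `C_C_D` test deleted at the end of this file captured the canonical case; its expected result, as a sketch:

    // extern struct { v1: i8, v2: i8, v3: f64 }
    // eightbyte 0 (v1, v2) -> .integer; eightbyte 1 (v3) -> .sse;
    // so classifySystemV yields { .integer, .sse, .none, ... } for both .ret and .arg.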
@@ -334,8 +328,8 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { // it contains unaligned fields, it has class MEMORY" // "If the size of the aggregate exceeds a single eightbyte, each is classified // separately.". - const ty_size = ty.abiSize(target); - if (ty.containerLayout() == .Packed) { + const ty_size = ty.abiSize(mod); + if (ty.containerLayout(mod) == .Packed) { assert(ty_size <= 128); result[0] = .integer; if (ty_size > 64) result[1] = .integer; @@ -344,15 +338,15 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { if (ty_size > 64) return memory_class; - const fields = ty.unionFields(); + const fields = ty.unionFields(mod); for (fields.values()) |field| { if (field.abi_align != 0) { - if (field.abi_align < field.ty.abiAlignment(target)) { + if (field.abi_align < field.ty.abiAlignment(mod)) { return memory_class; } } // Combine this field with the previous one. - const field_class = classifySystemV(field.ty, target, .other); + const field_class = classifySystemV(field.ty, mod, .other); for (&result, 0..) |*result_item, i| { const field_item = field_class[i]; // "If both classes are equal, this is the resulting class." @@ -426,7 +420,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class { return result; }, .Array => { - const ty_size = ty.abiSize(target); + const ty_size = ty.abiSize(mod); if (ty_size <= 64) { result[0] = .integer; return result; @@ -527,10 +521,17 @@ pub const RegisterClass = struct { }; }; -const testing = std.testing; -const Module = @import("../../Module.zig"); -const Value = @import("../../value.zig").Value; const builtin = @import("builtin"); +const std = @import("std"); +const Target = std.Target; +const assert = std.debug.assert; +const testing = std.testing; + +const Module = @import("../../Module.zig"); +const Register = @import("bits.zig").Register; +const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; +const Type = @import("../../type.zig").Type; +const Value = @import("../../value.zig").Value; fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field { return .{ @@ -541,34 +542,3 @@ fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field { .is_comptime = false, }; } - -test "C_C_D" { - var fields = Module.Struct.Fields{}; - // const C_C_D = extern struct { v1: i8, v2: i8, v3: f64 }; - try fields.ensureTotalCapacity(testing.allocator, 3); - defer fields.deinit(testing.allocator); - fields.putAssumeCapacity("v1", _field(.i8, 0)); - fields.putAssumeCapacity("v2", _field(.i8, 1)); - fields.putAssumeCapacity("v3", _field(.f64, 4)); - - var C_C_D_struct = Module.Struct{ - .fields = fields, - .namespace = undefined, - .owner_decl = undefined, - .zir_index = undefined, - .layout = .Extern, - .status = .fully_resolved, - .known_non_opv = true, - .is_tuple = false, - }; - var C_C_D = Type.Payload.Struct{ .data = &C_C_D_struct }; - - try testing.expectEqual( - [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none }, - classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .ret), - ); - try testing.expectEqual( - [_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none }, - classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .arg), - ); -} diff --git a/src/codegen.zig b/src/codegen.zig index adce183833..b39c3c5ec0 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -14,6 +14,7 @@ const Air = @import("Air.zig"); const Allocator = mem.Allocator; const Compilation = @import("Compilation.zig"); const 
ErrorMsg = Module.ErrorMsg; +const InternPool = @import("InternPool.zig"); const Liveness = @import("Liveness.zig"); const Module = @import("Module.zig"); const Target = std.Target; @@ -66,7 +67,7 @@ pub const DebugInfoOutput = union(enum) { pub fn generateFunction( bin_file: *link.File, src_loc: Module.SrcLoc, - func: *Module.Fn, + func_index: Module.Fn.Index, air: Air, liveness: Liveness, code: *std.ArrayList(u8), @@ -75,17 +76,17 @@ pub fn generateFunction( switch (bin_file.options.target.cpu.arch) { .arm, .armeb, - => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/arm/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), .aarch64, .aarch64_be, .aarch64_32, - => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), - .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/aarch64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .riscv64 => return @import("arch/riscv64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .sparc64 => return @import("arch/sparc64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), + .x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), .wasm32, .wasm64, - => return @import("arch/wasm/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output), + => return @import("arch/wasm/CodeGen.zig").generate(bin_file, src_loc, func_index, air, liveness, code, debug_output), else => unreachable, } } @@ -139,13 +140,14 @@ pub fn generateLazySymbol( return generateLazyFunction(bin_file, src_loc, lazy_sym, code, debug_output); } - if (lazy_sym.ty.isAnyError()) { + if (lazy_sym.ty.isAnyError(mod)) { alignment.* = 4; - const err_names = mod.error_name_list.items; + const err_names = mod.global_error_set.keys(); mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, err_names.len), endian); var offset = code.items.len; try code.resize((1 + err_names.len + 1) * 4); - for (err_names) |err_name| { + for (err_names) |err_name_nts| { + const err_name = mod.intern_pool.stringToSlice(err_name_nts); mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); offset += 4; try code.ensureUnusedCapacity(err_name.len + 1); @@ -154,9 +156,10 @@ pub fn generateLazySymbol( } mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian); return Result.ok; - } else if (lazy_sym.ty.zigTypeTag() == .Enum) { + } else if (lazy_sym.ty.zigTypeTag(mod) == .Enum) { alignment.* = 1; - for (lazy_sym.ty.enumFields().keys()) |tag_name| { + for (lazy_sym.ty.enumFields(mod)) |tag_name_ip| { + const tag_name = mod.intern_pool.stringToSlice(tag_name_ip); try code.ensureUnusedCapacity(tag_name.len + 1); code.appendSliceAssumeCapacity(tag_name); code.appendAssumeCapacity(0); @@ -181,529 +184,110 @@ pub fn generateSymbol( const tracy = trace(@src()); defer tracy.end(); + const mod = bin_file.options.module.?; var 
typed_value = arg_tv; - if (arg_tv.val.castTag(.runtime_value)) |rt| { - typed_value.val = rt.data; + switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) { + .runtime_value => |rt| typed_value.val = rt.val.toValue(), + else => {}, } - const target = bin_file.options.target; + const target = mod.getTarget(); const endian = target.cpu.arch.endian(); - const mod = bin_file.options.module.?; log.debug("generateSymbol: ty = {}, val = {}", .{ typed_value.ty.fmt(mod), typed_value.val.fmtValue(typed_value.ty, mod), }); - if (typed_value.val.isUndefDeep()) { - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; + if (typed_value.val.isUndefDeep(mod)) { + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; try code.appendNTimes(0xaa, abi_size); - return Result.ok; + return .ok; } - switch (typed_value.ty.zigTypeTag()) { - .Fn => { - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol function pointers", - .{}, - ), - }; - }, - .Float => { - switch (typed_value.ty.floatBits(target)) { - 16 => writeFloat(f16, typed_value.val.toFloat(f16), target, endian, try code.addManyAsArray(2)), - 32 => writeFloat(f32, typed_value.val.toFloat(f32), target, endian, try code.addManyAsArray(4)), - 64 => writeFloat(f64, typed_value.val.toFloat(f64), target, endian, try code.addManyAsArray(8)), - 80 => { - writeFloat(f80, typed_value.val.toFloat(f80), target, endian, try code.addManyAsArray(10)); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; - try code.appendNTimes(0, abi_size - 10); - }, - 128 => writeFloat(f128, typed_value.val.toFloat(f128), target, endian, try code.addManyAsArray(16)), + switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .struct_type, + .anon_struct_type, + .union_type, + .opaque_type, + .enum_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => unreachable, // types, not values + + .undef, .runtime_value => unreachable, // handled above + .simple_value => |simple_value| switch (simple_value) { + .undefined, + .void, + .null, + .empty_struct, + .@"unreachable", + .generic_poison, + => unreachable, // non-runtime values + .false, .true => try code.append(switch (simple_value) { + .false => 0, + .true => 1, else => unreachable, - } - return Result.ok; + }), }, - .Array => switch (typed_value.val.tag()) { - .bytes => { - const bytes = typed_value.val.castTag(.bytes).?.data; - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel()); - // The bytes payload already includes the sentinel, if any - try code.ensureUnusedCapacity(len); - code.appendSliceAssumeCapacity(bytes[0..len]); - return Result.ok; - }, - .str_lit => { - const str_lit = typed_value.val.castTag(.str_lit).?.data; - const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; - try code.ensureUnusedCapacity(bytes.len + 1); - code.appendSliceAssumeCapacity(bytes); - if (typed_value.ty.sentinel()) |sent_val| { - const byte = @intCast(u8, sent_val.toUnsignedInt(target)); - code.appendAssumeCapacity(byte); - } - return Result.ok; - }, - .aggregate => { - const elem_vals = typed_value.val.castTag(.aggregate).?.data; - const elem_ty = typed_value.ty.elemType(); - const len = @intCast(usize, typed_value.ty.arrayLenIncludingSentinel()); - for 
(elem_vals[0..len]) |elem_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = elem_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - return Result.ok; - }, - .repeated => { - const array = typed_value.val.castTag(.repeated).?.data; - const elem_ty = typed_value.ty.childType(); - const sentinel = typed_value.ty.sentinel(); - const len = typed_value.ty.arrayLen(); - - var index: u64 = 0; - while (index < len) : (index += 1) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = array, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - if (sentinel) |sentinel_val| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = sentinel_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - return Result.ok; - }, - .empty_array_sentinel => { - const elem_ty = typed_value.ty.childType(); - const sentinel_val = typed_value.ty.sentinel().?; - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = elem_ty, - .val = sentinel_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - return Result.ok; - }, - else => return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for array type value: {s}", - .{@tagName(typed_value.val.tag())}, - ), - }, + .variable, + .extern_func, + .func, + .enum_literal, + .empty_enum_value, + => unreachable, // non-runtime values + .int => { + const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow; + var space: Value.BigIntSpace = undefined; + const val = typed_value.val.toBigInt(&space, mod); + val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian); }, - .Pointer => switch (typed_value.val.tag()) { - .null_value => { - switch (target.ptrBitWidth()) { - 32 => { - mem.writeInt(u32, try code.addManyAsArray(4), 0, endian); - if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 4); - }, - 64 => { - mem.writeInt(u64, try code.addManyAsArray(8), 0, endian); - if (typed_value.ty.isSlice()) try code.appendNTimes(0xaa, 8); - }, - else => unreachable, - } - return Result.ok; - }, - .zero, .one, .int_u64, .int_big_positive => { - switch (target.ptrBitWidth()) { - 32 => { - const x = typed_value.val.toUnsignedInt(target); - mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, x), endian); - }, - 64 => { - const x = typed_value.val.toUnsignedInt(target); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - }, - else => unreachable, - } - return Result.ok; - }, - .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef( - bin_file, - src_loc, - typed_value, - switch (tag) { - .variable => typed_value.val.castTag(.variable).?.data.owner_decl, - .decl_ref => typed_value.val.castTag(.decl_ref).?.data, - .decl_ref_mut => typed_value.val.castTag(.decl_ref_mut).?.data.decl_index, - else => unreachable, - }, - code, - debug_output, - reloc_info, - ), - .slice => { - const slice = typed_value.val.castTag(.slice).?.data; - - // generate ptr - var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = slice_ptr_field_type, - .val = slice.ptr, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } 
- - // generate length - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = Type.initTag(.usize), - .val = slice.len, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - return Result.ok; - }, - .field_ptr, .elem_ptr, .opt_payload_ptr => return lowerParentPtr( - bin_file, - src_loc, - typed_value, - typed_value.val, - code, - debug_output, - reloc_info, - ), - else => return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for pointer type value: '{s}'", - .{@tagName(typed_value.val.tag())}, - ), - }, + .err => |err| { + const int = try mod.getErrorValue(err.name); + try code.writer().writeInt(u16, @intCast(u16, int), endian); }, - .Int => { - const info = typed_value.ty.intInfo(target); - if (info.bits <= 8) { - const x: u8 = switch (info.signedness) { - .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(target)), - .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(target))), - }; - try code.append(x); - return Result.ok; - } - if (info.bits > 64) { - var bigint_buffer: Value.BigIntSpace = undefined; - const bigint = typed_value.val.toBigInt(&bigint_buffer, target); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; - const start = code.items.len; - try code.resize(start + abi_size); - bigint.writeTwosComplement(code.items[start..][0..abi_size], endian); - return Result.ok; - } - switch (info.signedness) { - .unsigned => { - if (info.bits <= 16) { - const x = @intCast(u16, typed_value.val.toUnsignedInt(target)); - mem.writeInt(u16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(u32, typed_value.val.toUnsignedInt(target)); - mem.writeInt(u32, try code.addManyAsArray(4), x, endian); - } else { - const x = typed_value.val.toUnsignedInt(target); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - } - }, - .signed => { - if (info.bits <= 16) { - const x = @intCast(i16, typed_value.val.toSignedInt(target)); - mem.writeInt(i16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(i32, typed_value.val.toSignedInt(target)); - mem.writeInt(i32, try code.addManyAsArray(4), x, endian); - } else { - const x = typed_value.val.toSignedInt(target); - mem.writeInt(i64, try code.addManyAsArray(8), x, endian); - } - }, - } - return Result.ok; - }, - .Enum => { - var int_buffer: Value.Payload.U64 = undefined; - const int_val = typed_value.enumToInt(&int_buffer); + .error_union => |error_union| { + const payload_ty = typed_value.ty.errorUnionPayload(mod); + const err_val = switch (error_union.val) { + .err_name => |err_name| @intCast(u16, try mod.getErrorValue(err_name)), + .payload => @as(u16, 0), + }; - const info = typed_value.ty.intInfo(target); - if (info.bits <= 8) { - const x = @intCast(u8, int_val.toUnsignedInt(target)); - try code.append(x); - return Result.ok; - } - if (info.bits > 64) { - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for big int enums ('{}')", - .{typed_value.ty.fmt(mod)}, - ), - }; - } - switch (info.signedness) { - .unsigned => { - if (info.bits <= 16) { - const x = @intCast(u16, int_val.toUnsignedInt(target)); - mem.writeInt(u16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(u32, int_val.toUnsignedInt(target)); - mem.writeInt(u32, try code.addManyAsArray(4), x, endian); - } else { - const x = 
int_val.toUnsignedInt(target); - mem.writeInt(u64, try code.addManyAsArray(8), x, endian); - } - }, - .signed => { - if (info.bits <= 16) { - const x = @intCast(i16, int_val.toSignedInt(target)); - mem.writeInt(i16, try code.addManyAsArray(2), x, endian); - } else if (info.bits <= 32) { - const x = @intCast(i32, int_val.toSignedInt(target)); - mem.writeInt(i32, try code.addManyAsArray(4), x, endian); - } else { - const x = int_val.toSignedInt(target); - mem.writeInt(i64, try code.addManyAsArray(8), x, endian); - } - }, - } - return Result.ok; - }, - .Bool => { - const x: u8 = @boolToInt(typed_value.val.toBool()); - try code.append(x); - return Result.ok; - }, - .Struct => { - if (typed_value.ty.containerLayout() == .Packed) { - const struct_obj = typed_value.ty.castTag(.@"struct").?.data; - const fields = struct_obj.fields.values(); - const field_vals = typed_value.val.castTag(.aggregate).?.data; - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; - const current_pos = code.items.len; - try code.resize(current_pos + abi_size); - var bits: u16 = 0; - - for (field_vals, 0..) |field_val, index| { - const field_ty = fields[index].ty; - // pointer may point to a decl which must be marked used - // but can also result in a relocation. Therefore we handle those seperately. - if (field_ty.zigTypeTag() == .Pointer) { - const field_size = math.cast(usize, field_ty.abiSize(target)) orelse return error.Overflow; - var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); - defer tmp_list.deinit(); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = field_val, - }, &tmp_list, debug_output, reloc_info)) { - .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items), - .fail => |em| return Result{ .fail = em }, - } - } else { - field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable; - } - bits += @intCast(u16, field_ty.bitSize(target)); - } - - return Result.ok; + if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) { + try code.writer().writeInt(u16, err_val, endian); + return .ok; } - const struct_begin = code.items.len; - const field_vals = typed_value.val.castTag(.aggregate).?.data; - for (field_vals, 0..) |field_val, index| { - const field_ty = typed_value.ty.structFieldType(index); - if (!field_ty.hasRuntimeBits()) continue; - - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = field_val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - const unpadded_field_end = code.items.len - struct_begin; - - // Pad struct members if required - const padded_field_end = typed_value.ty.structFieldOffset(index + 1, target); - const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow; - - if (padding > 0) { - try code.writer().writeByteNTimes(0, padding); - } - } - - return Result.ok; - }, - .Union => { - const union_obj = typed_value.val.castTag(.@"union").?.data; - const layout = typed_value.ty.unionGetLayout(target); - - if (layout.payload_size == 0) { - return generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty.unionTagType().?, - .val = union_obj.tag, - }, code, debug_output, reloc_info); - } - - // Check if we should store the tag first. 
- if (layout.tag_align >= layout.payload_align) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = typed_value.ty.unionTagType().?, - .val = union_obj.tag, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - const union_ty = typed_value.ty.cast(Type.Payload.Union).?.data; - const field_index = typed_value.ty.unionTagFieldIndex(union_obj.tag, mod).?; - assert(union_ty.haveFieldTypes()); - const field_ty = union_ty.fields.values()[field_index].ty; - if (!field_ty.hasRuntimeBits()) { - try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); - } else { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = field_ty, - .val = union_obj.val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - - const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(target)) orelse return error.Overflow; - if (padding > 0) { - try code.writer().writeByteNTimes(0, padding); - } - } - - if (layout.tag_size > 0) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = union_ty.tag_ty, - .val = union_obj.tag, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } - - if (layout.padding > 0) { - try code.writer().writeByteNTimes(0, layout.padding); - } - - return Result.ok; - }, - .Optional => { - var opt_buf: Type.Payload.ElemType = undefined; - const payload_type = typed_value.ty.optionalChild(&opt_buf); - const is_pl = !typed_value.val.isNull(); - const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; - - if (!payload_type.hasRuntimeBits()) { - try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size); - return Result.ok; - } - - if (typed_value.ty.optionalReprIsPayload()) { - if (typed_value.val.castTag(.opt_payload)) |payload| { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_type, - .val = payload.data, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } else if (!typed_value.val.isNull()) { - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_type, - .val = typed_value.val, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - } else { - try code.writer().writeByteNTimes(0, abi_size); - } - - return Result.ok; - } - - const padding = abi_size - (math.cast(usize, payload_type.abiSize(target)) orelse return error.Overflow) - 1; - const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef); - switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_type, - .val = value, - }, code, debug_output, reloc_info)) { - .ok => {}, - .fail => |em| return Result{ .fail = em }, - } - try code.writer().writeByte(@boolToInt(is_pl)); - try code.writer().writeByteNTimes(0, padding); - - return Result.ok; - }, - .ErrorUnion => { - const error_ty = typed_value.ty.errorUnionSet(); - const payload_ty = typed_value.ty.errorUnionPayload(); - const is_payload = typed_value.val.errorUnionIsPayload(); - - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - const err_val = if (is_payload) Value.initTag(.zero) else typed_value.val; - return generateSymbol(bin_file, src_loc, .{ - .ty = error_ty, - .val = err_val, - }, code, debug_output, reloc_info); - } - - const payload_align = payload_ty.abiAlignment(target); - const error_align = 
Type.anyerror.abiAlignment(target);
-            const abi_align = typed_value.ty.abiAlignment(target);
+            const payload_align = payload_ty.abiAlignment(mod);
+            const error_align = Type.anyerror.abiAlignment(mod);
+            const abi_align = typed_value.ty.abiAlignment(mod);
 
             // error value first when its type is larger than the error union's payload
             if (error_align > payload_align) {
-                switch (try generateSymbol(bin_file, src_loc, .{
-                    .ty = error_ty,
-                    .val = if (is_payload) Value.initTag(.zero) else typed_value.val,
-                }, code, debug_output, reloc_info)) {
-                    .ok => {},
-                    .fail => |em| return Result{ .fail = em },
-                }
+                try code.writer().writeInt(u16, err_val, endian);
             }
 
             // emit payload part of the error union
             {
                 const begin = code.items.len;
-                const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.initTag(.undef);
                 switch (try generateSymbol(bin_file, src_loc, .{
                     .ty = payload_ty,
-                    .val = payload_val,
+                    .val = switch (error_union.val) {
+                        .err_name => try mod.intern(.{ .undef = payload_ty.toIntern() }),
+                        .payload => |payload| payload,
+                    }.toValue(),
                 }, code, debug_output, reloc_info)) {
                     .ok => {},
-                    .fail => |em| return Result{ .fail = em },
+                    .fail => |em| return .{ .fail = em },
                 }
                 const unpadded_end = code.items.len - begin;
                 const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
@@ -717,13 +301,7 @@ pub fn generateSymbol(
             // Payload size is larger than error set, so emit our error set last
             if (error_align <= payload_align) {
                 const begin = code.items.len;
-                switch (try generateSymbol(bin_file, src_loc, .{
-                    .ty = error_ty,
-                    .val = if (is_payload) Value.initTag(.zero) else typed_value.val,
-                }, code, debug_output, reloc_info)) {
-                    .ok => {},
-                    .fail => |em| return Result{ .fail = em },
-                }
+                try code.writer().writeInt(u16, err_val, endian);
                 const unpadded_end = code.items.len - begin;
                 const padded_end = mem.alignForwardGeneric(u64, unpadded_end, abi_align);
                 const padding = math.cast(usize, padded_end - unpadded_end) orelse return error.Overflow;
@@ -732,198 +310,386 @@ pub fn generateSymbol(
                 try code.writer().writeByteNTimes(0, padding);
             }
         }
-
-        return Result.ok;
         },
-        .ErrorSet => {
-            switch (typed_value.val.tag()) {
-                .@"error" => {
-                    const name = typed_value.val.getError().?;
-                    const kv = try bin_file.options.module.?.getErrorValue(name);
-                    try code.writer().writeInt(u32, kv.value, endian);
-                },
-                else => {
-                    try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(target)));
-                },
+        .enum_tag => |enum_tag| {
+            const int_tag_ty = typed_value.ty.intTagType(mod);
+            switch (try generateSymbol(bin_file, src_loc, .{
+                .ty = int_tag_ty,
+                .val = try mod.getCoerced(enum_tag.int.toValue(), int_tag_ty),
+            }, code, debug_output, reloc_info)) {
+                .ok => {},
+                .fail => |em| return .{ .fail = em },
             }
-            return Result.ok;
         },
-        .Vector => switch (typed_value.val.tag()) {
-            .bytes => {
-                const bytes = typed_value.val.castTag(.bytes).?.data;
-                const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow;
-                const padding = math.cast(usize, typed_value.ty.abiSize(target) - len) orelse
-                    return error.Overflow;
-                try code.ensureUnusedCapacity(len + padding);
-                code.appendSliceAssumeCapacity(bytes[0..len]);
-                if (padding > 0) try code.writer().writeByteNTimes(0, padding);
-                return Result.ok;
+        .float => |float| switch (float.storage) {
+            .f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(2)),
+            .f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(4)),
+            .f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(8)),
+            .f80 => |f80_val| {
+                writeFloat(f80, f80_val, target, endian, try code.addManyAsArray(10));
+                const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
+                try code.appendNTimes(0, abi_size - 10);
             },
-            .aggregate => {
-                const elem_vals = typed_value.val.castTag(.aggregate).?.data;
-                const elem_ty = typed_value.ty.elemType();
-                const len = math.cast(usize, typed_value.ty.arrayLen()) orelse return error.Overflow;
-                const padding = math.cast(usize, typed_value.ty.abiSize(target) -
-                    (math.divCeil(u64, elem_ty.bitSize(target) * len, 8) catch |err| switch (err) {
-                    error.DivisionByZero => unreachable,
-                    else => |e| return e,
-                })) orelse return error.Overflow;
-                for (elem_vals[0..len]) |elem_val| {
+            .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)),
+        },
+        .ptr => |ptr| {
+            // generate ptr
+            switch (try lowerParentPtr(bin_file, src_loc, switch (ptr.len) {
+                .none => typed_value.val,
+                else => typed_value.val.slicePtr(mod),
+            }.toIntern(), code, debug_output, reloc_info)) {
+                .ok => {},
+                .fail => |em| return .{ .fail = em },
+            }
+            if (ptr.len != .none) {
+                // generate len
+                switch (try generateSymbol(bin_file, src_loc, .{
+                    .ty = Type.usize,
+                    .val = ptr.len.toValue(),
+                }, code, debug_output, reloc_info)) {
+                    .ok => {},
+                    .fail => |em| return Result{ .fail = em },
+                }
+            }
+        },
+        .opt => {
+            const payload_type = typed_value.ty.optionalChild(mod);
+            const payload_val = typed_value.val.optionalValue(mod);
+            const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse return error.Overflow;
+
+            if (typed_value.ty.optionalReprIsPayload(mod)) {
+                if (payload_val) |value| {
                     switch (try generateSymbol(bin_file, src_loc, .{
-                        .ty = elem_ty,
-                        .val = elem_val,
+                        .ty = payload_type,
+                        .val = value,
+                    }, code, debug_output, reloc_info)) {
+                        .ok => {},
+                        .fail => |em| return Result{ .fail = em },
+                    }
+                } else {
+                    try code.writer().writeByteNTimes(0, abi_size);
+                }
+            } else {
+                const padding = abi_size - (math.cast(usize, payload_type.abiSize(mod)) orelse return error.Overflow) - 1;
+                if (payload_type.hasRuntimeBits(mod)) {
+                    const value = payload_val orelse (try mod.intern(.{ .undef = payload_type.toIntern() })).toValue();
+                    switch (try generateSymbol(bin_file, src_loc, .{
+                        .ty = payload_type,
+                        .val = value,
                     }, code, debug_output, reloc_info)) {
                         .ok => {},
                         .fail => |em| return Result{ .fail = em },
                     }
                 }
-                if (padding > 0) try code.writer().writeByteNTimes(0, padding);
-                return Result.ok;
+                try code.writer().writeByte(@boolToInt(payload_val != null));
+                try code.writer().writeByteNTimes(0, padding);
+            }
+        },
+        .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(typed_value.ty.toIntern())) {
+            .array_type => |array_type| switch (aggregate.storage) {
+                .bytes => |bytes| try code.appendSlice(bytes),
+                .elems, .repeated_elem => {
+                    var index: u64 = 0;
+                    var len_including_sentinel =
+                        array_type.len + @boolToInt(array_type.sentinel != .none);
+                    while (index < len_including_sentinel) : (index += 1) {
+                        switch (try generateSymbol(bin_file, src_loc, .{
+                            .ty = array_type.child.toType(),
+                            .val = switch (aggregate.storage) {
+                                .bytes => unreachable,
+                                .elems => |elems| elems[@intCast(usize, index)],
+                                .repeated_elem => |elem| elem,
+                            }.toValue(),
+                        }, code, debug_output, reloc_info)) {
+                            .ok => {},
+                            .fail => |em| return .{ .fail = em },
+                        }
+                    }
+                },
             },
-            .repeated => {
-                const array = typed_value.val.castTag(.repeated).?.data;
-                const elem_ty = typed_value.ty.childType();
-                const len = typed_value.ty.arrayLen();
-                const padding = math.cast(usize, typed_value.ty.abiSize(target) -
-                    (math.divCeil(u64, elem_ty.bitSize(target) * len, 8) catch |err| switch (err) {
+            .vector_type => |vector_type| {
+                switch (aggregate.storage) {
+                    .bytes => |bytes| try code.appendSlice(bytes),
+                    .elems, .repeated_elem => {
+                        var index: u64 = 0;
+                        while (index < vector_type.len) : (index += 1) {
+                            switch (try generateSymbol(bin_file, src_loc, .{
+                                .ty = vector_type.child.toType(),
+                                .val = switch (aggregate.storage) {
+                                    .bytes => unreachable,
+                                    .elems => |elems| elems[@intCast(usize, index)],
+                                    .repeated_elem => |elem| elem,
+                                }.toValue(),
+                            }, code, debug_output, reloc_info)) {
+                                .ok => {},
+                                .fail => |em| return .{ .fail = em },
+                            }
+                        }
+                    },
+                }
+
+                const padding = math.cast(usize, typed_value.ty.abiSize(mod) -
+                    (math.divCeil(u64, vector_type.child.toType().bitSize(mod) * vector_type.len, 8) catch |err| switch (err) {
                     error.DivisionByZero => unreachable,
                     else => |e| return e,
                 })) orelse return error.Overflow;
-                var index: u64 = 0;
-                while (index < len) : (index += 1) {
+                if (padding > 0) try code.writer().writeByteNTimes(0, padding);
+            },
+            .anon_struct_type => |tuple| {
+                const struct_begin = code.items.len;
+                for (tuple.types, tuple.values, 0..) |field_ty, comptime_val, index| {
+                    if (comptime_val != .none) continue;
+                    if (!field_ty.toType().hasRuntimeBits(mod)) continue;
+
+                    const field_val = switch (aggregate.storage) {
+                        .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{
+                            .ty = field_ty,
+                            .storage = .{ .u64 = bytes[index] },
+                        } }),
+                        .elems => |elems| elems[index],
+                        .repeated_elem => |elem| elem,
+                    };
+
                     switch (try generateSymbol(bin_file, src_loc, .{
-                        .ty = elem_ty,
-                        .val = array,
+                        .ty = field_ty.toType(),
+                        .val = field_val.toValue(),
                     }, code, debug_output, reloc_info)) {
                         .ok => {},
                         .fail => |em| return Result{ .fail = em },
                     }
+                    const unpadded_field_end = code.items.len - struct_begin;
+
+                    // Pad struct members if required
+                    const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod);
+                    const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse
+                        return error.Overflow;
+
+                    if (padding > 0) {
+                        try code.writer().writeByteNTimes(0, padding);
+                    }
                 }
-                if (padding > 0) try code.writer().writeByteNTimes(0, padding);
-                return Result.ok;
             },
-            .str_lit => {
-                const str_lit = typed_value.val.castTag(.str_lit).?.data;
-                const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
-                const padding = math.cast(usize, typed_value.ty.abiSize(target) - str_lit.len) orelse
-                    return error.Overflow;
-                try code.ensureUnusedCapacity(str_lit.len + padding);
-                code.appendSliceAssumeCapacity(bytes);
-                if (padding > 0) try code.writer().writeByteNTimes(0, padding);
-                return Result.ok;
+            .struct_type => |struct_type| {
+                const struct_obj = mod.structPtrUnwrap(struct_type.index).?;
+
+                if (struct_obj.layout == .Packed) {
+                    const fields = struct_obj.fields.values();
+                    const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
+                        return error.Overflow;
+                    const current_pos = code.items.len;
+                    try code.resize(current_pos + abi_size);
+                    var bits: u16 = 0;
+
+                    for (fields, 0..) |field, index| {
+                        const field_ty = field.ty;
+
+                        const field_val = switch (aggregate.storage) {
+                            .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{
+                                .ty = field_ty.toIntern(),
+                                .storage = .{ .u64 = bytes[index] },
+                            } }),
+                            .elems => |elems| elems[index],
+                            .repeated_elem => |elem| elem,
+                        };
+
+                        // pointer may point to a decl which must be marked used
+                        // but can also result in a relocation. Therefore we handle those separately.
+                        if (field_ty.zigTypeTag(mod) == .Pointer) {
+                            const field_size = math.cast(usize, field_ty.abiSize(mod)) orelse
+                                return error.Overflow;
+                            var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
+                            defer tmp_list.deinit();
+                            switch (try generateSymbol(bin_file, src_loc, .{
+                                .ty = field_ty,
+                                .val = field_val.toValue(),
+                            }, &tmp_list, debug_output, reloc_info)) {
+                                .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
+                                .fail => |em| return Result{ .fail = em },
+                            }
+                        } else {
+                            field_val.toValue().writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable;
+                        }
+                        bits += @intCast(u16, field_ty.bitSize(mod));
+                    }
+                } else {
+                    const struct_begin = code.items.len;
+                    for (struct_obj.fields.values(), 0..) |field, index| {
+                        const field_ty = field.ty;
+                        if (!field_ty.hasRuntimeBits(mod)) continue;
+
+                        const field_val = switch (mod.intern_pool.indexToKey(typed_value.val.toIntern()).aggregate.storage) {
+                            .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{
+                                .ty = field_ty.toIntern(),
+                                .storage = .{ .u64 = bytes[index] },
+                            } }),
+                            .elems => |elems| elems[index],
+                            .repeated_elem => |elem| elem,
+                        };
+
+                        switch (try generateSymbol(bin_file, src_loc, .{
+                            .ty = field_ty,
+                            .val = field_val.toValue(),
+                        }, code, debug_output, reloc_info)) {
+                            .ok => {},
+                            .fail => |em| return Result{ .fail = em },
+                        }
+                        const unpadded_field_end = code.items.len - struct_begin;
+
+                        // Pad struct members if required
+                        const padded_field_end = typed_value.ty.structFieldOffset(index + 1, mod);
+                        const padding = math.cast(usize, padded_field_end - unpadded_field_end) orelse return error.Overflow;
+
+                        if (padding > 0) {
+                            try code.writer().writeByteNTimes(0, padding);
+                        }
+                    }
+                }
             },
             else => unreachable,
         },
-        else => |tag| return Result{ .fail = try ErrorMsg.create(
-            bin_file.allocator,
-            src_loc,
-            "TODO implement generateSymbol for type '{s}'",
-            .{@tagName(tag)},
-        ) },
+        .un => |un| {
+            const layout = typed_value.ty.unionGetLayout(mod);
+
+            if (layout.payload_size == 0) {
+                return generateSymbol(bin_file, src_loc, .{
+                    .ty = typed_value.ty.unionTagType(mod).?,
+                    .val = un.tag.toValue(),
+                }, code, debug_output, reloc_info);
+            }
+
+            // Check if we should store the tag first.
+            if (layout.tag_align >= layout.payload_align) {
+                switch (try generateSymbol(bin_file, src_loc, .{
+                    .ty = typed_value.ty.unionTagType(mod).?,
+                    .val = un.tag.toValue(),
+                }, code, debug_output, reloc_info)) {
+                    .ok => {},
+                    .fail => |em| return Result{ .fail = em },
+                }
+            }
+
+            const union_ty = mod.typeToUnion(typed_value.ty).?;
+            const field_index = typed_value.ty.unionTagFieldIndex(un.tag.toValue(), mod).?;
+            assert(union_ty.haveFieldTypes());
+            const field_ty = union_ty.fields.values()[field_index].ty;
+            if (!field_ty.hasRuntimeBits(mod)) {
+                try code.writer().writeByteNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow);
+            } else {
+                switch (try generateSymbol(bin_file, src_loc, .{
+                    .ty = field_ty,
+                    .val = un.val.toValue(),
+                }, code, debug_output, reloc_info)) {
+                    .ok => {},
+                    .fail => |em| return Result{ .fail = em },
+                }
+
+                const padding = math.cast(usize, layout.payload_size - field_ty.abiSize(mod)) orelse return error.Overflow;
+                if (padding > 0) {
+                    try code.writer().writeByteNTimes(0, padding);
+                }
+            }
+
+            if (layout.tag_size > 0) {
+                switch (try generateSymbol(bin_file, src_loc, .{
+                    .ty = union_ty.tag_ty,
+                    .val = un.tag.toValue(),
+                }, code, debug_output, reloc_info)) {
+                    .ok => {},
+                    .fail => |em| return Result{ .fail = em },
+                }
+            }
+        },
+        .memoized_call => unreachable,
     }
+    return .ok;
 }
 
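// A sketch (illustrative, not part of this change) of the layout rule the
// error-union and union emission above both follow: whichever member has the
// stricter alignment is emitted first, and the record is padded out to the
// overall ABI alignment. `alignForward` is a stand-in for
// std.mem.alignForwardGeneric.
fn alignForward(addr: u64, alignment: u64) u64 {
    // Round up to the next multiple of `alignment` (assumed a power of two).
    return (addr + alignment - 1) & ~(alignment - 1);
}

fn errorUnionAbiSize(err_size: u64, err_align: u64, payload_size: u64, payload_align: u64) u64 {
    const abi_align = @max(err_align, payload_align);
    // The second member starts at the first offset satisfying its alignment.
    const end = if (err_align > payload_align)
        alignForward(err_size, payload_align) + payload_size
    else
        alignForward(payload_size, err_align) + err_size;
    return alignForward(end, abi_align);
}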
 fn lowerParentPtr(
     bin_file: *link.File,
     src_loc: Module.SrcLoc,
-    typed_value: TypedValue,
-    parent_ptr: Value,
+    parent_ptr: InternPool.Index,
     code: *std.ArrayList(u8),
     debug_output: DebugInfoOutput,
     reloc_info: RelocInfo,
 ) CodeGenError!Result {
-    const target = bin_file.options.target;
-    switch (parent_ptr.tag()) {
-        .field_ptr => {
-            const field_ptr = parent_ptr.castTag(.field_ptr).?.data;
-            return lowerParentPtr(
-                bin_file,
-                src_loc,
-                typed_value,
-                field_ptr.container_ptr,
-                code,
-                debug_output,
-                reloc_info.offset(@intCast(u32, switch (field_ptr.container_ty.zigTypeTag()) {
-                    .Pointer => offset: {
-                        assert(field_ptr.container_ty.isSlice());
-                        var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-                        break :offset switch (field_ptr.field_index) {
-                            0 => 0,
-                            1 => field_ptr.container_ty.slicePtrFieldType(&buf).abiSize(target),
-                            else => unreachable,
-                        };
-                    },
-                    .Struct, .Union => field_ptr.container_ty.structFieldOffset(
-                        field_ptr.field_index,
-                        target,
-                    ),
-                    else => return Result{ .fail = try ErrorMsg.create(
-                        bin_file.allocator,
-                        src_loc,
-                        "TODO implement lowerParentPtr for field_ptr with a container of type {}",
-                        .{field_ptr.container_ty.fmt(bin_file.options.module.?)},
-                    ) },
-                })),
-            );
-        },
-        .elem_ptr => {
-            const elem_ptr = parent_ptr.castTag(.elem_ptr).?.data;
-            return lowerParentPtr(
-                bin_file,
-                src_loc,
-                typed_value,
-                elem_ptr.array_ptr,
-                code,
-                debug_output,
-                reloc_info.offset(@intCast(u32, elem_ptr.index * elem_ptr.elem_ty.abiSize(target))),
-            );
-        },
-        .opt_payload_ptr => {
-            const opt_payload_ptr = parent_ptr.castTag(.opt_payload_ptr).?.data;
-            return lowerParentPtr(
-                bin_file,
-                src_loc,
-                typed_value,
-                opt_payload_ptr.container_ptr,
-                code,
-                debug_output,
-                reloc_info,
-            );
-        },
-        .eu_payload_ptr => {
-            const eu_payload_ptr = parent_ptr.castTag(.eu_payload_ptr).?.data;
-            const pl_ty = eu_payload_ptr.container_ty.errorUnionPayload();
-            return lowerParentPtr(
-                bin_file,
-                src_loc,
-                typed_value,
-                eu_payload_ptr.container_ptr,
-                code,
-                debug_output,
-                reloc_info.offset(@intCast(u32, errUnionPayloadOffset(pl_ty, target))),
-            );
-        },
-        .variable, .decl_ref, .decl_ref_mut => |tag| return lowerDeclRef(
+    const mod = bin_file.options.module.?;
+    const ptr = mod.intern_pool.indexToKey(parent_ptr).ptr;
+    assert(ptr.len == .none);
+    return switch (ptr.addr) {
+        .decl, .mut_decl => try lowerDeclRef(
             bin_file,
             src_loc,
-            typed_value,
-            switch (tag) {
-                .variable => parent_ptr.castTag(.variable).?.data.owner_decl,
-                .decl_ref => parent_ptr.castTag(.decl_ref).?.data,
-                .decl_ref_mut => parent_ptr.castTag(.decl_ref_mut).?.data.decl_index,
+            switch (ptr.addr) {
+                .decl => |decl| decl,
+                .mut_decl => |mut_decl| mut_decl.decl,
                 else => unreachable,
             },
             code,
             debug_output,
             reloc_info,
         ),
-        else => |tag| return Result{ .fail = try ErrorMsg.create(
-            bin_file.allocator,
+        .int => |int| try generateSymbol(bin_file, src_loc, .{
+            .ty = Type.usize,
+            .val = int.toValue(),
+        }, code, debug_output, reloc_info),
+        .eu_payload => |eu_payload| try lowerParentPtr(
+            bin_file,
             src_loc,
-            "TODO implement lowerParentPtr for type '{s}'",
-            .{@tagName(tag)},
-        ) },
-    }
+            eu_payload,
+            code,
+            debug_output,
+            reloc_info.offset(@intCast(u32, errUnionPayloadOffset(
+                mod.intern_pool.typeOf(eu_payload).toType(),
+                mod,
+            ))),
+        ),
+        .opt_payload => |opt_payload| try lowerParentPtr(
+            bin_file,
            src_loc,
+            opt_payload,
+            code,
+            debug_output,
+            reloc_info,
+        ),
+        .elem => |elem| try lowerParentPtr(
+            bin_file,
+            src_loc,
+            elem.base,
+            code,
+            debug_output,
+            reloc_info.offset(@intCast(u32, elem.index *
+                mod.intern_pool.typeOf(elem.base).toType().elemType2(mod).abiSize(mod))),
+        ),
+        .field => |field| {
+            const base_type = mod.intern_pool.indexToKey(mod.intern_pool.typeOf(field.base)).ptr_type.child;
+            return lowerParentPtr(
+                bin_file,
+                src_loc,
+                field.base,
+                code,
+                debug_output,
+                reloc_info.offset(switch (mod.intern_pool.indexToKey(base_type)) {
+                    .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
+                        .One, .Many, .C => unreachable,
+                        .Slice => switch (field.index) {
+                            0 => 0,
+                            1 => @divExact(mod.getTarget().ptrBitWidth(), 8),
+                            else => unreachable,
+                        },
+                    },
+                    .struct_type,
+                    .anon_struct_type,
+                    .union_type,
+                    => @intCast(u32, base_type.toType().structFieldOffset(
+                        @intCast(u32, field.index),
+                        mod,
+                    )),
+                    else => unreachable,
+                }),
+            );
+        },
+        .comptime_field => unreachable,
    };
 }
 
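// Illustrative model of the recursion above: every pointer projection folds a
// byte offset into `reloc_info` on the way down to the root decl or integer
// address. The names below are hypothetical, not from this codebase.
const Projection = union(enum) {
    root, // decl / mut_decl / int: accumulation stops here
    field: Step,
    elem: Step,

    const Step = struct { base: *const Projection, byte_offset: u32 };
};

fn relocOffset(p: *const Projection) u32 {
    return switch (p.*) {
        .root => 0,
        // For .elem the byte_offset would be index * elem_size, precomputed.
        .field, .elem => |s| relocOffset(s.base) + s.byte_offset,
    };
}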
 const RelocInfo = struct {
@@ -938,51 +704,25 @@ const RelocInfo = struct {
 fn lowerDeclRef(
     bin_file: *link.File,
     src_loc: Module.SrcLoc,
-    typed_value: TypedValue,
     decl_index: Module.Decl.Index,
     code: *std.ArrayList(u8),
     debug_output: DebugInfoOutput,
     reloc_info: RelocInfo,
 ) CodeGenError!Result {
+    _ = src_loc;
+    _ = debug_output;
     const target = bin_file.options.target;
-    const module = bin_file.options.module.?;
-    if (typed_value.ty.isSlice()) {
-        // generate ptr
-        var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-        const slice_ptr_field_type = typed_value.ty.slicePtrFieldType(&buf);
-        switch (try generateSymbol(bin_file, src_loc, .{
-            .ty = slice_ptr_field_type,
-            .val = typed_value.val,
-        }, code, debug_output, reloc_info)) {
-            .ok => {},
-            .fail => |em| return Result{ .fail = em },
-        }
-
-        // generate length
-        var slice_len: Value.Payload.U64 = .{
-            .base = .{ .tag = .int_u64 },
-            .data = typed_value.val.sliceLen(module),
-        };
-        switch (try generateSymbol(bin_file, src_loc, .{
-            .ty = Type.usize,
-            .val = Value.initPayload(&slice_len.base),
-        }, code, debug_output, reloc_info)) {
-            .ok => {},
-            .fail => |em| return Result{ .fail = em },
-        }
-
-        return Result.ok;
-    }
+    const mod = bin_file.options.module.?;
     const ptr_width = target.ptrBitWidth();
-    const decl = module.declPtr(decl_index);
-    const is_fn_body = decl.ty.zigTypeTag() == .Fn;
-    if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
+    const decl = mod.declPtr(decl_index);
+    const is_fn_body = decl.ty.zigTypeTag(mod) == .Fn;
+    if (!is_fn_body and !decl.ty.hasRuntimeBits(mod)) {
         try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8));
         return Result.ok;
     }
 
-    module.markDeclAlive(decl);
+    try mod.markDeclAlive(decl);
 
     const vaddr = try bin_file.getDeclVAddr(decl_index, .{
         .parent_atom_index = reloc_info.parent_atom_index,
@@ -1059,16 +799,16 @@ fn genDeclRef(
     tv: TypedValue,
     decl_index: Module.Decl.Index,
 ) CodeGenError!GenResult {
-    const module = bin_file.options.module.?;
-    log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(module), tv.val.fmtValue(tv.ty, module) });
+    const mod = bin_file.options.module.?;
+    log.debug("genDeclRef: ty = {}, val = {}", .{ tv.ty.fmt(mod), tv.val.fmtValue(tv.ty, mod) });
 
     const target = bin_file.options.target;
     const ptr_bits = target.ptrBitWidth();
     const ptr_bytes: u64 = @divExact(ptr_bits, 8);
 
-    const decl = module.declPtr(decl_index);
+    const decl = mod.declPtr(decl_index);
 
-    if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime()) {
+    if (!decl.ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
         const imm: u64 = switch (ptr_bytes) {
             1 => 0xaa,
             2 => 0xaaaa,
@@ -1080,20 +820,20 @@ fn genDeclRef(
     }
 
     // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
-    if (tv.ty.castPtrToFn()) |fn_ty| {
-        if (fn_ty.fnInfo().is_generic) {
-            return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(target) });
+    if (tv.ty.castPtrToFn(mod)) |fn_ty| {
+        if (mod.typeToFunc(fn_ty).?.is_generic) {
+            return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(mod) });
         }
-    } else if (tv.ty.zigTypeTag() == .Pointer) {
-        const elem_ty = tv.ty.elemType2();
-        if (!elem_ty.hasRuntimeBits()) {
-            return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(target) });
+    } else if (tv.ty.zigTypeTag(mod) == .Pointer) {
+        const elem_ty = tv.ty.elemType2(mod);
+        if (!elem_ty.hasRuntimeBits(mod)) {
+            return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(mod) });
         }
     }
 
-    module.markDeclAlive(decl);
+    try mod.markDeclAlive(decl);
 
-    const is_threadlocal = tv.val.isPtrToThreadLocal(module) and !bin_file.options.single_threaded;
+    const is_threadlocal = tv.val.isPtrToThreadLocal(mod) and !bin_file.options.single_threaded;
 
     if (bin_file.cast(link.File.Elf)) |elf_file| {
         const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
@@ -1157,57 +897,56 @@ pub fn genTypedValue(
     arg_tv: TypedValue,
     owner_decl_index: Module.Decl.Index,
 ) CodeGenError!GenResult {
+    const mod = bin_file.options.module.?;
     var typed_value = arg_tv;
-    if (typed_value.val.castTag(.runtime_value)) |rt| {
-        typed_value.val = rt.data;
+    switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) {
+        .runtime_value => |rt| typed_value.val = rt.val.toValue(),
+        else => {},
     }
 
-    const mod = bin_file.options.module.?;
     log.debug("genTypedValue: ty = {}, val = {}", .{
         typed_value.ty.fmt(mod),
         typed_value.val.fmtValue(typed_value.ty, mod),
     });
 
-    if (typed_value.val.isUndef())
+    if (typed_value.val.isUndef(mod))
         return GenResult.mcv(.undef);
 
     const target = bin_file.options.target;
     const ptr_bits = target.ptrBitWidth();
 
-    if (!typed_value.ty.isSlice()) {
-        if (typed_value.val.castTag(.variable)) |payload| {
-            return genDeclRef(bin_file, src_loc, typed_value, payload.data.owner_decl);
-        }
-        if (typed_value.val.castTag(.decl_ref)) |payload| {
-            return genDeclRef(bin_file, src_loc, typed_value, payload.data);
-        }
-        if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
-            return genDeclRef(bin_file, src_loc, typed_value, payload.data.decl_index);
-        }
-    }
+    if (!typed_value.ty.isSlice(mod)) switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) {
+        .ptr => |ptr| switch (ptr.addr) {
+            .decl => |decl| return genDeclRef(bin_file, src_loc, typed_value, decl),
+            .mut_decl => |mut_decl| return genDeclRef(bin_file, src_loc, typed_value, mut_decl.decl),
+            else => {},
+        },
        else => {},
    };
 
-    switch (typed_value.ty.zigTypeTag()) {
+    switch (typed_value.ty.zigTypeTag(mod)) {
         .Void => return GenResult.mcv(.none),
-        .Pointer => switch (typed_value.ty.ptrSize()) {
+        .Pointer => switch (typed_value.ty.ptrSize(mod)) {
             .Slice => {},
-            else => {
-                switch (typed_value.val.tag()) {
-                    .null_value => {
-                        return GenResult.mcv(.{ .immediate = 0 });
-                    },
-                    .int_u64 => {
-                        return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(target) });
+            else => switch (typed_value.val.toIntern()) {
+                .null_value => {
+                    return GenResult.mcv(.{ .immediate = 0 });
+                },
+                .none => {},
+                else => switch (mod.intern_pool.indexToKey(typed_value.val.toIntern())) {
+                    .int => {
+                        return GenResult.mcv(.{ .immediate = typed_value.val.toUnsignedInt(mod) });
                     },
                     else => {},
-                }
+                },
             },
         },
         .Int => {
-            const info = typed_value.ty.intInfo(target);
+            const info = typed_value.ty.intInfo(mod);
             if (info.bits <= ptr_bits) {
                 const unsigned = switch (info.signedness) {
-                    .signed => @bitCast(u64, typed_value.val.toSignedInt(target)),
-                    .unsigned => typed_value.val.toUnsignedInt(target),
+                    .signed => @bitCast(u64, typed_value.val.toSignedInt(mod)),
+                    .unsigned => typed_value.val.toUnsignedInt(mod),
                 };
                 return GenResult.mcv(.{ .immediate = unsigned });
             }
@@ -1216,78 +955,46 @@ pub fn genTypedValue(
            return GenResult.mcv(.{ .immediate = @boolToInt(typed_value.val.toBool()) });
         },
         .Optional => {
-            if (typed_value.ty.isPtrLikeOptional()) {
-                if (typed_value.val.tag() == .null_value) return GenResult.mcv(.{ .immediate = 0 });
-
-                var buf: Type.Payload.ElemType = undefined;
+            if (typed_value.ty.isPtrLikeOptional(mod)) {
                 return genTypedValue(bin_file, src_loc, .{
-                    .ty = typed_value.ty.optionalChild(&buf),
-                    .val = if (typed_value.val.castTag(.opt_payload)) |pl| pl.data else typed_value.val,
+                    .ty = typed_value.ty.optionalChild(mod),
+                    .val = typed_value.val.optionalValue(mod) orelse return GenResult.mcv(.{ .immediate = 0 }),
                 }, owner_decl_index);
-            } else if (typed_value.ty.abiSize(target) == 1) {
-                return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull()) });
+            } else if (typed_value.ty.abiSize(mod) == 1) {
+                return GenResult.mcv(.{ .immediate = @boolToInt(!typed_value.val.isNull(mod)) });
             }
         },
         .Enum => {
-            if (typed_value.val.castTag(.enum_field_index)) |field_index| {
-                switch (typed_value.ty.tag()) {
-                    .enum_simple => {
-                        return GenResult.mcv(.{ .immediate = field_index.data });
-                    },
-                    .enum_numbered, .enum_full, .enum_nonexhaustive => {
-                        const enum_values = if (typed_value.ty.castTag(.enum_numbered)) |pl|
-                            pl.data.values
-                        else
-                            typed_value.ty.cast(Type.Payload.EnumFull).?.data.values;
-                        if (enum_values.count() != 0) {
-                            const tag_val = enum_values.keys()[field_index.data];
-                            var buf: Type.Payload.Bits = undefined;
-                            return genTypedValue(bin_file, src_loc, .{
-                                .ty = typed_value.ty.intTagType(&buf),
-                                .val = tag_val,
-                            }, owner_decl_index);
-                        } else {
-                            return GenResult.mcv(.{ .immediate = field_index.data });
-                        }
-                    },
-                    else => unreachable,
-                }
-            } else {
-                var int_tag_buffer: Type.Payload.Bits = undefined;
-                const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer);
-                return genTypedValue(bin_file, src_loc, .{
-                    .ty = int_tag_ty,
-                    .val = typed_value.val,
-                }, owner_decl_index);
-            }
+            const enum_tag = mod.intern_pool.indexToKey(typed_value.val.toIntern()).enum_tag;
+            const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
+            return genTypedValue(bin_file, src_loc, .{
+                .ty = int_tag_ty.toType(),
+                .val = enum_tag.int.toValue(),
+            }, owner_decl_index);
         },
         .ErrorSet => {
-            switch (typed_value.val.tag()) {
-                .@"error" => {
-                    const err_name = typed_value.val.castTag(.@"error").?.data.name;
-                    const module = bin_file.options.module.?;
-                    const global_error_set = module.global_error_set;
-                    const error_index = global_error_set.get(err_name).?;
-                    return GenResult.mcv(.{ .immediate = error_index });
-                },
-                else => {
-                    // In this case we are rendering an error union which has a 0 bits payload.
-                    return GenResult.mcv(.{ .immediate = 0 });
-                },
-            }
+            const err_name = mod.intern_pool.indexToKey(typed_value.val.toIntern()).err.name;
+            const error_index = mod.global_error_set.getIndex(err_name).?;
+            return GenResult.mcv(.{ .immediate = error_index });
         },
         .ErrorUnion => {
-            const error_type = typed_value.ty.errorUnionSet();
-            const payload_type = typed_value.ty.errorUnionPayload();
-            const is_pl = typed_value.val.errorUnionIsPayload();
-
-            if (!payload_type.hasRuntimeBitsIgnoreComptime()) {
+            const err_type = typed_value.ty.errorUnionSet(mod);
+            const payload_type = typed_value.ty.errorUnionPayload(mod);
+            if (!payload_type.hasRuntimeBitsIgnoreComptime(mod)) {
                 // We use the error type directly as the type.
-                const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero);
-                return genTypedValue(bin_file, src_loc, .{
-                    .ty = error_type,
-                    .val = err_val,
-                }, owner_decl_index);
+                switch (mod.intern_pool.indexToKey(typed_value.val.toIntern()).error_union.val) {
+                    .err_name => |err_name| return genTypedValue(bin_file, src_loc, .{
+                        .ty = err_type,
+                        .val = (try mod.intern(.{ .err = .{
+                            .ty = err_type.toIntern(),
+                            .name = err_name,
+                        } })).toValue(),
+                    }, owner_decl_index),
+                    .payload => return genTypedValue(bin_file, src_loc, .{
+                        .ty = Type.err_int,
+                        .val = try mod.intValue(Type.err_int, 0),
+                    }, owner_decl_index),
+                }
             }
         },
 
@@ -1306,23 +1013,23 @@ pub fn genTypedValue(
     return genUnnamedConst(bin_file, src_loc, typed_value, owner_decl_index);
 }
 
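// Summary of the dispatch above (illustrative): genTypedValue tries to fold a
// comptime-known value into an immediate machine-code value and only falls
// back to genUnnamedConst (a symbol in memory) when it cannot:
//   - void / zero-bit            -> .none
//   - non-slice pointer to int   -> .immediate (the address itself)
//   - int that fits in ptr_bits  -> .immediate (signed values bit-cast to u64)
//   - bool                       -> .immediate 0 or 1
//   - enum                       -> recurse on its integer tag value
//   - error                      -> .immediate (index in the global error set)
//   - error union, 0-bit payload -> recurse on the error value (or 0)
//   - anything else              -> genUnnamedConst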
-pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 {
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0;
-    const payload_align = payload_ty.abiAlignment(target);
-    const error_align = Type.anyerror.abiAlignment(target);
-    if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime()) {
+pub fn errUnionPayloadOffset(payload_ty: Type, mod: *Module) u64 {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
+    const payload_align = payload_ty.abiAlignment(mod);
+    const error_align = Type.anyerror.abiAlignment(mod);
+    if (payload_align >= error_align or !payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         return 0;
     } else {
-        return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(target), payload_align);
+        return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(mod), payload_align);
     }
 }
 
-pub fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u64 {
-    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return 0;
-    const payload_align = payload_ty.abiAlignment(target);
-    const error_align = Type.anyerror.abiAlignment(target);
-    if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime()) {
-        return mem.alignForwardGeneric(u64, payload_ty.abiSize(target), error_align);
+pub fn errUnionErrorOffset(payload_ty: Type, mod: *Module) u64 {
+    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) return 0;
+    const payload_align = payload_ty.abiAlignment(mod);
+    const error_align = Type.anyerror.abiAlignment(mod);
+    if (payload_align >= error_align and payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+        return mem.alignForwardGeneric(u64, payload_ty.abiSize(mod), error_align);
     } else {
         return 0;
     }
 }
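// Worked example for the two functions above, assuming anyerror has ABI size
// and alignment 2 (the u16 error writes earlier in this file imply as much):
//   payload u64 (size 8, align 8): payload_align >= error_align, so
//     errUnionPayloadOffset = 0
//     errUnionErrorOffset   = alignForwardGeneric(8, 2) = 8
//   payload u8 (size 1, align 1): payload_align < error_align, so
//     errUnionPayloadOffset = alignForwardGeneric(2, 1) = 2
//     errUnionErrorOffset   = 0
//   zero-bit payload: both return 0 and only the error value is stored.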
 diff --git a/src/codegen/c.zig b/src/codegen/c.zig
 index 86b74b1429..c1b7bd72b1 100644
 --- a/src/codegen/c.zig
 +++ b/src/codegen/c.zig
@@ -16,6 +16,7 @@ const trace = @import("../tracy.zig").trace;
 const LazySrcLoc = Module.LazySrcLoc;
 const Air = @import("../Air.zig");
 const Liveness = @import("../Liveness.zig");
+const InternPool = @import("../InternPool.zig");
 
 const BigIntLimb = std.math.big.Limb;
 const BigInt = std.math.big.int;
@@ -256,7 +257,7 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) {
     return .{ .data = ident };
 }
 
-/// This data is available when outputting .c code for a `*Module.Fn`.
+/// This data is available when outputting .c code for a `Module.Fn.Index`.
 /// It is not available when generating .h file.
 pub const Function = struct {
     air: Air,
@@ -267,7 +268,7 @@ pub const Function = struct {
     next_block_index: usize = 0,
     object: Object,
     lazy_fns: LazyFnMap,
-    func: *Module.Fn,
+    func_index: Module.Fn.Index,
     /// All the locals, to be emitted at the top of the function.
     locals: std.ArrayListUnmanaged(Local) = .{},
     /// Which locals are available for reuse, based on Type.
@@ -285,10 +286,11 @@ pub const Function = struct {
         const gop = try f.value_map.getOrPut(inst);
         if (gop.found_existing) return gop.value_ptr.*;
 
-        const val = f.air.value(ref).?;
-        const ty = f.air.typeOf(ref);
+        const mod = f.object.dg.module;
+        const val = (try f.air.value(ref, mod)).?;
+        const ty = f.typeOf(ref);
 
-        const result: CValue = if (lowersToArray(ty, f.object.dg.module.getTarget())) result: {
+        const result: CValue = if (lowersToArray(ty, mod)) result: {
             const writer = f.object.code_header.writer();
             const alignment = 0;
             const decl_c_value = try f.allocLocalValue(ty, alignment);
@@ -318,11 +320,11 @@ pub const Function = struct {
     /// those which go into `allocs`. This function does not add the resulting local into `allocs`;
     /// that responsibility lies with the caller.
     fn allocLocalValue(f: *Function, ty: Type, alignment: u32) !CValue {
+        const mod = f.object.dg.module;
         const gpa = f.object.dg.gpa;
-        const target = f.object.dg.module.getTarget();
         try f.locals.append(gpa, .{
             .cty_idx = try f.typeToIndex(ty, .complete),
-            .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)),
+            .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
         });
         return .{ .new_local = @intCast(LocalIndex, f.locals.items.len - 1) };
     }
@@ -336,10 +338,10 @@ pub const Function = struct {
     /// Only allocates the local; does not print anything. Will attempt to re-use locals, so should
     /// not be used for persistent locals (i.e. those in `allocs`).
     fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: u32) !CValue {
-        const target = f.object.dg.module.getTarget();
+        const mod = f.object.dg.module;
         if (f.free_locals_map.getPtr(.{
             .cty_idx = try f.typeToIndex(ty, .complete),
-            .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(target)),
+            .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(mod)),
         })) |locals_list| {
             if (locals_list.popOrNull()) |local_entry| {
                 return .{ .new_local = local_entry.key };
@@ -352,8 +354,9 @@ pub const Function = struct {
     fn writeCValue(f: *Function, w: anytype, c_value: CValue, location: ValueRenderLocation) !void {
         switch (c_value) {
             .constant => |inst| {
-                const ty = f.air.typeOf(inst);
-                const val = f.air.value(inst).?;
+                const mod = f.object.dg.module;
+                const ty = f.typeOf(inst);
+                const val = (try f.air.value(inst, mod)).?;
                 return f.object.dg.renderValue(w, ty, val, location);
             },
             .undef => |ty| return f.object.dg.renderValue(w, ty, Value.undef, location),
@@ -364,8 +367,9 @@ pub const Function = struct {
     fn writeCValueDeref(f: *Function, w: anytype, c_value: CValue) !void {
         switch (c_value) {
             .constant => |inst| {
-                const ty = f.air.typeOf(inst);
-                const val = f.air.value(inst).?;
+                const mod = f.object.dg.module;
+                const ty = f.typeOf(inst);
+                const val = (try f.air.value(inst, mod)).?;
                 try w.writeAll("(*");
                 try f.object.dg.renderValue(w, ty, val, .Other);
                 return w.writeByte(')');
@@ -377,8 +381,9 @@ pub const Function = struct {
     fn writeCValueMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void {
         switch (c_value) {
             .constant => |inst| {
-                const ty = f.air.typeOf(inst);
-                const val = f.air.value(inst).?;
+                const mod = f.object.dg.module;
+                const ty = f.typeOf(inst);
+                const val = (try f.air.value(inst, mod)).?;
                 try f.object.dg.renderValue(w, ty, val, .Other);
                 try w.writeByte('.');
                 return f.writeCValue(w, member, .Other);
@@ -390,8 +395,9 @@ pub const Function = struct {
     fn writeCValueDerefMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void {
         switch (c_value) {
             .constant => |inst| {
-                const ty = f.air.typeOf(inst);
-                const val = f.air.value(inst).?;
+                const mod = f.object.dg.module;
+                const ty = f.typeOf(inst);
+                const val = (try f.air.value(inst, mod)).?;
                 try w.writeByte('(');
                 try f.object.dg.renderValue(w, ty, val, .Other);
                 try w.writeAll(")->");
@@ -446,6 +452,7 @@ pub const Function = struct {
         var promoted = f.object.dg.ctypes.promote(gpa);
         defer f.object.dg.ctypes.demote(promoted);
         const arena = promoted.arena.allocator();
+        const mod = f.object.dg.module;
 
         gop.value_ptr.* = .{
             .fn_name = switch (key) {
@@ -454,12 +461,12 @@ pub const Function = struct {
                 .never_inline,
                 => |owner_decl| try std.fmt.allocPrint(arena, "zig_{s}_{}__{d}", .{
                     @tagName(key),
-                    fmtIdent(mem.span(f.object.dg.module.declPtr(owner_decl).name)),
+                    fmtIdent(mod.intern_pool.stringToSlice(mod.declPtr(owner_decl).name)),
                     @enumToInt(owner_decl),
                 }),
             },
             .data = switch (key) {
-                .tag_name => .{ .tag_name = try data.tag_name.copy(arena) },
+                .tag_name => .{ .tag_name = data.tag_name },
                 .never_tail => .{ .never_tail = data.never_tail },
                 .never_inline => .{ .never_inline = data.never_inline },
             },
@@ -480,6 +487,16 @@ pub const Function = struct {
         f.object.dg.ctypes.deinit(gpa);
         f.object.dg.fwd_decl.deinit();
     }
+
+    fn typeOf(f: *Function, inst: Air.Inst.Ref) Type {
+        const mod = f.object.dg.module;
+        return f.air.typeOf(inst, &mod.intern_pool);
+    }
+
+    fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type {
+        const mod = f.object.dg.module;
+        return f.air.typeOfIndex(inst, &mod.intern_pool);
+    }
 };
 
 /// This data is available when outputting .c code for a `Module`.
@@ -508,8 +525,9 @@ pub const DeclGen = struct {
     fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
         @setCold(true);
+        const mod = dg.module;
         const src = LazySrcLoc.nodeOffset(0);
-        const src_loc = src.toSrcLoc(dg.decl.?);
+        const src_loc = src.toSrcLoc(dg.decl.?, mod);
         dg.error_msg = try Module.ErrorMsg.create(dg.gpa, src_loc, format, args);
         return error.AnalysisFail;
     }
@@ -522,53 +540,28 @@ pub const DeclGen = struct {
         decl_index: Decl.Index,
         location: ValueRenderLocation,
     ) error{ OutOfMemory, AnalysisFail }!void {
-        const decl = dg.module.declPtr(decl_index);
+        const mod = dg.module;
+        const decl = mod.declPtr(decl_index);
         assert(decl.has_tv);
 
         // Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
-        if (ty.isPtrAtRuntime() and !decl.ty.isFnOrHasRuntimeBits()) {
+        if (ty.isPtrAtRuntime(mod) and !decl.ty.isFnOrHasRuntimeBits(mod)) {
             return dg.writeCValue(writer, .{ .undef = ty });
         }
 
         // Chase function values in order to be able to reference the original function.
-        inline for (.{ .function, .extern_fn }) |tag|
-            if (decl.val.castTag(tag)) |func|
-                if (func.data.owner_decl != decl_index)
-                    return dg.renderDeclValue(writer, ty, val, func.data.owner_decl, location);
+        if (decl.val.getFunction(mod)) |func| if (func.owner_decl != decl_index)
+            return dg.renderDeclValue(writer, ty, val, func.owner_decl, location);
+        if (decl.val.getExternFunc(mod)) |extern_func| if (extern_func.decl != decl_index)
+            return dg.renderDeclValue(writer, ty, val, extern_func.decl, location);
 
-        if (decl.val.castTag(.variable)) |var_payload|
-            try dg.renderFwdDecl(decl_index, var_payload.data);
-
-        if (ty.isSlice()) {
-            if (location == .StaticInitializer) {
-                try writer.writeByte('{');
-            } else {
-                try writer.writeByte('(');
-                try dg.renderType(writer, ty);
-                try writer.writeAll("){ .ptr = ");
-            }
-
-            var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-            try dg.renderValue(writer, ty.slicePtrFieldType(&buf), val.slicePtr(), .Initializer);
-
-            var len_pl: Value.Payload.U64 = .{
-                .base = .{ .tag = .int_u64 },
-                .data = val.sliceLen(dg.module),
-            };
-            const len_val = Value.initPayload(&len_pl.base);
-
-            if (location == .StaticInitializer) {
-                return writer.print(", {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)});
-            } else {
-                return writer.print(", .len = {} }}", .{try dg.fmtIntLiteral(Type.usize, len_val, .Other)});
-            }
-        }
+        if (decl.val.getVariable(mod)) |variable| try dg.renderFwdDecl(decl_index, variable);
 
         // We shouldn't cast C function pointers as this is UB (when you call
         // them). The analysis until now should ensure that the C function
         // pointers are compatible. If they are not, then there is a bug
         // somewhere and we should let the C compiler tell us about it.
-        const need_typecast = if (ty.castPtrToFn()) |_| false else !ty.eql(decl.ty, dg.module);
+        const need_typecast = if (ty.castPtrToFn(mod)) |_| false else !ty.childType(mod).eql(decl.ty, mod);
         if (need_typecast) {
             try writer.writeAll("((");
             try dg.renderType(writer, ty);
@@ -579,127 +572,124 @@ pub const DeclGen = struct {
         if (need_typecast) try writer.writeByte(')');
     }
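    // Illustration of the cast decision above (hypothetical decl/type names):
    // a decl reference is wrapped in a C cast only when the pointer's child
    // type differs from the referenced decl's own type, e.g. a *u32 aimed at
    // a [4]u8 decl renders roughly as
    //     ((uint32_t const *)&bytes_decl)
    // while a pointer whose child type already matches renders as a bare
    //     &bytes_decl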
-    // Renders a "parent" pointer by recursing to the root decl/variable
-    // that its contents are defined with respect to.
-    //
-    // Used for .elem_ptr, .field_ptr, .opt_payload_ptr, .eu_payload_ptr
-    fn renderParentPtr(dg: *DeclGen, writer: anytype, ptr_val: Value, ptr_ty: Type, location: ValueRenderLocation) error{ OutOfMemory, AnalysisFail }!void {
-        if (!ptr_ty.isSlice()) {
-            try writer.writeByte('(');
-            try dg.renderType(writer, ptr_ty);
-            try writer.writeByte(')');
-        }
-        switch (ptr_val.tag()) {
-            .int_u64, .one => try writer.print("{x}", .{try dg.fmtIntLiteral(Type.usize, ptr_val, .Other)}),
-            .decl_ref_mut, .decl_ref, .variable => {
-                const decl_index = switch (ptr_val.tag()) {
-                    .decl_ref => ptr_val.castTag(.decl_ref).?.data,
-                    .decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index,
-                    .variable => ptr_val.castTag(.variable).?.data.owner_decl,
+    /// Renders a "parent" pointer by recursing to the root decl/variable
+    /// that its contents are defined with respect to.
+    fn renderParentPtr(
+        dg: *DeclGen,
+        writer: anytype,
+        ptr_val: InternPool.Index,
+        location: ValueRenderLocation,
+    ) error{ OutOfMemory, AnalysisFail }!void {
+        const mod = dg.module;
+        const ptr_ty = mod.intern_pool.typeOf(ptr_val).toType();
+        const ptr_cty = try dg.typeToIndex(ptr_ty, .complete);
+        const ptr = mod.intern_pool.indexToKey(ptr_val).ptr;
+        switch (ptr.addr) {
+            .decl, .mut_decl => try dg.renderDeclValue(
+                writer,
+                ptr_ty,
+                ptr_val.toValue(),
+                switch (ptr.addr) {
+                    .decl => |decl| decl,
+                    .mut_decl => |mut_decl| mut_decl.decl,
+                    else => unreachable,
+                },
+                location,
+            ),
+            .int => |int| try writer.print("{x}", .{
+                try dg.fmtIntLiteral(Type.usize, int.toValue(), .Other),
+            }),
+            .eu_payload, .opt_payload => |base| {
+                const ptr_base_ty = mod.intern_pool.typeOf(base).toType();
+                const base_ty = ptr_base_ty.childType(mod);
+                // Ensure complete type definition is visible before accessing fields.
+                _ = try dg.typeToIndex(base_ty, .complete);
+                const payload_ty = switch (ptr.addr) {
+                    .eu_payload => base_ty.errorUnionPayload(mod),
+                    .opt_payload => base_ty.optionalChild(mod),
                     else => unreachable,
                 };
-                try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index, location);
+                const ptr_payload_ty = try mod.adjustPtrTypeChild(ptr_base_ty, payload_ty);
+                const ptr_payload_cty = try dg.typeToIndex(ptr_payload_ty, .complete);
+                if (ptr_cty != ptr_payload_cty) {
+                    try writer.writeByte('(');
+                    try dg.renderCType(writer, ptr_cty);
+                    try writer.writeByte(')');
+                }
+                try writer.writeAll("&(");
+                try dg.renderParentPtr(writer, base, location);
+                try writer.writeAll(")->payload");
             },
-            .field_ptr => {
-                const target = dg.module.getTarget();
-                const field_ptr = ptr_val.castTag(.field_ptr).?.data;
-
+            .elem => |elem| {
+                const ptr_base_ty = mod.intern_pool.typeOf(elem.base).toType();
+                const elem_ty = ptr_base_ty.elemType2(mod);
+                const ptr_elem_ty = try mod.adjustPtrTypeChild(ptr_base_ty, elem_ty);
+                const ptr_elem_cty = try dg.typeToIndex(ptr_elem_ty, .complete);
+                if (ptr_cty != ptr_elem_cty) {
+                    try writer.writeByte('(');
+                    try dg.renderCType(writer, ptr_cty);
+                    try writer.writeByte(')');
+                }
+                try writer.writeAll("&(");
+                if (mod.intern_pool.indexToKey(ptr_base_ty.toIntern()).ptr_type.flags.size == .One)
+                    try writer.writeByte('*');
+                try dg.renderParentPtr(writer, elem.base, location);
+                try writer.print(")[{d}]", .{elem.index});
+            },
+            .field => |field| {
+                const ptr_base_ty = mod.intern_pool.typeOf(field.base).toType();
+                const base_ty = ptr_base_ty.childType(mod);
                 // Ensure complete type definition is visible before accessing fields.
-                _ = try dg.typeToIndex(field_ptr.container_ty, .complete);
-
-                var container_ptr_pl = ptr_ty.ptrInfo();
-                container_ptr_pl.data.pointee_type = field_ptr.container_ty;
-                const container_ptr_ty = Type.initPayload(&container_ptr_pl.base);
-
-                switch (fieldLocation(
-                    field_ptr.container_ty,
-                    ptr_ty,
-                    @intCast(u32, field_ptr.field_index),
-                    target,
-                )) {
-                    .begin => try dg.renderParentPtr(
-                        writer,
-                        field_ptr.container_ptr,
-                        container_ptr_ty,
-                        location,
-                    ),
-                    .field => |field| {
+                _ = try dg.typeToIndex(base_ty, .complete);
+                const field_ty = switch (mod.intern_pool.indexToKey(base_ty.toIntern())) {
+                    .anon_struct_type, .struct_type, .union_type => base_ty.structFieldType(@intCast(usize, field.index), mod),
+                    .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
+                        .One, .Many, .C => unreachable,
+                        .Slice => switch (field.index) {
+                            Value.slice_ptr_index => base_ty.slicePtrFieldType(mod),
+                            Value.slice_len_index => Type.usize,
+                            else => unreachable,
+                        },
+                    },
+                    else => unreachable,
+                };
+                const ptr_field_ty = try mod.adjustPtrTypeChild(ptr_base_ty, field_ty);
+                const ptr_field_cty = try dg.typeToIndex(ptr_field_ty, .complete);
+                if (ptr_cty != ptr_field_cty) {
+                    try writer.writeByte('(');
+                    try dg.renderCType(writer, ptr_cty);
+                    try writer.writeByte(')');
+                }
+                switch (fieldLocation(base_ty, ptr_ty, @intCast(u32, field.index), mod)) {
+                    .begin => try dg.renderParentPtr(writer, field.base, location),
+                    .field => |name| {
                         try writer.writeAll("&(");
-                        try dg.renderParentPtr(
-                            writer,
-                            field_ptr.container_ptr,
-                            container_ptr_ty,
-                            location,
-                        );
+                        try dg.renderParentPtr(writer, field.base, location);
                         try writer.writeAll(")->");
-                        try dg.writeCValue(writer, field);
+                        try dg.writeCValue(writer, name);
                     },
                     .byte_offset => |byte_offset| {
-                        var u8_ptr_pl = ptr_ty.ptrInfo();
-                        u8_ptr_pl.data.pointee_type = Type.u8;
-                        const u8_ptr_ty = Type.initPayload(&u8_ptr_pl.base);
-
-                        var byte_offset_pl = Value.Payload.U64{
-                            .base = .{ .tag = .int_u64 },
-                            .data = byte_offset,
-                        };
-                        const byte_offset_val = Value.initPayload(&byte_offset_pl.base);
+                        const u8_ptr_ty = try mod.adjustPtrTypeChild(ptr_ty, Type.u8);
+                        const byte_offset_val = try mod.intValue(Type.usize, byte_offset);
 
                         try writer.writeAll("((");
                         try dg.renderType(writer, u8_ptr_ty);
                         try writer.writeByte(')');
-                        try dg.renderParentPtr(
-                            writer,
-                            field_ptr.container_ptr,
-                            container_ptr_ty,
-                            location,
-                        );
+                        try dg.renderParentPtr(writer, field.base, location);
                         try writer.print(" + {})", .{
                             try dg.fmtIntLiteral(Type.usize, byte_offset_val, .Other),
                         });
                     },
                     .end => {
                        try writer.writeAll("((");
-                        try dg.renderParentPtr(
-                            writer,
-                            field_ptr.container_ptr,
-                            container_ptr_ty,
-                            location,
-                        );
+                        try dg.renderParentPtr(writer, field.base, location);
                         try writer.print(") + {})", .{
-                            try dg.fmtIntLiteral(Type.usize, Value.one, .Other),
+                            try dg.fmtIntLiteral(Type.usize, try mod.intValue(Type.usize, 1), .Other),
                        });
                     },
                 }
             },
-            .elem_ptr => {
-                const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
-                var elem_ptr_ty_pl: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .c_mut_pointer },
-                    .data = elem_ptr.elem_ty,
-                };
-                const elem_ptr_ty = Type.initPayload(&elem_ptr_ty_pl.base);
-
-                try writer.writeAll("&(");
-                try dg.renderParentPtr(writer, elem_ptr.array_ptr, elem_ptr_ty, location);
-                try writer.print(")[{d}]", .{elem_ptr.index});
-            },
-            .opt_payload_ptr, .eu_payload_ptr => {
-                const payload_ptr = ptr_val.cast(Value.Payload.PayloadPtr).?.data;
-                var container_ptr_ty_pl: Type.Payload.ElemType = .{
-                    .base = .{ .tag = .c_mut_pointer },
-                    .data = payload_ptr.container_ty,
-                };
-                const container_ptr_ty = Type.initPayload(&container_ptr_ty_pl.base);
-
-                // Ensure complete type definition is visible before accessing fields.
-                _ = try dg.typeToIndex(payload_ptr.container_ty, .complete);
-
-                try writer.writeAll("&(");
-                try dg.renderParentPtr(writer, payload_ptr.container_ptr, container_ptr_ty, location);
-                try writer.writeAll(")->payload");
-            },
-            else => unreachable,
+            .comptime_field => unreachable,
         }
     }
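    // Hypothetical examples of the C this produces (names invented): the .elem
    // branch for a pointer to element 3 of an array decl emits roughly
    //     &(arr)[3]
    // the .eu_payload/.opt_payload branches emit
    //     &(base_ptr)->payload
    // and the .field branch dispatches through fieldLocation, yielding either
    //     &(base_ptr)->field_name           (.field)
    // or a raw byte-offset form
    //     ((uint8_t *)base_ptr + 4)         (.byte_offset)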
@@ -710,23 +700,25 @@ pub const DeclGen = struct {
         arg_val: Value,
         location: ValueRenderLocation,
     ) error{ OutOfMemory, AnalysisFail }!void {
+        const mod = dg.module;
         var val = arg_val;
-        if (val.castTag(.runtime_value)) |rt| {
-            val = rt.data;
+        switch (mod.intern_pool.indexToKey(val.ip_index)) {
+            .runtime_value => |rt| val = rt.val.toValue(),
+            else => {},
         }
-        const target = dg.module.getTarget();
+        const target = mod.getTarget();
         const initializer_type: ValueRenderLocation = switch (location) {
             .StaticInitializer => .StaticInitializer,
             else => .Initializer,
         };
 
-        const safety_on = switch (dg.module.optimizeMode()) {
+        const safety_on = switch (mod.optimizeMode()) {
             .Debug, .ReleaseSafe => true,
             .ReleaseFast, .ReleaseSmall => false,
         };
 
-        if (val.isUndefDeep()) {
-            switch (ty.zigTypeTag()) {
+        if (val.isUndefDeep(mod)) {
+            switch (ty.zigTypeTag(mod)) {
                 .Bool => {
                     if (safety_on) {
                         return writer.writeAll("0xaa");
@@ -737,8 +729,8 @@ pub const DeclGen = struct {
                 .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val, location)}),
                 .Float => {
                     const bits = ty.floatBits(target);
-                    var repr_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits };
-                    const repr_ty = Type.initPayload(&repr_pl.base);
+                    // All unsigned ints matching float types are pre-allocated.
+                    const repr_ty = mod.intType(.unsigned, bits) catch unreachable;
 
                     try writer.writeAll("zig_cast_");
                     try dg.renderTypeForBuiltinFnName(writer, ty);
@@ -757,7 +749,7 @@ pub const DeclGen = struct {
                     try dg.renderValue(writer, repr_ty, Value.undef, .FunctionArgument);
                     return writer.writeByte(')');
                 },
-                .Pointer => if (ty.isSlice()) {
+                .Pointer => if (ty.isSlice(mod)) {
                     if (!location.isInitializer()) {
                         try writer.writeByte('(');
                         try dg.renderType(writer, ty);
@@ -765,8 +757,7 @@ pub const DeclGen = struct {
                     }
 
                     try writer.writeAll("{(");
-                    var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-                    const ptr_ty = ty.slicePtrFieldType(&buf);
+                    const ptr_ty = ty.slicePtrFieldType(mod);
                     try dg.renderType(writer, ptr_ty);
                     return writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
                 } else {
@@ -775,14 +766,13 @@ pub const DeclGen = struct {
                     return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
                 },
                 .Optional => {
-                    var opt_buf: Type.Payload.ElemType = undefined;
-                    const payload_ty = ty.optionalChild(&opt_buf);
+                    const payload_ty = ty.optionalChild(mod);
 
-                    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+                    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                         return dg.renderValue(writer, Type.bool, val, location);
                     }
 
-                    if (ty.optionalReprIsPayload()) {
+                    if (ty.optionalReprIsPayload(mod)) {
                         return dg.renderValue(writer, payload_ty, val, location);
                     }
@@ -798,7 +788,7 @@ pub const DeclGen = struct {
                     try dg.renderValue(writer, Type.bool, val, initializer_type);
                     return writer.writeAll(" }");
                 },
-                .Struct => switch (ty.containerLayout()) {
+                .Struct => switch (ty.containerLayout(mod)) {
                     .Auto, .Extern => {
                         if (!location.isInitializer()) {
                             try writer.writeByte('(');
@@ -808,10 +798,10 @@ pub const DeclGen = struct {
                         try writer.writeByte('{');
                         var empty = true;
-                        for (0..ty.structFieldCount()) |field_i| {
-                            if (ty.structFieldIsComptime(field_i)) continue;
-                            const field_ty = ty.structFieldType(field_i);
-                            if (!field_ty.hasRuntimeBits()) continue;
+                        for (0..ty.structFieldCount(mod)) |field_i| {
+                            if (ty.structFieldIsComptime(field_i, mod)) continue;
+                            const field_ty = ty.structFieldType(field_i, mod);
+                            if (!field_ty.hasRuntimeBits(mod)) continue;
 
                             if (!empty) try writer.writeByte(',');
                             try dg.renderValue(writer, field_ty, val, initializer_type);
@@ -831,29 +821,29 @@ pub const DeclGen = struct {
                         }
 
                         try writer.writeByte('{');
-                        if (ty.unionTagTypeSafety()) |tag_ty| {
-                            const layout = ty.unionGetLayout(target);
+                        if (ty.unionTagTypeSafety(mod)) |tag_ty| {
+                            const layout = ty.unionGetLayout(mod);
                             if (layout.tag_size != 0) {
                                 try writer.writeAll(" .tag = ");
                                 try dg.renderValue(writer, tag_ty, val, initializer_type);
                             }
-                            if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}');
+                            if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}');
                             if (layout.tag_size != 0) try writer.writeByte(',');
                             try writer.writeAll(" .payload = {");
                         }
 
-                        for (ty.unionFields().values()) |field| {
-                            if (!field.ty.hasRuntimeBits()) continue;
+                        for (ty.unionFields(mod).values()) |field| {
+                            if (!field.ty.hasRuntimeBits(mod)) continue;
                             try dg.renderValue(writer, field.ty, val, initializer_type);
                             break;
                         }
 
-                        if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}');
+                        if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}');
                         return writer.writeByte('}');
                     },
                 .ErrorUnion => {
-                    const payload_ty = ty.errorUnionPayload();
-                    const error_ty = ty.errorUnionSet();
+                    const payload_ty = ty.errorUnionPayload(mod);
+                    const error_ty = ty.errorUnionSet(mod);
 
-                    if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+                    if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
                         return dg.renderValue(writer, error_ty, val, location);
                     }
@@ -870,11 +860,11 @@ pub const DeclGen = struct {
                     return writer.writeAll(" }");
                 },
                 .Array, .Vector => {
-                    const ai = ty.arrayInfo();
-                    if (ai.elem_type.eql(Type.u8, dg.module)) {
+                    const ai = ty.arrayInfo(mod);
+                    if (ai.elem_type.eql(Type.u8, mod)) {
                         var literal = stringLiteral(writer);
                         try literal.start();
-                        const c_len = ty.arrayLenIncludingSentinel();
+                        const c_len = ty.arrayLenIncludingSentinel(mod);
                         var index: u64 = 0;
                         while (index < c_len) : (index += 1)
                             try literal.writeChar(0xaa);
@@ -887,11 +877,11 @@ pub const DeclGen = struct {
                     }
 
                     try writer.writeByte('{');
-                    const c_len = ty.arrayLenIncludingSentinel();
+                    const c_len = ty.arrayLenIncludingSentinel(mod);
                     var index: u64 = 0;
                     while (index < c_len) : (index += 1) {
                         if (index > 0) try writer.writeAll(", ");
-                        try dg.renderValue(writer, ty.childType(), val, initializer_type);
+                        try dg.renderValue(writer, ty.childType(mod), val, initializer_type);
                     }
                     return writer.writeByte('}');
                 }
@@ -916,23 +906,129 @@ pub const DeclGen = struct {
             }
             unreachable;
         }
-        switch (ty.zigTypeTag()) {
-            .Int => switch (val.tag()) {
-                .field_ptr,
-                .elem_ptr,
-                .opt_payload_ptr,
-                .eu_payload_ptr,
-                .decl_ref_mut,
-                .decl_ref,
-                => try dg.renderParentPtr(writer, val, ty, location),
-                else => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}),
-            },
-            .Float => {
-                const bits = ty.floatBits(target);
-                const f128_val = val.toFloat(f128);
-                var repr_ty_pl = Type.Payload.Bits{ .base = .{ .tag = .int_unsigned }, .data = bits };
-                const repr_ty = Type.initPayload(&repr_ty_pl.base);
+        switch (mod.intern_pool.indexToKey(val.ip_index)) {
+            // types, not values
+            .int_type,
+            .ptr_type,
+            .array_type,
+            .vector_type,
+            .opt_type,
+            .anyframe_type,
+            .error_union_type,
+            .simple_type,
+            .struct_type,
+            .anon_struct_type,
+            .union_type,
+            .opaque_type,
+            .enum_type,
+            .func_type,
+            .error_set_type,
+            .inferred_error_set_type,
+            // memoization, not values
+            .memoized_call,
+            => unreachable,
+
+            .undef, .runtime_value => unreachable, // handled above
+            .simple_value => |simple_value| switch (simple_value) {
+                // non-runtime values
+                .undefined => unreachable,
+                .void => unreachable,
+                .null => unreachable,
+                .empty_struct => unreachable,
+                .@"unreachable" => unreachable,
+                .generic_poison => unreachable,
+
+                .false => try writer.writeAll("false"),
+                .true => try writer.writeAll("true"),
+            },
+            .variable,
+            .extern_func,
+            .func,
+            .enum_literal,
+            .empty_enum_value,
+            => unreachable, // non-runtime values
+            .int => |int| switch (int.storage) {
+                .u64, .i64, .big_int => try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, location)}),
+                .lazy_align, .lazy_size => {
+                    try writer.writeAll("((");
+                    try dg.renderType(writer, ty);
+                    return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
+                },
+            },
+            .err => |err| try writer.print("zig_error_{}", .{
+                fmtIdent(mod.intern_pool.stringToSlice(err.name)),
+            }),
+            .error_union => |error_union| {
+                const payload_ty = ty.errorUnionPayload(mod);
+                const error_ty = ty.errorUnionSet(mod);
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
+                    switch (error_union.val) {
+                        .err_name => |err_name| return dg.renderValue(
+                            writer,
+                            error_ty,
+                            (try mod.intern(.{ .err = .{
+                                .ty = error_ty.toIntern(),
+                                .name = err_name,
+                            } })).toValue(),
+                            location,
+                        ),
+                        .payload => return dg.renderValue(
+                            writer,
+                            Type.err_int,
+                            try mod.intValue(Type.err_int, 0),
+                            location,
+                        ),
+                    }
+                }
+
+                if (!location.isInitializer()) {
+                    try writer.writeByte('(');
+                    try dg.renderType(writer, ty);
+                    try writer.writeByte(')');
+                }
+
+                try writer.writeAll("{ .payload = ");
+                try dg.renderValue(
+                    writer,
+                    payload_ty,
+                    switch (error_union.val) {
+                        .err_name => try mod.intern(.{ .undef = payload_ty.ip_index }),
+                        .payload => |payload| payload,
+                    }.toValue(),
+                    initializer_type,
+                );
+                try writer.writeAll(", .error = ");
+                switch (error_union.val) {
+                    .err_name => |err_name| try dg.renderValue(
+                        writer,
+                        error_ty,
+                        (try mod.intern(.{ .err = .{
+                            .ty = error_ty.toIntern(),
+                            .name = err_name,
+                        } })).toValue(),
+                        location,
+                    ),
+                    .payload => try dg.renderValue(
+                        writer,
+                        Type.err_int,
+                        try mod.intValue(Type.err_int, 0),
+                        location,
+                    ),
+                }
+                try writer.writeAll(" }");
+            },
+            .enum_tag => {
+                const enum_tag = mod.intern_pool.indexToKey(val.ip_index).enum_tag;
+                const int_tag_ty = mod.intern_pool.typeOf(enum_tag.int);
+                try dg.renderValue(writer, int_tag_ty.toType(), enum_tag.int.toValue(), location);
+            },
+            .float => {
+                const bits = ty.floatBits(target);
+                const f128_val = val.toFloat(f128, mod);
+
+                // All unsigned ints matching float types are pre-allocated.
+                const repr_ty = mod.intType(.unsigned, bits) catch unreachable;
 
                 assert(bits <= 128);
                 var repr_val_limbs: [BigInt.calcTwosCompLimbCount(128)]BigIntLimb = undefined;
@@ -943,21 +1039,15 @@ pub const DeclGen = struct {
                 };
 
                 switch (bits) {
-                    16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16))),
-                    32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32))),
-                    64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64))),
-                    80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80))),
+                    16 => repr_val_big.set(@bitCast(u16, val.toFloat(f16, mod))),
+                    32 => repr_val_big.set(@bitCast(u32, val.toFloat(f32, mod))),
+                    64 => repr_val_big.set(@bitCast(u64, val.toFloat(f64, mod))),
+                    80 => repr_val_big.set(@bitCast(u80, val.toFloat(f80, mod))),
                     128 => repr_val_big.set(@bitCast(u128, f128_val)),
                     else => unreachable,
                 }
 
-                var repr_val_pl = Value.Payload.BigInt{
-                    .base = .{
-                        .tag = if (repr_val_big.positive) .int_big_positive else .int_big_negative,
-                    },
-                    .data = repr_val_big.limbs[0..repr_val_big.len],
-                };
-                const repr_val = Value.initPayload(&repr_val_pl.base);
+                const repr_val = try mod.intValue_big(repr_ty, repr_val_big.toConst());
 
                 try writer.writeAll("zig_cast_");
                 try dg.renderTypeForBuiltinFnName(writer, ty);
@@ -968,10 +1058,10 @@ pub const DeclGen = struct {
                 try dg.renderTypeForBuiltinFnName(writer, ty);
                 try writer.writeByte('(');
                 switch (bits) {
-                    16 => try writer.print("{x}", .{val.toFloat(f16)}),
-                    32 => try writer.print("{x}", .{val.toFloat(f32)}),
-                    64 => try writer.print("{x}", .{val.toFloat(f64)}),
-                    80 => try writer.print("{x}", .{val.toFloat(f80)}),
+                    16 => try writer.print("{x}", .{val.toFloat(f16, mod)}),
+                    32 => try writer.print("{x}", .{val.toFloat(f32, mod)}),
+                    64 => try writer.print("{x}", .{val.toFloat(f64, mod)}),
+                    80 => try writer.print("{x}", .{val.toFloat(f80, mod)}),
                     128 => try writer.print("{x}", .{f128_val}),
                     else => unreachable,
                 }
@@ -1011,10 +1101,10 @@ pub const DeclGen = struct {
                 if (std.math.isNan(f128_val)) switch (bits) {
                     // We only actually need to pass the significand, but it will get
                     // properly masked anyway, so just pass the whole value.
-                    16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16))}),
-                    32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32))}),
-                    64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64))}),
-                    80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80))}),
+                    16 => try writer.print("\"0x{x}\"", .{@bitCast(u16, val.toFloat(f16, mod))}),
+                    32 => try writer.print("\"0x{x}\"", .{@bitCast(u32, val.toFloat(f32, mod))}),
+                    64 => try writer.print("\"0x{x}\"", .{@bitCast(u64, val.toFloat(f64, mod))}),
+                    80 => try writer.print("\"0x{x}\"", .{@bitCast(u80, val.toFloat(f80, mod))}),
                    128 => try writer.print("\"0x{x}\"", .{@bitCast(u128, f128_val)}),
                    else => unreachable,
                };
@@ -1023,144 +1113,132 @@ pub const DeclGen = struct {
                 }
                 try writer.print("{x}", .{try dg.fmtIntLiteral(repr_ty, repr_val, location)});
                 if (!empty) try writer.writeByte(')');
-                return;
             },
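            // The float path above round-trips values through their unsigned
            // integer bit pattern so NaN payloads and signed zeroes survive
            // textual C emission. A minimal standalone sketch of the idea
            // (two-argument @bitCast matches the compiler era of this diff):
            //
            //     fn floatRepr(comptime F: type, comptime U: type, value: F) U {
            //         // e.g. floatRepr(f32, u32, x); the C side reconstitutes
            //         // the float with a zig_make_f32-style helper.
            //         return @bitCast(U, value);
            //     }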
-            .Pointer => switch (val.tag()) {
-                .null_value, .zero => if (ty.isSlice()) {
-                    var slice_pl = Value.Payload.Slice{
-                        .base = .{ .tag = .slice },
-                        .data = .{ .ptr = val, .len = Value.undef },
-                    };
-                    const slice_val = Value.initPayload(&slice_pl.base);
-
-                    return dg.renderValue(writer, ty, slice_val, location);
-                } else {
-                    try writer.writeAll("((");
-                    try dg.renderType(writer, ty);
-                    try writer.writeAll(")NULL)");
-                },
-                .variable => {
-                    const decl = val.castTag(.variable).?.data.owner_decl;
-                    return dg.renderDeclValue(writer, ty, val, decl, location);
-                },
-                .slice => {
+            .ptr => |ptr| {
+                if (ptr.len != .none) {
                     if (!location.isInitializer()) {
                         try writer.writeByte('(');
                         try dg.renderType(writer, ty);
                         try writer.writeByte(')');
                     }
-
-                    const slice = val.castTag(.slice).?.data;
-                    var buf: Type.SlicePtrFieldTypeBuffer = undefined;
-
                     try writer.writeByte('{');
-                    try dg.renderValue(writer, ty.slicePtrFieldType(&buf), slice.ptr, initializer_type);
+                }
+                const ptr_location = switch (ptr.len) {
+                    .none => location,
+                    else => initializer_type,
+                };
+                const ptr_ty = switch (ptr.len) {
+                    .none => ty,
+                    else => ty.slicePtrFieldType(mod),
+                };
+                const ptr_val = switch (ptr.len) {
+                    .none => val,
+                    else => val.slicePtr(mod),
+                };
+                switch (ptr.addr) {
+                    .decl, .mut_decl => try dg.renderDeclValue(
+                        writer,
+                        ptr_ty,
+                        ptr_val,
+                        switch (ptr.addr) {
+                            .decl => |decl| decl,
+                            .mut_decl => |mut_decl| mut_decl.decl,
+                            else => unreachable,
+                        },
+                        ptr_location,
+                    ),
+                    .int => |int| {
+                        try writer.writeAll("((");
+                        try dg.renderType(writer, ptr_ty);
+                        try writer.print("){x})", .{
+                            try dg.fmtIntLiteral(Type.usize, int.toValue(), ptr_location),
+                        });
+                    },
+                    .eu_payload,
+                    .opt_payload,
+                    .elem,
+                    .field,
+                    => try dg.renderParentPtr(writer, ptr_val.ip_index, ptr_location),
+                    .comptime_field => unreachable,
+                }
+                if (ptr.len != .none) {
                     try writer.writeAll(", ");
-                    try dg.renderValue(writer, Type.usize, slice.len, initializer_type);
+                    try dg.renderValue(writer, Type.usize, ptr.len.toValue(), initializer_type);
                     try writer.writeByte('}');
-                },
-                .function => {
-                    const func = val.castTag(.function).?.data;
-                    try dg.renderDeclName(writer, func.owner_decl, 0);
-                },
-                .extern_fn => {
-                    const extern_fn = val.castTag(.extern_fn).?.data;
-                    try dg.renderDeclName(writer, extern_fn.owner_decl, 0);
-                },
-                .int_u64, .one, .int_big_positive, .lazy_align, .lazy_size => {
-                    try writer.writeAll("((");
-                    try dg.renderType(writer, ty);
-                    return writer.print("){x})", .{try dg.fmtIntLiteral(Type.usize, val, .Other)});
-                },
-                .field_ptr,
-                .elem_ptr,
-                .opt_payload_ptr,
-                .eu_payload_ptr,
-                .decl_ref_mut,
-                .decl_ref,
-                => try dg.renderParentPtr(writer, val, ty, location),
-                else => unreachable,
+                }
             },
-            .Array, .Vector => {
-                if (location == .FunctionArgument) {
+            .opt => |opt| {
+                const payload_ty = ty.optionalChild(mod);
+
+                const is_null_val = Value.makeBool(opt.val == .none);
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod))
+                    return dg.renderValue(writer, Type.bool, is_null_val, location);
+
+                if (ty.optionalReprIsPayload(mod)) return dg.renderValue(
+                    writer,
+                    payload_ty,
+                    switch (opt.val) {
+                        .none => switch (payload_ty.zigTypeTag(mod)) {
+                            .ErrorSet => try mod.intValue(Type.err_int, 0),
+                            .Pointer => try mod.getCoerced(val, payload_ty),
+                            else => unreachable,
+                        },
+                        else => |payload| payload.toValue(),
                    },
                    location,
                );

                if (!location.isInitializer()) {
                     try writer.writeByte('(');
                     try dg.renderType(writer, ty);
                     try writer.writeByte(')');
                 }

-                // First try specific tag representations for more efficiency.
-                switch (val.tag()) {
-                    .undef, .empty_struct_value, .empty_array => {
-                        const ai = ty.arrayInfo();
-                        try writer.writeByte('{');
-                        if (ai.sentinel) |s| {
-                            try dg.renderValue(writer, ai.elem_type, s, initializer_type);
-                        } else {
-                            try writer.writeByte('0');
-                        }
-                        try writer.writeByte('}');
-                    },
-                    .bytes, .str_lit => |t| {
-                        const bytes = switch (t) {
-                            .bytes => val.castTag(.bytes).?.data,
-                            .str_lit => bytes: {
-                                const str_lit = val.castTag(.str_lit).?.data;
-                                break :bytes dg.module.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
-                            },
-                            else => unreachable,
-                        };
-                        const sentinel = if (ty.sentinel()) |sentinel| @intCast(u8, sentinel.toUnsignedInt(target)) else null;
-                        try writer.print("{s}", .{
-                            fmtStringLiteral(bytes[0..@intCast(usize, ty.arrayLen())], sentinel),
-                        });
-                    },
-                    else => {
-                        // Fall back to generic implementation.
-                        var arena = std.heap.ArenaAllocator.init(dg.gpa);
-                        defer arena.deinit();
-                        const arena_allocator = arena.allocator();
+                try writer.writeAll("{ .payload = ");
+                try dg.renderValue(writer, payload_ty, switch (opt.val) {
+                    .none => try mod.intern(.{ .undef = payload_ty.ip_index }),
+                    else => |payload| payload,
+                }.toValue(), initializer_type);
+                try writer.writeAll(", .is_null = ");
+                try dg.renderValue(writer, Type.bool, is_null_val, initializer_type);
+                try writer.writeAll(" }");
+            },
+            .aggregate => |aggregate| switch (mod.intern_pool.indexToKey(ty.ip_index)) {
+                .array_type, .vector_type => {
+                    if (location == .FunctionArgument) {
+                        try writer.writeByte('(');
+                        try dg.renderType(writer, ty);
+                        try writer.writeByte(')');
+                    }
+                    // Fall back to generic implementation.
 
-                        // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal
-                        const max_string_initializer_len = 65535;
+                    // MSVC throws C2078 if an array of size 65536 or greater is initialized with a string literal
                    const max_string_initializer_len = 65535;
 
-                        const ai = ty.arrayInfo();
-                        if (ai.elem_type.eql(Type.u8, dg.module)) {
-                            if (ai.len <= max_string_initializer_len) {
-                                var literal = stringLiteral(writer);
-                                try literal.start();
-                                var index: usize = 0;
-                                while (index < ai.len) : (index += 1) {
-                                    const elem_val = try val.elemValue(dg.module, arena_allocator, index);
-                                    const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target));
-                                    try literal.writeChar(elem_val_u8);
-                                }
-                                if (ai.sentinel) |s| {
-                                    const s_u8 = @intCast(u8, s.toUnsignedInt(target));
-                                    if (s_u8 != 0) try literal.writeChar(s_u8);
-                                }
-                                try literal.end();
-                            } else {
-                                try writer.writeByte('{');
-                                var index: usize = 0;
-                                while (index < ai.len) : (index += 1) {
-                                    if (index != 0) try writer.writeByte(',');
-                                    const elem_val = try val.elemValue(dg.module, arena_allocator, index);
-                                    const elem_val_u8 = if (elem_val.isUndef()) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(target));
-                                    try writer.print("'\\x{x}'", .{elem_val_u8});
-                                }
-                                if (ai.sentinel) |s| {
-                                    if (index != 0) try writer.writeByte(',');
-                                    try dg.renderValue(writer, ai.elem_type, s, initializer_type);
-                                }
-                                try writer.writeByte('}');
+                    const ai = ty.arrayInfo(mod);
+                    if (ai.elem_type.eql(Type.u8, mod)) {
+                        if (ai.len <= max_string_initializer_len) {
+                            var literal = stringLiteral(writer);
+                            try literal.start();
+                            var index: usize = 0;
+                            while (index < ai.len) : (index += 1) {
+                                const elem_val = try val.elemValue(mod, index);
+                                const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+                                try literal.writeChar(elem_val_u8);
                             }
+                            if (ai.sentinel) |s| {
+                                const s_u8 = @intCast(u8, s.toUnsignedInt(mod));
+                                if (s_u8 != 0) try literal.writeChar(s_u8);
+                            }
+                            try literal.end();
                         } else {
                             try writer.writeByte('{');
                             var index: usize = 0;
                             while (index < ai.len) : (index += 1) {
                                 if (index != 0) try writer.writeByte(',');
-                                const elem_val = try val.elemValue(dg.module, arena_allocator, index);
-                                try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type);
+                                const elem_val = try val.elemValue(mod, index);
+                                const elem_val_u8 = if (elem_val.isUndef(mod)) undefPattern(u8) else @intCast(u8, elem_val.toUnsignedInt(mod));
+                                try writer.print("'\\x{x}'", .{elem_val_u8});
                             }
                             if (ai.sentinel) |s| {
                                 if (index != 0) try writer.writeByte(',');
@@ -1168,122 +1246,22 @@ pub const DeclGen = struct {
                             }
                             try writer.writeByte('}');
                         }
-                    },
-                }
-            },
-            .Bool => {
-                if (val.toBool()) {
-                    return writer.writeAll("true");
-                } else {
-                    return writer.writeAll("false");
-                }
-            },
-            .Optional => {
-                var opt_buf: Type.Payload.ElemType = undefined;
-                const payload_ty = ty.optionalChild(&opt_buf);
-
-                const is_null_val = Value.makeBool(val.tag() == .null_value);
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime())
-                    return dg.renderValue(writer, Type.bool, is_null_val, location);
-
-                if (ty.optionalReprIsPayload()) {
-                    const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else val;
-                    return dg.renderValue(writer, payload_ty, payload_val, location);
-                }
-
-                if (!location.isInitializer()) {
-                    try writer.writeByte('(');
-                    try dg.renderType(writer, ty);
-                    try writer.writeByte(')');
-                }
-
-                const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else Value.undef;
-
-                try writer.writeAll("{ .payload = ");
-                try dg.renderValue(writer, payload_ty, payload_val, initializer_type);
-                try writer.writeAll(", .is_null = ");
-                try dg.renderValue(writer, Type.bool, is_null_val, initializer_type);
-                try writer.writeAll(" }");
-            },
-            .ErrorSet => {
-                if (val.castTag(.@"error")) |error_pl| {
-                    // Error values are already defined by genErrDecls.
-                    try writer.print("zig_error_{}", .{fmtIdent(error_pl.data.name)});
-                } else {
-                    try writer.print("{}", .{try dg.fmtIntLiteral(ty, val, .Other)});
-                }
-            },
-            .ErrorUnion => {
-                const payload_ty = ty.errorUnionPayload();
-                const error_ty = ty.errorUnionSet();
-                const error_val = if (val.errorUnionIsPayload()) Value.zero else val;
-
-                if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
-                    return dg.renderValue(writer, error_ty, error_val, location);
-                }
-
-                if (!location.isInitializer()) {
-                    try writer.writeByte('(');
-                    try dg.renderType(writer, ty);
-                    try writer.writeByte(')');
-                }
-
-                const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef;
-                try writer.writeAll("{ .payload = ");
-                try dg.renderValue(writer, payload_ty, payload_val, initializer_type);
-                try writer.writeAll(", .error = ");
-                try dg.renderValue(writer, error_ty, error_val, initializer_type);
-                try writer.writeAll(" }");
-            },
-            .Enum => {
-                switch (val.tag()) {
-                    .enum_field_index => {
-                        const field_index = val.castTag(.enum_field_index).?.data;
-                        switch (ty.tag()) {
-                            .enum_simple => return writer.print("{d}", .{field_index}),
-                            .enum_full, .enum_nonexhaustive => {
-                                const enum_full = ty.cast(Type.Payload.EnumFull).?.data;
-                                if (enum_full.values.count() != 0) {
-                                    const tag_val = enum_full.values.keys()[field_index];
-                                    return dg.renderValue(writer, enum_full.tag_ty, tag_val, location);
-                                } else {
-                                    return writer.print("{d}", .{field_index});
-                                }
-                            },
-                            .enum_numbered => {
-                                const enum_obj = ty.castTag(.enum_numbered).?.data;
-                                if (enum_obj.values.count() != 0) {
-                                    const tag_val = enum_obj.values.keys()[field_index];
-                                    return dg.renderValue(writer, enum_obj.tag_ty, tag_val, location);
-                                } else {
-                                    return writer.print("{d}", .{field_index});
-                                }
-                            },
-                            else => unreachable,
+                    } else {
+                        try writer.writeByte('{');
+                        var index: usize = 0;
+                        while (index < ai.len) : (index += 1) {
+                            if (index != 0) try writer.writeByte(',');
+                            const elem_val = try val.elemValue(mod, index);
+                            try dg.renderValue(writer, ai.elem_type, elem_val, initializer_type);
                         }
-                    },
-                    else => {
-                        var int_tag_ty_buffer: Type.Payload.Bits = undefined;
-                        const int_tag_ty = ty.intTagType(&int_tag_ty_buffer);
-                        return dg.renderValue(writer, int_tag_ty, val, location);
-                    },
-                }
-            },
-            .Fn => switch (val.tag()) {
-                .function => {
-                    const decl = val.castTag(.function).?.data.owner_decl;
-                    return dg.renderDeclValue(writer, ty, val, decl, location);
+                        if (ai.sentinel) |s| {
+                            if (index != 0) try writer.writeByte(',');
+                            try dg.renderValue(writer, ai.elem_type, s, initializer_type);
+                        }
+                        try writer.writeByte('}');
+                    }
                 },
-                .extern_fn => {
-                    const decl = val.castTag(.extern_fn).?.data.owner_decl;
-                    return dg.renderDeclValue(writer, ty, val, decl, location);
-                },
-                else => unreachable,
-            },
-            .Struct => switch (ty.containerLayout()) {
-                .Auto, .Extern => {
-                    const field_vals = val.castTag(.aggregate).?.data;
-
+                .anon_struct_type => |tuple| {
                     if (!location.isInitializer()) {
                         try writer.writeByte('(');
                         try dg.renderType(writer, ty);
@@ -1292,133 +1270,184 @@ pub const DeclGen = struct {
                     try writer.writeByte('{');
                     var empty = true;
-                    for (field_vals, 0..) 
|field_val, field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + for (tuple.types, tuple.values, 0..) |field_ty, comptime_ty, field_i| { + if (comptime_ty != .none) continue; + if (!field_ty.toType().hasRuntimeBitsIgnoreComptime(mod)) continue; if (!empty) try writer.writeByte(','); - try dg.renderValue(writer, field_ty, field_val, initializer_type); + + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field_ty, + .storage = .{ .u64 = bytes[field_i] }, + } }), + .elems => |elems| elems[field_i], + .repeated_elem => |elem| elem, + }; + try dg.renderValue(writer, field_ty.toType(), field_val.toValue(), initializer_type); empty = false; } try writer.writeByte('}'); }, - .Packed => { - const field_vals = val.castTag(.aggregate).?.data; - const int_info = ty.intInfo(target); - - var bit_offset_ty_pl = Type.Payload.Bits{ - .base = .{ .tag = .int_unsigned }, - .data = Type.smallestUnsignedBits(int_info.bits - 1), - }; - const bit_offset_ty = Type.initPayload(&bit_offset_ty_pl.base); - - var bit_offset_val_pl: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, .data = 0 }; - const bit_offset_val = Value.initPayload(&bit_offset_val_pl.base); - - var eff_num_fields: usize = 0; - for (0..field_vals.len) |field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; - - eff_num_fields += 1; - } - - if (eff_num_fields == 0) { - try writer.writeByte('('); - try dg.renderValue(writer, ty, Value.undef, initializer_type); - try writer.writeByte(')'); - } else if (ty.bitSize(target) > 64) { - // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) - var num_or = eff_num_fields - 1; - while (num_or > 0) : (num_or -= 1) { - try writer.writeAll("zig_or_"); - try dg.renderTypeForBuiltinFnName(writer, ty); - try writer.writeByte('('); - } - - var eff_index: usize = 0; - var needs_closing_paren = false; - for (field_vals, 0..) |field_val, field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; - - const cast_context = IntCastContext{ .value = .{ .value = field_val } }; - if (bit_offset_val_pl.data != 0) { - try writer.writeAll("zig_shl_"); - try dg.renderTypeForBuiltinFnName(writer, ty); + .struct_type => |struct_type| { + const struct_obj = mod.structPtrUnwrap(struct_type.index).?; + switch (struct_obj.layout) { + .Auto, .Extern => { + if (!location.isInitializer()) { try writer.writeByte('('); - try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); - try writer.writeAll(", "); - try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + try dg.renderType(writer, ty); try writer.writeByte(')'); - } else { - try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument); } - if (needs_closing_paren) try writer.writeByte(')'); - if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); + try writer.writeByte('{'); + var empty = true; + for (struct_obj.fields.values(), 0..) 
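+                            // One C initializer element per runtime field: comptime and
+                            // zero-bit fields are skipped, and byte-backed aggregate storage
+                            // is re-interned as an integer value before rendering.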
|field, field_i| { + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; - bit_offset_val_pl.data += field_ty.bitSize(target); - needs_closing_paren = true; - eff_index += 1; - } - } else { - try writer.writeByte('('); - // a << a_off | b << b_off | c << c_off - var empty = true; - for (field_vals, 0..) |field_val, field_i| { - if (ty.structFieldIsComptime(field_i)) continue; - const field_ty = ty.structFieldType(field_i); - if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue; + if (!empty) try writer.writeByte(','); + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field.ty.toIntern(), + .storage = .{ .u64 = bytes[field_i] }, + } }), + .elems => |elems| elems[field_i], + .repeated_elem => |elem| elem, + }; + try dg.renderValue(writer, field.ty, field_val.toValue(), initializer_type); - if (!empty) try writer.writeAll(" | "); - try writer.writeByte('('); - try dg.renderType(writer, ty); - try writer.writeByte(')'); + empty = false; + } + try writer.writeByte('}'); + }, + .Packed => { + const int_info = ty.intInfo(mod); - if (bit_offset_val_pl.data != 0) { - try dg.renderValue(writer, field_ty, field_val, .Other); - try writer.writeAll(" << "); - try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); - } else { - try dg.renderValue(writer, field_ty, field_val, .Other); + const bits = Type.smallestUnsignedBits(int_info.bits - 1); + const bit_offset_ty = try mod.intType(.unsigned, bits); + + var bit_offset: u64 = 0; + var eff_num_fields: usize = 0; + + for (struct_obj.fields.values()) |field| { + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + eff_num_fields += 1; } - bit_offset_val_pl.data += field_ty.bitSize(target); - empty = false; - } - try writer.writeByte(')'); + if (eff_num_fields == 0) { + try writer.writeByte('('); + try dg.renderValue(writer, ty, Value.undef, initializer_type); + try writer.writeByte(')'); + } else if (ty.bitSize(mod) > 64) { + // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off)) + var num_or = eff_num_fields - 1; + while (num_or > 0) : (num_or -= 1) { + try writer.writeAll("zig_or_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + } + + var eff_index: usize = 0; + var needs_closing_paren = false; + for (struct_obj.fields.values(), 0..) 
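+                                // Each runtime field supplies one operand: it is cast to the
+                                // container's integer type, wrapped in zig_shl_* when its bit
+                                // offset is nonzero, and folded into the zig_or_* calls opened above.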
|field, field_i| { + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field.ty.toIntern(), + .storage = .{ .u64 = bytes[field_i] }, + } }), + .elems => |elems| elems[field_i], + .repeated_elem => |elem| elem, + }; + const cast_context = IntCastContext{ .value = .{ .value = field_val.toValue() } }; + if (bit_offset != 0) { + try writer.writeAll("zig_shl_"); + try dg.renderTypeForBuiltinFnName(writer, ty); + try writer.writeByte('('); + try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument); + try writer.writeAll(", "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + try writer.writeByte(')'); + } else { + try dg.renderIntCast(writer, ty, cast_context, field.ty, .FunctionArgument); + } + + if (needs_closing_paren) try writer.writeByte(')'); + if (eff_index != eff_num_fields - 1) try writer.writeAll(", "); + + bit_offset += field.ty.bitSize(mod); + needs_closing_paren = true; + eff_index += 1; + } + } else { + try writer.writeByte('('); + // a << a_off | b << b_off | c << c_off + var empty = true; + for (struct_obj.fields.values(), 0..) |field, field_i| { + if (field.is_comptime) continue; + if (!field.ty.hasRuntimeBitsIgnoreComptime(mod)) continue; + + if (!empty) try writer.writeAll(" | "); + try writer.writeByte('('); + try dg.renderType(writer, ty); + try writer.writeByte(')'); + + const field_val = switch (aggregate.storage) { + .bytes => |bytes| try mod.intern_pool.get(mod.gpa, .{ .int = .{ + .ty = field.ty.toIntern(), + .storage = .{ .u64 = bytes[field_i] }, + } }), + .elems => |elems| elems[field_i], + .repeated_elem => |elem| elem, + }; + + if (bit_offset != 0) { + try dg.renderValue(writer, field.ty, field_val.toValue(), .Other); + try writer.writeAll(" << "); + const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset); + try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument); + } else { + try dg.renderValue(writer, field.ty, field_val.toValue(), .Other); + } + + bit_offset += field.ty.bitSize(mod); + empty = false; + } + try writer.writeByte(')'); + } + }, } }, + else => unreachable, }, - .Union => { - const union_obj = val.castTag(.@"union").?.data; - + .un => |un| { if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); } - const field_i = ty.unionTagFieldIndex(union_obj.tag, dg.module).?; - const field_ty = ty.unionFields().values()[field_i].ty; - const field_name = ty.unionFields().keys()[field_i]; - if (ty.containerLayout() == .Packed) { - if (field_ty.hasRuntimeBits()) { - if (field_ty.isPtrAtRuntime()) { + const field_i = ty.unionTagFieldIndex(un.tag.toValue(), mod).?; + const field_ty = ty.unionFields(mod).values()[field_i].ty; + const field_name = ty.unionFields(mod).keys()[field_i]; + if (ty.containerLayout(mod) == .Packed) { + if (field_ty.hasRuntimeBits(mod)) { + if (field_ty.isPtrAtRuntime(mod)) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); - } else if (field_ty.zigTypeTag() == .Float) { + } else if (field_ty.zigTypeTag(mod) == .Float) { try writer.writeByte('('); try dg.renderType(writer, ty); try writer.writeByte(')'); } - try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); + try dg.renderValue(writer, field_ty, 
un.val.toValue(), initializer_type); } else { try writer.writeAll("0"); } @@ -1426,44 +1455,28 @@ pub const DeclGen = struct { } try writer.writeByte('{'); - if (ty.unionTagTypeSafety()) |tag_ty| { - const layout = ty.unionGetLayout(target); + if (ty.unionTagTypeSafety(mod)) |tag_ty| { + const layout = ty.unionGetLayout(mod); if (layout.tag_size != 0) { try writer.writeAll(" .tag = "); - try dg.renderValue(writer, tag_ty, union_obj.tag, initializer_type); + try dg.renderValue(writer, tag_ty, un.tag.toValue(), initializer_type); } - if (ty.unionHasAllZeroBitFieldTypes()) return try writer.writeByte('}'); + if (ty.unionHasAllZeroBitFieldTypes(mod)) return try writer.writeByte('}'); if (layout.tag_size != 0) try writer.writeByte(','); try writer.writeAll(" .payload = {"); } - if (field_ty.hasRuntimeBits()) { - try writer.print(" .{ } = ", .{fmtIdent(field_name)}); - try dg.renderValue(writer, field_ty, union_obj.val, initializer_type); + if (field_ty.hasRuntimeBits(mod)) { + try writer.print(" .{ } = ", .{fmtIdent(mod.intern_pool.stringToSlice(field_name))}); + try dg.renderValue(writer, field_ty, un.val.toValue(), initializer_type); try writer.writeByte(' '); - } else for (ty.unionFields().values()) |field| { - if (!field.ty.hasRuntimeBits()) continue; + } else for (ty.unionFields(mod).values()) |field| { + if (!field.ty.hasRuntimeBits(mod)) continue; try dg.renderValue(writer, field.ty, Value.undef, initializer_type); break; } - if (ty.unionTagTypeSafety()) |_| try writer.writeByte('}'); + if (ty.unionTagTypeSafety(mod)) |_| try writer.writeByte('}'); try writer.writeByte('}'); }, - - .ComptimeInt => unreachable, - .ComptimeFloat => unreachable, - .Type => unreachable, - .EnumLiteral => unreachable, - .Void => unreachable, - .NoReturn => unreachable, - .Undefined => unreachable, - .Null => unreachable, - .Opaque => unreachable, - - .Frame, - .AnyFrame, - => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{ - @tagName(tag), - }), } } @@ -1478,12 +1491,12 @@ pub const DeclGen = struct { }, ) !void { const store = &dg.ctypes.set; - const module = dg.module; + const mod = dg.module; - const fn_decl = module.declPtr(fn_decl_index); + const fn_decl = mod.declPtr(fn_decl_index); const fn_cty_idx = try dg.typeToIndex(fn_decl.ty, kind); - const fn_info = fn_decl.ty.fnInfo(); + const fn_info = mod.typeToFunc(fn_decl.ty).?; if (fn_info.cc == .Naked) { switch (kind) { .forward => try w.writeAll("zig_naked_decl "), @@ -1491,14 +1504,13 @@ pub const DeclGen = struct { else => unreachable, } } - if (fn_decl.val.castTag(.function)) |func_payload| - if (func_payload.data.is_cold) try w.writeAll("zig_cold "); - if (fn_info.return_type.tag() == .noreturn) try w.writeAll("zig_noreturn "); + if (fn_decl.val.getFunction(mod)) |func| if (func.is_cold) try w.writeAll("zig_cold "); + if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); const trailing = try renderTypePrefix( dg.decl_index, store.*, - module, + mod, w, fn_cty_idx, .suffix, @@ -1512,8 +1524,8 @@ pub const DeclGen = struct { switch (kind) { .forward => {}, - .complete => if (fn_info.alignment > 0) - try w.print(" zig_align_fn({})", .{fn_info.alignment}), + .complete => if (fn_info.alignment.toByteUnitsOptional()) |a| + try w.print(" zig_align_fn({})", .{a}), else => unreachable, } @@ -1525,7 +1537,7 @@ pub const DeclGen = struct { try renderTypeSuffix( dg.decl_index, store.*, - module, + mod, w, fn_cty_idx, .suffix, @@ -1537,8 +1549,8 @@ pub const DeclGen = struct { ); switch (kind) { - .forward => if 
(fn_info.alignment > 0) - try w.print(" zig_align_fn({})", .{fn_info.alignment}), + .forward => if (fn_info.alignment.toByteUnitsOptional()) |a| + try w.print(" zig_align_fn({})", .{a}), .complete => {}, else => unreachable, } @@ -1577,9 +1589,9 @@ pub const DeclGen = struct { fn renderCType(dg: *DeclGen, w: anytype, idx: CType.Index) error{ OutOfMemory, AnalysisFail }!void { const store = &dg.ctypes.set; - const module = dg.module; - _ = try renderTypePrefix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); - try renderTypeSuffix(dg.decl_index, store.*, module, w, idx, .suffix, .{}); + const mod = dg.module; + _ = try renderTypePrefix(dg.decl_index, store.*, mod, w, idx, .suffix, .{}); + try renderTypeSuffix(dg.decl_index, store.*, mod, w, idx, .suffix, .{}); } const IntCastContext = union(enum) { @@ -1619,18 +1631,18 @@ pub const DeclGen = struct { /// | > 64 bit integer | < 64 bit integer | zig_make_